Using bound promises to ease database querying in node.js

Handling a request in a node application using a database typically involves the following steps:

  • asynchronously obtain a connection from a pool
  • execute asynchronous queries, sequentially or not, probably through a facade you made
  • release the connection

Those steps must be accomplished in parallel with other requests.

Without promises, this is a major source of callback hell (if you made such an application, you see what I mean) and, maybe more importantly, it makes it hard to read in the code how errors are handled and to check the connection is always released.

But with a promise library (I use Bluebird) and a few tricks, this can be made as simple as those examples taken from Miaou :

Example 1 :

db.on(userId)          // get a connection from the pool
.then(db.getUser)      // use it to issue an asynchronous query
.then(function(user){  // then, with the result of the query
    ui.showUser(user); // do something
}).finally(db.off);    // and return the connection to the pool 

Example 2 :

var room; // declared here so it stays in scope for the whole chain
db.on([req.rid, req.uid])        // get the connection
.spread(db.fetchRoomAndUserAuth) // issue a request to get the room
.then(function(r){               // use the room object
    room = r;
    return [ // to issue 3 asynchronous queries
        this.listZebras(room.stripes),
        this.listOpenAccessRequests(room.id),
        this.listRecentUsers(room.id, 50)
    ];
}).spread(function(zebras, requests, recentUsers) {
    // use the 3 arrays returned by the queries
    // (room was saved above: a .spread callback parameter list
    //  only receives the resolved arrays, not earlier results)
    render({ r:room, z:zebras, req:requests, ru:recentUsers });
}).catch(db.NoRowError, function(err){
    // handle a room not found
    res.render('error.jade', { error: "room not found" });
}).catch(function(err){
    // or an error
    send501(err);
}).finally(db.off); // always release the connection

The sequence of operations is easy to read. This user code shows the use of the API and isn’t cluttered by how the API works. No more callback hell, and no doubt the connection is released, whatever errors occur.

Here’s how it works :

  1. First of all, this uses promises. then is the fundamental function of promises according to the spec, catch and finally are other useful ones. If you’re not at least a little familiar with them, it’s time to read an introduction
  2. db is my database facade that I get with a simple db = require('./pgdb.js') followed by an initialization of the pool
  3. The db module defines a Con class, which embeds a connection and on which are called all querying functions
  4. db.on(anyArgument) returns a promise which is resolved when a connection is obtained from the pool and a Con object is created with this connection
  5. This promise is bound to the instance of Con. This makes the connection the context of all the functions used in the promise chain (passed to then/spread/finally)
  6. The argument passed to db.on is used as the resolution value of the promise, and is thus passed to the first function of the chain
  7. All functions of Con.prototype are made available in db
  8. Those tricks allow this simple expression : db.on(userId).then(db.getUser)

db.on is implemented like this :

// returns a promise bound to a connection, available to issue queries
//  The connection must be released using off
// val: any value; it becomes the resolution value of the returned promise,
//  which is how it reaches the first function of the chain
exports.on = function(val){
	var con = new Con(), resolver = Promise.defer();
	pool.connect(function(err, client, done){
		if (err) {
			// no connection could be obtained from the pool
			resolver.reject(err);
		} else {
			// the instance of Con embeds the connection
			//  and the releasing function
			con.client = client;
			con.done = done;
			// val is passed as value in the resolution so that it's available
			//  in the next step of the promise chain
			resolver.resolve(val);
		}
	});
	// the promise is bound to the Con instance and returned
	//  (bluebird's bind: `this` is the Con in all chained callbacks)
	return resolver.promise.bind(con);
}

Here’s all the relevant code of my DB facade (see the complete file on GitHub) :


var pg = require('pg').native,
	Promise = require("bluebird"),
	pool;

// The connection object which is used for all PostgreSQL accesses from other files
function Con(){}
var proto = Con.prototype;

// Error rejected by queryRow when no row was found (select)
//  or affected (insert, delete, update).
// msg: optional message
var NoRowError = exports.NoRowError = function(msg){
	if (msg) this.message = msg;
};
NoRowError.prototype = Object.create(Error.prototype);
// repair the prototype chain: Object.create leaves constructor
//  pointing to Error, and a proper name improves error display
NoRowError.prototype.constructor = NoRowError;
NoRowError.prototype.name = 'NoRowError';

// some querying functions

// Returns a promise of the {id, name, oauthdisplayname, email} row
//  of the player with the given id.
// Rejects with a NoRowError if there's no such player (see queryRow).
proto.getUserById = function(id){
	return this.queryRow('select id, name, oauthdisplayname, email from player where id=$1', [id]);
}

// Returns a promise of the N users who most recently wrote in the room,
//  most recent first, as {id, name, mc} rows
//  (mc being the date of the user's last message in the room)
proto.listRecentUsers = function(roomId, N){
	return this.queryRows(
		"select message.author as id, min(player.name) as name, max(message.created) as mc from message join player on player.id=message.author"+
		" where message.room=$1 group by message.author order by mc desc limit $2", [roomId, N]
	);
}

// must be called before any call to connect
exports.init = function(dbConfig, cb){
	var conString = dbConfig.url;
	pg.defaults.parseInt8 = true;
	pg.connect(conString, function(err, client, done){
		if (err) {
			console.log('Connection to PostgreSQL database failed');
			return;
		}
		done();
		console.log('Connection to PostgreSQL database successful');
		pool = pg.pools.all[JSON.stringify(conString)];
		cb();
	})
}

// returns a promise bound to a connection, available to issue queries
//  The connection must be released using off
exports.on = function(val){
	var con = new Con(), resolver = Promise.defer();
	pool.connect(function(err, client, done){
		if (err) {
			resolver.reject(err);
		} else {
			con.client = client;
			con.done = done;
			resolver.resolve(val);
		}
	});
	return resolver.promise.bind(con);
}

// releases the connection which returns to the pool
// It's ok to call this function more than once
proto.off = function(){
	if (!this.done) return; // already released: nothing to do
	this.done();
	this.done = null; // guard against a double release
}

// Issues a query and resolves with either the first returned row (select)
//  or the number of affected rows (insert, delete, update).
// Rejects with a NoRowError if no row was found or affected.
proto.queryRow = function(sql, args){
	var con = this;
	return new Promise(function(resolve, reject){
		con.client.query(sql, args, function(err, res){
			if (err) return reject(err);
			if (res.rows.length) return resolve(res.rows[0]);
			if (res.rowCount) return resolve(res.rowCount);
			reject(new NoRowError());
		});
	}).bind(con);
}

// Issues a query and resolves with the array of all returned rows.
proto.queryRows = function(sql, args){
	var con = this;
	return new Promise(function(resolve, reject){
		con.client.query(sql, args, function(err, res){
			err ? reject(err) : resolve(res.rows);
		});
	}).bind(con);
}

// expose every function of Con.prototype directly on the module,
//  which makes expressions like db.on(id).then(db.getUserById) possible
Object.keys(proto).forEach(function(fname){
	if (typeof proto[fname] === "function") {
		exports[fname] = proto[fname];
	}
});

Detect the browser’s timezone

Sometimes, you need to know the user time zone and send it to your server so that it may do relevant computations for the user (imagine you want to query in a database the products of a given day according to the user’s timezone). And you don’t want to worry your users by asking them for authorization to use the geolocation API. Your server will probably do the computations using the Olson tables so what you want is the id of a relevant time zone according to those tables.

Finding the user’s time offset from UTC is easy using the Date class. But this gives only the current offset, it doesn’t let you know what will be the offset in six months (will daylight saving time apply ?).

In the future, you’ll probably be able to use the internationalization API whose design is in progress. For example on Chrome you can do

var tzid = Intl.DateTimeFormat().resolved.timeZone;

But this doesn’t work on IE9, IE10 or Firefox.

If you handle dates seriously in your browser, chances are that you already use the Moment.js library which optionally includes functions to deal with time zones as well as the Olson tables.

But Moment.js doesn’t propose any function for the time zone detection. So I made one : tzdetect.js on GitHub.

You use it like this for example :

var tzid = tzdetect.matches()[0]

Demonstration and download

The right mime type for JSON

Let’s immediately answer : the right mime type to use is

application/json

Reference

When you look for the mime type to use when you serve JSON files, chances are you’ll see many answers, like

application/x-javascript
application/json
text/javascript

And chances are you’ll take any of them without thinking too much once you verified it looks OK at first tests.

But it does matter, especially when you use jQuery client side.

Let’s suppose you have this JSON file content :

{"Error":"Site already existing : \"bigbadaboum\""}

Normally, there’s no problem. It’s a perfectly valid JSON file.

If you use a XMLHttpRequest and you fetch it then parse it, everything’s good. The code might look like this (with more error handling and argument passing, probably) :


// plain XMLHttpRequest fetch: the response text is explicitly
//  parsed as JSON, so the served mime type doesn't matter here
var hr = new XMLHttpRequest();
hr.onreadystatechange = function() {
	if (hr.readyState === 4) { // DONE
		if (hr.status === 200) {
			var msg = JSON.parse(hr.responseText);
			// use msg
		}
	}
};
hr.open('GET', url);
hr.send();

Now suppose you use jQuery $.ajax or one of the shortcuts, for example this :


$.post('/json', JSON.stringify(m)).done(success).fail(function(){
	console.log("post failed", arguments)
});

Then it doesn’t work anymore if you use one of the bad mime types I cite : jQuery first reads it as JavaScript. As a JavaScript source file, the \" in what looks like a string literal is replaced by ", which breaks the parsing when the file is, after that, parsed using $.parseJSON (JSON.parse wouldn’t do it better).

In my opinion it should not be automatically parsed as JSON by jQuery when received with a mime type which doesn’t refer to JSON. jQuery does the wrong guess here. But the most fundamental error is server side : use the right mime type !

SpaceBullet

It’s hard to show what you can do, or what new web technologies can do, when all the most impressive programs you make are restrained to some enterprise networks.

That’s why I decided to dedicate a few evenings to the making of an original game which illustrates the power of JavaScript and the Canvas and how the no install, no wait feeling of the web applies to smart games.

Here’s SpaceBullet.

It’s open-source. Apart from JavaScript and Canvas that I already mentioned, it also uses EaselJS to slightly ease canvas drawing, jQuery to make it faster to code the few DOM manipulations, a go server to manage user submissions and Google’s Closure compiler for the compact « production » version. I drew the sprites in SVG which is probably why I have a few problems on Safari and IE. For now I encountered no performance problem even while there are expensive gravitation path computations running in parallel with the 30 FPS screen updating.

The game lets the user make and share his own missions, I’m curious to see if the users who expressed their interest for this feature will really use it.

 

 

 

 

 

 

 

Parse NaN and infinities in JSON strings in JavaScript

The official JSON specification doesn’t allow IEEE754′s NaN or infinities, which is  unfortunate if you happen to work on scientific or industrial matters.

For infinities, you might use some trick, like passing 1e999, which evaluates as Infinity, but  it forces you to build the JSON by hand and you can’t do this for NaN.

For  simple and well defined objects you might use a specific value that you’d replace after parsing. But this is very inconvenient with deep and changing structures.

It’s so evidently unfortunate that some JSON builders like Google’s gson overcome it by allowing them.

But you’re still with the problem of parsing  this « JSON » in JavaScript if you’re sending it to the browser.

There’s the usual solution of calling eval('('+json+')') but it fails on IE with big strings.

That’s why I forked Crockford’s code to add support for NaN, Infinity and -Infinity.

Test it online : JSON.parseMore tester

Check it on GitHub : JSON.parseMore

Parse, transform and write HTML files in Go

A recent task I had was to revive a 1 GB web site, made of more than 15000 files written between 1996 and 1998 with random case as it used to run on Windows.

Solutions in this case might be to mount a FAT disk or to play with Apache settings to ignore case but they’re obviously short sighted. So I wrote a small Go program copying a site, renaming all files to lower case and fixing all internal links.

I could have changed the links with regular expressions but I wanted to test the Go HTML parser, just recently renamed from exp/html to go.net/html.

The result is impressively fast. 20 seconds are enough to parse, transform and rewrite 5800 HTML files and also  copy 11000 other files (mostly images). And as with other Go libraries, it’s very simple.

Install the library :

go get code.google.com/p/go.net/html

As I found no documentation online, I simply launched the godoc tool :

godoc -http :6060

which lets me read the documentation of all the packages I have by pointing my browser to

http://localhost:6060/

Import the go.net/html package into a program :

import "code.google.com/p/go.net/html"

Open and parse an HTML file into a DOM object (a html.Node) :

r, _ := os.Open(path)
defer r.Close()
doc, err := html.Parse(r)

Lowercase all the internal links of a page :

// FixLinks recursively lowercases the internal links of the DOM
// subtree rooted at n by rewriting its href and src attributes.
func FixLinks(n *html.Node) {
	if n.Type == html.ElementNode {
		for i, attr := range n.Attr {
			// no early break: an element may carry both a href
			// and a src attribute, both must be fixed
			if attr.Key == "href" || attr.Key == "src" {
				n.Attr[i].Val = GetFixedURL(attr.Val)
			}
		}
	}
	for c := n.FirstChild; c != nil; c = c.NextSibling {
		FixLinks(c)
	}
}

Write the fixed file :

w, _ := os.Create(dest)
defer w.Close()
html.Render(w, doc)

The whole code is available here : https://github.com/Canop/weblowercaser

Open in POST from JavaScript

If you want to open a page or start a download from JavaScript, you can do

window.open('http://canop.org/blog/?p=220')

but sometimes you don’t want to make a GET request but a POST one, for example when you have many parameters to pass. There is no equivalent simple function for POST requests, that’s why I rolled my own, which builds an in-memory form and submits it :


// Opens an url through an in-memory form, which makes POST requests
//  possible (window.open only does GET).
// Arguments :
//  verb : 'GET'|'POST'
//  url : destination of the request
//  data : optional map of parameters; object values are JSON stringified
//  target : optional window target (defaults to '_self')
canop.open = function(verb, url, data, target) {
  var form = document.createElement("form");
  form.action = url;
  form.method = verb;
  form.target = target || "_self";
  if (data) {
    for (var key in data) {
      // only own properties: don't let inherited keys leak into the request
      if (!Object.prototype.hasOwnProperty.call(data, key)) continue;
      var input = document.createElement("input");
      input.type = "hidden";
      input.name = key;
      input.value = typeof data[key] === "object" ? JSON.stringify(data[key]) : data[key];
      form.appendChild(input);
    }
  }
  form.style.display = 'none';
  // the form must be in the page: IE, even IE10, can't submit a detached form
  document.body.appendChild(form);
  form.submit();
  // once submitted the form is useless: remove it so repeated calls
  //  don't accumulate hidden forms in the document
  document.body.removeChild(form);
};

As I mainly send arguments in JSON to my servers, this function automatically stringifies the data arguments when they’re objects.

Usage examples :


// open a link in the current tab
canop.open('GET', 'http://canop.org/blog/', {p:220});

// download a file
canop.open('POST', 'fileServer.jsp', {request: {key:"42", cols:[2, 3, 34]}});

Note that the only reason the form is added to the page is because of a bug in IE : IE, even IE10, can’t submit a form if it’s not in the page.

Préchargement efficace et indolore de ressources

Mon site de recettes contient de nombreuses images, souvent présentées ensemble, et affichées en haute résolution au passage de la souris. Un diaporama de photos appétissantes est affiché lorsque cela ne risque plus de gêner le chargement des images nécessaires pour les menus ou la consultation agréable des pages.

Cela fait longtemps que je voulais un système de préchargement des images

  • invisible : cela ne sert à rien de précharger si un sablier s’affiche et donne à l’utilisateur le sentiment qu’il doit attendre
  • gérant à la fois des images prioritaires et des images pouvant attendre
  • acceptant que les listes d’images, tant prioritaires que secondaires, puissent être enrichies n’importe quand
  • simple
J’avais en particulier besoin d’accorder une plus haute priorité aux images apparaissant dans les menus, et de ne précharger qu’ensuite celles qui sont utilisées dans le reste du site.
La solution la plus courante est basée sur la création d’éléments Image, mais elle pose un problème rédhibitoire : elle entraîne l’affichage d’un sablier qui provoque l’attente de l’utilisateur.
J’ai préféré l’usage de XmlHttpRequest, la gestion d’une queue à deux entrées (prioritaire ou non) et le parallélisme des chargements (4 téléchargements en parallèle, on pourrait sans doute monter à 6).
Voici le résultat :
// Resource preloading
// usage :  preload( url [ ,priority [,callback] ] )
//   preload(url);  // preload the resource with normal priority
//   preload(url, true); // preload the resource with high priority
//   preload(url, false, callback); // call a callback when the resource is available
var preload = (function(){
	var queue = [], nbActives = 0;
	// launch the next download if a task is queued and a slot is free
	function bip(){
		if (queue.length==0 || nbActives>=4) return;
		nbActives++;
		var req = new XMLHttpRequest(), task=queue.shift();
		req.open("GET", task.src, true);
		req.onload = function () {
			nbActives--;
			bip();
			if (task.callback) task.callback(task.src);
		};
		req.onerror = function () {
			// free the slot on failure too, otherwise a few broken URLs
			//  would permanently stall the whole queue
			nbActives--;
			bip();
		};
		req.send();
	}
	return function(src, priority, callback) {
		// priority tasks jump to the front of the queue
		queue[priority?'unshift':'push']({src:src, callback:callback});
		bip();
	}
})();

Le site obtient maintenant la note de 99/100 sur Google PageSpeed. Obtenir la note maximale nécessiterait de mettre en place une minimisation de la page HTML, une gestion fine du cache par Apache et l’abandon de jQuery, opérations fastidieuses que le nombre de consultations de mon site ne justifie pas.

Visibilité d’une page

L’API de visibilité (lien en Anglais) n’est pas encore très connue, sans doute parce qu’elle n’est pas pleinement normalisée.

Elle permet de savoir si une page est visible (l’onglet est au premier plan, dans un navigateur lui même au premier plan) ou non. Elle permet aussi de s’abonner aux changements de cet état.

Comme je suis en train d’écrire un chat en javascript (il faut prévenir l’utilisateur qui reçoit un message) et que je voulais une API pratique qui pallie les incohérences entre navigateurs, je me suis écrit un petit utilitaire.

Usage :

var visible = vis(); // donne l'état courant
vis(aFunction);   // s'abonne aux changements de visibilité

Exemple :

vis(function(){
	document.title = vis() ? 'Visible' : 'Pas visible';
});

Page de démonstration

 

Code complet :

Full Hipster code...Et au cas où le bloc de code ci dessus ne serait pas pleinement visible :

// small visibility management "library"
//  var visible = vis(); // gives the current state
//  vis(function(){});   // registers a callback on visibility changes
var vis = (function(){
	var stateKey, eventKey, keys = {
		hidden: "visibilitychange",
		webkitHidden: "webkitvisibilitychange",
		mozHidden: "mozvisibilitychange",
		msHidden: "msvisibilitychange"
	};
	// find the (possibly prefixed) property and event supported by this browser
	for (stateKey in keys) {
		if (stateKey in document) {
			eventKey = keys[stateKey];
			break;
		}
	}
	return function(c) {
		// guard: on browsers without the API no key matches, so
		//  registration is a no-op and the page is reported visible
		if (c && eventKey) document.addEventListener(eventKey, c);
		return !document[stateKey];
	}
})();

 

 

Remote console.log, JSON based serialization and instant log

I can hardly code in Javascript without the console, especially the ability to dump and browse objects using console.log.

That’s why I had a really hard time debugging touch events on an Android browser. I always wanted to do

console.log(event);

but there was no easy way to do it and browse my object.

This made me develop a remote console.log tool, enabling me to simply log in the console of another browser on another computer.

Here it is : rol : a Remote Object Log

You can use it like this :

rol(event); // the event appears on any connected browser

In the making, I encountered a few unexpected problems, among them the fact you can’t serialize any object with JSON.stringify, especially the ones I wanted the most, the events.

You may see the problem by typing this in your console :

JSON.stringify(window);

The reasons are

  • that the event structure is cyclic : some property values point to properties up in the tree
  • that the event is very big. It points for example to the window object
  • that some properties can’t always be requested : the getter throws an exception
So I took Crockford’s stringify code and I changed it to address those concerns, at the cost of pruning some of the properties. The result is a strong function allowing you to execute, for example, this code :
JSON.prune(window)

This utility is free too and can be found and tested here : JSON.prune

 

A strong serialization function makes the basis for a deep (but not too deep) clone function, as you can simply do JSON.parse(JSON.prune(someObject));.

This was the opportunity to add another small function, this one aiming at fixing another debugging problem of console.log : the delay between logging and console building by the browser, delay during which the objects can change, leading to many programmer headaches (see this question on SO for example). So I added a JSON.prune.log function :

// make sure someObject is logged as it was at logging time
JSON.prune.log(someObject);