app.js
/**
* Module dependencies.
*/
var ClimService = require("./lib/climService"),
worker = require('./lib/worker.js'),
cluster = require("cluster"),
config = require('config'),
Queue = require('bull'),
redis = require("redis"),
Sidekiq = require("sidekiq");
// Initialize console
var clim = new ClimService(config.server.port);
// Bull queues: one for incoming PhantomJS jobs, one for reporting failed jobs
var workQueue = Queue("jobs_phantomjs", config.redis.port, config.redis.host);
var resultQueue = Queue("errors_phantomjs", config.redis.port, config.redis.host);
// Log uncaught application exceptions and exit
process.on('uncaughtException', function (err) {
clim.console.error("[Exception]", err);
clim.console.error(err.stack);
process.exit(1);
});
// Listen for dying workers
cluster.on('exit', function (worker, code, signal) {
// Replace the dead worker, we're not sentimental
clim.console.log('Worker ' + worker.id + ' died :(');
cluster.fork();
});
if (cluster.isMaster) {
// Count the machine's CPUs
var cpuCount = require('os').cpus().length;
// Create a worker for each CPU
for (var i = 0; i < cpuCount; i += 1) {
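// Give each forked worker its own PhantomJS port (config.rasterizer.port + worker index);
// the child reads it back from process.env.PHANTOMJS_PORT.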
var new_worker_env = {};
new_worker_env["PHANTOMJS_PORT"] = config.rasterizer.port + i;
cluster.fork(new_worker_env);
}
} else {
// Include other modules
var express = require('express');
var RasterizerService = require('./lib/rasterizerService');
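// Prefix this worker's log output with its PhantomJS port so log lines can be told apart.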
clim.addPrefix("[PhantomJS:" + process.env['PHANTOMJS_PORT'] + "]");
// Redis
var redis_client = redis.createClient(config.redis.port, config.redis.host);
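// Select Redis database 12 (assumed to be the database the Sidekiq consumer reads from).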
redis_client.select(12);
// Sidekiq client used to enqueue result jobs for the consumer ('bgjobs' namespace)
var sidekiq = new Sidekiq(redis_client, 'bgjobs');
// Customize job queue event handlers
workQueue.on('completed', function(job){
//clim.console.log("Job #%s completed!", job.jobId);
})
.on('failed', function(job, err){
clim.console.error("Job #%s failed! => " + err.toString(), job.jobId);
resultQueue.add({jobId: job.jobId, msg: err.toString()});
})
.on('progress', function(job, progress){
clim.console.info('\r job #' + job.jobId + ' ' + progress + '% complete');
})
.on('paused', function(){
clim.console.warn("Work queue paused");
})
.on('resumed', function(job){
clim.console.warn("Work queue resumed");
});
// Express app instance; each worker starts its own RasterizerService on its PhantomJS port
var app = express();
app.configure(function(){
app.set('climService', clim);
app.set('rasterizerService', new RasterizerService(config.rasterizer, process.env['PHANTOMJS_PORT'], clim).startService(function(port){
clim.console.log("Worker %s and PhantomJS server http://localhost:" + port + " running!", cluster.worker.id);
}));
});
app.configure('development', function() {
app.use(express.errorHandler({ dumpExceptions: true, showStack: true }));
});
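// Pull jobs off the Bull queue: render each one through the PhantomJS worker,
// then report the outcome back via a Sidekiq job.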
workQueue.process(function(job, jobDone){
worker.processing(app, config.server, job.data, function(err){
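// The callback's single argument is either an Error (failure) or the rendered file path (success).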
var state;
var filePath;
if (err instanceof Error) {
jobDone(err);
state = job.data.error_state;
filePath = null;
} else {
state = job.data.success_state;
filePath = err;
}
try {
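// Enqueue the result for the consumer: job class plus [state, note_id, share, filePath],
// with up to 5 retries on the 'default' queue.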
sidekiq.enqueue(job.data.class_name, [state, job.data.note_id, job.data.share, filePath],{
retry: 5,
queue: 'default'
});
}
catch (err){
clim.console.error(err.toString());
}
clim.console.log("Job done by worker", cluster.worker.id, "jobId",job.jobId);
jobDone();
});
});
}