Mirror of https://github.com/flynx/ImageGrid.git (synced 2025-12-26 13:01:58 +00:00)

commit de083ab8e2 (parent: 078aaf0888)

    added merging of multiple indexes on load (not final) + some work on logging...

    Signed-off-by: Alex A. Naanou <alex.nanou@gmail.com>
@@ -169,10 +169,17 @@ function loadJSON(path){
 var loadIndex =
 module.loadIndex =
 function(path, logger){
+	// XXX should this be interactive (a-la EventEmitter) or as it is now
+	//		return the whole thing as a block (Promise)...
+	// NOTE: one way to do this is use the logger, it will get
+	//		each index data on an index event
 	return new Promise(function(resolve, reject){
 		// we've got an index...
 		// XXX do we need to check if if it's a dir???
 		if(pathlib.basename(path) == INDEX_DIR){
+
+			logger && logger.emit('path', path)
+
 			listJSON(path)
 				// XXX handle errors...
 				.on('error', function(err){
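The XXX note above weighs an interactive, EventEmitter-style API against returning everything as one Promise, and settles on emitting progress through the optional logger. For illustration, a minimal caller wiring up such a logger; this is a hedged sketch assuming Node's standard events module, using the event names emitted in these hunks ('path', 'files-found', 'queued'), with an invented index path:

	var events = require('events')

	// hypothetical caller: observe progress events while still
	// receiving the full result as a Promise...
	var logger = new events.EventEmitter()
	logger.on('path', function(path){ console.log('index dir:', path) })
	logger.on('files-found', function(count){ console.log('found:', count) })
	logger.on('queued', function(name){ console.log('queued:', name) })

	loadIndex('./photos/.ImageGrid', logger)	// path invented for the example
		.then(function(res){ console.log('keywords:', Object.keys(res)) })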
@@ -182,6 +189,9 @@ function(path, logger){
 			var res = {}
 			var index = {}
 			var root = {}
+			var queued = 0
+
+			logger && logger.emit('files-found', files.length, files)

 			// group by keyword...
 			//
@@ -231,12 +241,14 @@ function(path, logger){
 				if(index[k] == null){
 					index[k] = [[d, n]]
 					logger && logger.emit('queued', n)
+					queued += 1

 				// do not add anything past the latest non-diff
 				// for each keyword...
 				} else if(index[k].slice(-1)[0][0] == true){
 					index[k].push([d, n])
 					logger && logger.emit('queued', n)
+					queued += 1
 				}
 			})

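To follow the grouping logic: for each keyword k, index[k] accumulates [diff-flag, filename] pairs, and the .slice(-1)[0][0] == true guard stops appending once the last collected entry is a non-diff, per the comment above. A hedged illustration of the resulting shape, with invented file names and illustrative ordering:

	// hypothetical index['data'] after grouping (names invented)...
	index['data'] == [
		[true, 'data-diff-2.json'],	// diff
		[true, 'data-diff-1.json'],	// diff
		[false, 'data.json'],		// latest non-diff, nothing is added past it
	]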
@@ -249,14 +261,17 @@ function(path, logger){
 				if(index[k] == null){
 					index[k] = [[false, n]]
 					logger && logger.emit('queued', n)
+					queued += 1

 				// add root file if no base is found...
 				} else if(index[k].slice(-1)[0][0] == true){
 					index[k].push([false, n])
 					logger && logger.emit('queued', n)
+					queued += 1
 				}
 			})

+			logger && logger.emit('files-queued', queued)

 			// load...
 			Promise
@@ -276,7 +291,7 @@ function(path, logger){
 				// load latest...
 				return loadJSON(latest)
 					.then(function(data){
-						logger && logger.emit('loaded', latest)
+						logger && logger.emit('loaded', keyword, latest)

 						var loading = {}

@@ -311,7 +326,7 @@ function(path, logger){
 							data[n] = json[n]
 						}

-						logger && logger.emit('loaded', p)
+						logger && logger.emit('loaded', keyword+'-diff', p)
 					})

 				res[keyword] = data
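Taken together, the hunks above thread a queued counter through both grouping passes and widen the 'loaded' payload with the keyword (the plain keyword for a base file, keyword+'-diff' for a diff), so a listener can report per-keyword progress. A hedged consumer sketch, reusing the hypothetical EventEmitter logger from the first example:

	var total = 0
	var done = 0
	logger.on('files-queued', function(queued){ total = queued })
	logger.on('loaded', function(keyword, path){
		done += 1
		// keyword tells base loads from '<keyword>-diff' loads...
		console.log('loaded ' + done + '/' + total + ':', keyword, path) })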
@@ -624,6 +624,21 @@ module.ImagesPrototype = {
 	},


+	clone: function(){
+		return (new Images()).join(this)
+	},
+	// NOTE: this will join the other data into the current object in-place,
+	//		use .clone() to preserve current data...
+	join: function(other){
+		var that = this
+
+		other.forEach(function(gid, img){
+			that[gid] = img
+		})
+
+		return this
+	},
+
 	// serialization...
 	loadJSON: function(data){
 		data = typeof(data) == typeof('str')
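The new .clone()/.join() pair supports both destructive and non-destructive merging of Images objects: .join(other) copies other's images into this keyed by gid (later writes win), and .clone() first copies this into a fresh Images(). A short usage sketch, assuming a and b are Images instances:

	// merge b into a copy, leaving a untouched...
	var merged = a.clone().join(b)

	// merge b into a in-place (per the NOTE above)...
	a.join(b)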
@@ -1467,7 +1467,7 @@ module.Journal = ImageGridFeatures.Feature({
 var PartialRibbonsActions = actions.Actions({
 	// NOTE: this will not work from chrome when loading from a local fs...
 	// XXX experimental...
-	startCacheWorker: [
+	startCacheWorker: ['Interface/',
 		function(){
 			// a worker is started already...
 			if(this.cacheWorker != null){
@@ -1501,7 +1501,7 @@ var PartialRibbonsActions = actions.Actions({
 			this.cacheWorker = new Worker(url)
 			this.cacheWorker.url = url
 		}],
-	stopCacheWorker: [
+	stopCacheWorker: ['Interface/',
 		function(){
 			if(this.cacheWorker){
 				this.cacheWorker.terminate()
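The only change in these two hunks is the 'Interface/' string prepended to the action definitions. This reads as the actions framework's convention of carrying documentation/menu-path metadata in the string elements before the handler; that reading is an assumption, illustrated here with an invented action:

	// assumed convention: string elements before the handler
	// document the action under the given doc path...
	var ExampleActions = actions.Actions({
		exampleAction: ['Interface/',
			function(){
				// handler body...
			}],
	})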
@@ -3235,23 +3235,84 @@ var FileSystemLoaderActions = actions.Actions({
 			var that = this

 			// XXX get a logger...
+			logger = logger || this.logger

-			// XXX this will not work for explicit path (path to a dir
-			//		that contains the index)
 			file.loadIndex(path, logger)
 				.then(function(res){
 					// XXX if res is empty load raw...
-
-					// XXX res may contain multiple indexes, need to
-					//		combine them...
-
-					var k = Object.keys(res)[0]
-					var index = res[k]
-
 					// XXX use the logger...
-					console.log('LOADING:', k, res)
-
-					that.load(file.buildIndex(index, k))
+					//console.log('FOUND INDEXES:', Object.keys(res).length)
+
+					// skip nested paths...
+					// XXX make this optional...
+					// XXX this is best done BEFORE we load all the
+					//		indexes, e.g. in .loadIndex(..)
+					var paths = Object.keys(res)
+					var skipped = []
+					paths.forEach(function(p){
+						// already removed...
+						if(skipped.indexOf(p) >= 0){
+							return
+						}
+
+						paths
+							// get all paths that fully contain p...
+							.filter(function(o){
+								return o != p && o.indexOf(p) == 0
+							})
+							// drop all longer paths...
+							.forEach(function(e){
+								skipped.push(e)
+								delete res[e]
+							})
+					})
+					//console.log('SKIPPING NESTED:', skipped.length)
+
+					var index
+
+					// NOTE: res may contain multiple indexes...
+					for(var k in res){
+
+
+						// skip empty indexes...
+						// XXX should we rebuild or list here???
+						if(res[k].data == null || res[k].images == null){
+							continue
+						}
+
+						var part = file.buildIndex(res[k], k)
+
+						// load the first index...
+						if(index == null){
+							// XXX use the logger...
+							//console.log('LOADING:', k, res)
+							logger && logger.emit('base index', k, res)
+
+							index = part
+
+						// merge indexes...
+						// XXX need to skip sub-indexes in the same sub-tree...
+						//		...skip any path that fully contains an
+						//		already loaded path..
+						// XXX load data in chunks rather than merge...
+						} else {
+							//console.log('MERGING:', k, part)
+							logger && logger.emit('merge index', k, res)
+
+							// merge...
+							// XXX this appears to lose bookmarks and other tags...
+							index.data.join(part.data)
+							index.images.join(part.images)
+						}
+
+						// XXX do a better merge and remove this...
+						break
+					}
+
+					logger && logger.emit('load index', index)
+
+					that.load(index)
 				})
 		}],
 })
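A standalone sketch of the nested-index filter introduced in this hunk: any discovered path that string-prefix-matches another discovered path is treated as nested and dropped before merging. Paths are invented and the skipped bookkeeping from the original is omitted; note that the plain o.indexOf(p) == 0 test also drops sibling paths like '/photos2' that merely share a prefix:

	var res = {
		'/photos': 1,
		'/photos/2014': 1,	// nested under /photos -> dropped
		'/photos2': 1,		// shares the '/photos' prefix -> also dropped
		'/work': 1,
	}
	Object.keys(res).forEach(function(p){
		Object.keys(res)
			.filter(function(o){ return o != p && o.indexOf(p) == 0 })
			.forEach(function(e){ delete res[e] })
	})
	console.log(Object.keys(res))	// -> [ '/photos', '/work' ]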