/Library/Java/JavaVirtualMachines/jdk1.8.0_31.jdk/Contents/Home/bin/java -Xms512m -Xmx1g -Xss256k -Djava.awt.headless=true -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=75 -XX:+UseCMSInitiatingOccupancyOnly -XX:+HeapDumpOnOutOfMemoryError -Delasticsearch -Des.foreground=yes -Dcluster.name=eslee -Didea.launcher.port=7532 "-Didea.launcher.bin.path=/Applications/IntelliJ IDEA 14.app/Contents/bin" -Dfile.encoding=UTF-8 -classpath "/Library/Java/JavaVirtualMachines/jdk1.8.0_31.jdk/Contents/Home/lib/ant-javafx.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_31.jdk/Contents/Home/lib/dt.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_31.jdk/Contents/Home/lib/javafx-mx.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_31.jdk/Contents/Home/lib/jconsole.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_31.jdk/Contents/Home/lib/sa-jdi.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_31.jdk/Contents/Home/lib/tools.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_31.jdk/Contents/Home/jre/lib/charsets.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_31.jdk/Contents/Home/jre/lib/deploy.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_31.jdk/Contents/Home/jre/lib/javaws.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_31.jdk/Contents/Home/jre/lib/jce.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_31.jdk/Contents/Home/jre/lib/jfr.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_31.jdk/Contents/Home/jre/lib/jfxswt.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_31.jdk/Contents/Home/jre/lib/jsse.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_31.jdk/Contents/Home/jre/lib/management-agent.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_31.jdk/Contents/Home/jre/lib/plugin.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_31.jdk/Contents/Home/jre/lib/resources.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_31.jdk/Contents/Home/jre/lib/rt.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_31.jdk/Contents/Home/jre/lib/ext/cldrdata.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_31.jdk/Contents/Home/jre/lib/ext/dnsns.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_31.jdk/Contents/Home/jre/lib/ext/jfxrt.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_31.jdk/Contents/Home/jre/lib/ext/localedata.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_31.jdk/Contents/Home/jre/lib/ext/nashorn.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_31.jdk/Contents/Home/jre/lib/ext/sunec.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_31.jdk/Contents/Home/jre/lib/ext/sunjce_provider.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_31.jdk/Contents/Home/jre/lib/ext/sunpkcs11.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_31.jdk/Contents/Home/jre/lib/ext/zipfs.jar:/Users/hinmanm/src/elasticsearch/target/classes:/Users/hinmanm/.m2/repository/org/apache/lucene/lucene-core/5.1.0-snapshot-1656366/lucene-core-5.1.0-snapshot-1656366.jar:/Users/hinmanm/.m2/repository/org/apache/lucene/lucene-backward-codecs/5.1.0-snapshot-1656366/lucene-backward-codecs-5.1.0-snapshot-1656366.jar:/Users/hinmanm/.m2/repository/org/apache/lucene/lucene-analyzers-common/5.1.0-snapshot-1656366/lucene-analyzers-common-5.1.0-snapshot-1656366.jar:/Users/hinmanm/.m2/repository/org/apache/lucene/lucene-queries/5.1.0-snapshot-1656366/lucene-queries-5.1.0-snapshot-1656366.jar:/Users/hinmanm/.m2/repository/org/apache/lucene/lucene-memory/5.1.0-snapshot-1656366/lucene-memory-5.1.0-snapshot-1656366.jar:/Users/hinmanm/.m2/repository/org/apache/lucene/lucene-highlighter/5.1.0-snapshot-1656366/lucene-highlighter-5.1.0-snapshot-1656366.jar:/Users/hinmanm/.m2/repository/org/apache/lucene/lucene-queryparser/5.1.0-snapshot-1656366/lucene-queryparser-5.1.0-snapshot-1656366.jar:/Users/hinmanm/.m2/repository/org/apache/lucene/lucene-sandbox/5.1.0-snapshot-1656366/lucene-sandbox-5.1.0-snapshot-1656366.jar:/Users/hinmanm/.m2/repository/org/apache/lucene/lucene-suggest/5.1.0-snapshot-1656366/lucene-suggest-5.1.0-snapshot-1656366.jar:/Users/hinmanm/.m2/repository/org/apache/lucene/lucene-misc/5.1.0-snapshot-1656366/lucene-misc-5.1.0-snapshot-1656366.jar:/Users/hinmanm/.m2/repository/org/apache/lucene/lucene-join/5.1.0-snapshot-1656366/lucene-join-5.1.0-snapshot-1656366.jar:/Users/hinmanm/.m2/repository/org/apache/lucene/lucene-grouping/5.1.0-snapshot-1656366/lucene-grouping-5.1.0-snapshot-1656366.jar:/Users/hinmanm/.m2/repository/org/apache/lucene/lucene-spatial/5.1.0-snapshot-1656366/lucene-spatial-5.1.0-snapshot-1656366.jar:/Users/hinmanm/.m2/repository/org/apache/lucene/lucene-expressions/5.1.0-snapshot-1656366/lucene-expressions-5.1.0-snapshot-1656366.jar:/Users/hinmanm/.m2/repository/org/antlr/antlr-runtime/3.5/antlr-runtime-3.5.jar:/Users/hinmanm/.m2/repository/org/ow2/asm/asm/4.1/asm-4.1.jar:/Users/hinmanm/.m2/repository/org/ow2/asm/asm-commons/4.1/asm-commons-4.1.jar:/Users/hinmanm/.m2/repository/com/spatial4j/spatial4j/0.4.1/spatial4j-0.4.1.jar:/Users/hinmanm/.m2/repository/com/vividsolutions/jts/1.13/jts-1.13.jar:/Users/hinmanm/.m2/repository/com/github/spullara/mustache/java/compiler/0.8.13/compiler-0.8.13.jar:/Users/hinmanm/.m2/repository/com/google/guava/guava/18.0/guava-18.0.jar:/Users/hinmanm/.m2/repository/com/carrotsearch/hppc/0.6.0/hppc-0.6.0.jar:/Users/hinmanm/.m2/repository/joda-time/joda-time/2.3/joda-time-2.3.jar:/Users/hinmanm/.m2/repository/org/joda/joda-convert/1.2/joda-convert-1.2.jar:/Users/hinmanm/.m2/repository/com/fasterxml/jackson/core/jackson-core/2.4.2/jackson-core-2.4.2.jar:/Users/hinmanm/.m2/repository/com/fasterxml/jackson/dataformat/jackson-dataformat-smile/2.4.2/jackson-dataformat-smile-2.4.2.jar:/Users/hinmanm/.m2/repository/com/fasterxml/jackson/dataformat/jackson-dataformat-yaml/2.4.2/jackson-dataformat-yaml-2.4.2.jar:/Users/hinmanm/.m2/repository/com/fasterxml/jackson/dataformat/jackson-dataformat-cbor/2.4.2/jackson-dataformat-cbor-2.4.2.jar:/Users/hinmanm/.m2/repository/io/netty/netty/3.10.0.Final/netty-3.10.0.Final.jar:/Users/hinmanm/.m2/repository/com/ning/compress-lzf/1.0.2/compress-lzf-1.0.2.jar:/Users/hinmanm/.m2/repository/com/tdunning/t-digest/3.0/t-digest-3.0.jar:/Users/hinmanm/.m2/repository/org/apache/commons/commons-lang3/3.3.2/commons-lang3-3.3.2.jar:/Users/hinmanm/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/hinmanm/.m2/repository/org/codehaus/groovy/groovy-all/2.4.0/groovy-all-2.4.0-indy.jar:/Users/hinmanm/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/hinmanm/.m2/repository/log4j/apache-log4j-extras/1.2.17/apache-log4j-extras-1.2.17.jar:/Users/hinmanm/.m2/repository/org/slf4j/slf4j-api/1.6.2/slf4j-api-1.6.2.jar:/Users/hinmanm/.m2/repository/net/java/dev/jna/jna/4.1.0/jna-4.1.0.jar:/Users/hinmanm/.m2/repository/org/fusesource/sigar/1.6.4/sigar-1.6.4.jar:/Applications/IntelliJ IDEA 14.app/Contents/lib/idea_rt.jar" com.intellij.rt.execution.application.AppMain org.elasticsearch.bootstrap.Bootstrap
[2015-02-02 16:00:41,658][INFO ][node ] [Xemu] version[2.0.0-SNAPSHOT], pid[93700], build[0f405e9/2015-02-02T19:02:41Z]
[2015-02-02 16:00:41,659][INFO ][node ] [Xemu] initializing ...
[2015-02-02 16:00:41,668][INFO ][plugins ] [Xemu] loaded [], sites []
[2015-02-02 16:00:45,764][INFO ][node ] [Xemu] initialized
[2015-02-02 16:00:45,765][INFO ][node ] [Xemu] starting ...
[2015-02-02 16:00:45,921][INFO ][transport ] [Xemu] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.0.0.16:9300]}
[2015-02-02 16:00:45,984][INFO ][discovery ] [Xemu] elasticsearch/eJj0uF6YQIGQ0610E9-Rqg
[2015-02-02 16:00:49,792][INFO ][cluster.service ] [Xemu] new_master [Xemu][eJj0uF6YQIGQ0610E9-Rqg][Xanadu.local][inet[/10.0.0.16:9300]]{add_id_to_custom_path=false, enable_custom_paths=true}, reason: zen-disco-join (elected_as_master)
[2015-02-02 16:00:49,817][INFO ][http ] [Xemu] bound_address {inet[/0:0:0:0:0:0:0:0:9200]}, publish_address {inet[/10.0.0.16:9200]}
[2015-02-02 16:00:49,818][INFO ][node ] [Xemu] started
[2015-02-02 16:00:49,840][INFO ][gateway ] [Xemu] recovered [0] indices into cluster_state
[2015-02-02 16:01:37,129][INFO ][cluster.service ] [Xemu] added {[Jane Kincaid][KaR0Q_QFSs2Capu6kCBPaw][Xanadu.local][inet[/10.0.0.16:9301]]{add_id_to_custom_path=false, enable_custom_paths=true},}, reason: zen-disco-receive(join from node[[Jane Kincaid][KaR0Q_QFSs2Capu6kCBPaw][Xanadu.local][inet[/10.0.0.16:9301]]{add_id_to_custom_path=false, enable_custom_paths=true}])
[2015-02-02 16:01:45,991][INFO ][cluster.service ] [Xemu] added {[Flambe][MqFW42-ZT8G6AL8zR2krTg][Xanadu.local][inet[/10.0.0.16:9302]]{add_id_to_custom_path=false, enable_custom_paths=true},}, reason: zen-disco-receive(join from node[[Flambe][MqFW42-ZT8G6AL8zR2krTg][Xanadu.local][inet[/10.0.0.16:9302]]{add_id_to_custom_path=false, enable_custom_paths=true}])
[2015-02-02 16:02:05,180][DEBUG][river.cluster ] [Xemu] processing [reroute_rivers_node_changed]: execute
[2015-02-02 16:02:05,180][DEBUG][river.cluster ] [Xemu] processing [reroute_rivers_node_changed]: no change in cluster_state
[2015-02-02 16:02:05,181][DEBUG][cluster.service ] [Xemu] processing [cluster_update_settings]: done applying updated cluster_state (version: 5)
[2015-02-02 16:02:05,181][DEBUG][cluster.service ] [Xemu] processing [reroute_after_cluster_update_settings]: execute
[2015-02-02 16:02:05,181][DEBUG][cluster.service ] [Xemu] processing [reroute_after_cluster_update_settings]: no change in cluster_state
[2015-02-02 16:02:05,235][DEBUG][cluster.service ] [Xemu] processing [create-index [myindex], cause [api]]: execute
[2015-02-02 16:02:05,236][DEBUG][indices ] [Xemu] creating Index [myindex], shards [1]/[1]
[2015-02-02 16:02:05,768][DEBUG][index.mapper ] [Xemu] [myindex] using dynamic[true], default mapping: default_mapping_location[null], loaded_from[file:/Users/hinmanm/src/elasticsearch/target/classes/org/elasticsearch/index/mapper/default-mapping.json], default percolator mapping: location[null], loaded_from[null]
[2015-02-02 16:02:05,769][DEBUG][index.cache.query.parser.resident] [Xemu] [myindex] using [resident] query cache with max_size [100], expire [null]
[2015-02-02 16:02:05,777][DEBUG][index.store.fs ] [Xemu] [myindex] using index.store.throttle.type [none], with index.store.throttle.max_bytes_per_sec [0b]
[2015-02-02 16:02:05,846][INFO ][cluster.metadata ] [Xemu] [myindex] creating index, cause [api], templates [], shards [1]/[1], mappings []
[2015-02-02 16:02:05,882][DEBUG][indices ] [Xemu] [myindex] closing ... (reason [cleaning up after validating index on master])
[2015-02-02 16:02:05,885][DEBUG][indices ] [Xemu] [myindex] closing index service (reason [cleaning up after validating index on master])
[2015-02-02 16:02:05,885][DEBUG][indices ] [Xemu] [myindex] closing index cache (reason [cleaning up after validating index on master])
[2015-02-02 16:02:05,886][DEBUG][index.cache.filter.weighted] [Xemu] [myindex] full cache clear, reason [close]
[2015-02-02 16:02:05,886][DEBUG][index.cache.bitset ] [Xemu] [myindex] clearing all bitsets because [close]
[2015-02-02 16:02:05,886][DEBUG][indices ] [Xemu] [myindex] clearing index field data (reason [cleaning up after validating index on master])
[2015-02-02 16:02:05,886][DEBUG][indices ] [Xemu] [myindex] closing analysis service (reason [cleaning up after validating index on master])
[2015-02-02 16:02:05,886][DEBUG][indices ] [Xemu] [myindex] closing mapper service (reason [cleaning up after validating index on master])
[2015-02-02 16:02:05,886][DEBUG][indices ] [Xemu] [myindex] closing index query parser service (reason [cleaning up after validating index on master])
[2015-02-02 16:02:05,886][DEBUG][indices ] [Xemu] [myindex] closing index service (reason [cleaning up after validating index on master])
[2015-02-02 16:02:05,887][DEBUG][indices ] [Xemu] [myindex] closed... (reason [cleaning up after validating index on master])
[2015-02-02 16:02:05,887][DEBUG][cluster.service ] [Xemu] cluster state updated, version [6], source [create-index [myindex], cause [api]]
[2015-02-02 16:02:05,887][DEBUG][cluster.service ] [Xemu] publishing cluster state version 6
[2015-02-02 16:02:05,917][DEBUG][cluster.service ] [Xemu] set local cluster state to version 6
[2015-02-02 16:02:05,918][DEBUG][indices.cluster ] [Xemu] [myindex] creating index
[2015-02-02 16:02:05,918][DEBUG][indices ] [Xemu] creating Index [myindex], shards [1]/[1]
[2015-02-02 16:02:05,942][DEBUG][index.mapper ] [Xemu] [myindex] using dynamic[true], default mapping: default_mapping_location[null], loaded_from[file:/Users/hinmanm/src/elasticsearch/target/classes/org/elasticsearch/index/mapper/default-mapping.json], default percolator mapping: location[null], loaded_from[null]
[2015-02-02 16:02:05,942][DEBUG][index.cache.query.parser.resident] [Xemu] [myindex] using [resident] query cache with max_size [100], expire [null]
[2015-02-02 16:02:05,943][DEBUG][index.store.fs ] [Xemu] [myindex] using index.store.throttle.type [none], with index.store.throttle.max_bytes_per_sec [0b]
[2015-02-02 16:02:05,944][DEBUG][indices.cluster ] [Xemu] [myindex][0] creating shard
[2015-02-02 16:02:05,945][TRACE][env ] [Xemu] acquiring node shardlock on [[myindex][0]], timeout [5000]
[2015-02-02 16:02:05,946][TRACE][env ] [Xemu] successfully acquired shardlock for [[myindex][0]]
[2015-02-02 16:02:05,946][DEBUG][index ] [Xemu] [myindex] creating shard_id [myindex][0]
[2015-02-02 16:02:06,147][DEBUG][index.store.fs ] [Xemu] [myindex] using [/tmp/foo/myindex/0/index] as shard's index location
[2015-02-02 16:02:06,151][DEBUG][index.merge.scheduler ] [Xemu] [myindex][0] using [concurrent] merge scheduler with max_thread_count[2], max_merge_count[7], auto_throttle[true]
[2015-02-02 16:02:06,152][DEBUG][index.store.fs ] [Xemu] [myindex] using [/tmp/foo/myindex/0/translog] as shard's translog location
[2015-02-02 16:02:06,157][DEBUG][index.deletionpolicy ] [Xemu] [myindex][0] Using [keep_only_last] deletion policy
[2015-02-02 16:02:06,159][DEBUG][index.merge.policy ] [Xemu] [myindex][0] using [tiered] merge mergePolicy with
expunge_deletes_allowed[10.0], floor_segment[2mb], max_merge_at_once[10], max_merge_at_once_explicit[30], max_merged_segment[5gb], segments_per_tier[10.0], reclaim_deletes_weight[2.0] [2015-02-02 16:02:06,160][DEBUG][index.shard ] [Xemu] [myindex][0] state: [CREATED] [2015-02-02 16:02:06,161][DEBUG][index.translog ] [Xemu] [myindex][0] interval [5s], flush_threshold_ops [2147483647], flush_threshold_size [512mb], flush_threshold_period [30m] [2015-02-02 16:02:06,171][DEBUG][index.shard ] [Xemu] [myindex][0] state: [CREATED]->[RECOVERING], reason [from gateway] [2015-02-02 16:02:06,173][DEBUG][index.gateway ] [Xemu] [myindex][0] starting recovery from shard_gateway ... [2015-02-02 16:02:06,174][DEBUG][river.cluster ] [Xemu] processing [reroute_rivers_node_changed]: execute [2015-02-02 16:02:06,174][DEBUG][river.cluster ] [Xemu] processing [reroute_rivers_node_changed]: no change in cluster_state [2015-02-02 16:02:06,177][DEBUG][cluster.service ] [Xemu] processing [create-index [myindex], cause [api]]: done applying updated cluster_state (version: 6) [2015-02-02 16:02:06,276][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] IFD: init: current segments file is "segments"; deletionPolicy=org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy@4bd0c70d [2015-02-02 16:02:06,276][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] IFD: now checkpoint "" [0 segments ; isCommit = false] [2015-02-02 16:02:06,276][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] IFD: 0 msec to checkpoint [2015-02-02 16:02:06,276][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] IW: init: create=true [2015-02-02 16:02:06,277][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] IW: dir=store(least_used[niofs(/private/tmp/foo/myindex/0/index)]) index= version=5.1.0 analyzer=org.elasticsearch.index.mapper.MapperAnalyzer ramBufferSizeMB=64.0 maxBufferedDocs=-1 maxBufferedDeleteTerms=-1 mergedSegmentWarmer=org.elasticsearch.index.engine.internal.InternalEngine$5@209f748e delPolicy=org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy commit=null openMode=CREATE similarity=org.elasticsearch.index.similarity.SimilarityService$PerFieldSimilarity mergeScheduler=CustomConcurrentMergeScheduler: maxThreadCount=2, maxMergeCount=8, ioThrottle=true default WRITE_LOCK_TIMEOUT=1000 writeLockTimeout=5000 codec=Lucene50 infoStream=org.elasticsearch.common.lucene.LoggerInfoStream mergePolicy=ElasticsearchMergePolicy([TieredMergePolicy: maxMergeAtOnce=10, maxMergeAtOnceExplicit=30, maxMergedSegmentMB=5120.0, floorSegmentMB=2.0, forceMergeDeletesPctAllowed=10.0, segmentsPerTier=10.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.1) indexerThreadPool=org.apache.lucene.index.DocumentsWriterPerThreadPool@c025ff7 readerPooling=false perThreadHardLimitMB=1945 useCompoundFile=true commitOnClose=false writer=org.apache.lucene.index.IndexWriter@52fc09ac [2015-02-02 16:02:06,280][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] IW: commit: start [2015-02-02 16:02:06,280][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] IW: commit: enter lock [2015-02-02 16:02:06,280][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] IW: commit: now prepare [2015-02-02 
16:02:06,280][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] IW: prepareCommit: flush [2015-02-02 16:02:06,280][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] IW: index before flush [2015-02-02 16:02:06,280][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] DW: startFullFlush [2015-02-02 16:02:06,280][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] IW: apply all deletes during flush [2015-02-02 16:02:06,281][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] IW: now apply all deletes for all segments maxDoc=0 [2015-02-02 16:02:06,281][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] BD: prune sis=segments: minGen=9223372036854775807 packetCount=0 [2015-02-02 16:02:06,281][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] DW: elasticsearch[Xemu][generic][T#4] finishFullFlush success=true [2015-02-02 16:02:06,282][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] IW: startCommit(): start [2015-02-02 16:02:06,282][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] IW: startCommit index= changeCount=2 [2015-02-02 16:02:06,283][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] IW: done all syncs: [] [2015-02-02 16:02:06,283][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] IW: commit: pendingCommit != null [2015-02-02 16:02:06,284][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] IFD: now checkpoint "" [0 segments ; isCommit = true] [2015-02-02 16:02:06,286][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] IFD: 1 msec to checkpoint [2015-02-02 16:02:06,286][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] IW: commit: wrote segments file "segments_1" [2015-02-02 16:02:06,287][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] IW: commit: took 6.2 msec [2015-02-02 16:02:06,287][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] IW: commit: done [2015-02-02 16:02:06,288][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] IW: flush at getReader [2015-02-02 16:02:06,288][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] DW: startFullFlush [2015-02-02 16:02:06,288][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] IW: apply all deletes during flush [2015-02-02 16:02:06,288][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] IW: now apply all deletes for all segments maxDoc=0 [2015-02-02 16:02:06,288][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] BD: prune sis=segments: minGen=9223372036854775807 packetCount=0 [2015-02-02 16:02:06,290][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] IW: return reader version=1 reader=StandardDirectoryReader(segments:1:nrt) [2015-02-02 16:02:06,290][TRACE][index.engine.internal.lucene.iw] [Xemu] 
[myindex][0] elasticsearch[Xemu][generic][T#4] DW: elasticsearch[Xemu][generic][T#4] finishFullFlush success=true [2015-02-02 16:02:06,290][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#4] IW: getReader took 2 msec [2015-02-02 16:02:06,317][DEBUG][index.shard ] [Xemu] [myindex][0] scheduling refresher every 1s [2015-02-02 16:02:06,317][DEBUG][index.shard ] [Xemu] [myindex][0] state: [RECOVERING]->[POST_RECOVERY], reason [post recovery from gateway, no translog for id [-1]] [2015-02-02 16:02:06,318][TRACE][index.gateway ] [Xemu] [myindex][0] recovery completed from shard_gateway, took [145ms] index : files [0] with total_size [0b], took[11ms] : recovered_files [0] with total_size [0b] : reusing_files [0] with total_size [0b] start : took [132ms], check_index [0s] translog : number_of_operations [0], took [0s] [2015-02-02 16:02:06,318][DEBUG][cluster.action.shard ] [Xemu] sending shard started for [myindex][0], node[eJj0uF6YQIGQ0610E9-Rqg], [P], s[INITIALIZING], indexUUID [HjGOiD2dTRiSDZFuWdyf8A], reason [after recovery from gateway] [2015-02-02 16:02:06,318][DEBUG][cluster.action.shard ] [Xemu] received shard started for [myindex][0], node[eJj0uF6YQIGQ0610E9-Rqg], [P], s[INITIALIZING], indexUUID [HjGOiD2dTRiSDZFuWdyf8A], reason [after recovery from gateway] [2015-02-02 16:02:06,318][DEBUG][cluster.service ] [Xemu] processing [shard-started ([myindex][0], node[eJj0uF6YQIGQ0610E9-Rqg], [P], s[INITIALIZING]), reason [after recovery from gateway]]: execute [2015-02-02 16:02:06,319][DEBUG][cluster.action.shard ] [Xemu] [myindex][0] will apply shard started [myindex][0], node[eJj0uF6YQIGQ0610E9-Rqg], [P], s[INITIALIZING], indexUUID [HjGOiD2dTRiSDZFuWdyf8A], reason [after recovery from gateway] [2015-02-02 16:02:06,439][DEBUG][cluster.routing.allocation.decider] [Xemu] Node [eJj0uF6YQIGQ0610E9-Rqg] has 30.185693233866644% free disk (75405336576 bytes) [2015-02-02 16:02:06,439][DEBUG][cluster.service ] [Xemu] cluster state updated, version [7], source [shard-started ([myindex][0], node[eJj0uF6YQIGQ0610E9-Rqg], [P], s[INITIALIZING]), reason [after recovery from gateway]] [2015-02-02 16:02:06,439][DEBUG][cluster.service ] [Xemu] publishing cluster state version 7 [2015-02-02 16:02:06,860][DEBUG][cluster.service ] [Xemu] set local cluster state to version 7 [2015-02-02 16:02:06,861][DEBUG][index.shard ] [Xemu] [myindex][0] state: [POST_RECOVERY]->[STARTED], reason [global state is [STARTED]] [2015-02-02 16:02:06,861][DEBUG][river.cluster ] [Xemu] processing [reroute_rivers_node_changed]: execute [2015-02-02 16:02:06,861][DEBUG][river.cluster ] [Xemu] processing [reroute_rivers_node_changed]: no change in cluster_state [2015-02-02 16:02:06,866][DEBUG][cluster.service ] [Xemu] processing [shard-started ([myindex][0], node[eJj0uF6YQIGQ0610E9-Rqg], [P], s[INITIALIZING]), reason [after recovery from gateway]]: done applying updated cluster_state (version: 7) [2015-02-02 16:02:06,932][DEBUG][cluster.service ] [Xemu] processing [recovery_mapping_check]: execute [2015-02-02 16:02:06,933][DEBUG][cluster.service ] [Xemu] processing [recovery_mapping_check]: no change in cluster_state [2015-02-02 16:02:06,946][DEBUG][cluster.action.shard ] [Xemu] received shard started for [myindex][0], node[MqFW42-ZT8G6AL8zR2krTg], [R], s[INITIALIZING], indexUUID [HjGOiD2dTRiSDZFuWdyf8A], reason [after recovery (replica) from node [[Xemu][eJj0uF6YQIGQ0610E9-Rqg][Xanadu.local][inet[/10.0.0.16:9300]]{add_id_to_custom_path=false, enable_custom_paths=true}]] [2015-02-02 
16:02:06,946][DEBUG][cluster.service ] [Xemu] processing [shard-started ([myindex][0], node[MqFW42-ZT8G6AL8zR2krTg], [R], s[INITIALIZING]), reason [after recovery (replica) from node [[Xemu][eJj0uF6YQIGQ0610E9-Rqg][Xanadu.local][inet[/10.0.0.16:9300]]{add_id_to_custom_path=false, enable_custom_paths=true}]]]: execute [2015-02-02 16:02:06,947][DEBUG][cluster.action.shard ] [Xemu] [myindex][0] will apply shard started [myindex][0], node[MqFW42-ZT8G6AL8zR2krTg], [R], s[INITIALIZING], indexUUID [HjGOiD2dTRiSDZFuWdyf8A], reason [after recovery (replica) from node [[Xemu][eJj0uF6YQIGQ0610E9-Rqg][Xanadu.local][inet[/10.0.0.16:9300]]{add_id_to_custom_path=false, enable_custom_paths=true}]] [2015-02-02 16:02:06,947][DEBUG][cluster.routing.allocation.decider] [Xemu] Node [eJj0uF6YQIGQ0610E9-Rqg] has 30.185693233866644% free disk (75405336576 bytes) [2015-02-02 16:02:06,947][DEBUG][cluster.routing.allocation.decider] [Xemu] Node [MqFW42-ZT8G6AL8zR2krTg] has 30.185693233866644% free disk (75405336576 bytes) [2015-02-02 16:02:06,947][DEBUG][cluster.service ] [Xemu] cluster state updated, version [8], source [shard-started ([myindex][0], node[MqFW42-ZT8G6AL8zR2krTg], [R], s[INITIALIZING]), reason [after recovery (replica) from node [[Xemu][eJj0uF6YQIGQ0610E9-Rqg][Xanadu.local][inet[/10.0.0.16:9300]]{add_id_to_custom_path=false, enable_custom_paths=true}]]] [2015-02-02 16:02:06,948][DEBUG][cluster.service ] [Xemu] publishing cluster state version 8 [2015-02-02 16:02:06,982][DEBUG][cluster.service ] [Xemu] set local cluster state to version 8 [2015-02-02 16:02:06,983][DEBUG][river.cluster ] [Xemu] processing [reroute_rivers_node_changed]: execute [2015-02-02 16:02:06,983][DEBUG][river.cluster ] [Xemu] processing [reroute_rivers_node_changed]: no change in cluster_state [2015-02-02 16:02:06,985][DEBUG][cluster.service ] [Xemu] processing [shard-started ([myindex][0], node[MqFW42-ZT8G6AL8zR2krTg], [R], s[INITIALIZING]), reason [after recovery (replica) from node [[Xemu][eJj0uF6YQIGQ0610E9-Rqg][Xanadu.local][inet[/10.0.0.16:9300]]{add_id_to_custom_path=false, enable_custom_paths=true}]]]: done applying updated cluster_state (version: 8) [2015-02-02 16:02:12,612][DEBUG][cluster.service ] [Xemu] processing [update-mapping [myindex][doc] / node [eJj0uF6YQIGQ0610E9-Rqg], order [1]]: execute [2015-02-02 16:02:12,619][DEBUG][cluster.metadata ] [Xemu] [myindex] update_mapping [doc] (dynamic) with source [{"doc":{"properties":{"body":{"type":"string"}}}}] [2015-02-02 16:02:12,624][DEBUG][cluster.service ] [Xemu] cluster state updated, version [9], source [update-mapping [myindex][doc] / node [eJj0uF6YQIGQ0610E9-Rqg], order [1]] [2015-02-02 16:02:12,625][DEBUG][cluster.service ] [Xemu] publishing cluster state version 9 [2015-02-02 16:02:12,707][DEBUG][cluster.service ] [Xemu] set local cluster state to version 9 [2015-02-02 16:02:12,707][DEBUG][river.cluster ] [Xemu] processing [reroute_rivers_node_changed]: execute [2015-02-02 16:02:12,708][DEBUG][river.cluster ] [Xemu] processing [reroute_rivers_node_changed]: no change in cluster_state [2015-02-02 16:02:12,711][DEBUG][cluster.service ] [Xemu] processing [update-mapping [myindex][doc] / node [eJj0uF6YQIGQ0610E9-Rqg], order [1]]: done applying updated cluster_state (version: 9) [2015-02-02 16:02:12,711][DEBUG][cluster.action.index ] [Xemu] successfully updated master with mapping update: index [myindex], indexUUID [HjGOiD2dTRiSDZFuWdyf8A], type [doc] and source [{"doc":{"properties":{"body":{"type":"string"}}}}] [2015-02-02 
16:02:12,731][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] DW: anyChanges? numDocsInRam=2 deletes=false hasTickets:false pendingChangesInFullFlush: false [2015-02-02 16:02:12,732][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IW: nrtIsCurrent: infoVersion matches: false; DW changes: true; BD changes: false [2015-02-02 16:02:12,732][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IW: flush at getReader [2015-02-02 16:02:12,732][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] DW: startFullFlush [2015-02-02 16:02:12,733][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] DW: anyChanges? numDocsInRam=2 deletes=false hasTickets:false pendingChangesInFullFlush: false [2015-02-02 16:02:12,733][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] DWFC: addFlushableState DocumentsWriterPerThread [pendingDeletes=gen=0, segment=_0, aborted=false, numDocsInRAM=2, deleteQueue=DWDQ: [ generation: 2 ]] [2015-02-02 16:02:12,740][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] DWPT: flush postings as segment _0 numDocs=2 [2015-02-02 16:02:12,784][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] DWPT: new segment has 0 deleted docs [2015-02-02 16:02:12,784][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] DWPT: new segment has no vectors; norms; docValues; prox; freqs [2015-02-02 16:02:12,784][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] DWPT: flushedFiles=[_0_Lucene50_0.doc, _0_Lucene50_0.tim, _0_Lucene50_0.dvd, _0_Lucene50_0.pos, _0.nvd, _0.fdx, _0_Lucene50_0.dvm, _0_Lucene50_0.tip, _0.fdt, _0.nvm, _0.fnm] [2015-02-02 16:02:12,784][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] DWPT: flushed codec=Lucene50 [2015-02-02 16:02:12,786][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] DWPT: flushed: segment=_0 ramUsed=0.112 MB newFlushedSize=0.002 MB docs/MB=1,005.346 [2015-02-02 16:02:12,787][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IW: create compound file [2015-02-02 16:02:12,790][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] DW: publishFlushedSegment seg-private updates=null [2015-02-02 16:02:12,790][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IW: publishFlushedSegment [2015-02-02 16:02:12,791][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IW: publish sets newSegment delGen=3 seg=_0(5.1.0):c2 [2015-02-02 16:02:12,791][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IFD: now checkpoint "_0(5.1.0):c2" [1 segments ; isCommit = false] [2015-02-02 16:02:12,791][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IFD: 0 msec to checkpoint [2015-02-02 16:02:12,791][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IW: apply all deletes during flush [2015-02-02 16:02:12,791][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IW: now apply 
all deletes for all segments maxDoc=2 [2015-02-02 16:02:12,791][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] BD: applyDeletes: open segment readers took 0 msec [2015-02-02 16:02:12,791][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] BD: applyDeletes: no segments; skipping [2015-02-02 16:02:12,791][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] BD: prune sis=segments: _0(5.1.0):c2 minGen=3 packetCount=0 [2015-02-02 16:02:12,820][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IW: return reader version=3 reader=StandardDirectoryReader(segments:3:nrt _0(5.1.0):c2) [2015-02-02 16:02:12,820][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] DW: elasticsearch[Xemu][index][T#2] finishFullFlush success=true [2015-02-02 16:02:12,820][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IFD: delete new file "_0_Lucene50_0.doc" [2015-02-02 16:02:12,820][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IFD: delete "_0_Lucene50_0.doc" [2015-02-02 16:02:12,820][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IFD: delete new file "_0_Lucene50_0.tim" [2015-02-02 16:02:12,820][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IFD: delete "_0_Lucene50_0.tim" [2015-02-02 16:02:12,821][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IFD: delete new file "_0_Lucene50_0.dvd" [2015-02-02 16:02:12,821][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IFD: delete "_0_Lucene50_0.dvd" [2015-02-02 16:02:12,821][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IFD: delete new file "_0_Lucene50_0.pos" [2015-02-02 16:02:12,821][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IFD: delete "_0_Lucene50_0.pos" [2015-02-02 16:02:12,821][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IFD: delete new file "_0.nvd" [2015-02-02 16:02:12,821][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IFD: delete "_0.nvd" [2015-02-02 16:02:12,822][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IFD: delete new file "_0.fdx" [2015-02-02 16:02:12,822][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IFD: delete "_0.fdx" [2015-02-02 16:02:12,822][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IFD: delete new file "_0_Lucene50_0.dvm" [2015-02-02 16:02:12,822][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IFD: delete "_0_Lucene50_0.dvm" [2015-02-02 16:02:12,822][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IFD: delete new file "_0_Lucene50_0.tip" [2015-02-02 16:02:12,822][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IFD: delete "_0_Lucene50_0.tip" [2015-02-02 16:02:12,822][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IFD: 
delete new file "_0.fdt" [2015-02-02 16:02:12,822][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IFD: delete "_0.fdt" [2015-02-02 16:02:12,823][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IFD: delete new file "_0.nvm" [2015-02-02 16:02:12,823][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IFD: delete "_0.nvm" [2015-02-02 16:02:12,823][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IFD: delete new file "_0.fnm" [2015-02-02 16:02:12,823][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IFD: delete "_0.fnm" [2015-02-02 16:02:12,826][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] TMP: findMerges: 1 segments [2015-02-02 16:02:12,827][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] TMP: seg=_0(5.1.0):c2 size=0.002 MB [floored] [2015-02-02 16:02:12,827][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] TMP: allowedSegmentCount=1 vs count=1 (eligible count=1) tooBigCount=0 [2015-02-02 16:02:12,828][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] MS: now merge [2015-02-02 16:02:12,828][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] MS: index: _0(5.1.0):c2 [2015-02-02 16:02:12,828][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] MS: no more merges pending; now return [2015-02-02 16:02:12,828][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#2] IW: getReader took 96 msec [2015-02-02 16:02:12,860][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][flush][T#1] IW: commit: start [2015-02-02 16:02:12,860][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][flush][T#1] IW: commit: enter lock [2015-02-02 16:02:12,860][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][flush][T#1] IW: commit: now prepare [2015-02-02 16:02:12,860][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][flush][T#1] IW: prepareCommit: flush [2015-02-02 16:02:12,860][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][flush][T#1] IW: index before flush _0(5.1.0):c2 [2015-02-02 16:02:12,860][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][flush][T#1] DW: startFullFlush [2015-02-02 16:02:12,860][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][flush][T#1] IW: apply all deletes during flush [2015-02-02 16:02:12,860][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][flush][T#1] IW: now apply all deletes for all segments maxDoc=2 [2015-02-02 16:02:12,860][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][flush][T#1] BD: applyDeletes: open segment readers took 0 msec [2015-02-02 16:02:12,860][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][flush][T#1] BD: applyDeletes: no segments; skipping [2015-02-02 16:02:12,860][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][flush][T#1] BD: prune sis=segments: _0(5.1.0):c2 minGen=3 packetCount=0 [2015-02-02 
16:02:12,860][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][flush][T#1] DW: elasticsearch[Xemu][flush][T#1] finishFullFlush success=true [2015-02-02 16:02:12,861][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][flush][T#1] IW: startCommit(): start [2015-02-02 16:02:12,861][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][flush][T#1] IW: startCommit index=_0(5.1.0):c2 changeCount=5 [2015-02-02 16:02:12,862][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][flush][T#1] IW: done all syncs: [_0.cfe, _0.si, _0.cfs] [2015-02-02 16:02:12,862][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][flush][T#1] IW: commit: pendingCommit != null [2015-02-02 16:02:12,862][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][flush][T#1] IFD: now checkpoint "_0(5.1.0):c2" [1 segments ; isCommit = true] [2015-02-02 16:02:12,862][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][flush][T#1] IFD: deleteCommits: now decRef commit "segments_1" [2015-02-02 16:02:12,863][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][flush][T#1] IFD: delete "segments_1" [2015-02-02 16:02:12,863][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][flush][T#1] IFD: 0 msec to checkpoint [2015-02-02 16:02:12,863][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][flush][T#1] IW: commit: wrote segments file "segments_2" [2015-02-02 16:02:12,863][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][flush][T#1] IW: commit: took 3.0 msec [2015-02-02 16:02:12,863][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][flush][T#1] IW: commit: done [2015-02-02 16:02:12,924][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] DW: anyChanges? numDocsInRam=2 deletes=false hasTickets:false pendingChangesInFullFlush: false [2015-02-02 16:02:12,924][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IW: nrtIsCurrent: infoVersion matches: false; DW changes: true; BD changes: false [2015-02-02 16:02:12,924][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IW: flush at getReader [2015-02-02 16:02:12,924][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] DW: startFullFlush [2015-02-02 16:02:12,924][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] DW: anyChanges? 
numDocsInRam=2 deletes=false hasTickets:false pendingChangesInFullFlush: false [2015-02-02 16:02:12,924][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] DWFC: addFlushableState DocumentsWriterPerThread [pendingDeletes=gen=0, segment=_1, aborted=false, numDocsInRAM=2, deleteQueue=DWDQ: [ generation: 4 ]] [2015-02-02 16:02:12,925][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] DWPT: flush postings as segment _1 numDocs=2 [2015-02-02 16:02:12,932][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] DWPT: new segment has 0 deleted docs [2015-02-02 16:02:12,932][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] DWPT: new segment has no vectors; norms; docValues; prox; freqs [2015-02-02 16:02:12,932][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] DWPT: flushedFiles=[_1_Lucene50_0.doc, _1_Lucene50_0.tim, _1.nvd, _1_Lucene50_0.pos, _1_Lucene50_0.dvd, _1.fdx, _1.nvm, _1.fnm, _1.fdt, _1_Lucene50_0.dvm, _1_Lucene50_0.tip] [2015-02-02 16:02:12,932][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] DWPT: flushed codec=Lucene50 [2015-02-02 16:02:12,933][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] DWPT: flushed: segment=_1 ramUsed=0.112 MB newFlushedSize=0.002 MB docs/MB=1,005.828 [2015-02-02 16:02:12,933][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IW: create compound file [2015-02-02 16:02:12,937][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] DW: publishFlushedSegment seg-private updates=null [2015-02-02 16:02:12,937][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IW: publishFlushedSegment [2015-02-02 16:02:12,937][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IW: publish sets newSegment delGen=6 seg=_1(5.1.0):c2 [2015-02-02 16:02:12,937][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IFD: now checkpoint "_0(5.1.0):c2 _1(5.1.0):c2" [2 segments ; isCommit = false] [2015-02-02 16:02:12,937][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IFD: 0 msec to checkpoint [2015-02-02 16:02:12,937][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IW: apply all deletes during flush [2015-02-02 16:02:12,937][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IW: now apply all deletes for all segments maxDoc=4 [2015-02-02 16:02:12,937][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] BD: applyDeletes: open segment readers took 0 msec [2015-02-02 16:02:12,937][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] BD: applyDeletes: no segments; skipping [2015-02-02 16:02:12,938][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] BD: prune sis=segments: _0(5.1.0):c2 _1(5.1.0):c2 minGen=3 packetCount=0 [2015-02-02 16:02:12,942][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IW: return reader version=5 reader=StandardDirectoryReader(segments:5:nrt _0(5.1.0):c2 _1(5.1.0):c2) [2015-02-02 
16:02:12,942][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] DW: elasticsearch[Xemu][index][T#4] finishFullFlush success=true [2015-02-02 16:02:12,942][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IFD: delete new file "_1_Lucene50_0.doc" [2015-02-02 16:02:12,942][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IFD: delete "_1_Lucene50_0.doc" [2015-02-02 16:02:12,943][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IFD: delete new file "_1_Lucene50_0.tim" [2015-02-02 16:02:12,943][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IFD: delete "_1_Lucene50_0.tim" [2015-02-02 16:02:12,943][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IFD: delete new file "_1.nvd" [2015-02-02 16:02:12,943][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IFD: delete "_1.nvd" [2015-02-02 16:02:12,943][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IFD: delete new file "_1_Lucene50_0.pos" [2015-02-02 16:02:12,943][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IFD: delete "_1_Lucene50_0.pos" [2015-02-02 16:02:12,943][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IFD: delete new file "_1_Lucene50_0.dvd" [2015-02-02 16:02:12,943][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IFD: delete "_1_Lucene50_0.dvd" [2015-02-02 16:02:12,943][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IFD: delete new file "_1.fdx" [2015-02-02 16:02:12,943][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IFD: delete "_1.fdx" [2015-02-02 16:02:12,943][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IFD: delete new file "_1.nvm" [2015-02-02 16:02:12,944][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IFD: delete "_1.nvm" [2015-02-02 16:02:12,944][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IFD: delete new file "_1.fnm" [2015-02-02 16:02:12,944][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IFD: delete "_1.fnm" [2015-02-02 16:02:12,944][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IFD: delete new file "_1.fdt" [2015-02-02 16:02:12,944][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IFD: delete "_1.fdt" [2015-02-02 16:02:12,944][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IFD: delete new file "_1_Lucene50_0.dvm" [2015-02-02 16:02:12,944][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IFD: delete "_1_Lucene50_0.dvm" [2015-02-02 16:02:12,944][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IFD: delete new file "_1_Lucene50_0.tip" [2015-02-02 16:02:12,944][TRACE][index.engine.internal.lucene.iw.ifd] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IFD: delete "_1_Lucene50_0.tip" 
[2015-02-02 16:02:12,944][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] TMP: findMerges: 2 segments
[2015-02-02 16:02:12,945][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] TMP: seg=_0(5.1.0):c2 size=0.002 MB [floored]
[2015-02-02 16:02:12,945][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] TMP: seg=_1(5.1.0):c2 size=0.002 MB [floored]
[2015-02-02 16:02:12,945][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] TMP: allowedSegmentCount=1 vs count=2 (eligible count=2) tooBigCount=0
[2015-02-02 16:02:12,945][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] MS: now merge
[2015-02-02 16:02:12,945][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] MS: index: _0(5.1.0):c2 _1(5.1.0):c2
[2015-02-02 16:02:12,946][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] MS: no more merges pending; now return
[2015-02-02 16:02:12,946][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][index][T#4] IW: getReader took 22 msec
[2015-02-02 16:02:15,785][DEBUG][indices.memory ] [Xemu] recalculating shard indexing buffer (reason=[[ADDED]]), total is [99mb] with [1] active shards, each shard set to indexing=[99mb], translog=[64kb]
[2015-02-02 16:02:15,786][DEBUG][index.engine.internal ] [Xemu] [myindex][0] updating index_buffer_size from [64mb] to [99mb]
[2015-02-02 16:06:40,501][DEBUG][cluster.service ] [Xemu] processing [cluster_reroute (api)]: execute
[2015-02-02 16:06:40,504][DEBUG][cluster.routing.allocation.decider] [Xemu] Node [eJj0uF6YQIGQ0610E9-Rqg] has 30.184784851314088% free disk (75403067392 bytes)
[2015-02-02 16:06:40,504][DEBUG][cluster.service ] [Xemu] cluster state updated, version [10], source [cluster_reroute (api)]
[2015-02-02 16:06:40,504][DEBUG][cluster.service ] [Xemu] publishing cluster state version 10
[2015-02-02 16:06:41,316][DEBUG][cluster.service ] [Xemu] set local cluster state to version 10
[2015-02-02 16:06:41,316][DEBUG][river.cluster ] [Xemu] processing [reroute_rivers_node_changed]: execute
[2015-02-02 16:06:41,317][DEBUG][river.cluster ] [Xemu] processing [reroute_rivers_node_changed]: no change in cluster_state
[2015-02-02 16:06:41,318][DEBUG][cluster.service ] [Xemu] processing [cluster_reroute (api)]: done applying updated cluster_state (version: 10)
[2015-02-02 16:06:41,431][DEBUG][cluster.service ] [Xemu] processing [recovery_mapping_check]: execute
[2015-02-02 16:06:41,432][DEBUG][cluster.service ] [Xemu] processing [recovery_mapping_check]: no change in cluster_state
[2015-02-02 16:06:41,488][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#7] IW: commit: start
[2015-02-02 16:06:41,488][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#7] IW: commit: enter lock
[2015-02-02 16:06:41,488][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#7] IW: commit: now prepare
[2015-02-02 16:06:41,488][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#7] IW: prepareCommit: flush
[2015-02-02 16:06:41,488][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#7] IW: index before flush _0(5.1.0):c2 _1(5.1.0):c2
[2015-02-02 16:06:41,488][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#7] DW: startFullFlush
[2015-02-02 16:06:41,488][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#7] IW: apply all deletes during flush
[2015-02-02 16:06:41,488][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#7] IW: now apply all deletes for all segments maxDoc=4
[2015-02-02 16:06:41,489][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#7] BD: applyDeletes: open segment readers took 0 msec
[2015-02-02 16:06:41,489][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#7] BD: applyDeletes: no segments; skipping
[2015-02-02 16:06:41,489][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#7] BD: prune sis=segments: _0(5.1.0):c2 _1(5.1.0):c2 minGen=3 packetCount=0
[2015-02-02 16:06:41,489][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#7] DW: elasticsearch[Xemu][generic][T#7] finishFullFlush success=true
[2015-02-02 16:06:41,489][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#7] IW: startCommit(): start
[2015-02-02 16:06:41,489][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#7] IW: startCommit index=_0(5.1.0):c2 _1(5.1.0):c2 changeCount=8
[2015-02-02 16:06:41,490][TRACE][index.engine.internal.lucene.iw] [Xemu] [myindex][0] elasticsearch[Xemu][generic][T#7] IW: hit exception committing segments file
[2015-02-02 16:06:41,491][WARN ][index.engine.internal ] [Xemu] [myindex][0] failed to flush shard post recovery
org.elasticsearch.index.engine.FlushFailedEngineException: [myindex][0] Flush failed
    at org.elasticsearch.index.engine.internal.InternalEngine.flush(InternalEngine.java:799)
    at org.elasticsearch.index.engine.internal.InternalEngine$FlushingRecoveryCounter.endRecovery(InternalEngine.java:1435)
    at org.elasticsearch.index.engine.internal.RecoveryCounter.close(RecoveryCounter.java:63)
    at org.elasticsearch.common.lease.Releasables.close(Releasables.java:45)
    at org.elasticsearch.common.lease.Releasables.close(Releasables.java:60)
    at org.elasticsearch.common.lease.Releasables.close(Releasables.java:81)
    at org.elasticsearch.common.lease.Releasables.close(Releasables.java:89)
    at org.elasticsearch.index.engine.internal.InternalEngine.recover(InternalEngine.java:1048)
    at org.elasticsearch.index.shard.IndexShard.recover(IndexShard.java:673)
    at org.elasticsearch.indices.recovery.RecoverySource.recover(RecoverySource.java:120)
    at org.elasticsearch.indices.recovery.RecoverySource.access$200(RecoverySource.java:48)
    at org.elasticsearch.indices.recovery.RecoverySource$StartRecoveryTransportRequestHandler.messageReceived(RecoverySource.java:141)
    at org.elasticsearch.indices.recovery.RecoverySource$StartRecoveryTransportRequestHandler.messageReceived(RecoverySource.java:127)
    at org.elasticsearch.transport.netty.MessageChannelHandler$RequestHandler.doRun(MessageChannelHandler.java:276)
    at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:36)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
Caused by: java.nio.file.NoSuchFileException: /private/tmp/foo/myindex/0/index/_1.cfs
    at sun.nio.fs.UnixException.translateToIOException(UnixException.java:86)
    at sun.nio.fs.UnixException.rethrowAsIOException(UnixException.java:102)
    at sun.nio.fs.UnixException.rethrowAsIOException(UnixException.java:107)
    at sun.nio.fs.UnixFileSystemProvider.newFileChannel(UnixFileSystemProvider.java:177)
    at java.nio.channels.FileChannel.open(FileChannel.java:287)
    at java.nio.channels.FileChannel.open(FileChannel.java:335)
    at org.apache.lucene.util.IOUtils.fsync(IOUtils.java:392)
    at org.apache.lucene.store.FSDirectory.fsync(FSDirectory.java:281)
    at org.apache.lucene.store.FSDirectory.sync(FSDirectory.java:226)
    at org.apache.lucene.store.FilterDirectory.sync(FilterDirectory.java:78)
    at org.apache.lucene.store.FilterDirectory.sync(FilterDirectory.java:78)
    at org.apache.lucene.store.FilterDirectory.sync(FilterDirectory.java:78)
    at org.apache.lucene.index.IndexWriter.startCommit(IndexWriter.java:4284)
    at org.apache.lucene.index.IndexWriter.prepareCommitInternal(IndexWriter.java:2721)
    at org.apache.lucene.index.IndexWriter.commitInternal(IndexWriter.java:2824)
    at org.apache.lucene.index.IndexWriter.commit(IndexWriter.java:2791)
    at org.elasticsearch.index.engine.internal.InternalEngine.flush(InternalEngine.java:785)
    ... 17 more
[2015-02-02 16:06:41,498][DEBUG][cluster.action.shard ] [Xemu] received shard started for [myindex][0], node[KaR0Q_QFSs2Capu6kCBPaw], relocating [MqFW42-ZT8G6AL8zR2krTg], [R], s[INITIALIZING], indexUUID [HjGOiD2dTRiSDZFuWdyf8A], reason [after recovery (replica) from node [[Xemu][eJj0uF6YQIGQ0610E9-Rqg][Xanadu.local][inet[/10.0.0.16:9300]]{add_id_to_custom_path=false, enable_custom_paths=true}]]
[2015-02-02 16:06:41,498][DEBUG][cluster.service ] [Xemu] processing [shard-started ([myindex][0], node[KaR0Q_QFSs2Capu6kCBPaw], relocating [MqFW42-ZT8G6AL8zR2krTg], [R], s[INITIALIZING]), reason [after recovery (replica) from node [[Xemu][eJj0uF6YQIGQ0610E9-Rqg][Xanadu.local][inet[/10.0.0.16:9300]]{add_id_to_custom_path=false, enable_custom_paths=true}]]]: execute
[2015-02-02 16:06:41,498][DEBUG][cluster.action.shard ] [Xemu] [myindex][0] will apply shard started [myindex][0], node[KaR0Q_QFSs2Capu6kCBPaw], relocating [MqFW42-ZT8G6AL8zR2krTg], [R], s[INITIALIZING], indexUUID [HjGOiD2dTRiSDZFuWdyf8A], reason [after recovery (replica) from node [[Xemu][eJj0uF6YQIGQ0610E9-Rqg][Xanadu.local][inet[/10.0.0.16:9300]]{add_id_to_custom_path=false, enable_custom_paths=true}]]
[2015-02-02 16:06:41,498][DEBUG][cluster.routing.allocation.decider] [Xemu] Node [eJj0uF6YQIGQ0610E9-Rqg] has 30.184784851314088% free disk (75403067392 bytes)
[2015-02-02 16:06:41,499][DEBUG][cluster.routing.allocation.decider] [Xemu] Node [KaR0Q_QFSs2Capu6kCBPaw] has 30.184784851314088% free disk (75403067392 bytes)
[2015-02-02 16:06:41,499][DEBUG][cluster.service ] [Xemu] cluster state updated, version [11], source [shard-started ([myindex][0], node[KaR0Q_QFSs2Capu6kCBPaw], relocating [MqFW42-ZT8G6AL8zR2krTg], [R], s[INITIALIZING]), reason [after recovery (replica) from node [[Xemu][eJj0uF6YQIGQ0610E9-Rqg][Xanadu.local][inet[/10.0.0.16:9300]]{add_id_to_custom_path=false, enable_custom_paths=true}]]]
[2015-02-02 16:06:41,499][DEBUG][cluster.service ] [Xemu] publishing cluster state version 11
[2015-02-02 16:06:41,533][DEBUG][cluster.service ] [Xemu] set local cluster state to version 11
[2015-02-02 16:06:41,533][DEBUG][river.cluster ] [Xemu] processing [reroute_rivers_node_changed]: execute
[2015-02-02 16:06:41,533][DEBUG][river.cluster ] [Xemu] processing [reroute_rivers_node_changed]: no change in cluster_state
[2015-02-02 16:06:41,534][DEBUG][cluster.service ] [Xemu] processing [shard-started ([myindex][0], node[KaR0Q_QFSs2Capu6kCBPaw], relocating [MqFW42-ZT8G6AL8zR2krTg], [R], s[INITIALIZING]), reason [after recovery (replica) from node [[Xemu][eJj0uF6YQIGQ0610E9-Rqg][Xanadu.local][inet[/10.0.0.16:9300]]{add_id_to_custom_path=false, enable_custom_paths=true}]]]: done applying updated cluster_state (version: 11) [2015-02-02 16:08:42,464][DEBUG][cluster.service ] [Xemu] processing [zen-disco-node_left([Flambe][MqFW42-ZT8G6AL8zR2krTg][Xanadu.local][inet[/10.0.0.16:9302]]{add_id_to_custom_path=false, enable_custom_paths=true})]: execute [2015-02-02 16:08:42,464][DEBUG][cluster.routing.allocation.decider] [Xemu] Node [eJj0uF6YQIGQ0610E9-Rqg] has 30.18413225879438% free disk (75401437184 bytes) [2015-02-02 16:08:42,464][DEBUG][cluster.routing.allocation.decider] [Xemu] Node [KaR0Q_QFSs2Capu6kCBPaw] has 30.18413225879438% free disk (75401437184 bytes) [2015-02-02 16:08:42,465][DEBUG][cluster.service ] [Xemu] cluster state updated, version [12], source [zen-disco-node_left([Flambe][MqFW42-ZT8G6AL8zR2krTg][Xanadu.local][inet[/10.0.0.16:9302]]{add_id_to_custom_path=false, enable_custom_paths=true})] [2015-02-02 16:08:42,465][INFO ][cluster.service ] [Xemu] removed {[Flambe][MqFW42-ZT8G6AL8zR2krTg][Xanadu.local][inet[/10.0.0.16:9302]]{add_id_to_custom_path=false, enable_custom_paths=true},}, reason: zen-disco-node_left([Flambe][MqFW42-ZT8G6AL8zR2krTg][Xanadu.local][inet[/10.0.0.16:9302]]{add_id_to_custom_path=false, enable_custom_paths=true}) [2015-02-02 16:08:42,465][DEBUG][cluster.service ] [Xemu] publishing cluster state version 12 [2015-02-02 16:08:42,478][DEBUG][transport.netty ] [Xemu] disconnecting from [[Flambe][MqFW42-ZT8G6AL8zR2krTg][Xanadu.local][inet[/10.0.0.16:9302]]{add_id_to_custom_path=false, enable_custom_paths=true}], channel closed event [2015-02-02 16:08:42,479][DEBUG][cluster.service ] [Xemu] set local cluster state to version 12 [2015-02-02 16:08:42,480][DEBUG][river.cluster ] [Xemu] processing [reroute_rivers_node_changed]: execute [2015-02-02 16:08:42,480][DEBUG][river.cluster ] [Xemu] processing [reroute_rivers_node_changed]: no change in cluster_state [2015-02-02 16:08:42,486][DEBUG][cluster.service ] [Xemu] processing [zen-disco-node_left([Flambe][MqFW42-ZT8G6AL8zR2krTg][Xanadu.local][inet[/10.0.0.16:9302]]{add_id_to_custom_path=false, enable_custom_paths=true})]: done applying updated cluster_state (version: 12) [2015-02-02 16:08:42,486][DEBUG][cluster.service ] [Xemu] processing [routing-table-updater]: execute [2015-02-02 16:08:42,486][DEBUG][cluster.routing.allocation.decider] [Xemu] Node [eJj0uF6YQIGQ0610E9-Rqg] has 30.18413225879438% free disk (75401437184 bytes) [2015-02-02 16:08:42,486][DEBUG][cluster.routing.allocation.decider] [Xemu] Node [KaR0Q_QFSs2Capu6kCBPaw] has 30.18413225879438% free disk (75401437184 bytes) [2015-02-02 16:08:42,486][DEBUG][cluster.service ] [Xemu] processing [routing-table-updater]: no change in cluster_state [2015-02-02 16:08:50,236][DEBUG][transport.netty ] [Xemu] connected to node [[Enchantress][Z1Wa4ztSR_izGw0svk2vwQ][Xanadu.local][inet[/10.0.0.16:9302]]{add_id_to_custom_path=false, enable_custom_paths=true}] [2015-02-02 16:08:53,254][DEBUG][cluster.service ] [Xemu] processing [zen-disco-receive(join from node[[Enchantress][Z1Wa4ztSR_izGw0svk2vwQ][Xanadu.local][inet[/10.0.0.16:9302]]{add_id_to_custom_path=false, enable_custom_paths=true}])]: execute [2015-02-02 16:08:53,254][DEBUG][cluster.service ] [Xemu] cluster state updated, version 
[13], source [zen-disco-receive(join from node[[Enchantress][Z1Wa4ztSR_izGw0svk2vwQ][Xanadu.local][inet[/10.0.0.16:9302]]{add_id_to_custom_path=false, enable_custom_paths=true}])] [2015-02-02 16:08:53,255][INFO ][cluster.service ] [Xemu] added {[Enchantress][Z1Wa4ztSR_izGw0svk2vwQ][Xanadu.local][inet[/10.0.0.16:9302]]{add_id_to_custom_path=false, enable_custom_paths=true},}, reason: zen-disco-receive(join from node[[Enchantress][Z1Wa4ztSR_izGw0svk2vwQ][Xanadu.local][inet[/10.0.0.16:9302]]{add_id_to_custom_path=false, enable_custom_paths=true}]) [2015-02-02 16:08:53,255][DEBUG][cluster.service ] [Xemu] publishing cluster state version 13 [2015-02-02 16:08:53,312][DEBUG][cluster.service ] [Xemu] set local cluster state to version 13 [2015-02-02 16:08:53,313][DEBUG][cluster ] [Xemu] data node was added, retrieving new cluster info [2015-02-02 16:08:53,314][DEBUG][cluster.service ] [Xemu] processing [zen-disco-receive(join from node[[Enchantress][Z1Wa4ztSR_izGw0svk2vwQ][Xanadu.local][inet[/10.0.0.16:9302]]{add_id_to_custom_path=false, enable_custom_paths=true}])]: done applying updated cluster_state (version: 13) [2015-02-02 16:08:53,314][DEBUG][river.cluster ] [Xemu] processing [reroute_rivers_node_changed]: execute [2015-02-02 16:08:53,315][DEBUG][river.cluster ] [Xemu] processing [reroute_rivers_node_changed]: no change in cluster_state [2015-02-02 16:08:59,950][DEBUG][cluster.service ] [Xemu] processing [routing-table-updater]: execute [2015-02-02 16:08:59,951][DEBUG][cluster.routing.allocation.decider] [Xemu] Node [eJj0uF6YQIGQ0610E9-Rqg] has 30.183963371785516% free disk (75401015296 bytes) [2015-02-02 16:08:59,951][DEBUG][cluster.routing.allocation.decider] [Xemu] Node [KaR0Q_QFSs2Capu6kCBPaw] has 30.183963371785516% free disk (75401015296 bytes) [2015-02-02 16:08:59,951][DEBUG][cluster.service ] [Xemu] processing [routing-table-updater]: no change in cluster_state