diff --git a/CHANGES.md b/CHANGES.md index 694ce98a..7cadab4d 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,25 @@ Changes to mtools ================= +#### version 1.1.6 + + * mlogfilter: `--thread` now also matches "connection accepted" lines for that connection (#218, #219) + * mlogfilter: fixed bug that would print milliseconds in timestamp twice in 2.6 format for UTC timezone (#241) + * mlaunch: allow overriding hostname for replica set setup (#256) + * mlaunch: added a `restart` command (#253) + * mlaunch: added `--startup` to `list` command to show all startup strings (#257) + * mlaunch: aliased `--verbose` (now depricated) as `--tags` (#257) + * mloginfo: added `--rsstate` option to show all replica set state changes in log file. Added by @jimoleary (#228) + * mloginfo: fixed issues with 95-percentile calculation. Added by @gianpaj (#238) + * mloginfo: show host name and port if available (#247) + * mloginfo: fixed bug where empty lines can't be parsed (#213) + * mloginfo: show milliseconds for start/end (#245) + * mloginfo: made numpy dependency optional for mloginfo. Added by @brondsem (#216) + * mplotqueries: option to write output to image file instead of interactive mode. Added by @dpercy (#266) + * mplotqueries: show correct timezone for time axis (#274) + * mplotqueries: added option to allow fixing y-axis to specific min/max values (#214) + + #### version 1.1.5 * added workaround for compile errors with XCode 5.1 / clang 3.4 (#203) diff --git a/README.md b/README.md index 9e899673..07ea2207 100644 --- a/README.md +++ b/README.md @@ -46,7 +46,7 @@ instructions for these modules. Recent Changes -------------- -The current version of mtools is 1.1.5. See [CHANGES.md](./CHANGES.md) for a list of recent changes from previous versions of mtools. +The current version of mtools is 1.1.6. See [CHANGES.md](./CHANGES.md) for a list of recent changes from previous versions of mtools. Contribute to mtools diff --git a/mtools/mgenerate/mgenerate.py b/mtools/mgenerate/mgenerate.py index 457cf9d1..ae986a3c 100644 --- a/mtools/mgenerate/mgenerate.py +++ b/mtools/mgenerate/mgenerate.py @@ -4,6 +4,7 @@ import bson import sys import inspect +from datetime import datetime from multiprocessing import Process, cpu_count try: @@ -20,17 +21,31 @@ from mtools.util.cmdlinetool import BaseCmdLineTool +class DateTimeEncoder(json.JSONEncoder): + """ custom datetime encoder for json output. """ + def default(self, obj): + if isinstance(obj, datetime): + return obj.isoformat() + try: + res = json.JSONEncoder.default(self, obj) + except TypeError: + res = str(obj) + return res + + class InsertProcess(Process): - def __init__(self, number, template, collection, stdout): + operator_classes = inspect.getmembers(operators, inspect.isclass) + + def __init__(self, number, template, collection, args): Process.__init__(self) self.number = number self.template = template self.collection = collection - self.stdout = stdout + self.args = args # add all operators classes from the operators module, pass in _decode method - self.operators = [c[1](self._decode) for c in inspect.getmembers(operators, inspect.isclass)] + self.operators = [c[1](self._decode) for c in self.operator_classes] self.string_operators = {} self.dict_operators = {} @@ -52,8 +67,10 @@ def run(self): # decode the template doc = self._decode(self.template) - if self.stdout: - print doc + if not self.collection: + indent = 4 if self.args['pretty'] else None + print json.dumps(doc, cls=DateTimeEncoder, indent=indent, ensure_ascii=False) + else: batch.append(doc) batchsize += self.bsonsize(doc) @@ -63,7 +80,7 @@ def run(self): batch = [] batchsize = 0 - if not self.stdout: + if self.collection: if batch: self.collection.insert(batch) @@ -145,8 +162,9 @@ def __init__(self): self.argparser.add_argument('--collection', '-c', action='store', metavar='C', default='mgendata', help='collection C to import data, default=mgendata') self.argparser.add_argument('--drop', action='store_true', default=False, help='drop collection before inserting data') self.argparser.add_argument('--stdout', action='store_true', default=False, help='prints data to stdout instead of inserting to mongod/s instance.') + self.argparser.add_argument('--pretty', action='store_true', default=False, help="if set, prettyfies the output to stdout (indented), requires --stdout") self.argparser.add_argument('--write-concern', '-w', action='store', metavar="W", default=1, help='write concern for inserts, default=1') - + self.argparser.add_argument('--processes', '-p', action='store', type=int, default=0, help='specify number of processes (# cpus by default)') def run(self, arguments=None): BaseCmdLineTool.run(self, arguments) @@ -174,16 +192,24 @@ def run(self, arguments=None): col = mc[self.args['database']][self.args['collection']] if self.args['drop']: col.drop() + else: + col = None # divide work over number of cores - num_cores = 1 if self.args['stdout'] else cpu_count() + if self.args['stdout']: + num_cores = 1 + elif self.args['processes'] > 0: + num_cores = self.args['processes'] + else: + num_cores = cpu_count() + num_list = [self.args['number'] // num_cores] * num_cores num_list[0] += self.args['number'] % num_cores processes = [] for n in num_list: - p = InsertProcess(n, template, col, self.args['stdout']) + p = InsertProcess(n, template, col, self.args) p.start() processes.append(p) diff --git a/mtools/mgenerate/operators/__init__.py b/mtools/mgenerate/operators/__init__.py index d4fad68e..8f7ab2e8 100644 --- a/mtools/mgenerate/operators/__init__.py +++ b/mtools/mgenerate/operators/__init__.py @@ -1 +1 @@ -from operators import ChooseOperator, MissingOperator, NumberOperator, FloatOperator, ObjectIdOperator, ArrayOperator, DateTimeOperator, StringOperator, IncOperator \ No newline at end of file +from operators import ChooseOperator, MissingOperator, NumberOperator, FloatOperator, ObjectIdOperator, ArrayOperator, DateTimeOperator, StringOperator, IncOperator, CoordinateOperator, PointOperator \ No newline at end of file diff --git a/mtools/mgenerate/operators/operators.py b/mtools/mgenerate/operators/operators.py index b0397e8b..a044a0c9 100644 --- a/mtools/mgenerate/operators/operators.py +++ b/mtools/mgenerate/operators/operators.py @@ -8,6 +8,7 @@ import time import string +import itertools class BaseOperator(object): @@ -19,7 +20,6 @@ class BaseOperator(object): def __init__(self, decode_method): self._decode = decode_method - def _parse_options(self, options={}): parsed = self.defaults.copy() @@ -84,19 +84,25 @@ def __call__(self, options=None): return val - class IncOperator(BaseOperator): - dict_format = False + dict_format = True string_format = True names = ['$inc'] - value = -1 + defaults = OrderedDict([ ('start', 0), ('step', 1) ]) + + def __init__(self, decode_method): + self.counter = None + BaseOperator.__init__(self, decode_method) def __call__(self, options=None): options = self._parse_options(options) + + # initialize counter on first use (not threadsafe!) + if self.counter == None: + self.counter = itertools.count(options['start'], options['step']) - self.value += 1 - return self.value + return self.counter.next() class StringOperator(BaseOperator): @@ -193,6 +199,42 @@ def __call__(self, options=None): return [ options['of'] ] * number +class CoordinateOperator(BaseOperator): + + dict_format = True + string_format = True + names = ['$coordinates', '$coordinate', '$coord'] + defaults = OrderedDict([ ('long_lim', [-180, 180]), ('lat_lim', [-90, 90]) ]) + + def __call__(self, options=None): + options = self._parse_options(options) + + # evaluate limits + long_lim = self._decode(options['long_lim']) + lat_lim = self._decode(options['lat_lim']) + + # return coordinate by using random numbers between limits + return [ {"$float": long_lim }, {"$float": lat_lim }] + + +class PointOperator(BaseOperator): + + dict_format = True + string_format = True + names = ['$point'] + defaults = OrderedDict([ ('long_lim', [-180, 180]), ('lat_lim', [-90, 90]) ]) + + def __call__(self, options=None): + options = self._parse_options(options) + + # evaluate limits + long_lim = self._decode(options['long_lim']) + lat_lim = self._decode(options['lat_lim']) + + # return coordinate by using random numbers between limits + return { "type": "Point", "coordinates": { "$coord": [long_lim, lat_lim] } } + + class DateTimeOperator(BaseOperator): dict_format = True diff --git a/mtools/mlaunch/mlaunch.py b/mtools/mlaunch/mlaunch.py index 5c94062b..dc715d70 100755 --- a/mtools/mlaunch/mlaunch.py +++ b/mtools/mlaunch/mlaunch.py @@ -13,7 +13,9 @@ import signal from collections import defaultdict -from operator import itemgetter +from mtools.util import OrderedDict + +from operator import itemgetter, eq from mtools.util.cmdlinetool import BaseCmdLineTool from mtools.util.print_table import print_table @@ -96,8 +98,6 @@ class MLaunchTool(BaseCmdLineTool): def __init__(self): BaseCmdLineTool.__init__(self) - self.hostname = socket.gethostname() - # arguments self.args = None @@ -166,6 +166,7 @@ def run(self, arguments=None): init_parser.add_argument('--port', action='store', type=int, default=27017, help='port for mongod, start of port range in case of replica set or shards (default=27017)') init_parser.add_argument('--binarypath', action='store', default=None, metavar='PATH', help='search for mongod/s binaries in the specified PATH.') init_parser.add_argument('--dir', action='store', default='./data', help='base directory to create db and log paths (default=./data/)') + init_parser.add_argument('--hostname', action='store', default=socket.gethostname(), help='override hostname for replica set configuration') # authentication, users, roles self._default_auth_roles = ['dbAdminAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase', 'clusterAdmin'] @@ -189,12 +190,22 @@ def run(self, arguments=None): stop_parser.add_argument('tags', metavar='TAG', action='store', nargs='*', default=[], help='without tags, all running nodes will be stopped. Provide additional tags to narrow down the set of nodes to stop.') stop_parser.add_argument('--verbose', action='store_true', default=False, help='outputs more verbose information.') stop_parser.add_argument('--dir', action='store', default='./data', help='base directory to stop nodes (default=./data/)') + + # restart command + restart_parser = subparsers.add_parser('restart', help='stops, then restarts MongoDB instances.', + description='stops running MongoDB instances with the shutdown command. Then restarts the stopped instances.') + restart_parser.add_argument('tags', metavar='TAG', action='store', nargs='*', default=[], help='without tags, all non-running nodes will be restarted. Provide additional tags to narrow down the set of nodes to start.') + restart_parser.add_argument('--verbose', action='store_true', default=False, help='outputs more verbose information.') + restart_parser.add_argument('--dir', action='store', default='./data', help='base directory to restart nodes (default=./data/)') + restart_parser.add_argument('--binarypath', action='store', default=None, metavar='PATH', help='search for mongod/s binaries in the specified PATH.') # list command list_parser = subparsers.add_parser('list', help='list MongoDB instances of this environment.', description='list MongoDB instances of this environment.') list_parser.add_argument('--dir', action='store', default='./data', help='base directory to list nodes (default=./data/)') - list_parser.add_argument('--verbose', action='store_true', default=False, help='outputs more verbose information.') + list_parser.add_argument('--tags', action='store_true', default=False, help='outputs the tags for each instance. Tags can be used to target instances for start/stop/kill.') + list_parser.add_argument('--startup', action='store_true', default=False, help='outputs the startup command lines for each instance.') + list_parser.add_argument('--verbose', action='store_true', default=False, help='alias for --tags.') # list command kill_parser = subparsers.add_parser('kill', help='kills (or sends another signal to) MongoDB instances of this environment.', @@ -222,7 +233,7 @@ def run(self, arguments=None): getattr(self, self.args['command'])() - # -- below are the main commands: init, start, stop, list + # -- below are the main commands: init, start, stop, list, kill def init(self): """ sub-command init. Branches out to sharded, replicaset or single node methods. """ @@ -480,7 +491,7 @@ def list(self): # mongos for node in sorted(self.get_tagged(['mongos'])): - doc = {'process':'mongos', 'port':node, 'status': 'running' if self.cluster_running[node] else 'down'} + doc = OrderedDict([ ('process','mongos'), ('port',node), ('status','running' if self.cluster_running[node] else 'down') ]) print_docs.append( doc ) if len(self.get_tagged(['mongos'])) > 0: @@ -488,7 +499,7 @@ def list(self): # configs for node in sorted(self.get_tagged(['config'])): - doc = {'process':'config server', 'port':node, 'status': 'running' if self.cluster_running[node] else 'down'} + doc = OrderedDict([ ('process','config server'), ('port',node), ('status','running' if self.cluster_running[node] else 'down') ]) print_docs.append( doc ) if len(self.get_tagged(['config'])) > 0: @@ -510,12 +521,12 @@ def list(self): primary = self.get_tagged(tags + ['primary', 'running']) if len(primary) > 0: node = list(primary)[0] - print_docs.append( {'process':padding+'primary', 'port':node, 'status': 'running' if self.cluster_running[node] else 'down'} ) + print_docs.append( OrderedDict([ ('process', padding+'primary'), ('port', node), ('status', 'running' if self.cluster_running[node] else 'down') ]) ) # secondaries secondaries = self.get_tagged(tags + ['secondary', 'running']) for node in sorted(secondaries): - print_docs.append( {'process':padding+'secondary', 'port':node, 'status': 'running' if self.cluster_running[node] else 'down'} ) + print_docs.append( OrderedDict([ ('process', padding+'secondary'), ('port', node), ('status', 'running' if self.cluster_running[node] else 'down') ]) ) # data-bearing nodes that are down or not in the replica set yet mongods = self.get_tagged(tags + ['mongod']) @@ -523,27 +534,45 @@ def list(self): nodes = sorted(mongods - primary - secondaries - arbiters) for node in nodes: - print_docs.append( {'process':padding+'mongod', 'port':node, 'status': 'running' if self.cluster_running[node] else 'down'}) + print_docs.append( OrderedDict([ ('process', padding+'mongod'), ('port', node), ('status', 'running' if self.cluster_running[node] else 'down') ]) ) # arbiters for node in arbiters: - print_docs.append( {'process':padding+'arbiter', 'port':node, 'status': 'running' if self.cluster_running[node] else 'down'} ) + print_docs.append( OrderedDict([ ('process', padding+'arbiter'), ('port', node), ('status', 'running' if self.cluster_running[node] else 'down') ]) ) else: nodes = self.get_tagged(tags + ['mongod']) if len(nodes) > 0: node = nodes.pop() - print_docs.append( {'process':padding+'single', 'port':node, 'status': 'running' if self.cluster_running[node] else 'down'} ) + print_docs.append( OrderedDict([ ('process', padding+'single'), ('port', node), ('status', 'running' if self.cluster_running[node] else 'down') ]) ) if shard: print_docs.append(None) - if self.args['verbose']: - # print tags as well - for doc in filter(lambda x: type(x) == dict, print_docs): + processes = self._get_processes() + startup = self.startup_info + + # print tags as well + for doc in filter(lambda x: type(x) == OrderedDict, print_docs): + try: + doc['pid'] = processes[doc['port']].pid + except KeyError: + doc['pid'] = '-' + + if self.args['verbose'] or self.args['tags']: tags = self.get_tags_of_port(doc['port']) doc['tags'] = ', '.join(tags) + if self.args['startup']: + try: + # first try running process (startup may be modified via start command) + doc['startup command'] = ' '.join(processes[doc['port']].cmdline) + except KeyError: + # if not running, use stored startup_info + doc['startup command'] = startup[str(doc['port'])] + + + print_docs.append( None ) print print_table(print_docs) @@ -588,6 +617,22 @@ def kill(self): self.discover() + def restart(self): + # stop nodes via stop command + self.stop() + + # there is a very brief period in which nodes are not reachable anymore, but the + # port is not torn down fully yet and an immediate start command would fail. This + # very short sleep prevents that case, and it is practically not noticable by users + time.sleep(0.1) + + # refresh discover + self.discover() + + # start nodes again via start command + self.start() + + # --- below are api helper methods, can be called after creating an MLaunchTool() object @@ -834,6 +879,8 @@ def _load_parameters(self): in_dict['protocol_version'] = 1 self.loaded_args = in_dict self.startup_info = {} + # hostname was added recently + self.loaded_args['hostname'] = socket.gethostname() elif in_dict['protocol_version'] == 2: self.startup_info = in_dict['startup_info'] @@ -1032,8 +1079,8 @@ def _add_user(self, port, name, password, database, roles): def _get_processes(self): - all_ports = self.get_tagged('all') - + all_ports = self.get_tagged('running') + process_dict = {} for p in psutil.process_iter(): @@ -1041,13 +1088,17 @@ def _get_processes(self): if p.name not in ['mongos', 'mongod']: continue - # find first TCP listening port - ports = [con.laddr[1] for con in p.get_connections(kind='tcp') if con.status=='LISTEN'] - if len(ports) > 0: - port = min(ports) - else: - continue - + port = None + for possible_port in self.startup_info: + # compare ports based on command line argument + startup = self.startup_info[possible_port].split() + p_port = p.cmdline[p.cmdline.index('--port')+1] + startup_port = startup[startup.index('--port')+1] + + if str(p_port) == str(startup_port): + port = int(possible_port) + break + # only consider processes belonging to this environment if port in all_ports: process_dict[port] = p @@ -1119,7 +1170,7 @@ def _construct_sharded(self): for name in config_names: self._construct_config(self.dir, nextport, name) - config_string.append('%s:%i'%(self.hostname, nextport)) + config_string.append('%s:%i'%(self.args['hostname'], nextport)) nextport += 1 # multiple mongos use /mongos/ as subdir for log files @@ -1151,7 +1202,7 @@ def _construct_replset(self, basedir, portstart, name): datapath = self._create_paths(basedir, '%s/rs%i'%(name, i+1)) self._construct_mongod(os.path.join(datapath, 'db'), os.path.join(datapath, 'mongod.log'), portstart+i, replset=name) - host = '%s:%i'%(self.hostname, portstart+i) + host = '%s:%i'%(self.args['hostname'], portstart+i) self.config_docs[name]['members'].append({'_id':len(self.config_docs[name]['members']), 'host':host, 'votes':int(len(self.config_docs[name]['members']) < 7 - int(self.args['arbiter']))}) # launch arbiter if True @@ -1159,7 +1210,7 @@ def _construct_replset(self, basedir, portstart, name): datapath = self._create_paths(basedir, '%s/arb'%(name)) self._construct_mongod(os.path.join(datapath, 'db'), os.path.join(datapath, 'mongod.log'), portstart+self.args['nodes'], replset=name) - host = '%s:%i'%(self.hostname, portstart+self.args['nodes']) + host = '%s:%i'%(self.args['hostname'], portstart+self.args['nodes']) self.config_docs[name]['members'].append({'_id':len(self.config_docs[name]['members']), 'host':host, 'arbiterOnly': True}) return name + '/' + ','.join([c['host'] for c in self.config_docs[name]['members']]) @@ -1178,7 +1229,7 @@ def _construct_single(self, basedir, port, name=None): datapath = self._create_paths(basedir, name) self._construct_mongod(os.path.join(datapath, 'db'), os.path.join(datapath, 'mongod.log'), port, replset=None) - host = '%s:%i'%(self.hostname, port) + host = '%s:%i'%(self.args['hostname'], port) return host @@ -1199,7 +1250,7 @@ def _construct_mongod(self, dbpath, logpath, port, replset=None, extra=''): extra = self._filter_valid_arguments(self.unknown_args, "mongod", config=config) + ' ' + extra path = self.args['binarypath'] or '' - command_str = "%s %s --dbpath %s --logpath %s --port %i --logappend %s %s --fork"%(os.path.join(path, 'mongod'), rs_param, dbpath, logpath, port, auth_param, extra) + command_str = "%s %s --dbpath %s --logpath %s --port %i --logappend --fork %s %s"%(os.path.join(path, 'mongod'), rs_param, dbpath, logpath, port, auth_param, extra) # store parameters in startup_info self.startup_info[str(port)] = command_str diff --git a/mtools/mlogfilter/filters/__init__.py b/mtools/mlogfilter/filters/__init__.py index c11fff01..6b8a956c 100644 --- a/mtools/mlogfilter/filters/__init__.py +++ b/mtools/mlogfilter/filters/__init__.py @@ -4,4 +4,4 @@ from logline_filter import LogLineFilter from tablescan_filter import TableScanFilter from word_filter import WordFilter -from mask_filter import MaskFilter \ No newline at end of file +from mask_filter import MaskFilter \ No newline at end of file diff --git a/mtools/mlogfilter/filters/logline_filter.py b/mtools/mlogfilter/filters/logline_filter.py index c375bc44..7766b60a 100644 --- a/mtools/mlogfilter/filters/logline_filter.py +++ b/mtools/mlogfilter/filters/logline_filter.py @@ -1,6 +1,6 @@ from mtools.util.logevent import LogEvent from mtools.util.pattern import json2pattern - +import re from base_filter import BaseFilter class LogLineFilter(BaseFilter): @@ -41,8 +41,9 @@ def accept(self, logevent): return False if self.operation and logevent.operation != self.operation: return False - if self.thread and logevent.thread != self.thread: - return False + if self.thread: + if logevent.thread != self.thread and logevent.conn != self.thread: + return False if self.pattern and logevent.pattern != self.pattern: return False return True diff --git a/mtools/mlogfilter/mlogfilter.py b/mtools/mlogfilter/mlogfilter.py index 59c399e7..bb29c859 100755 --- a/mtools/mlogfilter/mlogfilter.py +++ b/mtools/mlogfilter/mlogfilter.py @@ -281,7 +281,6 @@ def run(self, arguments=None): if sys.stdin.isatty(): break - if __name__ == '__main__': tool = MLogFilterTool() diff --git a/mtools/mloginfo/mloginfo.py b/mtools/mloginfo/mloginfo.py index 921ae052..b198d1be 100644 --- a/mtools/mloginfo/mloginfo.py +++ b/mtools/mloginfo/mloginfo.py @@ -32,9 +32,19 @@ def run(self, arguments=None): print ' ------------------------------------------' print + if self.logfile.datetime_format == 'ctime-pre2.4': + # no milliseconds when datetime format doesn't support it + start_time = self.logfile.start.strftime("%Y %b %d %H:%M:%S") if self.logfile.start else "unknown" + end_time = self.logfile.end.strftime("%Y %b %d %H:%M:%S") if self.logfile.start else "unknown" + else: + # include milliseconds + start_time = self.logfile.start.strftime("%Y %b %d %H:%M:%S.%f")[:-3] if self.logfile.start else "unknown" + end_time = self.logfile.end.strftime("%Y %b %d %H:%M:%S.%f")[:-3] if self.logfile.start else "unknown" + print " source: %s" % self.logfile.name - print " start: %s" % (self.logfile.start.strftime("%Y %b %d %H:%M:%S") if self.logfile.start else "unknown") - print " end: %s" % (self.logfile.end.strftime("%Y %b %d %H:%M:%S") if self.logfile.start else "unknown") + print " host: %s" % (self.logfile.hostname + ':' + self.logfile.port if self.logfile.hostname else "unknown") + print " start: %s" % (start_time) + print " end: %s" % (end_time) # TODO: add timezone if iso8601 format print "date format: %s" % self.logfile.datetime_format @@ -50,7 +60,8 @@ def run(self, arguments=None): version = '< 2.4 (no milliseconds)' elif self.logfile.datetime_format == 'ctime': version = '>= 2.4 (milliseconds present)' - elif self.logfile.datetime_format.startswith('iso8601-'): + elif self.logfile.datetime_format == "iso8601-utc" or \ + self.logfile.datetime_format == "iso8601-local": version = '>= 2.6 (iso8601 format)' print " version: %s" % version, diff --git a/mtools/mloginfo/sections/__init__.py b/mtools/mloginfo/sections/__init__.py index 9d370e98..389a2007 100644 --- a/mtools/mloginfo/sections/__init__.py +++ b/mtools/mloginfo/sections/__init__.py @@ -2,3 +2,4 @@ from distinct_section import DistinctSection from connection_section import ConnectionSection from query_section import QuerySection +from rs_state_section import RsStateSection diff --git a/mtools/mloginfo/sections/query_section.py b/mtools/mloginfo/sections/query_section.py index 41997420..dae402d4 100644 --- a/mtools/mloginfo/sections/query_section.py +++ b/mtools/mloginfo/sections/query_section.py @@ -6,8 +6,14 @@ from mtools.util import OrderedDict from operator import itemgetter +from collections import namedtuple -import numpy as np +try: + import numpy as np +except ImportError: + np = None + +LogTuple = namedtuple('LogTuple', ['namespace', 'pattern', 'duration']) class QuerySection(BaseSection): """ @@ -39,7 +45,6 @@ def run(self): else: self.mloginfo.progress_bar_enabled = False - for i, le in enumerate(logfile): # update progress bar every 1000 lines if self.mloginfo.progress_bar_enabled and (i % 1000 == 0): @@ -48,7 +53,8 @@ def run(self): self.mloginfo.update_progress(float(progress_curr-progress_start) / progress_total) if le.operation in ['query', 'update', 'remove']: - grouping.add(le) + lt = LogTuple(namespace=le.namespace, pattern=le.pattern, duration=le.duration) + grouping.add(lt) grouping.sort_by_size() @@ -56,8 +62,13 @@ def run(self): if self.mloginfo.progress_bar_enabled: self.mloginfo.update_progress(1.0) + # no queries in the log file + if len(grouping) < 1: + raise SystemExit('nothing to print.') + titles = ['namespace', 'pattern', 'count', 'min (ms)', 'max (ms)', 'mean (ms)', '95%-ile (ms)', 'sum (ms)'] table_rows = [] + for g in grouping: # calculate statistics for this group namespace, pattern = g @@ -71,7 +82,10 @@ def run(self): stats['min'] = min( group_events ) if group_events else '-' stats['max'] = max( group_events ) if group_events else '-' stats['mean'] = 0 - stats['95%'] = np.percentile(group_events, 95) + if np: + stats['95%'] = np.percentile(group_events, 95) if group_events else '-' + else: + stats['95%'] = 'n/a' stats['sum'] = sum( group_events ) if group_events else '-' stats['mean'] = stats['sum'] / stats['count'] if group_events else '-' diff --git a/mtools/mloginfo/sections/rs_state_section.py b/mtools/mloginfo/sections/rs_state_section.py new file mode 100644 index 00000000..cc3e6124 --- /dev/null +++ b/mtools/mloginfo/sections/rs_state_section.py @@ -0,0 +1,40 @@ +from base_section import BaseSection +from mtools.util.print_table import print_table +from mtools.util import OrderedDict + +class RsStateSection(BaseSection): + """ This section determines if there were any Replica Set state changes in the log file and prints out + the times and information about the restarts found. + """ + + name = "rsstate" + + def __init__(self, mloginfo): + BaseSection.__init__(self, mloginfo) + + # add --restarts flag to argparser + self.mloginfo.argparser_sectiongroup.add_argument('--rsstate', action='store_true', help='outputs information about every detected RS state change') + + + @property + def active(self): + """ return boolean if this section is active. """ + return self.mloginfo.args['rsstate'] + + def run(self): + """ run this section and print out information. """ + + titles = ['date', 'host', 'state/message'] + table_rows = [] + + for host, state, logevent in self.mloginfo.logfile.rsstate: + stats = OrderedDict() + stats['date'] = logevent.datetime.strftime("%b %d %H:%M:%S") + stats['host'] = host + stats['state/message'] = state + table_rows.append(stats) + + print_table(table_rows, titles, uppercase_headers=False) + + if len(self.mloginfo.logfile.rsstate) == 0: + print " no rs state changes found" \ No newline at end of file diff --git a/mtools/mplotqueries/mplotqueries.py b/mtools/mplotqueries/mplotqueries.py index 0d2d6ae8..eff71425 100755 --- a/mtools/mplotqueries/mplotqueries.py +++ b/mtools/mplotqueries/mplotqueries.py @@ -13,10 +13,11 @@ from copy import copy from mtools import __version__ from datetime import timedelta +from dateutil.tz import tzutc, tzoffset try: import matplotlib.pyplot as plt - from matplotlib.dates import DateFormatter, date2num + from matplotlib.dates import AutoDateFormatter, date2num, AutoDateLocator from matplotlib.lines import Line2D from matplotlib.text import Text from matplotlib import __version__ as mpl_version @@ -60,6 +61,8 @@ def __init__(self): self.argparser.add_argument('--group-limit', metavar='N', type=int, default=None, help="specify an upper limit of the number of groups. Groups are sorted by number of data points. If limit is specified, only the top N will be listed separately, the rest are grouped together in an 'others' group") self.argparser.add_argument('--no-others', action='store_true', default=False, help="if this flag is used, the 'others' group (see --group-limit) will be discarded.") self.argparser.add_argument('--optime-start', action='store_true', default=False, help="plot operations with a duration when they started instead (by subtracting the duration). The default is to plot them when they finish (at the time they are logged).") + self.argparser.add_argument('--ylimits', action='store', default=None, type=int, nargs=2, metavar='VAL', help="if set, limits the y-axis view to [min, max], requires exactly 2 values.") + self.argparser.add_argument('--output-file', metavar='FILE', action='store', default=None, help="Save the plot to a file instead of displaying it in a window") self.legend = None @@ -129,6 +132,8 @@ def parse_logevents(self): progress_total = self._datetime_to_epoch(logfile.end) - progress_start else: self.progress_bar_enabled = False + progress_start = 0 + progress_total = 1 if progress_total == 0: # protect from division by zero errors @@ -358,6 +363,10 @@ def onpress(self, event): plt.gca().set_yscale('linear') plt.autoscale(True, axis='y', tight=True) + # honor forced limits + if self.args['ylimits']: + plt.gca().set_ylim( self.args['ylimits'] ) + plt.gcf().canvas.draw() @@ -366,6 +375,11 @@ def plot(self): if len(self.plot_instances) == 0: raise SystemExit('no data to plot.') + if self.args['output_file'] is not None: + # --output-file means don't depend on X, + # so switch to a pure-image backend before doing any plotting. + plt.switch_backend('agg') + self.artists = [] plt.figure(figsize=(12,8), dpi=100, facecolor='w', edgecolor='w') axis = plt.subplot(111) @@ -379,20 +393,40 @@ def plot(self): xlabel = 'time' ylabel = '' + + # use timezone of first log file (may not always be what user wants but must make a choice) + tz = self.args['logfile'][0].timezone + tzformat = '%b %d\n%H:%M:%S' if tz == tzutc() else '%b %d\n%H:%M:%S%z' + + locator = AutoDateLocator(tz=tz, minticks=5, maxticks=10) + formatter = AutoDateFormatter(locator, tz=tz) + + formatter.scaled = { + 365.0 : '%Y', + 30. : '%b %Y', + 1.0 : '%b %d %Y', + 1./24. : '%b %d %Y\n%H:%M:%S', + 1./(24.*60.): '%b %d %Y\n%H:%M:%S', + } + + # add timezone to format if not UTC + if tz != tzutc(): + formatter.scaled[1./24.] = '%b %d %Y\n%H:%M:%S%z' + formatter.scaled[1./(24.*60.)] = '%b %d %Y\n%H:%M:%S%z' + for i, plot_inst in enumerate(sorted(self.plot_instances, key=lambda pi: pi.sort_order)): self.artists.extend(plot_inst.plot(axis, i, len(self.plot_instances), (xlim_min, xlim_max) )) if hasattr(plot_inst, 'xlabel'): xlabel = plot_inst.xlabel if hasattr(plot_inst, 'ylabel'): ylabel = plot_inst.ylabel - self.print_shortcuts() + if self.args['output_file'] is None: + self.print_shortcuts() axis.set_xlabel(xlabel) - axis.set_xticklabels(axis.get_xticks(), rotation=90, fontsize=10) - axis.xaxis.set_major_formatter(DateFormatter('%b %d\n%H:%M:%S')) - - for label in axis.get_xticklabels(): # make the xtick labels pickable - label.set_picker(True) + axis.set_xticklabels(axis.get_xticks(), rotation=90, fontsize=9) + axis.xaxis.set_major_locator(locator) + axis.xaxis.set_major_formatter(formatter) axis.set_xlim(date2num([xlim_min, xlim_max])) @@ -421,10 +455,17 @@ def plot(self): legend_line.set_picker(10) legend_line._mt_legend_item = i + # overwrite y-axis limits if set + if self.args['ylimits'] != None: + print self.args['ylimits'] + axis.set_ylim( self.args['ylimits']) + plt.gcf().canvas.mpl_connect('pick_event', self.onpick) plt.gcf().canvas.mpl_connect('key_press_event', self.onpress) - plt.show() - + if self.args['output_file'] is not None: + plt.savefig(self.args['output_file']) + else: + plt.show() if __name__ == '__main__': tool = MPlotQueriesTool() diff --git a/mtools/mplotqueries/plottypes/range_type.py b/mtools/mplotqueries/plottypes/range_type.py index 3aafef3a..583d693e 100644 --- a/mtools/mplotqueries/plottypes/range_type.py +++ b/mtools/mplotqueries/plottypes/range_type.py @@ -65,7 +65,7 @@ def plot_group(self, group, idx, axis): artists = [] for x_left, x_right in zip(x_lefts, x_rights): - width = max(0.001, x_right-x_left) + width = max(0.0001, x_right-x_left) artist = axis.barh(y_bottom-0.5*height, width=width, height=0.7*height, left=x_left, color=color, alpha=0.7, edgecolor='white', picker=5, linewidth=1, align='center')[0] artist._mt_plot_type = self diff --git a/mtools/mplotqueries/plottypes/scatter_type.py b/mtools/mplotqueries/plottypes/scatter_type.py index 7893dc37..a5df67af 100644 --- a/mtools/mplotqueries/plottypes/scatter_type.py +++ b/mtools/mplotqueries/plottypes/scatter_type.py @@ -7,6 +7,7 @@ try: import matplotlib.pyplot as plt + from matplotlib import __version__ as mpl_version from matplotlib.dates import date2num from matplotlib.lines import Line2D from matplotlib.patches import Polygon @@ -71,7 +72,9 @@ def clicked(self, event): group = event.artist._mt_group indices = event.ind - if not event.mouseevent.dblclick: + # double click only supported on 1.2 or later + major, minor, _ = mpl_version.split('.') + if (int(major), int(minor)) < (1, 2) or not event.mouseevent.dblclick: for i in indices: print self.groups[group][i].line_str diff --git a/mtools/test/logfiles/mongod_26_corrupt.log b/mtools/test/logfiles/mongod_26_corrupt.log new file mode 100644 index 00000000..da799993 --- /dev/null +++ b/mtools/test/logfiles/mongod_26_corrupt.log @@ -0,0 +1,681 @@ +2014-04-09T23:16:20.437-0400 [initandlisten] MongoDB starting : pid=4037 port=27019 dbpath=/Users/tr/Documents/tmp/data/shard01/rs1/db 64-bit host=enter.local +2014-04-09T23:16:20.437-0400 [initandlisten] db version v2.6.0 +2014-04-09T23:16:20.437-0400 [initandlisten] git version: 1c1c76aeca21c5983dc178920f5052c298db616c +2014-04-09T23:16:20.438-0400 [initandlisten] build info: Darwin mci-osx108-8.build.10gen.cc 12.5.0 Darwin Kernel Version 12.5.0: Sun Sep 29 13:33:47 PDT 2013; root:xnu-2050.48.12~1/RELEASE_X86_64 x86_64 BOOST_LIB_VERSION=1_49 +2014-04-09T23:16:20.438-0400 [initandlisten] allocator: system +2014-04-09T23:16:20.438-0400 [initandlisten] options: { net: { port: 27019 }, processManagement: { fork: true }, replication: { replSet: "shard01" }, security: { keyFile: "/Users/tr/Documents/tmp/data/keyfile" }, storage: { dbPath: "/Users/tr/Documents/tmp/data/shard01/rs1/db" }, systemLog: { destination: "file", logAppend: true, path: "/Users/tr/Documents/tmp/data/shard01/rs1/mongod.log" } } +2014-04-09T23:16:20.438-0400 [initandlisten] journal dir=/Users/tr/Documents/tmp/data/shard01/rs1/db/journal +2014-04-09T23:16:20.439-0400 [initandlisten] recover : no journal files present, no recovery needed +2014-04-09T23:16:20.449-0400 [FileAllocator] allocating new datafile /Users/tr/Documents/tmp/data/shard01/rs1/db/local.ns, filling with zeroes... +2014-04-09T23:16:20.450-0400 [FileAllocator] creating directory /Users/tr/Documents/tmp/data/shard01/rs1/db/_tmp +2014-04-09T23:16:20.463-0400 [FileAllocator] done allocating datafile /Users/tr/Documents/tmp/data/shard01/rs1/db/local.ns, size: 16MB, took 0.012 secs +2014-04-09T23:16:20.484-0400 [FileAllocator] allocating new datafile /Users/tr/Documents/tmp/data/shard01/rs1/db/local.0, filling with zeroes... +2014-04-09T23:16:20.559-0400 [FileAllocator] done allocating datafile /Users/tr/Documents/tmp/data/shard01/rs1/db/local.0, size: 64MB, took 0.074 secs +2014-04-09T23:16:20.589-0400 [initandlisten] build index on: local.startup_log properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "local.startup_log" } +2014-04-09T23:16:20.596-0400 [initandlisten] added index to empty collection +2014-04-09T23:16:20.596-0400 [initandlisten] command local.$cmd command: create { create: "startup_log", size: 10485760, capped: true } ntoreturn:1 keyUpdates:0 numYields:0 reslen:37 147ms +2014-04-09T23:16:20.597-0400 [initandlisten] waiting for connections on port 27019 +2014-04-09T23:16:20.603-0400 [rsStart] replSet can't get local.system.replset config from self or any seed (EMPTYCONFIG) +2014-04-09T23:16:20.604-0400 [rsStart] replSet info you may need to run replSetInitiate -- rs.initiate() in the shell -- if that is not already done +2014-04-09T23:16:21.435-0400 [initandlisten] connection accepted from 127.0.0.1:52059 #1 (1 connection now open) +2014-04-09T23:16:21.447-0400 [conn1] note: no users configured in admin.system.users, allowing localhost access +2014-04-09T23:16:21.450-0400 [conn1] end connection 127.0.0.1:52059 (0 connections now open) +2014-04-09T23:16:21.533-0400 [initandlisten] connection accepted from 127.0.0.1:52068 #2 (1 connection now open) +2014-04-09T23:16:21.534-0400 [conn2] replSet replSetInitiate admin command received from client +2014-04-09T23:16:21.540-0400 [conn2] replSet replSetInitiate config object parses ok, 3 members specified +2014-04-09T23:16:21.605-0400 [rsStart] replSet can't get local.system.replset config from self or any seed (EMPTYCONFIG) +2014-04-09T23:16:21.857-0400 [conn2] replSet replSetInitiate all members seem up + +2014-04-09T23:16:21.858-0400 [conn2] ****** +2014-04-09T23:16:21.858-0400 [conn2] creating replication oplog of size: 192MB... +2014-04-09T23:16:21.858-0400 [FileAllocator] allocating new datafile /Users/tr/Documents/tmp/data/shard01/rs1/db/local.1, filling with zeroes... +2014-04-09T23:16:22.271-0400 [FileAllocator] done allocating datafile /Users/tr/Documents/tmp/data/shard01/rs1/db/local.1, size: 256MB, took 0.412 secs +2014-04-09T23:16:22.287-0400 [conn2] ****** +2014-04-09T23:16:22.287-0400 [conn2] replSet info saving a newer config version to local.system.replset: { _id: "shard01", version: 1, members: [ { _id: 0, host: "enter.local:27019" }, { _id: 1, host: "enter.local:27020" }, { _id: 2, host: "enter.local:27021" } ] } +2014-04-09T23:16:22.290-0400 [conn2] build index on: local.system.replset properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "local.system.replset" } +2014-04-09T23:16:22.291-0400 [conn2] added index to empty collection +2014-04-09T23:16:22.291-0400 [conn2] replSet saveConfigLocally done +2014-04-09T23:16:22.292-0400 [conn2] replSet replSetInitiate config now saved locally. Should come online in about a minute. +2014-04-09T23:16:22.292-0400 [conn2] command admin.$cmd command: replSetInitiate { replSetInitiate: { _id: "shard01", members: [ { host: "enter.local:27019", _id: 0, votes: 1 }, { host: "enter.local:27020", _id: 1, votes: 1 }, { host: "enter.local:27021", _id: 2, votes: 1 } ] } } keyUpdates:0 numYields:0 locks(micros) W:439906 reslen:112 757ms +2014-04-09T23:16:22.293-0400 [conn2] end connection 127.0.0.1:52068 (0 connections now open) +2014-04-09T23:16:22.605-0400 [rsStart] replSet I am enter.local:27019 +2014-04-09T23:16:22.684-0400 [rsStart] build index on: local.me properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "local.me" } +2014-04-09T23:16:22.690-0400 [rsStart] added index to empty collection +2014-04-09T23:16:22.690-0400 [rsStart] replSet STARTUP2 +2014-04-09T23:16:22.692-0400 [rsSync] replSet SECONDARY +2014-04-09T23:16:24.042-0400 [initandlisten] connection accepted from 10.0.1.135:52127 #3 (1 connection now open) +2014-04-09T23:16:24.043-0400 [conn3] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:16:24.047-0400 [initandlisten] connection accepted from 10.0.1.135:52131 #4 (2 connections now open) +2014-04-09T23:16:24.047-0400 [conn4] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:16:24.048-0400 [conn4] end connection 10.0.1.135:52131 (1 connection now open) +2014-04-09T23:16:24.608-0400 [rsHealthPoll] replSet member enter.local:27020 is up +2014-04-09T23:16:24.608-0400 [rsHealthPoll] replSet member enter.local:27021 is up +2014-04-09T23:16:24.609-0400 [rsMgr] replSet not electing self, not all members up and we have been up less than 5 minutes +2014-04-09T23:16:24.610-0400 [rsMgr] replSet not electing self, not all members up and we have been up less than 5 minutes +2014-04-09T23:16:24.739-0400 [initandlisten] connection accepted from 10.0.1.135:52136 #5 (2 connections now open) +2014-04-09T23:16:24.740-0400 [conn5] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:16:24.866-0400 [initandlisten] connection accepted from 10.0.1.135:52137 #6 (3 connections now open) +2014-04-09T23:16:24.866-0400 [conn6] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:16:25.060-0400 [initandlisten] connection accepted from 10.0.1.135:52138 #7 (4 connections now open) +2014-04-09T23:16:25.060-0400 [conn7] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:16:25.062-0400 [conn7] end connection 10.0.1.135:52138 (3 connections now open) +2014-04-09T23:16:26.072-0400 [initandlisten] connection accepted from 10.0.1.135:52142 #8 (4 connections now open) +2014-04-09T23:16:26.073-0400 [conn8] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:16:26.075-0400 [conn8] end connection 10.0.1.135:52142 (3 connections now open) +2014-04-09T23:16:26.609-0400 [rsHealthPoll] replset info enter.local:27021 thinks that we are down +2014-04-09T23:16:26.609-0400 [rsHealthPoll] replset info enter.local:27020 thinks that we are down +2014-04-09T23:16:26.610-0400 [rsHealthPoll] replSet member enter.local:27021 is now in state STARTUP2 +2014-04-09T23:16:26.610-0400 [rsHealthPoll] replSet member enter.local:27020 is now in state STARTUP2 +2014-04-09T23:16:26.611-0400 [rsMgr] not electing self, enter.local:27021 would veto with 'I don't think enter.local:27019 is electable' +2014-04-09T23:16:26.612-0400 [rsMgr] not electing self, enter.local:27021 would veto with 'I don't think enter.local:27019 is electable' +2014-04-09T23:16:27.084-0400 [initandlisten] connection accepted from 10.0.1.135:52146 #9 (4 connections now open) +2014-04-09T23:16:27.085-0400 [conn9] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:16:27.086-0400 [conn9] end connection 10.0.1.135:52146 (3 connections now open) +2014-04-09T23:16:28.092-0400 [initandlisten] connection accepted from 10.0.1.135:52150 #10 (4 connections now open) +2014-04-09T23:16:28.093-0400 [conn10] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:16:28.094-0400 [conn10] end connection 10.0.1.135:52150 (3 connections now open) +2014-04-09T23:16:29.102-0400 [initandlisten] connection accepted from 10.0.1.135:52152 #11 (4 connections now open) +2014-04-09T23:16:29.103-0400 [conn11] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:16:29.104-0400 [conn11] end connection 10.0.1.135:52152 (3 connections now open) +2014-04-09T23:16:30.112-0400 [initandlisten] connection accepted from 10.0.1.135:52154 #12 (4 connections now open) +2014-04-09T23:16:30.113-0400 [conn12] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:16:30.114-0400 [conn12] end connection 10.0.1.135:52154 (3 connections now open) +2014-04-09T23:16:31.123-0400 [initandlisten] connection accepted from 10.0.1.135:52156 #13 (4 connections now open) +2014-04-09T23:16:31.123-0400 [conn13] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:16:31.124-0400 [conn13] end connection 10.0.1.135:52156 (3 connections now open) +2014-04-09T23:16:32.265-0400 [initandlisten] connection accepted from 10.0.1.135:52158 #14 (4 connections now open) +2014-04-09T23:16:32.266-0400 [conn14] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:16:32.267-0400 [conn14] end connection 10.0.1.135:52158 (3 connections now open) +2014-04-09T23:16:32.616-0400 [rsMgr] replSet info electSelf 0 +2014-04-09T23:16:32.701-0400 [rsMgr] replSet PRIMARY + +2014-04-09T23:16:33.273-0400 [initandlisten] connection accepted from 10.0.1.135:52159 #15 (4 connections now open) +2014-04-09T23:16:33.274-0400 [conn15] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:16:34.292-0400 [initandlisten] connection accepted from 127.0.0.1:52161 #16 (5 connections now open) +2014-04-09T23:16:34.293-0400 [conn16] end connection 127.0.0.1:52161 (4 connections now open) +2014-04-09T23:16:34.302-0400 [initandlisten] connection accepted from 127.0.0.1:52173 #17 (5 connections now open) +2014-04-09T23:16:34.303-0400 [conn17] end connection 127.0.0.1:52173 (4 connections now open) +2014-04-09T23:16:34.313-0400 [initandlisten] connection accepted from 127.0.0.1:52182 #18 (5 connections now open) +2014-04-09T23:16:34.330-0400 [initandlisten] connection accepted from 10.0.1.135:52184 #19 (6 connections now open) +2014-04-09T23:16:34.332-0400 [conn18] end connection 127.0.0.1:52182 (5 connections now open) +2014-04-09T23:16:34.336-0400 [conn19] end connection 10.0.1.135:52184 (4 connections now open) +2014-04-09T23:16:34.341-0400 [initandlisten] connection accepted from 10.0.1.135:52194 #20 (5 connections now open) +2014-04-09T23:16:34.341-0400 [conn20] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:16:34.342-0400 [initandlisten] connection accepted from 10.0.1.135:52195 #21 (6 connections now open) +2014-04-09T23:16:34.343-0400 [conn20] first cluster operation detected, adding sharding hook to enable versioning and authentication to remote servers +2014-04-09T23:16:34.343-0400 [conn21] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:16:34.597-0400 [initandlisten] connection accepted from 127.0.0.1:52204 #22 (7 connections now open) +2014-04-09T23:16:34.597-0400 [conn22] end connection 127.0.0.1:52204 (6 connections now open) +2014-04-09T23:16:34.608-0400 [initandlisten] connection accepted from 127.0.0.1:52213 #23 (7 connections now open) +2014-04-09T23:16:34.610-0400 [initandlisten] connection accepted from 10.0.1.135:52215 #24 (8 connections now open) +2014-04-09T23:16:34.611-0400 [conn23] end connection 127.0.0.1:52213 (7 connections now open) +2014-04-09T23:16:34.615-0400 [conn24] end connection 10.0.1.135:52215 (6 connections now open) +2014-04-09T23:16:36.451-0400 [initandlisten] connection accepted from 10.0.1.135:52224 #25 (7 connections now open) +2014-04-09T23:16:36.452-0400 [conn25] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:16:36.452-0400 [initandlisten] connection accepted from 10.0.1.135:52225 #26 (8 connections now open) +2014-04-09T23:16:36.453-0400 [conn26] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:16:40.754-0400 [conn5] end connection 10.0.1.135:52136 (7 connections now open) +2014-04-09T23:16:40.755-0400 [initandlisten] connection accepted from 10.0.1.135:52230 #27 (8 connections now open) +2014-04-09T23:16:40.755-0400 [conn27] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:16:40.879-0400 [conn6] end connection 10.0.1.135:52137 (7 connections now open) +2014-04-09T23:16:40.880-0400 [initandlisten] connection accepted from 10.0.1.135:52231 #28 (8 connections now open) +2014-04-09T23:16:40.880-0400 [conn28] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:16:41.317-0400 [initandlisten] connection accepted from 10.0.1.135:52233 #29 (9 connections now open) +2014-04-09T23:16:41.318-0400 [conn29] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:16:41.326-0400 [initandlisten] connection accepted from 10.0.1.135:52234 #30 (10 connections now open) +2014-04-09T23:16:41.326-0400 [conn30] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:16:41.328-0400 [conn30] end connection 10.0.1.135:52234 (9 connections now open) +2014-04-09T23:16:41.342-0400 [conn29] end connection 10.0.1.135:52233 (8 connections now open) +2014-04-09T23:16:41.636-0400 [initandlisten] connection accepted from 10.0.1.135:52236 #31 (9 connections now open) +2014-04-09T23:16:41.637-0400 [conn31] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:16:41.645-0400 [initandlisten] connection accepted from 10.0.1.135:52237 #32 (10 connections now open) +2014-04-09T23:16:41.645-0400 [conn32] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:16:41.647-0400 [conn32] end connection 10.0.1.135:52237 (9 connections now open) +2014-04-09T23:16:41.659-0400 [conn31] end connection 10.0.1.135:52236 (8 connections now open) +2014-04-09T23:16:41.777-0400 [initandlisten] connection accepted from 10.0.1.135:52238 #33 (9 connections now open) +2014-04-09T23:16:41.778-0400 [conn33] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:16:41.779-0400 [initandlisten] connection accepted from 10.0.1.135:52239 #34 (10 connections now open) +2014-04-09T23:16:41.780-0400 [conn34] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:16:41.953-0400 [initandlisten] connection accepted from 10.0.1.135:52240 #35 (11 connections now open) +2014-04-09T23:16:41.953-0400 [conn35] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:16:41.955-0400 [initandlisten] connection accepted from 10.0.1.135:52241 #36 (12 connections now open) +2014-04-09T23:16:41.956-0400 [conn36] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:16:42.620-0400 [rsHealthPoll] replSet member enter.local:27021 is now in state RECOVERING +2014-04-09T23:16:42.623-0400 [rsHealthPoll] replSet member enter.local:27020 is now in state SECONDARY +2014-04-09T23:16:42.835-0400 [slaveTracking] build index on: local.slaves properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "local.slaves" } +2014-04-09T23:16:42.835-0400 [slaveTracking] added index to empty collection +2014-04-09T23:16:44.622-0400 [rsHealthPoll] replSet member enter.local:27021 is now in state SECONDARY +2014-04-09T23:17:10.775-0400 [conn27] end connection 10.0.1.135:52230 (11 connections now open) +2014-04-09T23:17:10.776-0400 [initandlisten] connection accepted from 10.0.1.135:52262 #37 (13 connections now open) +2014-04-09T23:17:10.776-0400 [conn37] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:17:10.901-0400 [conn28] end connection 10.0.1.135:52231 (11 connections now open) +2014-04-09T23:17:10.902-0400 [initandlisten] connection accepted from 10.0.1.135:52263 #38 (12 connections now open) +2014-04-09T23:17:10.903-0400 [conn38] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:17:20.467-0400 [clientcursormon] mem (MB) res:10 virt:3196 +2014-04-09T23:17:20.467-0400 [clientcursormon] mapped (incl journal view):672 +2014-04-09T23:17:20.467-0400 [clientcursormon] connections:12 +2014-04-09T23:17:20.468-0400 [clientcursormon] replication threads:32 +2014-04-09T23:17:40.798-0400 [conn37] end connection 10.0.1.135:52262 (11 connections now open) +2014-04-09T23:17:40.799-0400 [initandlisten] connection accepted from 10.0.1.135:52274 #39 (13 connections now open) +2014-04-09T23:17:40.800-0400 [conn39] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:17:40.926-0400 [conn38] end connection 10.0.1.135:52263 (11 connections now open) +2014-04-09T23:17:40.926-0400 [initandlisten] connection accepted from 10.0.1.135:52275 #40 (12 connections now open) +2014-04-09T23:17:40.927-0400 [conn40] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:18:10.822-0400 [conn39] end connection 10.0.1.135:52274 (11 connections now open) +2014-04-09T23:18:10.822-0400 [initandlisten] connection accepted from 10.0.1.135:52288 #41 (12 connections now open) +2014-04-09T23:18:10.823-0400 [conn41] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:18:10.949-0400 [conn40] end connection 10.0.1.135:52275 (11 connections now open) +2014-04-09T23:18:10.950-0400 [initandlisten] connection accepted from 10.0.1.135:52289 #42 (12 connections now open) +2014-04-09T23:18:10.950-0400 [conn42] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:18:40.496-0400 [initandlisten] connection accepted from 10.0.1.135:52301 #43 (13 connections now open) +2014-04-09T23:18:40.496-0400 [conn43] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:18:40.844-0400 [conn41] end connection 10.0.1.135:52288 (12 connections now open) +2014-04-09T23:18:40.844-0400 [initandlisten] connection accepted from 10.0.1.135:52302 #44 (14 connections now open) +2014-04-09T23:18:40.845-0400 [conn44] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:18:40.970-0400 [conn42] end connection 10.0.1.135:52289 (12 connections now open) +2014-04-09T23:18:40.971-0400 [initandlisten] connection accepted from 10.0.1.135:52303 #45 (14 connections now open) +2014-04-09T23:18:40.971-0400 [conn45] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:19:10.865-0400 [conn44] end connection 10.0.1.135:52302 (12 connections now open) +2014-04-09T23:19:10.865-0400 [initandlisten] connection accepted from 10.0.1.135:52315 #46 (14 connections now open) +2014-04-09T23:19:10.866-0400 [conn46] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:19:10.993-0400 [conn45] end connection 10.0.1.135:52303 (12 connections now open) +2014-04-09T23:19:10.993-0400 [initandlisten] connection accepted from 10.0.1.135:52316 #47 (13 connections now open) +2014-04-09T23:19:10.993-0400 [conn47] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:19:26.368-0400 [initandlisten] connection accepted from 10.0.1.135:52325 #48 (14 connections now open) +2014-04-09T23:19:26.368-0400 [conn48] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:19:26.369-0400 [FileAllocator] allocating new datafile /Users/tr/Documents/tmp/data/shard01/rs1/db/test.ns, filling with zeroes... +2014-04-09T23:19:26.380-0400 [FileAllocator] done allocating datafile /Users/tr/Documents/tmp/data/shard01/rs1/db/test.ns, size: 16MB, took 0.011 secs +2014-04-09T23:19:26.406-0400 [FileAllocator] allocating new datafile /Users/tr/Documents/tmp/data/shard01/rs1/db/test.0, filling with zeroes... +2014-04-09T23:19:26.518-0400 [FileAllocator] done allocating datafile /Users/tr/Documents/tmp/data/shard01/rs1/db/test.0, size: 64MB, took 0.11 secs +2014-04-0 [conn48] buil +2014-04-09T23:19:26.544-0400 [conn48] added index to empty collection +2014-04-09T23:19:26.550-0400 [conn48] build index on: test.docs properties: { v: 1, key: { foo: "hashed" }, name: "foo_hashed", ns: "test.docs" } +2014-04-09T23:19:26.551-0400 [conn48] added index to empty collection +2014-04-09T23:19:26.551-0400 [conn48] insert test.system.indexes query: { ns: "test.docs", key: { foo: "hashed" }, name: "foo_hashed" } ninserted:1 keyUpdates:0 numYields:0 locks(micros) w:182206 182ms +2014-04-09T23:19:26.551-0400 [conn48] command test.$cmd command: insert { insert: "system.indexes", documents: [ { ns: "test.docs", key: { foo: "hashed" }, name: "foo_hashed" } ], ordered: true, metadata: { shardName: "shard01", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 keyUpdates:0 numYields:0 locks(micros) w:209 reslen:140 182ms +2014-04-09T23:19:26.702-0400 [conn20] remote client 10.0.1.135:52194 initialized this host (shard01/enter.local:27019,enter.local:27020,enter.local:27021) as shard shard01 +2014-04-09T23:19:26.704-0400 [conn20] remotely refreshing metadata for test.docs with requested shard version 1|1||53460dbe4aaa0fc95616708e, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 +2014-04-09T23:19:26.704-0400 [conn20] SyncClusterConnection connecting to [enter.local:27025] +2014-04-09T23:19:26.705-0400 [conn20] SyncClusterConnection connecting to [enter.local:27026] +2014-04-09T23:19:26.706-0400 [conn20] SyncClusterConnection connecting to [enter.local:27027] +2014-04-09T23:19:26.709-0400 [conn20] collection test.docs was previously unsharded, new metadata loaded with shard version 1|1||53460dbe4aaa0fc95616708e +2014-04-09T23:19:26.709-0400 [conn20] collection version was loaded at version 1|1||53460dbe4aaa0fc95616708e, took 4ms +2014-04-09T23:19:26.776-0400 [conn15] moveChunk waiting for full cleanup after move +2014-04-09T23:19:26.776-0400 [conn15] received moveChunk request: { moveChunk: "test.docs", from: "shard01/enter.local:27019,enter.local:27020,enter.local:27021", to: "shard02/enter.local:27022,enter.local:27023,enter.local:27024", fromShard: "shard01", toShard: "shard02", min: { foo: 0 }, max: { foo: MaxKey }, maxChunkSizeBytes: 67108864, shardId: "test.docs-foo_0", configdb: "enter.local:27025,enter.local:27026,enter.local:27027", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0 } +2014-04-09T23:19:26.785-0400 [LockPinger] creating distributed lock ping thread for enter.local:27025,enter.local:27026,enter.local:27027 and process enter.local:27019:1397099966:1022657369 (sleeping for 30000ms) +2014-04-09T23:19:26.785-0400 [LockPinger] SyncClusterConnection connecting to [enter.local:27025] +2014-04-09T23:19:26.786-0400 [LockPinger] SyncClusterConnection connecting to [enter.local:27026] +2014-04-09T23:19:26.786-0400 [LockPinger] SyncClusterConnection connecting to [enter.local:27027] +2014-04-09T23:19:27.053-0400 [LockPinger] cluster enter.local:27025,enter.local:27026,enter.local:27027 pinged successfully at Wed Apr 9 23:19:26 2014 by distributed lock pinger 'enter.local:27025,enter.local:27026,enter.local:27027/enter.local:27019:1397099966:1022657369', sleeping for 30000ms +2014-04-09T23:19:27.193-0400 [conn15] distributed lock 'test.docs/enter.local:27019:1397099966:1022657369' acquired, ts : 53460dbe7bad85a712a6c3fb +2014-04-09T23:19:27.193-0400 [conn15] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:27-53460dbf7bad85a712a6c3fc", server: "enter.local", clientAddr: "10.0.1.135:52159", time: new Date(1397099967193), what: "moveChunk.start", ns: "test.docs", details: { min: { foo: 0 }, max: { foo: MaxKey }, from: "shard01", to: "shard02" } } +2014-04-09T23:19:27.342-0400 [conn15] remotely refreshing metadata for test.docs based on current shard version 1|1||53460dbe4aaa0fc95616708e, current metadata version is 1|1||53460dbe4aaa0fc95616708e +2014-04-09T23:19:27.342-0400 [conn15] metadata of collection test.docs already up to date (shard version : 1|1||53460dbe4aaa0fc95616708e, took 0ms) +2014-04-09T23:19:27.342-0400 [conn15] moveChunk request accepted at version 1|1||53460dbe4aaa0fc95616708e +2014-04-09T23:19:27.343-0400 [conn15] moveChunk number of documents: 0 +2014-04-09T23:19:27.343-0400 [conn15] starting new replica set monitor for replica set shard02 with seeds enter.local:27022,enter.local:27023,enter.local:27024 +2014-04-09T23:19:27.343-0400 [ReplicaSetMonitorWatcher] starting +2014-04-09T23:19:27.351-0400 [conn15] moveChunk data transfer progress: { active: true, ns: "test.docs", from: "shard01/enter.local:27019,enter.local:27020,enter.local:27021", min: { foo: 0 }, max: { foo: MaxKey }, shardKeyPattern: { foo: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 +2014-04-09T23:19:27.352-0400 [initandlisten] connection accepted from 10.0.1.135:52345 #49 (15 connections now open) +2014-04-09T23:19:27.353-0400 [conn49] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:19:27.353-0400 [initandlisten] connection accepted from 10.0.1.135:52346 #50 (16 connections now open) +2014-04-09T23:19:27.354-0400 [conn50] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:19:27.354-0400 [conn15] moveChunk data transfer progress: { active: true, ns: "test.docs", from: "shard01/enter.local:27019,enter.local:27020,enter.local:27021", min: { foo: 0 }, max: { foo: MaxKey }, shardKeyPattern: { foo: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 +2014-04-09T23:19:27.359-0400 [conn15] moveChunk data transfer progress: { active: true, ns: "test.docs", from: "shard01/enter.local:27019,enter.local:27020,enter.local:27021", min: { foo: 0 }, max: { foo: MaxKey }, shardKeyPattern: { foo: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 +2014-04-09T23:19:27.367-0400 [conn15] moveChunk data transfer progress: { active: true, ns: "test.docs", from: "shard01/enter.local:27019,enter.local:27020,enter.local:27021", min: { foo: 0 }, max: { foo: MaxKey }, shardKeyPattern: { foo: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 +2014-04-09T23:19:27.384-0400 [conn15] moveChunk data transfer progress: { active: true, ns: "test.docs", from: "shard01/enter.local:27019,enter.local:27020,enter.local:27021", min: { foo: 0 }, max: { foo: MaxKey }, shardKeyPattern: { foo: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 +2014-04-09T23:19:27.417-0400 [conn15] moveChunk data transfer progress: { active: true, ns: "test.docs", from: "shard01/enter.local:27019,enter.local:27020,enter.local:27021", min: { foo: 0 }, max: { foo: MaxKey }, shardKeyPattern: { foo: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 +2014-04-09T23:19:27.482-0400 [conn15] moveChunk data transfer progress: { active: true, ns: "test.docs", from: "shard01/enter.local:27019,enter.local:27020,enter.local:27021", min: { foo: 0 }, max: { foo: MaxKey }, shardKeyPattern: { foo: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 +2014-04-09T23:19:27.611-0400 [conn15] moveChunk data transfer progress: { active: true, ns: "test.docs", from: "shard01/enter.local:27019,enter.local:27020,enter.local:27021", min: { foo: 0 }, max: { foo: MaxKey }, shardKeyPattern: { foo: "hashed" }, state: "catchup", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 +2014-04-09T23:19:27.869-0400 [conn15] moveChunk data transfer progress: { active: true, ns: "test.docs", from: "shard01/enter.local:27019,enter.local:27020,enter.local:27021", min: { foo: 0 }, max: { foo: MaxKey }, shardKeyPattern: { foo: "hashed" }, state: "catchup", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 +2014-04-09T23:19:28.383-0400 [conn15] moveChunk data transfer progress: { active: true, ns: "test.docs", from: "shard01/enter.local:27019,enter.local:27020,enter.local:27021", min: { foo: 0 }, max: { foo: MaxKey }, shardKeyPattern: { foo: "hashed" }, state: "catchup", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 +2014-04-09T23:19:29.220-0400 [conn26] received moveChunk request: { moveChunk: "test.docs", from: "shard01/enter.local:27019,enter.local:27020,enter.local:27021", to: "shard02/enter.local:27022,enter.local:27023,enter.local:27024", fromShard: "shard01", toShard: "shard02", min: { foo: MinKey }, max: { foo: 0 }, maxChunkSizeBytes: 67108864, shardId: "test.docs-foo_MinKey", configdb: "enter.local:27025,enter.local:27026,enter.local:27027", secondaryThrottle: true, waitForDelete: false, maxTimeMS: 0 } +2014-04-09T23:19:29.221-0400 [conn26] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:29-53460dc17bad85a712a6c3fd", server: "enter.local", clientAddr: "10.0.1.135:52225", time: new Date(1397099969221), what: "moveChunk.from", ns: "test.docs", details: { min: { foo: MinKey }, max: { foo: 0 }, step 1 of 6: 0, note: "aborted" } } +2014-04-09T23:19:29.280-0400 [initandlisten] connection accepted from 10.0.1.135:52347 #51 (17 connections now open) +2014-04-09T23:19:29.280-0400 [conn51] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:19:29.408-0400 [conn15] moveChunk data transfer progress: { active: true, ns: "test.docs", from: "shard01/enter.local:27019,enter.local:27020,enter.local:27021", min: { foo: 0 }, max: { foo: MaxKey }, shardKeyPattern: { foo: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 +2014-04-09T23:19:29.409-0400 [conn15] About to check if it is safe to enter critical section +2014-04-09T23:19:29.409-0400 [conn15] About to enter migrate critical section +2014-04-09T23:19:29.409-0400 [conn15] moveChunk setting version to: 2|0||53460dbe4aaa0fc95616708e +2014-04-09T23:19:29.564-0400 [conn51] received moveChunk request: { moveChunk: "test.docs", from: "shard01/enter.local:27019,enter.local:27020,enter.local:27021", to: "shard02/enter.local:27022,enter.local:27023,enter.local:27024", fromShard: "shard01", toShard: "shard02", min: { foo: MinKey }, max: { foo: 0 }, maxChunkSizeBytes: 67108864, shardId: "test.docs-foo_MinKey", configdb: "enter.local:27025,enter.local:27026,enter.local:27027", secondaryThrottle: true, waitForDelete: false, maxTimeMS: 0 } +2014-04-09T23:19:29.564-0400 [conn51] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:29-53460dc17bad85a712a6c3fe", server: "enter.local", clientAddr: "10.0.1.135:52347", time: new Date(1397099969564), what: "moveChunk.from", ns: "test.docs", details: { min: { foo: MinKey }, max: { foo: 0 }, step 1 of 6: 0, note: "aborted" } } +2014-04-09T23:19:29.568-0400 [conn15] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "test.docs", from: "shard01/enter.local:27019,enter.local:27020,enter.local:27021", min: { foo: 0 }, max: { foo: MaxKey }, shardKeyPattern: { foo: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } +2014-04-09T23:19:29.568-0400 [conn15] moveChunk updating self version to: 2|1||53460dbe4aaa0fc95616708e through { foo: MinKey } -> { foo: 0 } for collection 'test.docs' +**************************** +2014-04-09T23:19:29.694-0400 [conn15] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:29-53460dc17bad85a712a6c3ff", server: "enter.local", clientAddr: "10.0.1.135:52159", time: new Date(1397099969694), what: "moveChunk.commit", ns: "test.docs", details: { min: { foo: 0 }, max: { foo: MaxKey }, from: "shard01", to: "shard02", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } +2014-04-09T23:19:29.709-0400 [conn15] MigrateFromStatus::done About to acquire global write lock to exit critical section +2014-04-09T23:19:29.709-0400 [conn15] MigrateFromStatus::done Global lock acquired +2014-04-09T23:19:29.709-0400 [conn15] doing delete inline for cleanup of chunk data +2014-04-09T23:19:29.710-0400 [conn15] Deleter starting delete for: test.docs from { foo: 0 } -> { foo: MaxKey }, with opId: 578 +2014-04-09T23:19:29.710-0400 [conn15] rangeDeleter deleted 0 documents for test.docs from { foo: 0 } -> { foo: MaxKey } +2014-04-09T23:19:29.710-0400 [conn15] MigrateFromStatus::done About to acquire global write lock to exit critical section +2014-04-09T23:19:29.710-0400 [conn15] MigrateFromStatus::done Global lock acquired +2014-04-09T23:19:29.710-0400 [conn15] SyncClusterConnection connecting to [enter.local:27025] +2014-04-09T23:19:29.711-0400 [conn15] SyncClusterConnection connecting to [enter.local:27026] +2014-04-09T23:19:29.712-0400 [conn15] SyncClusterConnection connecting to [enter.local:27027] +2014-04-09T23:19:29.833-0400 [conn15] distributed lock 'test.docs/enter.local:27019:1397099966:1022657369' unlocked. +2014-04-09T23:19:29.833-0400 [conn15] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:29-53460dc17bad85a712a6c400", server: "enter.local", clientAddr: "10.0.1.135:52159", time: new Date(1397099969833), what: "moveChunk.from", ns: "test.docs", details: { min: { foo: 0 }, max: { foo: MaxKey }, step 1 of 6: 0, step 2 of 6: 566, step 3 of 6: 7, step 4 of 6: 2058, step 5 of 6: 300, step 6 of 6: 0 } } +2014-04-09T23:19:29.840-0400 [conn15] command admin.$cmd command: moveChunk { moveChunk: "test.docs", from: "shard01/enter.local:27019,enter.local:27020,enter.local:27021", to: "shard02/enter.local:27022,enter.local:27023,enter.local:27024", fromShard: "shard01", toShard: "shard02", min: { foo: 0 }, max: { foo: MaxKey }, maxChunkSizeBytes: 67108864, shardId: "test.docs-foo_0", configdb: "enter.local:27025,enter.local:27026,enter.local:27027", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0 } ntoreturn:1 keyUpdates:0 numYields:0 locks(micros) W:276 r:285 w:48 reslen:37 3063ms +2014-04-09T23:19:29.841-0400 [conn15] received splitChunk request: { splitChunk: "test.docs", keyPattern: { foo: "hashed" }, min: { foo: MinKey }, max: { foo: 0 }, from: "shard01", splitKeys: [ { foo: -8854437155380584768 }, { foo: -8485502273906393736 }, { foo: -8116567392432202704 }, { foo: -7747632510958011672 }, { foo: -7378697629483820640 }, { foo: -7009762748009629608 }, { foo: -6640827866535438576 }, { foo: -6271892985061247544 }, { foo: -5902958103587056512 }, { foo: -5534023222112865480 }, { foo: -5165088340638674448 }, { foo: -4796153459164483416 }, { foo: -4427218577690292384 }, { foo: -4058283696216101352 }, { foo: -3689348814741910320 }, { foo: -3320413933267719288 }, { foo: -2951479051793528256 }, { foo: -2582544170319337224 }, { foo: -2213609288845146192 }, { foo: -1844674407370955160 }, { foo: -1475739525896764128 }, { foo: -1106804644422573096 }, { foo: -737869762948382064 }, { foo: -368934881474191032 } ], shardId: "test.docs-foo_MinKey", configdb: "enter.local:27025,enter.local:27026,enter.local:27027" } +2014-04-09T23:19:30.110-0400 [conn15] distributed lock 'test.docs/enter.local:27019:1397099966:1022657369' acquired, ts : 53460dc17bad85a712a6c401 +2014-04-09T23:19:30.112-0400 [conn15] remotely refreshing metadata for test.docs based on current shard version 2|0||53460dbe4aaa0fc95616708e, current metadata version is 2|0||53460dbe4aaa0fc95616708e +2014-04-09T23:19:30.112-0400 [conn15] updating metadata for test.docs from shard version 2|0||53460dbe4aaa0fc95616708e to shard version 2|1||53460dbe4aaa0fc95616708e +2014-04-09T23:19:30.113-0400 [conn15] collection version was loaded at version 2|1||53460dbe4aaa0fc95616708e, took 0ms +2014-04-09T23:19:30.113-0400 [conn15] splitChunk accepted at version 2|1||53460dbe4aaa0fc95616708e +2014-04-09T23:19:30.248-0400 [conn15] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:30-53460dc27bad85a712a6c402", server: "enter.local", clientAddr: "10.0.1.135:52159", time: new Date(1397099970248), what: "multi-split", ns: "test.docs", details: { before: { min: { foo: MinKey }, max: { foo: 0 }, lastmod: Timestamp 2000|1, lastmodEpoch: ObjectId('000000000000000000000000') }, number: 1, of: 25, chunk: { min: { foo: MinKey }, max: { foo: -8854437155380584768 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('53460dbe4aaa0fc95616708e') } } } +2014-04-09T23:19:30.255-0400 [conn15] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:30-53460dc27bad85a712a6c403", server: "enter.local", clientAddr: "10.0.1.135:52159", time: new Date(1397099970255), what: "multi-split", ns: "test.docs", details: { before: { min: { foo: MinKey }, max: { foo: 0 }, lastmod: Timestamp 2000|1, lastmodEpoch: ObjectId('000000000000000000000000') }, number: 2, of: 25, chunk: { min: { foo: -8854437155380584768 }, max: { foo: -8485502273906393736 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('53460dbe4aaa0fc95616708e') } } } +2014-04-09T23:19:30.260-0400 [conn15] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:30-53460dc27bad85a712a6c404", server: "enter.local", clientAddr: "10.0.1.135:52159", time: new Date(1397099970260), what: "multi-split", ns: "test.docs", details: { before: { min: { foo: MinKey }, max: { foo: 0 }, lastmod: Timestamp 2000|1, lastmodEpoch: ObjectId('000000000000000000000000') }, number: 3, of: 25, chunk: { min: { foo: -8485502273906393736 }, max: { foo: -8116567392432202704 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('53460dbe4aaa0fc95616708e') } } } +2014-04-09T23:19:30.265-0400 [conn15] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:30-53460dc27bad85a712a6c405", server: "enter.local", clientAddr: "10.0.1.135:52159", time: new Date(1397099970265), what: "multi-split", ns: "test.docs", details: { before: { min: { foo: MinKey }, max: { foo: 0 }, lastmod: Timestamp 2000|1, lastmodEpoch: ObjectId('000000000000000000000000') }, number: 4, of: 25, chunk: { min: { foo: -8116567392432202704 }, max: { foo: -7747632510958011672 }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('53460dbe4aaa0fc95616708e') } } } +2014-04-09T23:19:30.270-0400 [conn15] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:30-53460dc27bad85a712a6c406", server: "enter.local", clientAddr: "10.0.1.135:52159", time: new Date(1397099970270), what: "multi-split", ns: "test.docs", details: { before: { min: { foo: MinKey }, max: { foo: 0 }, lastmod: Timestamp 2000|1, lastmodEpoch: ObjectId('000000000000000000000000') }, number: 5, of: 25, chunk: { min: { foo: -7747632510958011672 }, max: { foo: -7378697629483820640 }, lastmod: Timestamp 2000|6, lastmodEpoch: ObjectId('53460dbe4aaa0fc95616708e') } } } +2014-04-09T23:19:30.276-0400 [conn15] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:30-53460dc27bad85a712a6c407", server: "enter.local", clientAddr: "10.0.1.135:52159", time: new Date(1397099970276), what: "multi-split", ns: "test.docs", details: { before: { min: { foo: MinKey }, max: { foo: 0 }, lastmod: Timestamp 2000|1, lastmodEpoch: ObjectId('000000000000000000000000') }, number: 6, of: 25, chunk: { min: { foo: -7378697629483820640 }, max: { foo: -7009762748009629608 }, lastmod: Timestamp 2000|7, lastmodEpoch: ObjectId('53460dbe4aaa0fc95616708e') } } } +2014-04-09T23:19:30.281-0400 [conn15] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:30-53460dc27bad85a712a6c408", server: "enter.local", clientAddr: "10.0.1.135:52159", time: new Date(1397099970281), what: "multi-split", ns: "test.docs", details: { before: { min: { foo: MinKey }, max: { foo: 0 }, lastmod: Timestamp 2000|1, lastmodEpoch: ObjectId('000000000000000000000000') }, number: 7, of: 25, chunk: { min: { foo: -7009762748009629608 }, max: { foo: -6640827866535438576 }, lastmod: Timestamp 2000|8, lastmodEpoch: ObjectId('53460dbe4aaa0fc95616708e') } } } +2014-04-09T23:19:30.286-0400 [conn15] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:30-53460dc27bad85a712a6c409", server: "enter.local", clientAddr: "10.0.1.135:52159", time: new Date(1397099970286), what: "multi-split", ns: "test.docs", details: { before: { min: { foo: MinKey }, max: { foo: 0 }, lastmod: Timestamp 2000|1, lastmodEpoch: ObjectId('000000000000000000000000') }, number: 8, of: 25, chunk: { min: { foo: -6640827866535438576 }, max: { foo: -6271892985061247544 }, lastmod: Timestamp 2000|9, lastmodEpoch: ObjectId('53460dbe4aaa0fc95616708e') } } } +2014-04-09T23:19:30.291-0400 [conn15] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:30-53460dc27bad85a712a6c40a", server: "enter.local", clientAddr: "10.0.1.135:52159", time: new Date(1397099970291), what: "multi-split", ns: "test.docs", details: { before: { min: { foo: MinKey }, max: { foo: 0 }, lastmod: Timestamp 2000|1, lastmodEpoch: ObjectId('000000000000000000000000') }, number: 9, of: 25, chunk: { min: { foo: -6271892985061247544 }, max: { foo: -5902958103587056512 }, lastmod: Timestamp 2000|10, lastmodEpoch: ObjectId('53460dbe4aaa0fc95616708e') } } } +2014-04-09T23:19:30.296-0400 [conn15] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:30-53460dc27bad85a712a6c40b", server: "enter.local", clientAddr: "10.0.1.135:52159", time: new Date(1397099970296), what: "multi-split", ns: "test.docs", details: { before: { min: { foo: MinKey }, max: { foo: 0 }, lastmod: Timestamp 2000|1, lastmodEpoch: ObjectId('000000000000000000000000') }, number: 10, of: 25, chunk: { min: { foo: -5902958103587056512 }, max: { foo: -5534023222112865480 }, lastmod: Timestamp 2000|11, lastmodEpoch: ObjectId('53460dbe4aaa0fc95616708e') } } } +2014-04-09T23:19:30.302-0400 [conn15] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:30-53460dc27bad85a712a6c40c", server: "enter.local", clientAddr: "10.0.1.135:52159", time: new Date(1397099970302), what: "multi-split", ns: "test.docs", details: { before: { min: { foo: MinKey }, max: { foo: 0 }, lastmod: Timestamp 2000|1, lastmodEpoch: ObjectId('000000000000000000000000') }, number: 11, of: 25, chunk: { min: { foo: -5534023222112865480 }, max: { foo: -5165088340638674448 }, lastmod: Timestamp 2000|12, lastmodEpoch: ObjectId('53460dbe4aaa0fc95616708e') } } } +2014-04-09T23:19:30.308-0400 [conn15] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:30-53460dc27bad85a712a6c40d", server: "enter.local", clientAddr: "10.0.1.135:52159", time: new Date(1397099970308), what: "multi-split", ns: "test.docs", details: { before: { min: { foo: MinKey }, max: { foo: 0 }, lastmod: Timestamp 2000|1, lastmodEpoch: ObjectId('000000000000000000000000') }, number: 12, of: 25, chunk: { min: { foo: -5165088340638674448 }, max: { foo: -4796153459164483416 }, lastmod: Timestamp 2000|13, lastmodEpoch: ObjectId('53460dbe4aaa0fc95616708e') } } } +2014-04-09T23:19:30.314-0400 [conn15] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:30-53460dc27bad85a712a6c40e", server: "enter.local", clientAddr: "10.0.1.135:52159", time: new Date(1397099970314), what: "multi-split", ns: "test.docs", details: { before: { min: { foo: MinKey }, max: { foo: 0 }, lastmod: Timestamp 2000|1, lastmodEpoch: ObjectId('000000000000000000000000') }, number: 13, of: 25, chunk: { min: { foo: -4796153459164483416 }, max: { foo: -4427218577690292384 }, lastmod: Timestamp 2000|14, lastmodEpoch: ObjectId('53460dbe4aaa0fc95616708e') } } } +2014-04-09T23:19:30.320-0400 [conn15] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:30-53460dc27bad85a712a6c40f", server: "enter.local", clientAddr: "10.0.1.135:52159", time: new Date(1397099970320), what: "multi-split", ns: "test.docs", details: { before: { min: { foo: MinKey }, max: { foo: 0 }, lastmod: Timestamp 2000|1, lastmodEpoch: ObjectId('000000000000000000000000') }, number: 14, of: 25, chunk: { min: { foo: -4427218577690292384 }, max: { foo: -4058283696216101352 }, lastmod: Timestamp 2000|15, lastmodEpoch: ObjectId('53460dbe4aaa0fc95616708e') } } } +2014-04-09T23:19:30.326-0400 [conn15] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:30-53460dc27bad85a712a6c410", server: "enter.local", clientAddr: "10.0.1.135:52159", time: new Date(1397099970326), what: "multi-split", ns: "test.docs", details: { before: { min: { foo: MinKey }, max: { foo: 0 }, lastmod: Timestamp 2000|1, lastmodEpoch: ObjectId('000000000000000000000000') }, number: 15, of: 25, chunk: { min: { foo: -4058283696216101352 }, max: { foo: -3689348814741910320 }, lastmod: Timestamp 2000|16, lastmodEpoch: ObjectId('53460dbe4aaa0fc95616708e') } } } +2014-04-09T23:19:30.332-0400 [conn15] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:30-53460dc27bad85a712a6c411", server: "enter.local", clientAddr: "10.0.1.135:52159", time: new Date(1397099970332), what: "multi-split", ns: "test.docs", details: { before: { min: { foo: MinKey }, max: { foo: 0 }, lastmod: Timestamp 2000|1, lastmodEpoch: ObjectId('000000000000000000000000') }, number: 16, of: 25, chunk: { min: { foo: -3689348814741910320 }, max: { foo: -3320413933267719288 }, lastmod: Timestamp 2000|17, lastmodEpoch: ObjectId('53460dbe4aaa0fc95616708e') } } } +2014-04-09T23:19:30.339-0400 [conn15] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:30-53460dc27bad85a712a6c412", server: "enter.local", clientAddr: "10.0.1.135:52159", time: new Date(1397099970339), what: "multi-split", ns: "test.docs", details: { before: { min: { foo: MinKey }, max: { foo: 0 }, lastmod: Timestamp 2000|1, lastmodEpoch: ObjectId('000000000000000000000000') }, number: 17, of: 25, chunk: { min: { foo: -3320413933267719288 }, max: { foo: -2951479051793528256 }, lastmod: Timestamp 2000|18, lastmodEpoch: ObjectId('53460dbe4aaa0fc95616708e') } } } +2014-04-09T23:19:30.345-0400 [conn15] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:30-53460dc27bad85a712a6c413", server: "enter.local", clientAddr: "10.0.1.135:52159", time: new Date(1397099970345), what: "multi-split", ns: "test.docs", details: { before: { min: { foo: MinKey }, max: { foo: 0 }, lastmod: Timestamp 2000|1, lastmodEpoch: ObjectId('000000000000000000000000') }, number: 18, of: 25, chunk: { min: { foo: -2951479051793528256 }, max: { foo: -2582544170319337224 }, lastmod: Timestamp 2000|19, lastmodEpoch: ObjectId('53460dbe4aaa0fc95616708e') } } } +2014-04-09T23:19:30.352-0400 [conn15] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:30-53460dc27bad85a712a6c414", server: "enter.local", clientAddr: "10.0.1.135:52159", time: new Date(1397099970352), what: "multi-split", ns: "test.docs", details: { before: { min: { foo: MinKey }, max: { foo: 0 }, lastmod: Timestamp 2000|1, lastmodEpoch: ObjectId('000000000000000000000000') }, number: 19, of: 25, chunk: { min: { foo: -2582544170319337224 }, max: { foo: -2213609288845146192 }, lastmod: Timestamp 2000|20, lastmodEpoch: ObjectId('53460dbe4aaa0fc95616708e') } } } +2014-04-09T23:19:30.358-0400 [conn15] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:30-53460dc27bad85a712a6c415", server: "enter.local", clientAddr: "10.0.1.135:52159", time: new Date(1397099970358), what: "multi-split", ns: "test.docs", details: { before: { min: { foo: MinKey }, max: { foo: 0 }, lastmod: Timestamp 2000|1, lastmodEpoch: ObjectId('000000000000000000000000') }, number: 20, of: 25, chunk: { min: { foo: -2213609288845146192 }, max: { foo: -1844674407370955160 }, lastmod: Timestamp 2000|21, lastmodEpoch: ObjectId('53460dbe4aaa0fc95616708e') } } } +2014-04-09T23:19:30.364-0400 [conn15] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:30-53460dc27bad85a712a6c416", server: "enter.local", clientAddr: "10.0.1.135:52159", time: new Date(1397099970364), what: "multi-split", ns: "test.docs", details: { before: { min: { foo: MinKey }, max: { foo: 0 }, lastmod: Timestamp 2000|1, lastmodEpoch: ObjectId('000000000000000000000000') }, number: 21, of: 25, chunk: { min: { foo: -1844674407370955160 }, max: { foo: -1475739525896764128 }, lastmod: Timestamp 2000|22, lastmodEpoch: ObjectId('53460dbe4aaa0fc95616708e') } } } +2014-04-09T23:19:30.369-0400 [conn15] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:30-53460dc27bad85a712a6c417", server: "enter.local", clientAddr: "10.0.1.135:52159", time: new Date(1397099970369), what: "multi-split", ns: "test.docs", details: { before: { min: { foo: MinKey }, max: { foo: 0 }, lastmod: Timestamp 2000|1, lastmodEpoch: ObjectId('000000000000000000000000') }, number: 22, of: 25, chunk: { min: { foo: -1475739525896764128 }, max: { foo: -1106804644422573096 }, lastmod: Timestamp 2000|23, lastmodEpoch: ObjectId('53460dbe4aaa0fc95616708e') } } } +2014-04-09T23:19:30.375-0400 [conn15] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:30-53460dc27bad85a712a6c418", server: "enter.local", clientAddr: "10.0.1.135:52159", time: new Date(1397099970375), what: "multi-split", ns: "test.docs", details: { before: { min: { foo: MinKey }, max: { foo: 0 }, lastmod: Timestamp 2000|1, lastmodEpoch: ObjectId('000000000000000000000000') }, number: 23, of: 25, chunk: { min: { foo: -1106804644422573096 }, max: { foo: -737869762948382064 }, lastmod: Timestamp 2000|24, lastmodEpoch: ObjectId('53460dbe4aaa0fc95616708e') } } } +2014-04-09T23:19:30.380-0400 [conn15] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:30-53460dc27bad85a712a6c419", server: "enter.local", clientAddr: "10.0.1.135:52159", time: new Date(1397099970380), what: "multi-split", ns: "test.docs", details: { before: { min: { foo: MinKey }, max: { foo: 0 }, lastmod: Timestamp 2000|1, lastmodEpoch: ObjectId('000000000000000000000000') }, number: 24, of: 25, chunk: { min: { foo: -737869762948382064 }, max: { foo: -368934881474191032 }, lastmod: Timestamp 2000|25, lastmodEpoch: ObjectId('53460dbe4aaa0fc95616708e') } } } +2014-04-09T23:19:30.386-0400 [conn15] about to log metadata event: { _id: "enter.local-2014-04-10T03:19:30-53460dc27bad85a712a6c41a", server: "enter.local", clientAddr: "10.0.1.135:52159", time: new Date(1397099970386), what: "multi-split", ns: "test.docs", details: { before: { min: { foo: MinKey }, max: { foo: 0 }, lastmod: Timestamp 2000|1, lastmodEpoch: ObjectId('000000000000000000000000') }, number: 25, of: 25, chunk: { min: { foo: -368934881474191032 }, max: { foo: 0 }, lastmod: Timestamp 2000|26, lastmodEpoch: ObjectId('53460dbe4aaa0fc95616708e') } } } +2014-04-09T23:19:30.526-0400 [conn15] distributed lock 'test.docs/enter.local:27019:1397099966:1022657369' unlocked. +2014-04-09T23:19:30.526-0400 [conn15] command admin.$cmd command: splitChunk { $msg: "query not recording (too large)" } ntoreturn:1 keyUpdates:0 numYields:0 locks(micros) w:72 reslen:37 685ms +2014-04-09T23:19:40.888-0400 [conn46] end connection 10.0.1.135:52315 (16 connections now open) +2014-04-09T23:19:40.888-0400 [initandlisten] connection accepted from 10.0.1.135:52380 #52 (17 connections now open) +2014-04-09T23:19:40.889-0400 [conn52] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:19:41.011-0400 [conn47] end connection 10.0.1.135:52316 (16 connections now open) +2014-04-09T23:19:41.012-0400 [initandlisten] connection accepted from 10.0.1.135:52381 #53 (17 connections now open) +2014-04-09T23:19:41.012-0400 [conn53] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:19:57.477-0400 [LockPinger] cluster enter.local:27025,enter.local:27026,enter.local:27027 pinged successfully at Wed Apr 9 23:19:57 2014 by distributed lock pinger 'enter.local:27025,enter.local:27026,enter.local:27027/enter.local:27019:1397099966:1022657369', sleeping for 30000ms +2014-04-09T23:20:10.908-0400 [conn52] end connection 10.0.1.135:52380 (16 connections now open) +2014-04-09T23:20:10.908-0400 [initandlisten] connection accepted from 10.0.1.135:52392 #54 (17 connections now open) +2014-04-09T23:20:10.909-0400 [conn54] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:20:11.032-0400 [conn53] end connection 10.0.1.135:52381 (16 connections now open) +2014-04-09T23:20:11.033-0400 [initandlisten] connection accepted from 10.0.1.135:52393 #55 (17 connections now open) +2014-04-09T23:20:11.033-0400 [conn55] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:20:20.526-0400 [clientcursormon] mem (MB) res:12 virt:3364 +2014-04-09T23:20:20.526-0400 [clientcursormon] mapped (incl journal view):832 +2014-04-09T23:20:20.526-0400 [clientcursormon] connections:17 +2014-04-09T23:20:20.527-0400 [clientcursormon] replication threads:32 +2014-04-09T23:20:27.741-0400 [LockPinger] cluster enter.local:27025,enter.local:27026,enter.local:27027 pinged successfully at Wed Apr 9 23:20:27 2014 by distributed lock pinger 'enter.local:27025,enter.local:27026,enter.local:27027/enter.local:27019:1397099966:1022657369', sleeping for 30000ms +2014-04-09T23:20:40.929-0400 [conn54] end connection 10.0.1.135:52392 (16 connections now open) +2014-04-09T23:20:40.930-0400 [initandlisten] connection accepted from 10.0.1.135:52407 #56 (17 connections now open) +2014-04-09T23:20:40.930-0400 [conn56] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:20:41.052-0400 [conn55] end connection 10.0.1.135:52393 (16 connections now open) +2014-04-09T23:20:41.053-0400 [initandlisten] connection accepted from 10.0.1.135:52408 #57 (17 connections now open) +2014-04-09T23:20:41.054-0400 [conn57] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:20:58.006-0400 [LockPinger] cluster enter.local:27025,enter.local:27026,enter.local:27027 pinged successfully at Wed Apr 9 23:20:57 2014 by distributed lock pinger 'enter.local:27025,enter.local:27026,enter.local:27027/enter.local:27019:1397099966:1022657369', sleeping for 30000ms +2014-04-09T23:21:10.951-0400 [conn56] end connection 10.0.1.135:52407 (16 connections now open) +2014-04-09T23:21:10.952-0400 [initandlisten] connection accepted from 10.0.1.135:52420 #58 (17 connections now open) +2014-04-09T23:21:10.953-0400 [conn58] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:21:11.076-0400 [conn57] end connection 10.0.1.135:52408 (16 connections now open) +2014-04-09T23:21:11.076-0400 [initandlisten] connection accepted from 10.0.1.135:52421 #59 (18 connections now open) +2014-04-09T23:21:11.077-0400 [conn59] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:21:28.406-0400 [LockPinger] cluster enter.local:27025,enter.local:27026,enter.local:27027 pinged successfully at Wed Apr 9 23:21:28 2014 by distributed lock pinger 'enter.local:27025,enter.local:27026,enter.local:27027/enter.local:27019:1397099966:1022657369', sleeping for 30000ms +2014-04-09T23:21:34.354-0400 [conn21] command admin.$cmd command: writebacklisten { writebacklisten: ObjectId('53460d074aaa0fc956167075') } ntoreturn:1 keyUpdates:0 numYields:0 reslen:44 300010ms +2014-04-09T23:21:40.973-0400 [conn58] end connection 10.0.1.135:52420 (16 connections now open) +2014-04-09T23:21:40.974-0400 [initandlisten] connection accepted from 10.0.1.135:52437 #60 (17 connections now open) +2014-04-09T23:21:40.975-0400 [conn60] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:21:41.098-0400 [conn59] end connection 10.0.1.135:52421 (16 connections now open) +2014-04-09T23:21:41.099-0400 [initandlisten] connection accepted from 10.0.1.135:52438 #61 (17 connections now open) +2014-04-09T23:21:41.099-0400 [conn61] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:21:58.826-0400 [LockPinger] cluster enter.local:27025,enter.local:27026,enter.local:27027 pinged successfully at Wed Apr 9 23:21:58 2014 by distributed lock pinger 'enter.local:27025,enter.local:27026,enter.local:27027/enter.local:27019:1397099966:1022657369', sleeping for 30000ms +2014-04-09T23:22:01.327-0400 [initandlisten] connection accepted from 127.0.0.1:52449 #62 (18 connections now open) +2014-04-09T23:22:01.328-0400 [conn62] end connection 127.0.0.1:52449 (17 connections now open) +2014-04-09T23:22:01.338-0400 [initandlisten] connection accepted from 127.0.0.1:52458 #63 (18 connections now open) +2014-04-09T23:22:01.345-0400 [initandlisten] connection accepted from 10.0.1.135:52460 #64 (19 connections now open) +2014-04-09T23:22:01.347-0400 [conn63] end connection 127.0.0.1:52458 (18 connections now open) +2014-04-09T23:22:01.351-0400 [conn64] end connection 10.0.1.135:52460 (17 connections now open) +2014-04-09T23:22:03.459-0400 [initandlisten] connection accepted from 127.0.0.1:52471 #65 (18 connections now open) +2014-04-09T23:22:03.460-0400 [conn65] end connection 127.0.0.1:52471 (17 connections now open) +2014-04-09T23:22:03.470-0400 [initandlisten] connection accepted from 127.0.0.1:52480 #66 (18 connections now open) +2014-04-09T23:22:03.477-0400 [initandlisten] connection accepted from 10.0.1.135:52482 #67 (19 connections now open) +2014-04-09T23:22:03.479-0400 [conn66] end connection 127.0.0.1:52480 (18 connections now open) +2014-04-09T23:22:03.483-0400 [conn67] end connection 10.0.1.135:52482 (17 connections now open) +2014-04-09T23:22:03.490-0400 [conn48] end connection 10.0.1.135:52325 (16 connections now open) +2014-04-09T23:22:03.490-0400 [conn51] end connection 10.0.1.135:52347 (16 connections now open) +2014-04-09T23:22:03.490-0400 [conn20] end connection 10.0.1.135:52194 (16 connections now open) +2014-04-09T23:22:03.490-0400 [conn43] end connection 10.0.1.135:52301 (16 connections now open) +2014-04-09T23:22:03.490-0400 [conn15] end connection 10.0.1.135:52159 (16 connections now open) +2014-04-09T23:22:03.490-0400 [conn3] end connection 10.0.1.135:52127 (15 connections now open) +2014-04-09T23:22:03.498-0400 [conn26] end connection 10.0.1.135:52225 (10 connections now open) +2014-04-09T23:22:03.498-0400 [conn25] end connection 10.0.1.135:52224 (10 connections now open) +2014-04-09T23:22:03.500-0400 [initandlisten] connection accepted from 127.0.0.1:52493 #68 (10 connections now open) +2014-04-09T23:22:03.501-0400 [conn68] authenticate db: admin { authenticate: 1, user: "user", nonce: "xxx", key: "xxx" } +2014-04-09T23:22:03.502-0400 [conn68] Failed to authenticate user@admin with mechanism MONGODB-CR: AuthenticationFailed UserNotFound Could not find user user@admin +2014-04-09T23:22:03.502-0400 [conn68] terminating, shutdown command received +2014-04-09T23:22:03.502-0400 [conn68] dbexit: shutdown called +2014-04-09T23:22:03.502-0400 [conn68] shutdown: going to close listening sockets... +2014-04-09T23:22:03.503-0400 [conn68] closing listening socket: 9 +2014-04-09T23:22:03.503-0400 [conn68] closing listening socket: 10 +2014-04-09T23:22:03.503-0400 [conn68] removing socket file: /tmp/mongodb-27019.sock +2014-04-09T23:22:03.503-0400 [conn68] shutdown: going to flush diaglog... +2014-04-09T23:22:03.503-0400 [conn68] shutdown: going to close sockets... +2014-04-09T23:22:03.504-0400 [conn68] shutdown: waiting for fs preallocator... +2014-04-09T23:22:03.504-0400 [conn61] end connection 10.0.1.135:52438 (9 connections now open) +2014-04-09T23:22:03.504-0400 [conn68] shutdown: lock for final commit... +2014-04-09T23:22:03.505-0400 [conn34] end connection 10.0.1.135:52239 (9 connections now open) +2014-04-09T23:22:03.505-0400 [conn36] end connection 10.0.1.135:52241 (9 connections now open) +2014-04-09T23:22:03.506-0400 [conn68] shutdown: final commit... +2014-04-09T23:22:03.506-0400 [conn50] end connection 10.0.1.135:52346 (9 connections now open) +2014-04-09T23:22:03.507-0400 [conn60] end connection 10.0.1.135:52437 (9 connections now open) +2014-04-09T23:22:03.507-0400 [conn49] end connection 10.0.1.135:52345 (9 connections now open) +2014-04-09T23:22:03.514-0400 [conn68] shutdown: closing all files... +2014-04-09T23:22:03.514-0400 [conn68] closeAllFiles() finished +2014-04-09T23:22:03.514-0400 [conn68] journalCleanup... +2014-04-09T23:22:03.514-0400 [conn68] removeJournalFiles +2014-04-09T23:22:03.515-0400 [conn68] shutdown: removing fs lock... +2014-04-09T23:22:03.515-0400 [conn68] dbexit: really exiting now +2014-04-09T23:22:48.502-0400 ***** SERVER RESTARTED ***** +2014-04-09T23:22:48.507-0400 [initandlisten] MongoDB starting : pid=4201 port=27019 dbpath=/Users/tr/Documents/tmp/data/shard01/rs1/db 64-bit host=enter.local +2014-04-09T23:22:48.508-0400 [initandlisten] db version v2.6.0 +2014-04-09T23:22:48.508-0400 [initandlisten] git version: 1c1c76aeca21c5983dc178920f5052c298db616c +2014-04-09T23:22:48.508-0400 [initandlisten] build info: Darwin mci-osx108-8.build.10gen.cc 12.5.0 Darwin Kernel Version 12.5.0: Sun Sep 29 13:33:47 PDT 2013; root:xnu-2050.48.12~1/RELEASE_X86_64 x86_64 BOOST_LIB_VERSION=1_49 +2014-04-09T23:22:48.508-0400 [initandlisten] allocator: system +2014-04-09T23:22:48.508-0400 [initandlisten] options: { net: { port: 27019 }, processManagement: { fork: true }, replication: { replSet: "shard01" }, security: { keyFile: "/Users/tr/Documents/tmp/data/keyfile" }, storage: { dbPath: "/Users/tr/Documents/tmp/data/shard01/rs1/db" }, systemLog: { destination: "file", logAppend: true, path: "/Users/tr/Documents/tmp/data/shard01/rs1/mongod.log" } } +2014-04-09T23:22:48.509-0400 [initandlisten] journal dir=/Users/tr/Documents/tmp/data/shard01/rs1/db/journal +2014-04-09T23:22:48.510-0400 [initandlisten] recover : no journal files present, no recovery needed +2014-04-09T23:22:48.592-0400 [initandlisten] waiting for connections on port 27019 +2014-04-09T23:22:48.632-0400 [rsStart] replSet I am enter.local:27019 +2014-04-09T23:22:48.641-0400 [rsStart] replSet STARTUP2 +2014-04-09T23:22:48.642-0400 [rsSync] replSet SECONDARY +2014-04-09T23:22:49.363-0400 [initandlisten] connection accepted from 127.0.0.1:52558 #1 (1 connection now open) +2014-04-09T23:22:49.364-0400 [conn1] note: no users configured in admin.system.users, allowing localhost access +2014-04-09T23:22:49.365-0400 [conn1] end connection 127.0.0.1:52558 (0 connections now open) +2014-04-09T23:22:49.517-0400 [initandlisten] connection accepted from 10.0.1.135:52580 #2 (1 connection now open) +2014-04-09T23:22:49.518-0400 [conn2] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:22:49.593-0400 [initandlisten] connection accepted from 127.0.0.1:52597 #3 (2 connections now open) +2014-04-09T23:22:49.594-0400 [conn3] end connection 127.0.0.1:52597 (1 connection now open) +2014-04-09T23:22:49.597-0400 [initandlisten] connection accepted from 10.0.1.135:52606 #4 (2 connections now open) +2014-04-09T23:22:49.598-0400 [conn4] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:22:49.605-0400 [initandlisten] connection accepted from 127.0.0.1:52615 #5 (3 connections now open) +2014-04-09T23:22:49.606-0400 [conn5] end connection 127.0.0.1:52615 (2 connections now open) +2014-04-09T23:22:49.617-0400 [initandlisten] connection accepted from 127.0.0.1:52624 #6 (3 connections now open) +2014-04-09T23:22:49.624-0400 [initandlisten] connection accepted from 10.0.1.135:52626 #7 (4 connections now open) +2014-04-09T23:22:49.625-0400 [conn6] end connection 127.0.0.1:52624 (3 connections now open) +2014-04-09T23:22:49.631-0400 [conn7] end connection 10.0.1.135:52626 (2 connections now open) +2014-04-09T23:22:50.636-0400 [rsHealthPoll] replset info enter.local:27021 thinks that we are down +2014-04-09T23:22:50.636-0400 [rsHealthPoll] replset info enter.local:27020 thinks that we are down +2014-04-09T23:22:50.636-0400 [rsHealthPoll] replSet member enter.local:27021 is up +2014-04-09T23:22:50.637-0400 [rsHealthPoll] replSet member enter.local:27020 is up +2014-04-09T23:22:50.637-0400 [rsHealthPoll] replSet member enter.local:27021 is now in state SECONDARY +2014-04-09T23:22:50.638-0400 [rsHealthPoll] replSet member enter.local:27020 is now in state SECONDARY +2014-04-09T23:22:50.638-0400 [rsMgr] not electing self, enter.local:27021 would veto with 'I don't think enter.local:27019 is electable' +2014-04-09T23:22:50.639-0400 [rsMgr] not electing self, enter.local:27021 would veto with 'I don't think enter.local:27019 is electable' +2014-04-09T23:22:50.749-0400 [initandlisten] connection accepted from 10.0.1.135:52638 #8 (3 connections now open) +2014-04-09T23:22:50.750-0400 [conn8] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:22:50.864-0400 [initandlisten] connection accepted from 10.0.1.135:52640 #9 (4 connections now open) +2014-04-09T23:22:50.864-0400 [conn9] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:22:56.643-0400 [rsMgr] replSet info electSelf 0 +2014-04-09T23:22:56.649-0400 [rsMgr] replSet PRIMARY +2014-04-09T23:22:56.766-0400 [initandlisten] connection accepted from 10.0.1.135:52647 #10 (5 connections now open) +2014-04-09T23:22:56.766-0400 [conn10] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:22:56.767-0400 [initandlisten] connection accepted from 10.0.1.135:52648 #11 (6 connections now open) +2014-04-09T23:22:56.768-0400 [conn11] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:22:56.878-0400 [initandlisten] connection accepted from 10.0.1.135:52649 #12 (7 connections now open) +2014-04-09T23:22:56.879-0400 [conn12] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:22:56.881-0400 [initandlisten] connection accepted from 10.0.1.135:52650 #13 (8 connections now open) +2014-04-09T23:22:56.881-0400 [conn13] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:22:59.960-0400 [initandlisten] connection accepted from 127.0.0.1:52657 #14 (9 connections now open) +2014-04-09T23:22:59.961-0400 [conn14] end connection 127.0.0.1:52657 (8 connections now open) +2014-04-09T23:22:59.971-0400 [initandlisten] connection accepted from 127.0.0.1:52666 #15 (9 connections now open) +2014-04-09T23:22:59.977-0400 [initandlisten] connection accepted from 10.0.1.135:52668 #16 (10 connections now open) +2014-04-09T23:22:59.979-0400 [conn15] end connection 127.0.0.1:52666 (9 connections now open) +2014-04-09T23:22:59.983-0400 [conn16] end connection 10.0.1.135:52668 (8 connections now open) +2014-04-09T23:23:04.762-0400 [conn8] end connection 10.0.1.135:52638 (7 connections now open) +2014-04-09T23:23:04.762-0400 [initandlisten] connection accepted from 10.0.1.135:52679 #17 (8 connections now open) +2014-04-09T23:23:04.763-0400 [conn17] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:23:04.876-0400 [conn9] end connection 10.0.1.135:52640 (7 connections now open) +2014-04-09T23:23:04.876-0400 [initandlisten] connection accepted from 10.0.1.135:52680 #18 (8 connections now open) +2014-04-09T23:23:04.877-0400 [conn18] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:23:07.962-0400 [initandlisten] connection accepted from 127.0.0.1:52686 #19 (9 connections now open) +2014-04-09T23:23:07.962-0400 [conn19] end connection 127.0.0.1:52686 (8 connections now open) +2014-04-09T23:23:07.973-0400 [initandlisten] connection accepted from 127.0.0.1:52695 #20 (9 connections now open) +2014-04-09T23:23:07.980-0400 [initandlisten] connection accepted from 10.0.1.135:52697 #21 (10 connections now open) +2014-04-09T23:23:07.982-0400 [conn20] end connection 127.0.0.1:52695 (9 connections now open) +2014-04-09T23:23:07.986-0400 [conn21] end connection 10.0.1.135:52697 (8 connections now open) +2014-04-09T23:23:11.668-0400 [initandlisten] connection accepted from 127.0.0.1:52708 #22 (9 connections now open) +2014-04-09T23:23:11.669-0400 [conn22] end connection 127.0.0.1:52708 (8 connections now open) +2014-04-09T23:23:11.679-0400 [initandlisten] connection accepted from 127.0.0.1:52717 #23 (9 connections now open) +2014-04-09T23:23:11.686-0400 [initandlisten] connection accepted from 10.0.1.135:52719 #24 (10 connections now open) +2014-04-09T23:23:11.687-0400 [conn23] end connection 127.0.0.1:52717 (9 connections now open) +2014-04-09T23:23:11.692-0400 [conn24] end connection 10.0.1.135:52719 (8 connections now open) +2014-04-09T23:23:11.702-0400 [signalProcessingThread] got signal 15 (Terminated: 15), will terminate after current cmd ends +2014-04-09T23:23:11.703-0400 [signalProcessingThread] now exiting +2014-04-09T23:23:11.703-0400 [signalProcessingThread] dbexit: +2014-04-09T23:23:11.703-0400 [signalProcessingThread] shutdown: going to close listening sockets... +2014-04-09T23:23:11.703-0400 [signalProcessingThread] closing listening socket: 10 +2014-04-09T23:23:11.708-0400 [conn2] end connection 10.0.1.135:52580 (7 connections now open) +2014-04-09T23:23:11.709-0400 [conn4] end connection 10.0.1.135:52606 (6 connections now open) +2014-04-09T23:23:11.710-0400 [signalProcessingThread] closing listening socket: 11 +2014-04-09T23:23:11.711-0400 [signalProcessingThread] removing socket file: /tmp/mongodb-27019.sock +2014-04-09T23:23:11.712-0400 [signalProcessingThread] shutdown: going to flush diaglog... +2014-04-09T23:23:11.712-0400 [signalProcessingThread] shutdown: going to close sockets... +2014-04-09T23:23:11.712-0400 [signalProcessingThread] shutdown: waiting for fs preallocator... +2014-04-09T23:23:11.712-0400 [signalProcessingThread] shutdown: lock for final commit... +2014-04-09T23:23:11.713-0400 [signalProcessingThread] shutdown: final commit... +2014-04-09T23:23:11.727-0400 [conn17] end connection 10.0.1.135:52679 (5 connections now open) +2014-04-09T23:23:11.728-0400 [conn11] end connection 10.0.1.135:52648 (5 connections now open) +2014-04-09T23:23:11.728-0400 [conn13] end connection 10.0.1.135:52650 (5 connections now open) +2014-04-09T23:23:11.729-0400 [conn18] end connection 10.0.1.135:52680 (5 connections now open) +2014-04-09T23:23:11.750-0400 [signalProcessingThread] shutdown: closing all files... +2014-04-09T23:23:11.751-0400 [signalProcessingThread] closeAllFiles() finished +2014-04-09T23:23:11.752-0400 [signalProcessingThread] journalCleanup... +2014-04-09T23:23:11.752-0400 [signalProcessingThread] removeJournalFiles +2014-04-09T23:23:11.752-0400 [signalProcessingThread] shutdown: removing fs lock... +2014-04-09T23:23:11.753-0400 [signalProcessingThread] dbexit: really exiting now +2014-04-09T23:23:16.155-0400 ***** SERVER RESTARTED ***** +2014-04-09T23:23:16.160-0400 [initandlisten] MongoDB starting : pid=4243 port=27019 dbpath=/Users/tr/Documents/tmp/data/shard01/rs1/db 64-bit host=enter.local +2014-04-09T23:23:16.160-0400 [initandlisten] db version v2.6.0 +2014-04-09T23:23:16.160-0400 [initandlisten] git version: 1c1c76aeca21c5983dc178920f5052c298db616c +2014-04-09T23:23:16.160-0400 [initandlisten] build info: Darwin mci-osx108-8.build.10gen.cc 12.5.0 Darwin Kernel Version 12.5.0: Sun Sep 29 13:33:47 PDT 2013; root:xnu-2050.48.12~1/RELEASE_X86_64 x86_64 BOOST_LIB_VERSION=1_49 +2014-04-09T23:23:16.160-0400 [initandlisten] allocator: system +2014-04-09T23:23:16.160-0400 [initandlisten] options: { net: { port: 27019 }, processManagement: { fork: true }, replication: { replSet: "shard01" }, security: { keyFile: "/Users/tr/Documents/tmp/data/keyfile" }, storage: { dbPath: "/Users/tr/Documents/tmp/data/shard01/rs1/db" }, systemLog: { destination: "file", logAppend: true, path: "/Users/tr/Documents/tmp/data/shard01/rs1/mongod.log" } } +2014-04-09T23:23:16.162-0400 [initandlisten] journal dir=/Users/tr/Documents/tmp/data/shard01/rs1/db/journal +2014-04-09T23:23:16.162-0400 [initandlisten] recover : no journal files present, no recovery needed +2014-04-09T23:23:16.184-0400 [initandlisten] waiting for connections on port 27019 +2014-04-09T23:23:16.192-0400 [rsStart] replSet I am enter.local:27019 +2014-04-09T23:23:16.200-0400 [rsStart] replSet STARTUP2 +2014-04-09T23:23:16.201-0400 [rsSync] replSet SECONDARY +2014-04-09T23:23:16.634-0400 [initandlisten] connection accepted from 127.0.0.1:52788 #1 (1 connection now open) +2014-04-09T23:23:16.635-0400 [conn1] note: no users configured in admin.system.users, allowing localhost access +2014-04-09T23:23:16.638-0400 [conn1] end connection 127.0.0.1:52788 (0 connections now open) +2014-04-09T23:23:16.743-0400 [initandlisten] connection accepted from 10.0.1.135:52809 #2 (1 connection now open) +2014-04-09T23:23:16.744-0400 [conn2] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:23:16.822-0400 [initandlisten] connection accepted from 127.0.0.1:52827 #3 (2 connections now open) +2014-04-09T23:23:16.824-0400 [initandlisten] connection accepted from 10.0.1.135:52832 #4 (3 connections now open) +2014-04-09T23:23:16.825-0400 [conn4] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:23:16.828-0400 [conn3] end connection 127.0.0.1:52827 (2 connections now open) +2014-04-09T23:23:16.830-0400 [initandlisten] connection accepted from 10.0.1.135:52842 #5 (3 connections now open) +2014-04-09T23:23:16.831-0400 [conn5] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:23:16.832-0400 [conn5] end connection 10.0.1.135:52842 (2 connections now open) +2014-04-09T23:23:16.835-0400 [initandlisten] connection accepted from 127.0.0.1:52845 #6 (3 connections now open) +2014-04-09T23:23:16.836-0400 [conn6] end connection 127.0.0.1:52845 (2 connections now open) +2014-04-09T23:23:16.847-0400 [initandlisten] connection accepted from 127.0.0.1:52854 #7 (3 connections now open) +2014-04-09T23:23:16.854-0400 [initandlisten] connection accepted from 10.0.1.135:52856 #8 (4 connections now open) +2014-04-09T23:23:16.856-0400 [conn7] end connection 127.0.0.1:52854 (3 connections now open) +2014-04-09T23:23:16.861-0400 [conn8] end connection 10.0.1.135:52856 (2 connections now open) +2014-04-09T23:23:18.196-0400 [rsHealthPoll] replset info enter.local:27020 thinks that we are down +2014-04-09T23:23:18.196-0400 [rsHealthPoll] replset info enter.local:27021 thinks that we are down +2014-04-09T23:23:18.196-0400 [rsHealthPoll] replSet member enter.local:27020 is up +2014-04-09T23:23:18.197-0400 [rsHealthPoll] replSet member enter.local:27021 is up +2014-04-09T23:23:18.197-0400 [rsHealthPoll] replSet member enter.local:27020 is now in state SECONDARY +2014-04-09T23:23:18.198-0400 [rsHealthPoll] replSet member enter.local:27021 is now in state SECONDARY +2014-04-09T23:23:18.198-0400 [rsMgr] not electing self, enter.local:27021 would veto with 'I don't think enter.local:27019 is electable' + +2014-04-09T23:23:18.199-0400 [rsMgr] not electing self, enter.local:27021 would veto with 'I don't think enter.local:27019 is electable' +2014-04-09T23:23:18.253-0400 [initandlisten] connection accepted from 10.0.1.135:52868 #9 (3 connections now open) +2014-04-09T23:23:18.254-0400 [conn9] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:23:18.309-0400 [initandlisten] connection accepted from 10.0.1.135:52870 #10 (4 connections now open) +2014-04-09T23:23:18.310-0400 [conn10] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:23:24.204-0400 [rsMgr] replSet info electSelf 0 +2014-04-09T23:23:24.208-0400 [rsMgr] replSet PRIMARY +2014-04-09T23:23:24.264-0400 [initandlisten] connection accepted from 10.0.1.135:52878 #11 (5 connections now open) +2014-04-09T23:23:24.264-0400 [conn11] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:23:24.265-0400 [initandlisten] connection accepted from 10.0.1.135:52879 #12 (6 connections now open) +2014-04-09T23:23:24.266-0400 [conn12] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:23:24.322-0400 [initandlisten] connection accepted from 10.0.1.135:52880 #13 (7 connections now open) +2014-04-09T23:23:24.323-0400 [conn13] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:23:24.325-0400 [initandlisten] connection accepted from 10.0.1.135:52881 #14 (8 connections now open) +2014-04-09T23:23:24.326-0400 [conn14] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:23:32.268-0400 [conn9] end connection 10.0.1.135:52868 (7 connections now open) +2014-04-09T23:23:32.269-0400 [initandlisten] connection accepted from 10.0.1.135:52887 #15 (8 connections now open) +2014-04-09T23:23:32.269-0400 [conn15] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:23:32.321-0400 [conn10] end connection 10.0.1.135:52870 (7 connections now open) +2014-04-09T23:23:32.321-0400 [initandlisten] connection accepted from 10.0.1.135:52888 #16 (8 connections now open) +2014-04-09T23:23:32.322-0400 [conn16] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:24:02.290-0400 [conn15] end connection 10.0.1.135:52887 (7 connections now open) +2014-04-09T23:24:02.291-0400 [initandlisten] connection accepted from 10.0.1.135:52899 #17 (8 connections now open) +2014-04-09T23:24:02.291-0400 [conn17] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T22:24:02.341-0400 [conn16] end connection 10.0.1.135:52888 (7 connections now open) +2014-04-08T23:24:02.342-0400 [initandlisten] connection accepted from 10.0.1.135:52900 #18 (8 connections now open) +2014-03-09T23:24:02.342-0400 [conn18] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2013-04-09T23:24:16.197-0400 [clientcursormon] mem (MB) res:25 virt:3354 +2014-04-09T23:24:16.197-0400 [clientcursormon] mapped (incl journal view):832 +1014-04-09T23:24:16.197-0400 [clientcursormon] connections:8 +2014-04-09T23:24:16.198-0400 [clientcursormon] replication threads:32 +2014-04-09T23:24:16.758-0400 [initandlisten] connection accepted from 10.0.1.135:52911 #19 (9 connections now open) +2014-04-09T23:24:16.758-0400 [conn19] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:24:16.836-0400 [initandlisten] connection accepted from 10.0.1.135:52927 #20 (10 connections now open) +2014-04-09T23:24:16.836-0400 [conn20] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:24:32.306-0400 [conn17] end connection 10.0.1.135:52899 (9 connections now open) +2014-04-09T23:24:32.307-0400 [initandlisten] connection accepted from 10.0.1.135:52940 #21 (10 connections now open) +2014-04-09T23:24:32.308-0400 [conn21] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:24:32.358-0400 [conn18] end connection 10.0.1.135:52900 (9 connections now open) +2014-04-09T23:24:32.358-0400 [initandlisten] connection accepted from 10.0.1.135:52941 #22 (10 connections now open) +2014-04-09T23:24:32.359-0400 [conn22] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:25:02.319-0400 [conn21] end connection 10.0.1.135:52940 (9 connections now open) +2014-04-09T23:25:02.319-0400 [initandlisten] connection accepted from 10.0.1.135:52952 #23 (10 connections now open) +2014-04-09T23:25:02.320-0400 [conn23] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:25:02.370-0400 [conn22] end connection 10.0.1.135:52941 (9 connections now open) +2014-04-09T23:25:02.371-0400 [initandlisten] connection accepted from 10.0.1.135:52953 #24 (10 connections now open) +2014-04-09T23:25:02.371-0400 [conn24] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:25:32.339-0400 [conn23] end connection 10.0.1.135:52952 (9 connections now open) +2014-04-09T23:25:32.339-0400 [initandlisten] connection accepted from 10.0.1.135:52964 #25 (11 connections now open) +2014-04-09T23:25:32.340-0400 [conn25] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:25:32.391-0400 [conn24] end connection 10.0.1.135:52953 (9 connections now open) +2014-04-09T23:25:32.392-0400 [initandlisten] connection accepted from 10.0.1.135:52965 #26 (10 connections now open) +2014-04-09T23:25:32.392-0400 [conn26] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:25:32.422-0400 [initandlisten] connection accepted from 10.0.1.135:52966 #27 (11 connections now open) +2014-04-09T23:25:32.422-0400 [conn27] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:25:32.425-0400 [conn27] first cluster operation detected, adding sharding hook to enable versioning and authentication to remote servers +2014-04-09T23:25:32.500+0000 [conn3434] query toothsome.dimgray.huckleberry query: { cid: ObjectId('50b48019a8e0e6aa50be060e') } planSummary: IXSCAN { cid: 1 } ntoreturn:0 ntoskip:0 keyUpdates:0 numYields:0 locks(micros) r:637 nreturned:51 reslen:9887 0ms +2014-04-09T23:25:32.501+0000 [conn4209] query toothsome.dimgray.jambul query: { _id: ObjectId('5371ee419194bf134f7a34d0') } planSummary: IDHACK ntoskip:0 keyUpdates:0 numYields:0 locks(micros) r:53 nreturned:1 reslen:836 0ms +2014-04-09T23:25:32.502+0000 [conn3355] query toothsome.dimgray.cranberry query: { cid: ObjectId('535c8bc39194bf134f7757c0') } planSummary: IXSCAN { cid: 1 } ntoreturn:20 ntoskip:0 keyUpdates:0 numYields:0 locks(micros) r:793 nreturned:0 reslen:20 0ms +2014-04-09T23:25:32.503+0000 [conn1112] command admin.$cmd command: replSetUpdatePosition { replSetUpdatePosition: 1, optimes: [ { _id: ObjectId('507109f41d535b2d6303250a'), optime: Timestamp 1403491199000|904, config: { _id: 5, host: a2f444e5356783e874610f0db364e614 } }, { _id: ObjectId('533f7599c518384d6bac172a'), optime: Timestamp 1403491199000|903, config: { _id: 7, host: 58b1f6ae4c71a21cb350b00a3df8dfbd } }, { _id: ObjectId('5340467fc84d93219d11da8f'), optime: Timestamp 1403491199000|907, config: { _id: 8, host: 8eeacf41d74c678adc52946c74876cea, priority: 5.0 } }, { _id: ObjectId('5342d40c9194bf134f7407c7'), optime: Timestamp 1403490939000|99, config: { _id: 10, host: fed904eba15f0d8f6c8bf16bd89abb88, priority: 10.0 } } ] } ntoreturn:1 keyUpdates:0 numYields:0 reslen:37 0ms +2014-04-09T23:25:32.504+0000 [conn4492] query toothsome.dimgray.gooseberry query: { $query: { groupId: ObjectId('51c44d167fe227e9f18ce94a'), active: true }, $orderby: { name: 1 } } planSummary: IXSCAN { groupId: 1, name: 1 } ntoreturn:20 ntoskip:0 keyUpdates:0 numYields:0 locks(micros) r:1353 nreturned:1 reslen:1125 1ms +2014-04-09T23:25:32.505+0000 [conn3297] query toothsome.dimgray.honeydew query: { burlywood.raisin: 63a7dc66fcdcf22f4b4f6adba20458fc } planSummary: IXSCAN { burlywood.raisin: 1 } ntoskip:0 keyUpdates:0 numYields:0 locks(micros) r:169 nreturned:1 reslen:1295 0ms +2014-04-09T23:25:32.506+0000 [conn3067] update toothsome.dimgray.huckleberry query: { _id: 0bcaf8127fb736e69893550ee650920e } update: { $set: { cid: ObjectId('503ead0bf1a5dd0b002fb069'), n: b84afcc70af7c1ddceca1004352d7569, p: 27017, hid: b35b2aa83ecd70f0eb7d183d37a867f7, lu: 1403491195955, uniqueHash: d042393cf56eb50c9d15762451857760 } } nscanned:1 nscannedObjects:1 nMatched:1 nModified:1 fastmod:1 keyUpdates:0 numYields:0 locks(micros) w:156 294ms +2014-04-09T23:25:32.507+0000 [conn3262] query toothsome.dimgray.jambul query: { _id: ObjectId('5216f42a7ec5df2d7bff5b92') } planSummary: IDHACK ntoskip:0 keyUpdates:0 numYields:0 locks(micros) r:73 nreturned:1 reslen:38 0ms +2014-04-09T23:25:32.508+0000 [conn3067] command toothsome.$cmd command: update { update: cf61573f3f20d6df92095da0475f1d9e, ordered: true, updates: [ { q: { _id: 0bcaf8127fb736e69893550ee650920e }, u: { $set: { cid: ObjectId('503ead0bf1a5dd0b002fb069'), n: b84afcc70af7c1ddceca1004352d7569, p: 27017, hid: b35b2aa83ecd70f0eb7d183d37a867f7, lu: 1403491195955, uniqueHash: d042393cf56eb50c9d15762451857760 } }, upsert: true } ] } keyUpdates:0 numYields:0 reslen:95 294ms +2014-04-09T23:25:32.509+0000 [conn1112] command admin.$cmd command: replSetUpdatePosition { replSetUpdatePosition: 1, optimes: [ { _id: ObjectId('507109f41d535b2d6303250a'), optime: Timestamp 1403491199000|904, config: { _id: 5, host: a2f444e5356783e874610f0db364e614 } }, { _id: ObjectId('533f7599c518384d6bac172a'), optime: Timestamp 1403491199000|903, config: { _id: 7, host: 58b1f6ae4c71a21cb350b00a3df8dfbd } }, { _id: ObjectId('5340467fc84d93219d11da8f'), optime: Timestamp 1403491200000|1, config: { _id: 8, host: 8eeacf41d74c678adc52946c74876cea, priority: 5.0 } }, { _id: ObjectId('5342d40c9194bf134f7407c7'), optime: Timestamp 1403490939000|99, config: { _id: 10, host: fed904eba15f0d8f6c8bf16bd89abb88, priority: 10.0 } } ] } ntoreturn:1 keyUpdates:0 numYields:0 reslen:37 0ms +2014-04-09T23:25:32.510+0000 [conn3259] query toothsome.dimgray.jambul query: { _id: ObjectId('51f2e8de7ec5df2d7bf9e8ee') } planSummary: IDHACK ntoskip:0 keyUpdates:0 numYields:0 locks(micros) r:79 nreturned:1 reslen:38 0ms +2014-04-09T23:25:32.511+0000 [conn2367] getmore local.oplog.rs cursorid:537001767503 ntoreturn:0 keyUpdates:0 numYields:0 locks(micros) r:37 nreturned:1 reslen:209 2ms +2014-04-09T23:25:32.512+0000 [conn3466] command toothsome.$cmd command: update { $msg: 70f2c4e066bff1d86b0ab4fea0c54fd3 } keyUpdates:0 numYields:0 reslen:95 300ms +2014-04-09T23:25:32.513+0000 [conn1112] command admin.$cmd command: replSetUpdatePosition { replSetUpdatePosition: 1, optimes: [ { _id: ObjectId('507109f41d535b2d6303250a'), optime: Timestamp 1403491199000|904, config: { _id: 5, host: a2f444e5356783e874610f0db364e614 } }, { _id: ObjectId('533f7599c518384d6bac172a'), optime: Timestamp 1403491199000|907, config: { _id: 7, host: 58b1f6ae4c71a21cb350b00a3df8dfbd } }, { _id: ObjectId('5340467fc84d93219d11da8f'), optime: Timestamp 1403491200000|1, config: { _id: 8, host: 8eeacf41d74c678adc52946c74876cea, priority: 5.0 } }, { _id: ObjectId('5342d40c9194bf134f7407c7'), optime: Timestamp 1403490939000|99, config: { _id: 10, host: fed904eba15f0d8f6c8bf16bd89abb88, priority: 10.0 } } ] } ntoreturn:1 keyUpdates:0 numYields:0 reslen:37 0ms +2014-04-09T23:25:32.514+0000 [conn4092] query toothsome.dimgray.bell.pepper query: { cid: ObjectId('536080589194bf134f77929b') } planSummary: IXSCAN { cid: 1, ht: 1 } ntoreturn:20 ntoskip:0 keyUpdates:0 numYields:0 locks(micros) r:1006 nreturned:2 reslen:1794 1ms +2014-04-09T23:25:32.515+0000 [conn4157] query toothsome.dimgray.jambul query: { k: f403383e2dc974bd876df25ac068232d } planSummary: IXSCAN { k: 1 } ntoskip:0 keyUpdates:0 numYields:0 locks(micros) r:155 nreturned:1 reslen:864 0ms +2014-04-09T23:25:32.516+0000 [conn4259] query toothsome.dimgray.bell.pepper query: { _id: 620d11743f2a2319e8a410615c02da21 } planSummary: IDHACK ntoskip:0 keyUpdates:0 numYields:0 locks(micros) r:50 nreturned:0 reslen:20 0ms +2014-04-09T23:25:32.517+0000 [conn4097] query toothsome.dimgray.bell.pepper query: { _id: e1e3d8e1f572a5adc97fc1d83ce02531 } planSummary: IDHACK ntoskip:0 keyUpdates:0 numYields:0 locks(micros) r:57 nreturned:0 reslen:20 0ms +2014-04-09T23:25:32.518+0000 [conn3502] query toothsome.dimgray.pear query: { cid: ObjectId('526f93647ec5df2d7b0a0ea1') } planSummary: IXSCAN { cid: 1 } ntoreturn:20 ntoskip:0 keyUpdates:0 numYields:0 locks(micros) r:137 nreturned:0 reslen:20 0ms +2014-04-09T23:25:32.519+0000 [conn1112] command admin.$cmd command: replSetUpdatePosition { replSetUpdatePosition: 1, optimes: [ { _id: ObjectId('507109f41d535b2d6303250a'), optime: Timestamp 1403491199000|907, config: { _id: 5, host: a2f444e5356783e874610f0db364e614 } }, { _id: ObjectId('533f7599c518384d6bac172a'), optime: Timestamp 1403491199000|907, config: { _id: 7, host: 58b1f6ae4c71a21cb350b00a3df8dfbd } }, { _id: ObjectId('5340467fc84d93219d11da8f'), optime: Timestamp 1403491200000|1, config: { _id: 8, host: 8eeacf41d74c678adc52946c74876cea, priority: 5.0 } }, { _id: ObjectId('5342d40c9194bf134f7407c7'), optime: Timestamp 1403490939000|99, config: { _id: 10, host: fed904eba15f0d8f6c8bf16bd89abb88, priority: 10.0 } } ] } ntoreturn:1 keyUpdates:0 numYields:0 reslen:37 0ms +2014-04-09T23:25:32.520+0000 [conn1111] getmore local.oplog.rs cursorid:537932004656 ntoreturn:0 keyUpdates:0 numYields:0 locks(micros) r:38 nreturned:1 reslen:209 2ms +2014-04-09T23:25:32.521+0000 [conn4183] query toothsome.dimgray.jambul query: { _id: ObjectId('53443e5f9194bf134f7408fc') } planSummary: IDHACK ntoskip:0 keyUpdates:0 numYields:0 locks(micros) r:66 nreturned:1 reslen:38 0ms +2014-04-09T23:25:32.522+0000 [conn3053] query toothsome.dimgray.jambul query: { _id: ObjectId('503c9e3bf1a5dd0b002faf17') } planSummary: IDHACK ntoskip:0 keyUpdates:0 numYields:0 locks(micros) r:61 nreturned:1 reslen:42 0ms +2014-04-09T23:25:32.523+0000 [conn4259] query toothsome.dimgray.bell.pepper query: { _id: c9bda22779f6cef88c25453c061a140a } planSummary: IDHACK ntoskip:0 keyUpdates:0 numYields:0 locks(micros) r:43 nreturned:0 reslen:20 0ms +2014-04-09T23:25:32.524+0000 [conn3297] query toothsome.dimgray.jambul query: { _id: ObjectId('4e9c5b14ae6429bfa40fcac1') } planSummary: IDHACK ntoskip:0 keyUpdates:0 numYields:0 locks(micros) r:50 nreturned:1 reslen:1547 0ms +2014-04-09T23:25:32.525+0000 [conn4215] query toothsome.dimgray.tangerine query: { cid: ObjectId('4eada187ae6429bfa40fee65') } planSummary: IXSCAN { cid: 1 } ntoreturn:0 ntoskip:0 keyUpdates:0 numYields:0 locks(micros) r:421 nreturned:5 reslen:841 0ms + +2014-04-09T23:25:32.426-0400 [initandlisten] connection accepted from 10.0.1.135:52967 #28 (12 connections now open) +2014-04-09T23:25:32.428-0400 [conn28] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:25:42.054-0400 [initandlisten] connection accepted from 10.0.1.135:52980 #29 (13 connections now open) +2014-04-09T23:25:42.054-0400 [conn29] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:25:42.070-0400 [initandlisten] connection accepted from 10.0.1.135:52985 #30 (14 connections now open) +2014-04-09T23:25:42.071-0400 [conn30] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:25:42.071-0400 [conn30] remote client 10.0.1.135:52985 initialized this host as shard shard01 +2014-04-09T23:25:42.071-0400 [conn30] remotely refreshing metadata for test.docs with requested shard version 2|26||53460dbe4aaa0fc95616708e, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 +2014-04-09T23:25:42.072-0400 [conn30] SyncClusterConnection connecting to [enter.local:27025] +2014-04-09T23:25:42.072-0400 [conn30] SyncClusterConnection connecting to [enter.local:27026] +2014-04-09T23:25:42.072-0400 [conn30] SyncClusterConnection connecting to [enter.local:27027] +2014-04-09T23:25:42.075-0400 [conn30] collection test.docs was previously unsharded, new metadata loaded with shard version 2|26||53460dbe4aaa0fc95616708e +2014-04-09T23:25:42.075-0400 [conn30] collection version was loaded at version 2|51||53460dbe4aaa0fc95616708e, took 3ms +2014-04-09T23:26:02.356-0400 [conn25] end connection 10.0.1.135:52964 (13 connections now open) +2014-04-09T23:26:02.356-0400 [initandlisten] connection accepted from 10.0.1.135:52996 #31 (15 connections now open) +2014-04-09T23:26:02.357-0400 [conn31] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:26:02.411-0400 [conn26] end connection 10.0.1.135:52965 (13 connections now open) +2014-04-09T23:26:02.412-0400 [initandlisten] connection accepted from 10.0.1.135:52997 #32 (15 connections now open) +2014-04-09T23:26:02.413-0400 [conn32] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:26:32.377-0400 [conn31] end connection 10.0.1.135:52996 (13 connections now open) +2014-04-09T23:26:32.378-0400 [initandlisten] connection accepted from 10.0.1.135:53008 #33 (15 connections now open) +2014-04-09T23:26:32.378-0400 [conn33] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:26:32.430-0400 [conn32] end connection 10.0.1.135:52997 (13 connections now open) +2014-04-09T23:26:32.431-0400 [initandlisten] connection accepted from 10.0.1.135:53009 #34 (15 connections now open) +2014-04-09T23:26:32.432-0400 [conn34] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:27:02.396-0400 [conn33] end connection 10.0.1.135:53008 (13 connections now open) +2014-04-09T23:27:02.397-0400 [initandlisten] connection accepted from 10.0.1.135:53020 #35 (15 connections now open) +2014-04-09T23:27:02.398-0400 [conn35] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:27:02.450-0400 [conn34] end connection 10.0.1.135:53009 (13 connections now open) +2014-04-09T23:27:02.451-0400 [initandlisten] connection accepted from 10.0.1.135:53021 #36 (15 connections now open) +2014-04-09T23:27:02.452-0400 [conn36] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:27:32.415-0400 [conn35] end connection 10.0.1.135:53020 (13 connections now open) +2014-04-09T23:27:32.415-0400 [initandlisten] connection accepted from 10.0.1.135:53032 #37 (15 connections now open) +2014-04-09T23:27:32.416-0400 [conn37] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:27:32.471-0400 [conn36] end connection 10.0.1.135:53021 (13 connections now open) +2014-04-09T23:27:32.471-0400 [initandlisten] connection accepted from 10.0.1.135:53033 #38 (14 connections now open) +2014-04-09T23:27:32.472-0400 [conn38] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:28:02.437-0400 [conn37] end connection 10.0.1.135:53032 (13 connections now open) +2014-04-09T23:28:02.438-0400 [initandlisten] connection accepted from 10.0.1.135:53044 #39 (15 connections now open) +2014-04-09T23:28:02.439-0400 [conn39] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:28:02.493-0400 [conn38] end connection 10.0.1.135:53033 (13 connections now open) +2014-04-09T23:28:02.494-0400 [initandlisten] connection accepted from 10.0.1.135:53045 #40 (15 connections now open) +2014-04-09T23:28:02.495-0400 [conn40] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:28:32.461-0400 [conn39] end connection 10.0.1.135:53044 (13 connections now open) +2014-04-09T23:28:32.462-0400 [initandlisten] connection accepted from 10.0.1.135:53062 #41 (15 connections now open) +2014-04-09T23:28:32.462-0400 [conn41] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:28:32.516-0400 [conn40] end connection 10.0.1.135:53045 (13 connections now open) +2014-04-09T23:28:32.517-0400 [initandlisten] connection accepted from 10.0.1.135:53063 #42 (15 connections now open) +2014-04-09T23:28:32.517-0400 [conn42] authenticate db: local { authenticate: 1, nonce: "xxx", user: "__system", key: "xxx" } +2014-04-09T23:28:37.998-0400 [initandlisten] connection accepted from 127.0.0.1:53069 #43 (15 connections now open) +2014-04-09T23:28:37.999-0400 [conn43] end connection 127.0.0.1:53069 (14 connections now open) +2014-04-09T23:28:38.010-0400 [initandlisten] connection accepted from 127.0.0.1:53078 #44 (15 connections now open) +2014-04-09T23:28:38.017-0400 [initandlisten] connection accepted from 10.0.1.135:53080 #45 (16 connections now open) +2014-04-09T23:28:38.019-0400 [conn44] end connection 127.0.0.1:53078 (15 connections now open) +2014-04-09T23:28:38.024-0400 [conn45] end connection 10.0.1.135:53080 (14 connections now open) +2014-04-09T23:28:38.032-0400 [signalProcessingThread] got signal 15 (Terminated: 15), will terminate after current cmd ends +2014-04-09T23:28:38.034-0400 [signalProcessingThread] now exiting +2014-04-09T23:28:38.034-0400 [signalProcessingThread] dbexit: +2014-04-09T23:28:38.036-0400 [signalProcessingThread] shutdown: going to close listening sockets... +2014-04-09T23:28:38.036-0400 [signalProcessingThread] closing listening socket: 10 +2014-04-09T23:28:38.038-0400 [conn30] end connection 10.0.1.135:52985 (13 connections now open) +2014-04-09T23:28:38.038-0400 [conn29] end connection 10.0.1.135:52980 (13 connections now open) +2014-04-09T23:28:38.038-0400 [conn27] end connection 10.0.1.135:52966 (13 connections now open) +2014-04-09T23:28:38.038-0400 [conn19] end connection 10.0.1.135:52911 (12 connections now open) +2014-04-09T23:28:38.038-0400 [conn2] end connection 10.0.1.135:52809 (12 connections now open) +2014-04-09T23:28:38.039-0400 [conn4] end connection 10.0.1.135:52832 (12 connections now open) +2014-04-09T23:28:38.039-0400 [conn20] end connection 10.0.1.135:52927 (12 connections now open) +2014-04-09T23:28:38.039-0400 [signalProcessingThread] closing listening socket: 11 +2014-04-09T23:28:38.045-0400 [signalProcessingThread] removing socket file: /tmp/mongodb-27019.sock +2014-04-09T23:28:38.046-0400 [signalProcessingThread] shutdown: going to flush diaglog... +2014-04-09T23:28:38.046-0400 [signalProcessingThread] shutdown: going to close sockets... +2014-04-09T23:28:38.046-0400 [conn14] end connection 10.0.1.135:52881 (6 connections now open) +2014-04-09T23:28:38.046-0400 [signalProcessingThread] shutdown: waiting for fs preallocator... +2014-04-09T23:28:38.046-0400 [conn42] end connection 10.0.1.135:53063 (6 connections now open) +2014-04-09T23:28:38.050-0400 [signalProcessingThread] shutdown: lock for final commit... +2014-04-09T23:28:38.051-0400 [conn41] end connection 10.0.1.135:53062 (5 connections now open) +2014-04-09T23:28:38.059-0400 [conn12] end connection 10.0.1.135:52879 (5 connections now open) +2014-04-09T23:28:38.059-0400 [signalProcessingThread] shutdown: final commit... +2014-04-09T23:28:38.073-0400 [signalProcessingThread] shutdown: closing all files... +2014-04-09T23:28:38.074-0400 [signalProcessingThread] closeAllFiles() finished +2014-04-09T23:28:38.074-0400 [signalProcessingThread] journalCleanup... +2014-04-09T23:28:38.075-0400 [signalProcessingThread] removeJournalFiles +2014-04-09T23:28:38.075-0400 [signalProcessingThread] shutdown: removing fs lock... +2014-04-09T23:28:38.076-0400 [signalProcessingThread] dbexit: really exiting now diff --git a/mtools/test/logfiles/mongos.log b/mtools/test/logfiles/mongos.log new file mode 100644 index 00000000..85b6c0fb --- /dev/null +++ b/mtools/test/logfiles/mongos.log @@ -0,0 +1,72 @@ +Tue May 20 19:07:39.959 [mongosMain] MongoS version 2.4.7 starting: pid=36561 port=27017 64-bit host=jimoleary.local (--help for usage) +Tue May 20 19:07:39.960 [mongosMain] git version: 0161738abf06c1f067b56a465b706efd6f4bf2aa +Tue May 20 19:07:39.960 [mongosMain] build info: Darwin bs-osx-106-x86-64-2.10gen.cc 10.8.0 Darwin Kernel Version 10.8.0: Tue Jun 7 16:32:41 PDT 2011; root:xnu-1504.15.3~1/RELEASE_X86_64 x86_64 BOOST_LIB_VERSION=1_49 +Tue May 20 19:07:39.960 [mongosMain] options: { configdb: "jimoleary.local:27019", fork: true, logappend: true, logpath: "/Users/joleary/tmp/data/mongos.log", port: 27017 } +Tue May 20 19:07:39.972 [LockPinger] creating distributed lock ping thread for jimoleary.local:27019 and process jimoleary.local:27017:1400609259:16807 (sleeping for 30000ms) +Tue May 20 19:07:40.101 [mongosMain] distributed lock 'configUpgrade/jimoleary.local:27017:1400609259:16807' acquired, ts : 537b99eb5d0323897bae576c +Tue May 20 19:07:40.110 [mongosMain] starting upgrade of config server from v0 to v4 +Tue May 20 19:07:40.130 [mongosMain] starting next upgrade step from v0 to v4 +Tue May 20 19:07:40.130 [mongosMain] about to log new metadata event: { _id: "jimoleary.local-2014-05-20T18:07:40-537b99ec5d0323897bae576d", server: "jimoleary.local", clientAddr: "N/A", time: new Date(1400609260130), what: "starting upgrade of config database", ns: "config.version", details: { from: 0, to: 4 } } +Tue May 20 19:07:40.131 [mongosMain] writing initial config version at v4 +Tue May 20 19:07:40.133 [mongosMain] about to log new metadata event: { _id: "jimoleary.local-2014-05-20T18:07:40-537b99ec5d0323897bae576f", server: "jimoleary.local", clientAddr: "N/A", time: new Date(1400609260133), what: "finished upgrade of config database", ns: "config.version", details: { from: 0, to: 4 } } +Tue May 20 19:07:40.134 [mongosMain] upgrade of config server to v4 successful +Tue May 20 19:07:40.134 [mongosMain] distributed lock 'configUpgrade/jimoleary.local:27017:1400609259:16807' unlocked. +Tue May 20 19:07:40.135 [Balancer] about to contact config servers and shards +Tue May 20 19:07:40.135 [websvr] admin web console waiting for connections on port 28017 +Tue May 20 19:07:40.136 [mongosMain] waiting for connections on port 27017 +Tue May 20 19:07:40.138 [mongosMain] connection accepted from 127.0.0.1:55552 #1 (1 connection now open) +Tue May 20 19:07:40.166 [Balancer] config servers and shards contacted successfully +Tue May 20 19:07:40.166 [Balancer] balancer id: jimoleary.local:27017 started at May 20 19:07:40 +Tue May 20 19:07:40.171 [conn1] couldn't find database [admin] in config db +Tue May 20 19:07:40.191 [conn1] put [admin] on: config:jimoleary.local:27019 +Tue May 20 19:07:40.192 [mongosMain] connection accepted from 127.0.0.1:55554 #2 (2 connections now open) +Tue May 20 19:07:40.198 [Balancer] distributed lock 'balancer/jimoleary.local:27017:1400609259:16807' acquired, ts : 537b99ec5d0323897bae5771 +Tue May 20 19:07:40.199 [Balancer] distributed lock 'balancer/jimoleary.local:27017:1400609259:16807' unlocked. +Tue May 20 19:07:40.200 [conn2] creating WriteBackListener for: jimoleary.local:27019 serverID: 537b99ec5d0323897bae5770 +Tue May 20 19:07:40.200 [conn1] end connection 127.0.0.1:55552 (1 connection now open) +Tue May 20 19:07:40.209 [conn2] starting new replica set monitor for replica set shard01 with seed of jimoleary.local:27018 +Tue May 20 19:07:40.210 [conn2] successfully connected to seed jimoleary.local:27018 for replica set shard01 +Tue May 20 19:07:40.210 [conn2] warning: node: jimoleary.local:27018 isn't a part of set: shard01 ismaster: { ismaster: false, secondary: false, info: "Received replSetInitiate - should come online shortly.", isreplicaset: true, maxBsonObjectSize: 16777216, maxMessageSizeBytes: 48000000, localTime: new Date(1400609260210), ok: 1.0 } +Tue May 20 19:07:42.212 [conn2] warning: No primary detected for set shard01 +Tue May 20 19:07:42.212 [conn2] All nodes for set shard01 are down. This has happened for 1 checks in a row. Polling will stop after 29 more failed checks +Tue May 20 19:07:42.212 [conn2] replica set monitor for replica set shard01 started, address is shard01/ +Tue May 20 19:07:42.212 [ReplicaSetMonitorWatcher] starting +Tue May 20 19:07:42.220 [conn2] deleting replica set monitor for: shard01/ +Tue May 20 19:07:42.220 [conn2] addshard request { addShard: "shard01/jimoleary.local:27018" } failed: couldn't connect to new shard socket exception [CONNECT_ERROR] for shard01/jimoleary.local:27018 +Tue May 20 19:07:43.222 [conn2] starting new replica set monitor for replica set shard01 with seed of jimoleary.local:27018 +Tue May 20 19:07:43.223 [conn2] successfully connected to seed jimoleary.local:27018 for replica set shard01 +Tue May 20 19:07:43.223 [conn2] warning: node: jimoleary.local:27018 isn't a part of set: shard01 ismaster: { ismaster: false, secondary: false, info: "Received replSetInitiate - should come online shortly.", isreplicaset: true, maxBsonObjectSize: 16777216, maxMessageSizeBytes: 48000000, localTime: new Date(1400609263223), ok: 1.0 } +Tue May 20 19:07:45.225 [conn2] warning: No primary detected for set shard01 +Tue May 20 19:07:45.225 [conn2] All nodes for set shard01 are down. This has happened for 1 checks in a row. Polling will stop after 29 more failed checks +Tue May 20 19:07:45.225 [conn2] replica set monitor for replica set shard01 started, address is shard01/ +Tue May 20 19:07:45.225 [conn2] deleting replica set monitor for: shard01/ +Tue May 20 19:07:45.225 [conn2] addshard request { addShard: "shard01/jimoleary.local:27018" } failed: couldn't connect to new shard socket exception [CONNECT_ERROR] for shard01/jimoleary.local:27018 +Tue May 20 19:07:46.201 [Balancer] distributed lock 'balancer/jimoleary.local:27017:1400609259:16807' acquired, ts : 537b99f25d0323897bae5772 +Tue May 20 19:07:46.202 [Balancer] distributed lock 'balancer/jimoleary.local:27017:1400609259:16807' unlocked. +Tue May 20 19:07:46.227 [conn2] starting new replica set monitor for replica set shard01 with seed of jimoleary.local:27018 +Tue May 20 19:07:46.228 [conn2] successfully connected to seed jimoleary.local:27018 for replica set shard01 +Tue May 20 19:07:46.228 [conn2] warning: node: jimoleary.local:27018 isn't a part of set: shard01 ismaster: { ismaster: false, secondary: false, info: "Received replSetInitiate - should come online shortly.", isreplicaset: true, maxBsonObjectSize: 16777216, maxMessageSizeBytes: 48000000, localTime: new Date(1400609266228), ok: 1.0 } +Tue May 20 19:07:48.230 [conn2] warning: No primary detected for set shard01 +Tue May 20 19:07:48.230 [conn2] All nodes for set shard01 are down. This has happened for 1 checks in a row. Polling will stop after 29 more failed checks +Tue May 20 19:07:48.230 [conn2] replica set monitor for replica set shard01 started, address is shard01/ +Tue May 20 19:07:48.230 [conn2] deleting replica set monitor for: shard01/ +Tue May 20 19:07:48.230 [conn2] addshard request { addShard: "shard01/jimoleary.local:27018" } failed: couldn't connect to new shard socket exception [CONNECT_ERROR] for shard01/jimoleary.local:27018 +Tue May 20 19:07:49.232 [conn2] starting new replica set monitor for replica set shard01 with seed of jimoleary.local:27018 +Tue May 20 19:07:49.233 [conn2] successfully connected to seed jimoleary.local:27018 for replica set shard01 +Tue May 20 19:07:49.233 [conn2] changing hosts to { 0: "jimoleary.local:27018" } from shard01/ +Tue May 20 19:07:49.234 [conn2] trying to add new host jimoleary.local:27018 to replica set shard01 +Tue May 20 19:07:49.234 [conn2] successfully connected to new host jimoleary.local:27018 in replica set shard01 +Tue May 20 19:07:50.236 [conn2] ReplicaSetMonitor::_checkConnection: jimoleary.local:27018 { setName: "shard01", ismaster: false, secondary: true, hosts: [ "jimoleary.local:27018" ], me: "jimoleary.local:27018", maxBsonObjectSize: 16777216, maxMessageSizeBytes: 48000000, localTime: new Date(1400609270236), ok: 1.0 } +Tue May 20 19:07:51.237 [conn2] warning: No primary detected for set shard01 +Tue May 20 19:07:51.237 [conn2] replica set monitor for replica set shard01 started, address is shard01/jimoleary.local:27018 +Tue May 20 19:07:51.238 [conn2] Primary for replica set shard01 changed to jimoleary.local:27018 +Tue May 20 19:07:51.246 [conn2] going to add shard: { _id: "shard01", host: "shard01/jimoleary.local:27018" } +Tue May 20 19:07:52.204 [Balancer] distributed lock 'balancer/jimoleary.local:27017:1400609259:16807' acquired, ts : 537b99f85d0323897bae5773 +Tue May 20 19:07:52.205 [Balancer] distributed lock 'balancer/jimoleary.local:27017:1400609259:16807' unlocked. +Tue May 20 19:07:52.249 [mongosMain] connection accepted from 127.0.0.1:55564 #3 (2 connections now open) +Tue May 20 19:07:52.250 [conn3] end connection 127.0.0.1:55564 (1 connection now open) +Tue May 20 19:07:52.251 [mongosMain] connection accepted from 127.0.0.1:55567 #4 (2 connections now open) +Tue May 20 19:07:52.252 [conn4] end connection 127.0.0.1:55567 (1 connection now open) +Tue May 20 19:07:52.269 [conn2] end connection 127.0.0.1:55554 (0 connections now open) +Tue May 20 19:07:58.207 [Balancer] distributed lock 'balancer/jimoleary.local:27017:1400609259:16807' acquired, ts : 537b99fe5d0323897bae5774 +Tue May 20 19:07:58.207 [Balancer] distributed lock 'balancer/jimoleary.local:27017:1400609259:16807' unlocked. diff --git a/mtools/test/test_all_import.py b/mtools/test/test_all_import.py index 8f09a5d5..3648b86d 100644 --- a/mtools/test/test_all_import.py +++ b/mtools/test/test_all_import.py @@ -46,6 +46,6 @@ def new_func(): def test_import_all(): """ Import all tools from mtools module. The tools that have external dependencies will only be imported if the dependencies are fulfilled. - This test just passes by defaultbecause the imports are tested implicitly by loading this file. + This test just passes by default because the imports are tested implicitly by loading this file. """ pass diff --git a/mtools/test/test_mlaunch.py b/mtools/test/test_mlaunch.py index c8fbed75..d94ac58e 100644 --- a/mtools/test/test_mlaunch.py +++ b/mtools/test/test_mlaunch.py @@ -15,6 +15,8 @@ from nose.plugins.attrib import attr from nose.plugins.skip import Skip, SkipTest +# temporarily skipping mlaunch tests until issues are sorted out +raise SkipTest class TestMLaunch(object): """ This class tests functionality around the mlaunch tool. It has some diff --git a/mtools/test/test_mlogfilter.py b/mtools/test/test_mlogfilter.py index d609bad8..17e55ea8 100644 --- a/mtools/test/test_mlogfilter.py +++ b/mtools/test/test_mlogfilter.py @@ -79,7 +79,7 @@ def test_from_to_26_log(self): logfile_26 = LogFile(open(logfile_26_path, 'r')) random_start = random_date(logfile_26.start, logfile_26.end) - random_end = random_date(random_start, logfile_26.end) + random_end = random_date(random_start+timedelta(minutes=1), logfile_26.end+timedelta(minutes=1)) print random_start, random_end print logfile_26.start, logfile_26.end @@ -215,12 +215,43 @@ def test_accept_nodate(self): lines = output.splitlines() assert any('=== a line without a datetime ===' in l for l in lines) - def test_thread(self): - self.tool.run('%s --thread initandlisten'%self.logfile_path) + def _test_thread(self,path,thread): + self.tool.run('%s --thread mongosMain'%path ) output = sys.stdout.getvalue() for line in output.splitlines(): le = LogEvent(line) - assert(le.thread == 'initandlisten') + assert(le.thread == thread) + md = re.match("^.* connection accepted from [0-9\.:]+ #(?P[0-9]+) " , le.line_str) + if md is None: + assert(le.conn is None) + else: + assert(le.conn == "conn" + md.group('conn')) + + def _test_thread_conn1(self,path,thread): + self.tool.run('%s --thread conn1'%path ) + output = sys.stdout.getvalue() + for line in output.splitlines(): + le = LogEvent(line) + md = re.match("^.* connection accepted from [0-9\.:]+ #(?P[0-9]+) " , le.line_str) + assert(le.conn == 'conn1') + if md is None: + assert(le.thread == 'conn1') + else: + assert(le.thread == thread) + + def test_thread(self): + self._test_thread(self.logfile_path,'initandlisten') + + def test_thread_conn1(self): + self._test_thread_conn1(self.logfile_path,'initandlisten') + + def test_thread_mongos(self): + mongos_path = os.path.join(os.path.dirname(mtools.__file__), 'test/logfiles/', 'mongos.log') + self._test_thread(mongos_path,'mongosMain') + + def test_thread_mongos_conn1(self): + mongos_path = os.path.join(os.path.dirname(mtools.__file__), 'test/logfiles/', 'mongos.log') + self._test_thread_conn1(mongos_path,'mongosMain') def test_no_timestamp_format(self): self.tool.run('%s --timestamp-format none --timezone 5'%self.logfile_path) diff --git a/mtools/test/test_mloginfo.py b/mtools/test/test_mloginfo.py index 1d8d89d9..faee0b56 100644 --- a/mtools/test/test_mloginfo.py +++ b/mtools/test/test_mloginfo.py @@ -45,6 +45,7 @@ def test_basic(self): results[key.strip()] = val.strip() assert results['source'].endswith('mongod_225.log') + assert results['host'] == 'capslock.local:27017' assert results['start'].endswith('Aug 05 20:21:42') assert results['end'].endswith('Aug 05 21:04:52') assert results['date format'] == 'ctime-pre2.4' @@ -115,3 +116,46 @@ def test_restarts_output(self): lines = output.splitlines() assert any(map(lambda line: 'RESTARTS' in line, lines)) assert any(map(lambda line: 'version 2.2.5' in line, lines)) + + + def test_corrupt(self): + # load different logfile + logfile_path = os.path.join(os.path.dirname(mtools.__file__), 'test/logfiles/', 'mongod_26_corrupt.log') + self.tool.run('%s --queries' % logfile_path) + + output = sys.stdout.getvalue() + lines = output.splitlines() + assert any(map(lambda line: 'QUERIES' in line, lines)) + assert any(map(lambda line: line.startswith('namespace'), lines)) + + assert len(filter(lambda line: re.match(r'\w+\.\w+\.\w+\s+{', line), lines)) >= 1 + + + def test_rsstate_225(self): + pattern = r'^Aug 05' + expected = 13 + self._test_rsstate(self.logfile_path, pattern, expected) + + + def test_rsstate_26(self): + logfile_path = os.path.join(os.path.dirname(mtools.__file__), 'test/logfiles/', 'mongod_26.log') + pattern = r'^Apr 09' + expected = 17 + self._test_rsstate(logfile_path, pattern, expected) + + + def test_rsstate_mongos(self): + # different log file + logfile_path = os.path.join(os.path.dirname(mtools.__file__), 'test/logfiles/', 'mongos.log') + pattern = r' no rs state changes found' + expected = 1 + self._test_rsstate(logfile_path, pattern, expected) + + + def _test_rsstate(self, logfile_path, pattern, expected): + """ utility test runner for rsstate + """ + self.tool.run('%s --rsstate' % logfile_path) + output = sys.stdout.getvalue() + lines = output.splitlines() + assert len(filter(lambda line: re.match(pattern, line), lines)) == expected diff --git a/mtools/test/test_util_logfile.py b/mtools/test/test_util_logfile.py index 5dc22a74..6402842b 100644 --- a/mtools/test/test_util_logfile.py +++ b/mtools/test/test_util_logfile.py @@ -56,3 +56,22 @@ def test_rollover_detection(self): logfile = LogFile(self.file_year_rollover) assert logfile.datetime_format == "ctime" assert logfile.year_rollover == logfile.end + + + def test_hostname_port(self): + # mongod + logfile_path = os.path.join(os.path.dirname(mtools.__file__), 'test/logfiles/', 'mongod_26.log') + mongod_26 = open(logfile_path, 'r') + + logfile = LogFile(mongod_26) + assert logfile.hostname == 'enter.local' + assert logfile.port == '27019' + + # mongos + logfile_path = os.path.join(os.path.dirname(mtools.__file__), 'test/logfiles/', 'mongos.log') + mongos = open(logfile_path, 'r') + + logfile2 = LogFile(mongos) + print logfile2.hostname + assert logfile2.hostname == 'jimoleary.local' + assert logfile2.port == '27017' diff --git a/mtools/util/logevent.py b/mtools/util/logevent.py index b82c5098..ec36b114 100644 --- a/mtools/util/logevent.py +++ b/mtools/util/logevent.py @@ -96,6 +96,7 @@ def _reset(self): self._numYields = None self._r = None self._w = None + self._conn = None self.merge_marker_str = '' @@ -209,14 +210,20 @@ def set_datetime_hint(self, format, nextpos, rollover): self._datetime_nextpos = nextpos self._year_rollover = rollover - # fast check if timezone changed. if it has, trigger datetime evaluation + # fast check if timestamp format changed. if it has, trigger datetime evaluation if format.startswith('ctime'): if len(self.split_tokens) < 4 or self.split_tokens[self._datetime_nextpos-4] not in self.weekdays: _ = self.datetime return False return True else: + if len(self.split_tokens) == 0: + # empty line, no need to parse datetime + self._datetime_calculated = True + return False + if not self.split_tokens[self._datetime_nextpos-1][0].isdigit(): + # not the timestamp format that was hinted _ = self.datetime return False return True @@ -291,8 +298,23 @@ def thread(self): if match: self._thread = match.group(1) + if self._thread is not None: + if self._thread in ['initandlisten', 'mongosMain']: + if len(split_tokens) >= 5 and split_tokens[-5][0] == '#': + self._conn = 'conn' + split_tokens[-5][1:] + elif self._thread.startswith('conn'): + self._conn = self._thread return self._thread + @property + def conn(self): + """ + extract conn name if available (lazy) + this value is None for all lines except the log lines related to connections , + that is lines matching '\[conn[0-9]+\]' or '\[(initandlisten|mongosMain)\] .* connection accepted from' + """ + self.thread + return self._conn @property def operation(self): @@ -559,11 +581,11 @@ def _reformat_timestamp(self, format, force=False): dt_string += '.' + str(int(self.datetime.microsecond / 1000)).zfill(3) elif format == 'iso8601-local': dt_string = self.datetime.isoformat() - if not self.datetime.utcoffset(): + if self.datetime.utcoffset() == None: dt_string += '+00:00' ms_str = str(int(self.datetime.microsecond * 1000)).zfill(3)[:3] # change isoformat string to have 3 digit milliseconds and no : in offset - dt_string = re.sub(r'(\.\d+)?([+-])(\d\d):(\d\d)', '.%s\\2\\3\\4'%ms_str, dt_string) + dt_string = re.sub(r'(\.\d+)?([+-])(\d\d):(\d\d)', '.%s\\2\\3\\4'%ms_str, dt_string, count=1) elif format == 'iso8601-utc': if self.datetime.utcoffset(): dt_string = self.datetime.astimezone(tzutc()).strftime("%Y-%m-%dT%H:%M:%S") diff --git a/mtools/util/logfile.py b/mtools/util/logfile.py index 356229bb..1ff39c0e 100644 --- a/mtools/util/logfile.py +++ b/mtools/util/logfile.py @@ -5,6 +5,7 @@ from datetime import datetime import time import re +import os class LogFile(InputSource): """ wrapper class for log files, either as open file streams of from stdin. """ @@ -22,6 +23,9 @@ def __init__(self, filehandle): self._restarts = None self._binary = None self._timezone = None + self._hostname = None + self._port = None + self._rsstate = None self._datetime_format = None self._year_rollover = None @@ -89,6 +93,13 @@ def restarts(self): self._iterate_lines() return self._restarts + @property + def rsstate(self): + """ lazy evaluation of all restarts. """ + if not self._num_lines: + self._iterate_lines() + return self._rsstate + @property def binary(self): """ lazy evaluation of the binary name. """ @@ -96,6 +107,20 @@ def binary(self): self._iterate_lines() return self._binary + @property + def hostname(self): + """ lazy evaluation of the binary name. """ + if not self._num_lines: + self._iterate_lines() + return self._hostname + + @property + def port(self): + """ lazy evaluation of the binary name. """ + if not self._num_lines: + self._iterate_lines() + return self._port + @property def versions(self): """ return all version changes. """ @@ -124,7 +149,6 @@ def next(self): self._datetime_format = None self._datetime_nextpos = None elif le.datetime: - # print "not hinting" # gather new hint info from another logevent self._datetime_format = le.datetime_format self._datetime_nextpos = le._datetime_nextpos @@ -158,6 +182,7 @@ def __iter__(self): yield le + states = ['PRIMARY', 'SECONDARY', 'DOWN', 'STARTUP', 'STARTUP2', 'RECOVERING', 'ROLLBACK', 'ARBITER', 'UNKNOWN'] def __len__(self): """ return the number of lines in a log file. """ @@ -168,28 +193,59 @@ def _iterate_lines(self): """ count number of lines (can be expensive). """ self._num_lines = 0 self._restarts = [] + self._rsstate = [] l = 0 for l, line in enumerate(self.filehandle): - # find version string - if "version" in line: - - restart = None - # differentiate between different variations - if "mongos" in line or "MongoS" in line: - self._binary = 'mongos' - elif "db version v" in line: - self._binary = 'mongod' - - else: - continue + # find version string (fast check to eliminate most lines) + if "version" in line[:100]: + logevent = LogEvent(line) + restart = self._check_for_restart(logevent) + if restart: + self._restarts.append((restart, logevent)) + + if "starting :" in line or "starting:" in line: + # look for hostname, port + match = re.search('port=(?P\d+).*host=(?P\S+)', line) + if match: + self._hostname = match.group('host') + self._port = match.group('port') + + # if "is now in state" in line and next(state for state in states if line.endswith(state)): + if "is now in state" in line: + tokens = line.split() + # 2.6 + if tokens[1].endswith(']'): + pos = 4 + else: + pos = 7 + host = tokens[pos] + rsstate = tokens[-1] + state = (host, rsstate, LogEvent(line)) + self._rsstate.append(state) + continue + + if "[rsMgr] replSet" in line: + tokens = line.split() + if self._hostname: + host = self._hostname + ':' + self._port + else: + host = os.path.basename(self.name) + host += ' (self)' + if tokens[-1] in self.states: + rsstate = tokens[-1] + else: + # 2.6 + if tokens[1].endswith(']'): + pos = 2 + else: + pos = 6 + rsstate = ' '.join(tokens[pos:]) - version = re.search(r'(\d\.\d\.\d+)', line) - if version: - version = version.group(1) - restart = (version, LogEvent(line)) - self._restarts.append(restart) + state = (host, rsstate, LogEvent(line)) + self._rsstate.append(state) + continue self._num_lines = l+1 @@ -197,6 +253,25 @@ def _iterate_lines(self): self.filehandle.seek(0) + def _check_for_restart(self, logevent): + if logevent.thread == 'mongosMain': + self._binary = 'mongos' + + elif logevent.thread == 'initandlisten' and "db version v" in logevent.line_str: + self._binary = 'mongod' + + else: + return False + + version = re.search(r'(\d\.\d\.\d+)', logevent.line_str) + + if version: + version = version.group(1) + return version + else: + return False + + def _calculate_bounds(self): """ calculate beginning and end of logfile. """ diff --git a/mtools/util/print_table.py b/mtools/util/print_table.py index 0a8793da..fb1c15dd 100644 --- a/mtools/util/print_table.py +++ b/mtools/util/print_table.py @@ -1,6 +1,7 @@ def print_table( rows, override_headers=None, uppercase_headers=True ): """ rows needs to be a list of dictionaries, all with the same keys. """ - + if len(rows) == 0: + return keys = rows[0].keys() headers = override_headers or keys if uppercase_headers: @@ -14,7 +15,7 @@ def print_table( rows, override_headers=None, uppercase_headers=True ): for row in rows: if type(row) == str: print row - elif row == None: + elif row is None: print else: print template.format(**row) diff --git a/mtools/version.py b/mtools/version.py index 99d2a6fa..6ebd335c 100644 --- a/mtools/version.py +++ b/mtools/version.py @@ -1 +1 @@ -__version__ = '1.1.5' +__version__ = '1.1.6' diff --git a/setup.py b/setup.py index abb1b298..bbfdc37b 100644 --- a/setup.py +++ b/setup.py @@ -28,7 +28,7 @@ import dateutil except ImportError: install_requires.append('python-dateutil') - test_requires.append('ordereddict') + test_requires.append('python-dateutil') packages = find_packages() kws = {'install_requires': install_requires}