code                      string
signature                 string
docstring                 string
loss_without_docstring    float64
loss_with_docstring       float64
factor                    float64
if isinstance(task_id, RegisteredTask):
    task_id = task_id.id

def cloud_delete(api):
    api.delete(task_id)

if len(self._threads):
    self.put(cloud_delete)
else:
    cloud_delete(self._api)

return self
def delete(self, task_id)
Deletes a task from a TaskQueue.
4.971556
4.619484
1.076215
body = {
    "payload": task.payload(),
    "queueName": self._queue_name,
    "groupByTag": True,
    "tag": task.__class__.__name__
}

def cloud_insertion():
    self._api.insert(body, delay_seconds)

self._pool.spawn(cloud_insertion)
return self
def insert(self, task, args=[], kwargs={}, delay_seconds=0)
Insert a task into an existing queue.
7.031524
6.824255
1.030372
return 'r' in action.type._mode and (action.default is None or getattr(action.default, 'name') not in (sys.stderr.name, sys.stdout.name))
def is_upload(action)
Checks if this should be a user upload :param action: the argparse action to examine :return: True if this is a file we intend to upload from the user
10.314958
14.458588
0.713414
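A minimal runnable sketch of the check above; it assumes an argparse parser whose file arguments use argparse.FileType, and relies on FileType's private _mode attribute just as the original does.

import argparse
import sys

# hypothetical parser: 'infile' is read from the user, so is_upload is True
parser = argparse.ArgumentParser()
action = parser.add_argument('infile', type=argparse.FileType('r'))

def is_upload(action):
    # 'r' in the FileType mode means the file is read, i.e. uploaded by the user
    return 'r' in action.type._mode and (
        action.default is None
        or getattr(action.default, 'name') not in (sys.stderr.name, sys.stdout.name)
    )

print(is_upload(action))   # True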
exclude = {'name', 'model'}
field_module = 'models'
django_kwargs = {}
if self.node_attrs['model'] == 'CharField':
    django_kwargs['max_length'] = 255
django_kwargs['blank'] = not self.node_attrs['required']
try:
    django_kwargs['default'] = self.node_attrs['value']
except KeyError:
    pass
return u'{0} = {1}.{2}({3})'.format(
    self.node_attrs['name'],
    field_module,
    self.node_attrs['model'],
    ', '.join(['{0}={1}'.format(i, v) for i, v in six.iteritems(django_kwargs)]),
)
def to_django(self)
This is a debug function to see what equivalent django models are being generated
3.242918
3.025565
1.071839
new_dict = {}
for key in a_dict:
    if six.PY2 and isinstance(key, six.text_type):
        new_dict[str(key)] = a_dict[key]
    else:
        new_dict[key] = a_dict[key]
return new_dict
def str_dict_keys(a_dict)
return a modified dict where all the keys that are anything but str get converted to str. E.g.

    >>> result = str_dict_keys({u'name': u'Peter', u'age': 99, 1: 2})
    >>> # can't compare whole dicts in doctests
    >>> result['name']
    u'Peter'
    >>> result['age']
    99
    >>> result[1]
    2

The reason for this is that in Python <= 2.6.4 doing ``MyClass(**{u'name': u'Peter'})`` would raise a TypeError.

Note that only unicode types are converted to str types. The reason for that is you might have a class that looks like this::

    class Option(object):
        def __init__(self, foo=None, bar=None, **kwargs):
            ...

And it's being used like this::

    Option(**{u'foo':1, u'bar':2, 3:4})

Then you don't want to change that {3:4} part which becomes part of `**kwargs` inside the __init__ method. Using integers as parameter keys is a silly example, but the point is that due to the Python 2.6.4 bug only unicode keys are converted to str.
1.949499
2.257578
0.863535
if not isinstance(input_str, six.string_types):
    raise ValueError(input_str)
input_str = str_quote_stripper(input_str)
return input_str.lower() in ("true", "t", "1", "y", "yes")
def str_to_boolean(input_str)
a conversion function for boolean
3.057488
3.169597
0.96463
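A self-contained sketch of the converter above; str_quote_stripper is not defined in this record, so a minimal stand-in that peels off one layer of matching quotes is assumed.

def str_quote_stripper(s):
    # stand-in: peel off matching single or double quotes
    while len(s) > 1 and s[0] == s[-1] and s[0] in '"\'':
        s = s[1:-1]
    return s

def str_to_boolean(input_str):
    if not isinstance(input_str, str):
        raise ValueError(input_str)
    input_str = str_quote_stripper(input_str)
    return input_str.lower() in ("true", "t", "1", "y", "yes")

print(str_to_boolean('"YES"'))   # True
print(str_to_boolean('0'))       # False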
if not input_str:
    return None
if six.PY3 and isinstance(input_str, six.binary_type):
    input_str = to_str(input_str)
if not isinstance(input_str, six.string_types):
    # gosh, we didn't get a string, we can't convert anything but strings
    # we're going to assume that what we got is actually what was wanted
    # as the output
    return input_str
input_str = str_quote_stripper(input_str)
if '.' not in input_str and input_str in known_mapping_str_to_type:
    return known_mapping_str_to_type[input_str]
parts = [x.strip() for x in input_str.split('.') if x.strip()]
try:
    try:
        # first try as a complete module
        package = __import__(input_str)
    except ImportError:
        # it must be a class from a module
        if len(parts) == 1:
            # since it has only one part, it must be a class from __main__
            parts = ('__main__', input_str)
        package = __import__('.'.join(parts[:-1]), globals(), locals(), [])
    obj = package
    for name in parts[1:]:
        obj = getattr(obj, name)
    return obj
except AttributeError as x:
    raise CannotConvertError("%s cannot be found" % input_str)
except ImportError as x:
    raise CannotConvertError(str(x))
def str_to_python_object(input_str)
a conversion that will import a module and class name
3.399806
3.355053
1.013339
if not isinstance(input_str, six.string_types):
    raise ValueError(input_str)
input_str = str_quote_stripper(input_str)
result = [
    item_converter(x.strip())
    for x in input_str.split(item_separator) if x.strip()
]
if list_to_collection_converter is not None:
    return list_to_collection_converter(result)
return result
def str_to_list(input_str, item_converter=lambda x: x, item_separator=',', list_to_collection_converter=None)
a conversion function for list
2.146008
2.146252
0.999886
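A usage sketch for the list converter above, folding in the same hypothetical one-layer quote stripping so it runs standalone; item_converter turns each trimmed element into the target type.

def str_to_list(input_str, item_converter=lambda x: x, item_separator=','):
    # stand-in quote stripping: peel one layer of matching quotes
    if len(input_str) > 1 and input_str[0] == input_str[-1] and input_str[0] in '"\'':
        input_str = input_str[1:-1]
    return [
        item_converter(x.strip())
        for x in input_str.split(item_separator) if x.strip()
    ]

print(str_to_list('"1, 2, 3"', item_converter=int))   # [1, 2, 3]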
# is it None?
if a_thing is None:
    return ''
# is it already a string?
if isinstance(a_thing, six.string_types):
    return a_thing
if six.PY3 and isinstance(a_thing, six.binary_type):
    try:
        return a_thing.decode('utf-8')
    except UnicodeDecodeError:
        pass
# does it have a to_str function?
try:
    return a_thing.to_str()
except (AttributeError, KeyError, TypeError):
    # AttributeError - no to_str function?
    # KeyError - DotDict has no to_str?
    # TypeError - problem converting
    # nope, no to_str function
    pass
# is this a type proxy?
try:
    return arbitrary_object_to_string(a_thing.a_type)
except (AttributeError, KeyError, TypeError):
    # nope, no a_type property
    pass
# is it a built in?
try:
    return known_mapping_type_to_str[a_thing]
except (KeyError, TypeError):
    # nope, not a builtin
    pass
# is it something from a loaded module?
try:
    if a_thing.__module__ not in ('__builtin__', 'builtins', 'exceptions'):
        if a_thing.__module__ == "__main__":
            module_name = (
                sys.modules['__main__']
                .__file__[:-3]
                .replace('/', '.')
                .strip('.')
            )
        else:
            module_name = a_thing.__module__
        return "%s.%s" % (module_name, a_thing.__name__)
except AttributeError:
    # nope, not one of these
    pass
# maybe it has a __name__ attribute?
try:
    return a_thing.__name__
except AttributeError:
    # nope, not one of these
    pass
# punt and see what happens if we just cast it to string
return str(a_thing)
def arbitrary_object_to_string(a_thing)
take a python object of some sort and convert it into a human readable string. This function is used extensively to convert things like "subject" into "subject_key", function -> function_key, etc.
2.82373
2.853901
0.989428
generator = SourceGenerator(indent_with, add_line_information)
generator.visit(node)
return ''.join(str(s) for s in generator.result)
def to_source(node, indent_with=' ' * 4, add_line_information=False)
This function can convert a node tree back into python sourcecode. This is useful for debugging purposes, especially if you're dealing with custom asts not generated by python itself. It could be that the sourcecode is evaluable when the AST itself is not compilable / evaluable. The reason for this is that the AST contains some more data than regular sourcecode does, which is dropped during conversion. Each level of indentation is replaced with `indent_with`. By default this parameter is equal to four spaces as suggested by PEP 8, but it might be adjusted to match the application's style guide. If `add_line_information` is set to `True`, comments for the line numbers of the nodes are added to the output. This can be used to spot wrong line number information of statement nodes.
3.068927
4.694682
0.653703
if not name:
    name = threading.currentThread().getName()
if name in self.pool:
    return self.pool[name]
self.pool[name] = FakeDatabaseConnection(self.dsn)
return self.pool[name]
def connection(self, name=None)
return a named connection. This function will return a named connection by either finding one in its pool by the name or creating a new one. If no name is given, it will use the name of the current executing thread as the name of the connection. parameters: name - a name as a string
3.283142
3.297819
0.99555
if force:
    print('PostgresPooled - delegating connection closure')
    try:
        super(PostgresPooled, self).close_connection(connection, force)
    except self.operational_exceptions:
        print('PostgresPooled - failed closing')
    for name, conn in self.pool.iteritems():
        if conn is connection:
            break
    del self.pool[name]
else:
    print('PostgresPooled - refusing to close connection')
def close_connection(self, connection, force=False)
overriding the baseclass function, this routine will decline to close a connection at the end of a transaction context. This allows for reuse of connections.
5.085371
5.079271
1.001201
with self.config.db_transaction() as trans:
    function(trans, *args, **kwargs)
def do_transaction(self, function, *args, **kwargs)
execute a function within the context of a transaction
5.089314
5.104426
0.997039
for x in range(int(seconds)):
    if (self.config.wait_log_interval
            and not x % self.config.wait_log_interval):
        print('%s: %dsec of %dsec' % (wait_reason, x, seconds))
    time.sleep(1.0)
def responsive_sleep(self, seconds, wait_reason='')
Sleep for the specified number of seconds, logging every 'wait_log_interval' seconds with progress info.
4.281519
3.610502
1.185852
for wait_in_seconds in self.backoff_generator():
    try:
        with self.config.db_transaction() as trans:
            function(trans, *args, **kwargs)
            trans.commit()
            break
    except self.config.db_transaction.operational_exceptions:
        pass
    print(
        'failure in transaction - retry in %s seconds' % wait_in_seconds
    )
    self.responsive_sleep(
        wait_in_seconds,
        "waiting for retry after failure in transaction"
    )
def do_transaction(self, function, *args, **kwargs)
execute a function within the context of a transaction
5.303285
5.37901
0.985922
expanded_file_contents = []
with open(file_name) as f:
    for a_line in f:
        match = ConfigObjWithIncludes._include_re.match(a_line)
        if match:
            include_file = match.group(2)
            include_file = os.path.join(
                original_path,
                include_file
            )
            new_lines = self._expand_files(
                include_file,
                os.path.dirname(include_file),
                indent + match.group(1)
            )
            expanded_file_contents.extend(new_lines)
        else:
            expanded_file_contents.append(indent + a_line.rstrip())
return expanded_file_contents
def _expand_files(self, file_name, original_path, indent="")
This recursive function accepts a file name, opens the file and then spools the contents of the file into a list, examining each line as it does so. If it detects a line beginning with "+include", it assumes the string immediately following is a file name. Recursing, the new file is opened and its contents are spooled into the accumulating list.
2.286568
2.255592
1.013733
if isinstance(infile, (six.binary_type, six.text_type)):
    infile = to_str(infile)
    original_path = os.path.dirname(infile)
    expanded_file_contents = self._expand_files(infile, original_path)
    super(ConfigObjWithIncludes, self)._load(
        expanded_file_contents,
        configspec
    )
else:
    super(ConfigObjWithIncludes, self)._load(infile, configspec)
def _load(self, infile, configspec)
this overrides the original ConfigObj method of the same name. It runs through the input file collecting lines into a list. When completed, this method submits the list of lines to the superclass's function of the same name. ConfigObj proceeds, completely unaware that its input file has been preprocessed.
3.268887
3.076449
1.062552
if self.delayed_parser_instantiation:
    try:
        app = config_manager._get_option('admin.application')
        source = "%s%s" % (app.value.app_name, file_name_extension)
        self.config_obj = configobj.ConfigObj(source)
        self.delayed_parser_instantiation = False
    except AttributeError:
        # we don't have enough information to get the ini file
        # yet. we'll ignore the error for now
        return obj_hook()  # return empty dict of the obj_hook type
if isinstance(self.config_obj, obj_hook):
    return self.config_obj
return obj_hook(initializer=self.config_obj)
def get_values(self, config_manager, ignore_mismatches, obj_hook=DotDict)
Return a nested dictionary representing the values in the ini file. In the case of this ValueSource implementation, both parameters are dummies.
6.558214
6.437114
1.018813
options = [
    value
    for value in source_dict.values()
    if isinstance(value, Option)
]
options.sort(key=lambda x: x.name)
indent_spacer = " " * (level * indent_size)
for an_option in options:
    print("%s# %s" % (indent_spacer, an_option.doc), file=output_stream)
    option_value = to_str(an_option)
    if an_option.reference_value_from:
        print(
            '%s# see "%s.%s" for the default or override it here' % (
                indent_spacer,
                an_option.reference_value_from,
                an_option.name
            ),
            file=output_stream
        )
    if an_option.likely_to_be_changed or an_option.has_changed:
        option_format = '%s%s=%s\n'
    else:
        option_format = '%s#%s=%s\n'
    if isinstance(option_value, six.string_types) and \
            ',' in option_value:
        # quote lists unless they're already quoted
        if option_value[0] not in '\'"':
            option_value = '"%s"' % option_value
    print(
        option_format % (indent_spacer, an_option.name, option_value),
        file=output_stream
    )
next_level = level + 1
namespaces = [
    (key, value)
    for key, value in source_dict.items()
    if isinstance(value, Namespace)
]
namespaces.sort(key=ValueSource._namespace_reference_value_from_sort)
for key, namespace in namespaces:
    next_level_spacer = " " * next_level * indent_size
    print(
        "%s%s%s%s\n" % (
            indent_spacer,
            "[" * next_level,
            key,
            "]" * next_level
        ),
        file=output_stream
    )
    if namespace._doc:
        print(
            "%s%s" % (next_level_spacer, namespace._doc),
            file=output_stream
        )
    if namespace._reference_value_from:
        print(
            "%s#+include ./common_%s.ini\n" % (next_level_spacer, key),
            file=output_stream
        )
    if namespace_name:
        ValueSource._write_ini(
            source_dict=namespace,
            namespace_name="%s.%s" % (namespace_name, key),
            level=level + 1,
            indent_size=indent_size,
            output_stream=output_stream
        )
    else:
        ValueSource._write_ini(
            source_dict=namespace,
            namespace_name=key,
            level=level + 1,
            indent_size=indent_size,
            output_stream=output_stream
        )
def _write_ini(source_dict, namespace_name=None, level=0, indent_size=4, output_stream=sys.stdout)
this function prints the components of a configobj ini file. It is recursive for outputting the nested sections of the ini file.
2.568379
2.602199
0.987003
try:
    config_kwargs = {'mapping_class': kwargs.pop('mapping_class')}
except KeyError:
    config_kwargs = {}
cm = ConfigurationManager(*args, **kwargs)
return cm.get_config(**config_kwargs)
def configuration(*args, **kwargs)
this function just instantiates a ConfigurationManager and returns the configuration dictionary. It accepts all the same parameters as the constructor for the ConfigurationManager class.
4.446777
3.474772
1.279732
if ( "not allowed" in message or "ignored" in message or "expected" in message or "invalid" in message or self.add_help ): # when we have "help" then we must also have proper error # processing. Without "help", we suppress the errors by # doing nothing here super(IntermediateConfigmanParser, self).error(message)
def error(self, message)
we need to suppress errors that might happen in earlier phases of the expansion/overlay process.
14.294982
13.760295
1.038857
subordinate_mappings = []
for key, value in six.iteritems(a_mapping):
    if isinstance(value, collections.Mapping):
        subordinate_mappings.append((key, value))
        if include_dicts:
            yield key, value
    else:
        yield key, value
for key, a_map in subordinate_mappings:
    for sub_key, value in iteritems_breadth_first(a_map, include_dicts):
        yield '%s.%s' % (key, sub_key), value
def iteritems_breadth_first(a_mapping, include_dicts=False)
a generator that returns all the keys in a set of nested Mapping instances. The keys take the form X.Y.Z
1.980411
2.087779
0.948573
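A Python 3 standalone equivalent of the generator above (collections.abc.Mapping replaces the six/collections spelling), showing the X.Y.Z key flattening in breadth-first order.

from collections.abc import Mapping

def iteritems_breadth_first(a_mapping, include_dicts=False):
    subordinate_mappings = []
    for key, value in a_mapping.items():
        if isinstance(value, Mapping):
            subordinate_mappings.append((key, value))
            if include_dicts:
                yield key, value
        else:
            yield key, value
    # only after the current level is exhausted do we descend
    for key, a_map in subordinate_mappings:
        for sub_key, value in iteritems_breadth_first(a_map, include_dicts):
            yield '%s.%s' % (key, sub_key), value

print(list(iteritems_breadth_first({'a': 1, 'b': {'c': 2, 'd': {'e': 3}}})))
# [('a', 1), ('b.c', 2), ('b.d.e', 3)]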
configmanized_keys_dict = DotDict()
for k, v in iteritems_breadth_first(a_mapping):
    if '__' in k and k != k.upper():
        k = k.replace('__', '.')
    configmanized_keys_dict[k] = v
return configmanized_keys_dict
def configman_keys(a_mapping)
return a DotDict that is a copy of the provided mapping with keys transformed into a configman compatible form: if the key is not all uppercase, then all doubled underscores will be replaced with the '.' character. This has a specific use with os.environ. Linux shells generally do not allow the dot character in an identifier. Configman relies on the dot character to separate namespaces. If the environment is processed through this function, then doubled underscores will be interpreted as if they were the dot character.
4.198634
3.892976
1.078515
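A sketch of the same key transformation over a plain dict (the real function returns a DotDict); all-uppercase keys are left alone, as typical environment variables would be.

def configman_keys(a_mapping):
    out = {}
    for k, v in a_mapping.items():
        # doubled underscores become dots, but only for non-all-caps keys
        if '__' in k and k != k.upper():
            k = k.replace('__', '.')
        out[k] = v
    return out

env = {'resource__postgres__dbname': 'breakpad', 'SHELL__OPTS': '-e'}
print(configman_keys(env))
# {'resource.postgres.dbname': 'breakpad', 'SHELL__OPTS': '-e'}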
#==========================================================================
class DotDictWithKeyTranslations(base_class):

    def __init__(self, *args, **kwargs):
        self.__dict__['_translation_tuples'] = translation_tuples
        super(DotDictWithKeyTranslations, self).__init__(*args, **kwargs)

    #----------------------------------------------------------------------
    @memoize()
    def _translate_key(self, key):
        for original, replacement in self._translation_tuples:
            key = key.replace(original, replacement)
        return key

    #----------------------------------------------------------------------
    def assign(self, key, value):
        super(DotDictWithKeyTranslations, self).assign(
            self._translate_key(key),
            value
        )

    #----------------------------------------------------------------------
    def __setattr__(self, key, value):
        super(DotDictWithKeyTranslations, self).__setattr__(
            self._translate_key(key),
            value
        )

    #----------------------------------------------------------------------
    def __getattr__(self, key):
        alt_key = self._translate_key(key)
        if alt_key == key:
            return super(DotDictWithKeyTranslations, self).__getattr__(key)
        try:
            return getattr(self, alt_key)
        except KeyError:
            raise KeyError(key)

    #----------------------------------------------------------------------
    def __delattr__(self, key):
        super(DotDictWithKeyTranslations, self).__delattr__(
            self._translate_key(key)
        )

if six.PY2:
    new_class_name = six.binary_type(new_class_name)
DotDictWithKeyTranslations.__name__ = new_class_name
return DotDictWithKeyTranslations
def create_key_translating_dot_dict( new_class_name, translation_tuples, base_class=DotDict )
this function will generate a DotDict derivative class that has key translation built in. If the key is not found, translations (as specified by the translation_tuples) are performed on the key and the lookup is tried again. Only on failure of this second lookup will the KeyError exception be raised. parameters: new_class_name - the name of the returned class translation_tuples - a sequence of 2-tuples of the form: (original_substring, substitution_string) base_class - the baseclass on which this new class is to be based
1.798298
1.769778
1.016115
namespaces = []
for key in self._key_order:
    if isinstance(getattr(self, key), DotDict):
        namespaces.append(key)
        if include_dicts:
            yield key
    else:
        yield key
for a_namespace in namespaces:
    for key in self[a_namespace].keys_breadth_first(include_dicts):
        yield '%s.%s' % (a_namespace, key)
def keys_breadth_first(self, include_dicts=False)
a generator that returns all the keys in a set of nested DotDict instances. The keys take the form X.Y.Z
2.823673
2.52653
1.117609
key_split = key.split('.')
cur_dict = self
for k in key_split[:-1]:
    try:
        cur_dict = cur_dict[k]
    except KeyError:
        # use self.__class__ so that derived classes remain true to type
        cur_dict[k] = self.__class__()
        cur_dict = cur_dict[k]
cur_dict[key_split[-1]] = value
def assign(self, key, value)
an alternative method for assigning values to nested DotDict instances. It accepts keys in the form of X.Y.Z. If any nested DotDict instances don't yet exist, they will be created.
3.391995
3.063779
1.107128
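The same dotted-key assignment demonstrated over plain nested dicts; note the real method builds self.__class__() instances instead of {} so derived mapping types stay true to type.

def assign(d, key, value):
    key_split = key.split('.')
    cur_dict = d
    for k in key_split[:-1]:
        cur_dict = cur_dict.setdefault(k, {})   # create missing levels
    cur_dict[key_split[-1]] = value

config = {}
assign(config, 'database.credentials.port', 5432)
print(config)   # {'database': {'credentials': {'port': 5432}}}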
parent_key = '.'.join(key.split('.')[:-1])
if not parent_key:
    return None
else:
    return self[parent_key]
def parent(self, key)
when given a key of the form X.Y.Z, this method will return the parent DotDict of the 'Z' key.
3.250749
2.83217
1.147794
def wrapper(f):
    @wraps(f)
    def fn(*args, **kwargs):
        if kwargs:
            key = (args, tuple(kwargs.items()))
        else:
            key = args
        try:
            return fn.cache[key]
        except KeyError:
            if fn.count >= max_cache_size:
                fn.cache = {}
                fn.count = 0
            result = f(*args, **kwargs)
            fn.cache[key] = result
            fn.count += 1
            return result
        except TypeError:
            return f(*args, **kwargs)
    fn.cache = {}
    fn.count = 0
    return fn
return wrapper
def memoize(max_cache_size=1000)
Python 2.4 compatible memoize decorator. It creates a cache that has a maximum size. If the cache exceeds the max, it is thrown out and a new one made. With such behavior, it is wise to set the cache just a little larger than the maximum expected need. Parameters: max_cache_size - the size to which a cache can grow
1.628307
1.780959
0.914286
if self.default is None or force:
    self.default = val
    self.set_value(val)
    self.has_changed = True
else:
    raise OptionError(
        "cannot override existing default without using the 'force' "
        "option"
    )
def set_default(self, val, force=False)
this function allows a default to be set on an option that doesn't have one. It is used when a base class defines an Option for use in derived classes but cannot predict what value would be useful to the derived classes. This gives the derived classes the opportunity to set a logical default appropriate for the derived class' context. For example::

    class A(RequiredConfig):
        required_config = Namespace()
        required_config.add_option(
            'x',
            default=None
        )

    class B(A):
        A.required_config.x.set_default(68)

parameters: val - the value for the default; force - normally this function only works on Options that have not had a default set (default is None). This boolean allows you to override an existing default.
4.739007
4.587006
1.033137
o = Option(
    name=self.name,
    default=self.default,
    doc=self.doc,
    from_string_converter=self.from_string_converter,
    to_string_converter=self.to_string_converter,
    value=self.value,
    short_form=self.short_form,
    exclude_from_print_conf=self.exclude_from_print_conf,
    exclude_from_dump_conf=self.exclude_from_dump_conf,
    is_argument=self.is_argument,
    likely_to_be_changed=self.likely_to_be_changed,
    not_for_definition=self.not_for_definition,
    reference_value_from=self.reference_value_from,
    secret=self.secret,
    has_changed=self.has_changed,
    foreign_data=self.foreign_data,
)
return o
def copy(self)
return a copy
2.949749
2.907945
1.014376
config = None
try:
    config = self.get_config(mapping_class=mapping_class)
    yield config
finally:
    if config:
        self._walk_and_close(config)
def context(self, mapping_class=DotDictWithAcquisition)
return a config as a context that calls close on every item when it goes out of scope
5.15729
3.614023
1.427022
if self.app_name or self.app_description:
    print('Application: ', end='', file=output_stream)
if self.app_name:
    print(self.app_name, self.app_version, file=output_stream)
if self.app_description:
    print(self.app_description, file=output_stream)
if self.app_name or self.app_description:
    print('', file=output_stream)
names_list = self.get_option_names()
print(
    "usage:\n%s [OPTIONS]... " % self.app_invocation_name,
    end='',
    file=output_stream
)
bracket_count = 0
# this section prints the non-switch command line arguments
for key in names_list:
    an_option = self.option_definitions[key]
    if an_option.is_argument:
        if an_option.default is None:
            # there's no option, assume the user must set this
            print(an_option.name, end='', file=output_stream)
        elif (
            inspect.isclass(an_option.value)
            or inspect.ismodule(an_option.value)
        ):
            # this is already set and it could have expanded, most
            # likely this is a case where a sub-command has been
            # loaded and we're looking to show the help for it.
            # display it as a constant already provided rather
            # than as an option the user must provide
            print(an_option.default, end='', file=output_stream)
        else:
            # this is an argument that the user may alternatively
            # provide
            print("[ %s" % an_option.name, end='', file=output_stream)
            bracket_count += 1
print(']' * bracket_count, '\n', file=output_stream)
names_list.sort()
if names_list:
    print('OPTIONS:', file=output_stream)
pad = ' ' * 4
for name in names_list:
    if name in self.options_banned_from_help:
        continue
    option = self._get_option(name)
    line = ' ' * 2  # always start with 2 spaces
    if option.short_form:
        line += '-%s, ' % option.short_form
    line += '--%s' % name
    line += '\n'
    doc = option.doc if option.doc is not None else ''
    if doc:
        line += '%s%s\n' % (pad, doc)
    try:
        value = option.value
        type_of_value = type(value)
        converter_function = to_string_converters[type_of_value]
        default = converter_function(value)
    except KeyError:
        default = option.value
    if default is not None:
        if (
            (option.secret or 'password' in name.lower())
            and not self.option_definitions.admin.expose_secrets.default
        ):
            default = '*********'
        if name not in ('help',):
            # don't bother with certain dead obvious ones
            line += '%s(default: %s)\n' % (pad, default)
    print(line, file=output_stream)
def output_summary(self, output_stream=sys.stdout)
outputs a usage tip and the list of acceptable commands. This is useful as the output of the 'help' option. parameters: output_stream - an open file-like object suitable for use as the target of a print function
4.079296
4.046088
1.008207
config_file_type = self._get_option('admin.print_conf').value

@contextlib.contextmanager
def stdout_opener():
    yield sys.stdout

skip_keys = [
    k
    for (k, v) in six.iteritems(self.option_definitions)
    if isinstance(v, Option) and v.exclude_from_print_conf
]
self.write_conf(config_file_type, stdout_opener, skip_keys=skip_keys)
def print_conf(self)
write the current configuration to stdout. The file type of the output is taken from the 'admin.print_conf' option and must match a registered type.
5.641661
5.893188
0.957319
if not config_pathname:
    config_pathname = self._get_option('admin.dump_conf').value
opener = functools.partial(open, config_pathname, 'w')
config_file_type = os.path.splitext(config_pathname)[1][1:]
skip_keys = [
    k
    for (k, v) in six.iteritems(self.option_definitions)
    if isinstance(v, Option) and v.exclude_from_dump_conf
]
self.write_conf(config_file_type, opener, skip_keys=skip_keys)
def dump_conf(self, config_pathname=None)
write a config file to the pathname specified in the parameter. The file extension determines the type of file written and must match a registered type. parameters: config_pathname - the full path and filename of the target config file.
3.824357
3.934266
0.972064
blocked_keys = self.keys_blocked_from_output
if skip_keys:
    blocked_keys.extend(skip_keys)
if blocked_keys:
    option_defs = self.option_definitions.safe_copy()
    for a_blocked_key in blocked_keys:
        try:
            del option_defs[a_blocked_key]
        except (AttributeError, KeyError):
            # okay that key isn't here
            pass
    # remove empty namespaces
    all_keys = [
        k for k in option_defs.keys_breadth_first(include_dicts=True)
    ]
    for key in all_keys:
        candidate = option_defs[key]
        if (
            isinstance(candidate, Namespace)
            and not len(candidate)
        ):
            del option_defs[key]
else:
    option_defs = self.option_definitions
# find all of the secret options and overwrite their values with
# '*' * 16
if not self.option_definitions.admin.expose_secrets.default:
    for a_key in option_defs.keys_breadth_first():
        an_option = option_defs[a_key]
        if (
            (not a_key.startswith('admin'))
            and isinstance(an_option, Option)
            and an_option.secret
        ):
            # force the option to be a string of *
            option_defs[a_key].value = '*' * 16
            option_defs[a_key].from_string_converter = str
dispatch_request_to_write(config_file_type, option_defs, opener)
def write_conf(self, config_file_type, opener, skip_keys=None)
write a configuration file to a file-like object. parameters: config_file_type - a string containing a registered file type OR a for_XXX module from the value_source package. Passing in a string that is unregistered will result in a KeyError; opener - a callable object or function that returns a file-like object that works as a context in a with statement.
4.060795
4.14633
0.979371
logger.info("app_name: %s", self.app_name) logger.info("app_version: %s", self.app_version) logger.info("current configuration:") config = [(key, self.option_definitions[key].value) for key in self.option_definitions.keys_breadth_first() if key not in self.keys_blocked_from_output] config.sort() for key, val in config: if ( self.option_definitions[key].secret or 'password' in key.lower() ): logger.info('%s: *********', key) else: try: logger.info('%s: %s', key, to_string_converters[type(key)](val)) except KeyError: logger.info('%s: %s', key, val)
def log_config(self, logger)
write out the current configuration to a log-like object. parameters: logger - an object that implements a method called 'info' with the same semantics as the call to 'logger.info'
3.563027
3.553238
1.002755
return [
    x
    for x in self.option_definitions.keys_breadth_first()
    if isinstance(self.option_definitions[x], Option)
]
def get_option_names(self)
returns a list of fully qualified option names. returns: a list of strings representing the Options in the source Namespace list. Each item will be fully qualified with dot delimited Namespace names.
7.241497
8.191916
0.883981
# a set of known reference_value_from_links
set_of_reference_value_option_names = set()
for key in keys:
    if key in finished_keys:
        continue
    an_option = self.option_definitions[key]
    if an_option.reference_value_from:
        fully_qualified_reference_name = '.'.join((
            an_option.reference_value_from,
            an_option.name
        ))
        if fully_qualified_reference_name in keys:
            # this referenced value has already been defined
            # no need to repeat it - skip on to the next key
            continue
        reference_option = an_option.copy()
        reference_option.reference_value_from = None
        reference_option.name = fully_qualified_reference_name
        # wait, aren't we setting a fully qualified dotted name into
        # the name field?  Yes, 'add_option' below sees that
        # full pathname and does the right thing with it to ensure
        # that the reference_option is created within the
        # correct namespace
        set_of_reference_value_option_names.add(
            fully_qualified_reference_name
        )
        self.option_definitions.add_option(reference_option)

for a_reference_value_option_name in set_of_reference_value_option_names:
    for x in range(a_reference_value_option_name.count('.')):
        namespace_path = \
            a_reference_value_option_name.rsplit('.', x + 1)[0]
        self.option_definitions[namespace_path].ref_value_namespace()

return set_of_reference_value_option_names
def _create_reference_value_options(self, keys, finished_keys)
this method steps through the option definitions looking for alt paths. On finding one, it creates the 'reference_value_from' links within the option definitions and populates it with copied options.
4.133586
3.790395
1.090542
for a_value_source in self.values_source_list:
    try:
        if a_value_source.always_ignore_mismatches:
            continue
    except AttributeError:
        # ok, this values source doesn't have the concept of
        # always ignoring mismatches, so we won't tolerate mismatches
        pass
    # we want to fetch the keys from the value sources so that we can
    # check for mismatches. Commandline value sources are different:
    # we never want to allow unmatched keys from the command line.
    # By detecting if this value source is a command line source, we
    # can employ the command line's own mismatch detection. The
    # boolean 'allow_mismatches' controls application of the tolerance
    # for mismatches.
    if hasattr(a_value_source, 'command_line_value_source'):
        allow_mismatches = False
    else:
        allow_mismatches = True
    # make a set of all the keys from a value source in the form
    # of strings like this: 'x.y.z'
    value_source_mapping = a_value_source.get_values(
        self,
        allow_mismatches,
        self.value_source_object_hook
    )
    value_source_keys_set = set([
        k
        for k in DotDict(value_source_mapping).keys_breadth_first()
    ])
    # make a set of the keys that didn't match any of the known
    # keys in the requirements
    unmatched_keys = value_source_keys_set.difference(known_keys)
    # some of the unmatched keys may actually be ok because they were
    # used during acquisition.
    # remove keys of the form 'y.z' if they match a known key of the
    # form 'x.y.z'
    for key in unmatched_keys.copy():
        key_is_okay = six.moves.reduce(
            lambda x, y: x or y,
            (known_key.endswith(key) for known_key in known_keys)
        )
        if key_is_okay:
            unmatched_keys.remove(key)
    # anything left in the unmatched_key set is a badly formed key.
    # issue a warning
    if unmatched_keys:
        if self.option_definitions.admin.strict.default:
            # raise hell...
            if len(unmatched_keys) > 1:
                raise NotAnOptionError(
                    "%s are not valid Options" % unmatched_keys
                )
            elif len(unmatched_keys) == 1:
                raise NotAnOptionError(
                    "%s is not a valid Option" % unmatched_keys.pop()
                )
        else:
            warnings.warn(
                'Invalid options: %s' % ', '.join(sorted(unmatched_keys))
            )
def _check_for_mismatches(self, known_keys)
check for bad options from value sources
4.813418
4.652338
1.034624
config = mapping_class()
self._walk_config_copy_values(
    self.option_definitions,
    config,
    mapping_class
)
return config
def _generate_config(self, mapping_class)
This routine generates a copy of the DotDict based config
8.578183
7.760006
1.105435
print("PGPooledTransaction - shutting down connection pool") for name, conn in self.pool.iteritems(): conn.close() print("PGPooledTransaction - connection %s closed" % name)
def close(self)
close all pooled connections
7.97668
6.274931
1.271198
target_type = type(target_action_instance)
for key, value in six.iteritems(registry['action']):
    if value is target_type:
        if key is None:
            return 'store'
        return key
return None
def find_action_name_by_value(registry, target_action_instance)
the association of an action class with a human readable name is exposed externally only at the time of argument definition. This routine, when given a reference to argparse's internal action registry and an action, will find that action and return the name under which it was registered.
4.025529
4.808742
0.837127
args = inspect.getargspec(an_action.__class__.__init__).args
kwargs = dict(
    (an_attr, getattr(an_action, an_attr))
    for an_attr in args
    if (
        an_attr not in ('self', 'required')
        and getattr(an_action, an_attr) is not None
    )
)
action_name = find_action_name_by_value(
    parser._optionals._registries,
    an_action
)
if 'required' in kwargs:
    del kwargs['required']
kwargs['action'] = action_name
if 'option_strings' in kwargs:
    args = tuple(kwargs['option_strings'])
    del kwargs['option_strings']
else:
    args = ()
return args, kwargs
def get_args_and_values(parser, an_action)
this routine attempts to reconstruct the kwargs that were used in the creation of an action object
2.830444
2.7421
1.032218
#"""assume that source is of type argparse try: destination.update(source.get_required_config()) except AttributeError: # looks like the user passed in a real arpgapse parser rather than our # bastardized version of one. No problem, we can work with it, # though the translation won't be as perfect. our_parser = ArgumentParser() for i, an_action in enumerate(source._actions): args, kwargs = get_args_and_values(source, an_action) dest = kwargs.get('dest', '') if dest in ('help', 'version'): continue our_parser.add_argument(*args, **kwargs) destination.update(our_parser.get_required_config())
def setup_definitions(source, destination)
this method starts the process of configman reading and using an argparse instance as a source of configuration definitions.
8.147935
7.792304
1.045639
# save a local copy of the namespace
self.namespaces[name] = a_namespace
# iterate through the namespace branding each of the options with the
# name of the subparser to which they belong
for k in a_namespace.keys_breadth_first():
    an_option = a_namespace[k]
    if not an_option.foreign_data:
        an_option.foreign_data = DotDict()
    an_option.foreign_data['argparse.owning_subparser_name'] = name
def add_namespace(self, name, a_namespace)
as we build up argparse, the actions that define a subparser are translated into configman options. Each of those options must be tagged with the name of the subparser to which they correspond.
7.631522
6.162298
1.238422
command_name = args[0]
new_kwargs = kwargs.copy()
new_kwargs['configman_subparsers_option'] = self._configman_option
new_kwargs['subparser_name'] = command_name
subparsers = self._configman_option.foreign_data.argparse.subparsers
a_subparser = super(ConfigmanSubParsersAction, self).add_parser(
    *args,
    **new_kwargs
)
subparsers[command_name] = DotDict({
    "args": args,
    "kwargs": new_kwargs,
    "subparser": a_subparser
})
return a_subparser
def add_parser(self, *args, **kwargs)
each time a subparser action is used to create a new parser object we must save the original args & kwargs. In a later phase of configman, we'll need to reproduce the subparsers exactly without resorting to copying. We save the args & kwargs in the 'foreign_data' section of the configman option that corresponds with the subparser action.
3.9779
2.990343
1.330249
required_config = Namespace()
# add current options to a copy of required config
for k, v in iteritems_breadth_first(self.required_config):
    required_config[k] = v
# get any option found in any subparsers
try:
    subparser_namespaces = (
        self.configman_subparsers_option.foreign_data
        .argparse.subprocessor_from_string_converter
    )
    subparsers = (
        self._argparse_subparsers._configman_option.foreign_data
        .argparse.subparsers
    )
    # each subparser needs to have its configman options set up
    # in the subparser's configman option. This routine copies
    # the required_config of each subparser into the
    # SubparserFromStringConverter defined above.
    for subparser_name, subparser_data in six.iteritems(subparsers):
        subparser_namespaces.add_namespace(
            subparser_name,
            subparser_data.subparser.get_required_config()
        )
except AttributeError:
    # there is no subparser
    pass
return required_config
def get_required_config(self)
because of the existence of subparsers, the configman options that correspond with argparse arguments are not a constant. We need to produce a copy of the namespace rather than the actual embedded namespace.
7.04535
6.040234
1.166403
kwargs['parser_class'] = self.__class__
kwargs['action'] = ConfigmanSubParsersAction
subparser_action = super(ArgumentParser, self).add_subparsers(
    *args,
    **kwargs
)
self._argparse_subparsers = subparser_action
if "dest" not in kwargs or kwargs['dest'] is None:
    kwargs['dest'] = 'subcommand'
configman_name = kwargs['dest']
configman_default = None
configman_doc = kwargs.get('help', '')
subprocessor_from_string_converter = SubparserFromStringConverter()
configman_to_string = str
configman_is_argument = True
configman_not_for_definition = True
# it's finally time to create the configman Option object and add it
# to the required_config.
self.required_config.add_option(
    name=configman_name,
    default=configman_default,
    doc=configman_doc,
    from_string_converter=subprocessor_from_string_converter,
    to_string_converter=configman_to_string,
    is_argument=configman_is_argument,
    not_for_definition=configman_not_for_definition,
    # we're going to save the input parameters that created the
    # argparse Action.  This enables us to perfectly reproduce the
    # original Action object later during the configman overlay
    # process.
    foreign_data=DotDict({
        'argparse.flags.subcommand': subparser_action,
        'argparse.args': args,
        'argparse.kwargs': kwargs,
        'argparse.subparsers': DotDict(),
        'argparse.subprocessor_from_string_converter':
            subprocessor_from_string_converter
    })
)
self.configman_subparsers_option = self.required_config[configman_name]
subparser_action.add_configman_option(self.configman_subparsers_option)
return subparser_action
def add_subparsers(self, *args, **kwargs)
When adding a subparser, we need to ensure that our version of the SubparserAction object is returned. We also need to create the corresponding configman Option object for the subparser and pack its foreign data section with the original args & kwargs.
4.385029
3.720403
1.178643
# load the config_manager within the scope of the method that uses it
# so that we avoid circular references in the outer scope
from configman.config_manager import ConfigurationManager
configuration_manager = ConfigurationManager(
    definition_source=[self.get_required_config()],
    values_source_list=self.value_source_list,
    argv_source=args,
    app_name=self.prog,
    app_version=self.version,
    app_description=self.description,
    use_auto_help=False,
)
# it is apparently a common idiom that commandline options may have
# embedded '-' characters in them.  Configman requires that options
# follow the Python identifier rules.  Fortunately, Configman has a
# class that will perform dynamic translation of keys.  In this
# code fragment, we fetch the final configuration from configman
# using a Mapping that will translate keys with '-' into keys with
# '_' instead.
conf = configuration_manager.get_config(
    mapping_class=create_key_translating_dot_dict(
        "HyphenUnderscoreDict",
        (('-', '_'),)
    )
)
# here is where we add the values given to the "set_defaults" method
# of argparse.
if self.configman_subparsers_option:
    subparser_name = conf[self.configman_subparsers_option.name]
    try:
        conf.update(
            self.configman_subparsers_option.foreign_data.argparse
            .subparsers[subparser_name].subparser
            .extra_defaults
        )
    except (AttributeError, KeyError):
        # no extra_defaults, skip on
        pass
if hasattr(self, 'extra_defaults'):
    conf.update(self.extra_defaults)
return conf
def parse_args(self, args=None, namespace=None)
this method hijacks the normal argparse Namespace generation, shimming configman into the process. The return value will be a configman DotDict rather than an argparse Namespace.
8.752536
8.40763
1.041023
# load the config_manager within the scope of the method that uses it
# so that we avoid circular references in the outer scope
from configman.config_manager import ConfigurationManager
configuration_manager = ConfigurationManager(
    definition_source=[self.get_required_config()],
    values_source_list=self.value_source_list,
    argv_source=args,
    app_name=self.prog,
    app_version=self.version,
    app_description=self.description,
    use_auto_help=False,
)
conf = configuration_manager.get_config(
    mapping_class=create_key_translating_dot_dict(
        "HyphenUnderscoreDict",
        (('-', '_'),)
    )
)
return conf
def parse_known_args(self, args=None, namespace=None)
this method hijacks the normal argparse Namespace generation, shimming configman into the process. The return value will be a configman DotDict rather than an argparse Namespace.
8.487123
7.786192
1.090022
return "%s%s%s" % ( open_bracket_char, delimiter.join( local_to_str(x) for x in a_list ), close_bracket_char )
def sequence_to_string(a_list, open_bracket_char='[', close_bracket_char=']', delimiter=", ")
a dedicated function that turns a sequence into a delimited string of converted items. This method will flatten nested lists.
2.696515
2.728419
0.988307
t_as_string = to_str(t)
if not is_identifier(t_as_string):
    # this class expanded into something other than a single identifier
    # we can ignore it. This is the case when we encounter something
    # like the configman.converter.str_to_classes_in_namespaces
    # InnerClassList. We can safely ignore these things here.
    return (None, None)
if '.' in t_as_string:
    parts = t_as_string.split('.')
    return ('.'.join(parts[:-1]), parts[-1])
else:
    if t_as_string in known_mapping_str_to_type:
        return (None, None)
    return (None, t_as_string)
def get_import_for_type(t)
given a type, return a tuple of the (module-path, type_name) or (None, None) if it is a built in.
6.181697
5.862257
1.054491
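A standalone sketch of the (module-path, type_name) split described above; the per-part identifier test and the built-in list are stand-ins for the is_identifier and known_mapping_str_to_type helpers this record assumes.

import keyword

def get_import_for_type(t_as_string, builtins=('int', 'str', 'float', 'bool')):
    # stand-in identifier test: every dotted part must be a valid identifier
    parts = t_as_string.split('.')
    if not all(p.isidentifier() and not keyword.iskeyword(p) for p in parts):
        return (None, None)
    if '.' in t_as_string:
        return ('.'.join(parts[:-1]), parts[-1])
    if t_as_string in builtins:
        return (None, None)
    return (None, t_as_string)

print(get_import_for_type('configman.dotdict.DotDict'))  # ('configman.dotdict', 'DotDict')
print(get_import_for_type('int'))                        # (None, None)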
try:
    return datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S')
except ValueError:
    try:
        return datetime.datetime.strptime(s, '%Y-%m-%d')
    except ValueError:
        return datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S.%f')
def datetime_from_ISO_string(s)
Take an ISO date string of the form YYYY-MM-DDTHH:MM:SS.S and convert it into an instance of datetime.datetime
1.459596
1.512805
0.964828
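An equivalent loop-based sketch of the parser above: try each known format in turn and let the final strptime raise if nothing matches.

import datetime

def datetime_from_iso_string(s):
    for fmt in ('%Y-%m-%dT%H:%M:%S', '%Y-%m-%d'):
        try:
            return datetime.datetime.strptime(s, fmt)
        except ValueError:
            pass
    # fractional-seconds form; a final ValueError propagates to the caller
    return datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S.%f')

print(datetime_from_iso_string('2011-05-04T15:10:00'))  # 2011-05-04 15:10:00
print(datetime_from_iso_string('2011-05-04'))           # 2011-05-04 00:00:00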
try:
    input_str = input_str.replace(' ', ':')
except (TypeError, AttributeError):
    from configman.converters import to_str
    raise TypeError('%s should have been a string' % to_str(input_str))
days, hours, minutes, seconds = 0, 0, 0, 0
details = input_str.split(':')
if len(details) >= 4:
    days = int(details[-4])
if len(details) >= 3:
    hours = int(details[-3])
if len(details) >= 2:
    minutes = int(details[-2])
if len(details) >= 1:
    seconds = int(details[-1])
return datetime.timedelta(
    days=days,
    hours=hours,
    minutes=minutes,
    seconds=seconds
)
def str_to_timedelta(input_str)
a string conversion function for timedelta for strings in the format DD:HH:MM:SS or D HH:MM:SS
2.170573
2.172407
0.999156
days = aTimedelta.days
temp_seconds = aTimedelta.seconds
hours = int(temp_seconds / 3600)
minutes = int((temp_seconds - hours * 3600) / 60)
seconds = temp_seconds - hours * 3600 - minutes * 60
return '%d %02d:%02d:%02d' % (days, hours, minutes, seconds)
def timedelta_to_str(aTimedelta)
a conversion function for time deltas to strings in the form 'DD HH:MM:SS'
1.710615
1.743377
0.981208
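A round-trip sketch pairing the two timedelta converters above (compact standalone versions) to show that the 'D HH:MM:SS' form survives the conversion both ways.

import datetime

def str_to_timedelta(input_str):
    parts = [int(p) for p in input_str.replace(' ', ':').split(':')]
    # left-pad with zeros so D, H, M may be omitted, as in the original
    days, hours, minutes, seconds = ([0] * (4 - len(parts)) + parts)[-4:]
    return datetime.timedelta(days=days, hours=hours, minutes=minutes, seconds=seconds)

def timedelta_to_str(td):
    hours, rem = divmod(td.seconds, 3600)
    minutes, seconds = divmod(rem, 60)
    return '%d %02d:%02d:%02d' % (td.days, hours, minutes, seconds)

print(timedelta_to_str(str_to_timedelta('1 02:30:15')))  # '1 02:30:15'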
with open(file_name, 'r') as f:
    s = f.read()
nodes = ast.parse(s)

module_imports = get_nodes_by_instance_type(nodes, _ast.Import)
specific_imports = get_nodes_by_instance_type(nodes, _ast.ImportFrom)

assignment_objs = get_nodes_by_instance_type(nodes, _ast.Assign)
call_objects = get_nodes_by_instance_type(nodes, _ast.Call)

argparse_assignments = get_nodes_by_containing_attr(assignment_objs, 'ArgumentParser')
group_arg_assignments = get_nodes_by_containing_attr(assignment_objs, 'add_argument_group')
add_arg_assignments = get_nodes_by_containing_attr(call_objects, 'add_argument')
parse_args_assignment = get_nodes_by_containing_attr(call_objects, 'parse_args')

# there are cases where we have custom argparsers, such as subclassing
# ArgumentParser. The above will fail on this. However, we can use the
# methods known to ArgumentParser to do a duck-type like approach to
# finding what is the arg parser
if not argparse_assignments:
    aa_references = set([
        i.func.value.id
        for i in chain(add_arg_assignments, parse_args_assignment)
    ])
    argparse_like_objects = [
        getattr(i.value.func, 'id', None)
        for p_ref in aa_references
        for i in get_nodes_by_containing_attr(assignment_objs, p_ref)
    ]
    argparse_like_objects = filter(None, argparse_like_objects)
    argparse_assignments = [
        get_nodes_by_containing_attr(assignment_objs, i)
        for i in argparse_like_objects
    ]
    # for now, we just choose one
    try:
        argparse_assignments = argparse_assignments[0]
    except IndexError:
        pass

# get things that are assigned inside ArgumentParser or its methods
argparse_assigned_variables = get_node_args_and_keywords(assignment_objs, argparse_assignments, 'ArgumentParser')
add_arg_assigned_variables = get_node_args_and_keywords(assignment_objs, add_arg_assignments, 'add_argument')
parse_args_assigned_variables = get_node_args_and_keywords(assignment_objs, parse_args_assignment, 'parse_args')

ast_argparse_source = chain(
    module_imports,
    specific_imports,
    argparse_assigned_variables,
    add_arg_assigned_variables,
    parse_args_assigned_variables,
    argparse_assignments,
    group_arg_assignments,
    add_arg_assignments,
)
return ast_argparse_source
def parse_source_file(file_name)
Parses the AST of a Python file for lines containing references to the argparse module. Returns the collection of ast objects found.

Example client code:

    1. parser = ArgumentParser(desc="My help Message")
    2. parser.add_argument('filename', help="Name of the file to load")
    3. parser.add_argument('-f', '--format', help="Format of output \nOptions: ['md', 'html']")
    4. args = parser.parse_args()

Variables:
    * nodes                 Primary syntax tree object
    * argparse_assignments  The assignment of the ArgumentParser (line 1 in example code)
    * add_arg_assignments   Calls to add_argument() (lines 2-3 in example code)
    * parser_var_name       The instance variable of the ArgumentParser (line 1 in example code)
    * ast_source            The curated collection of all parser-related nodes in the client code
3.141175
3.093374
1.015452
short_options_str, long_options_list = self.getopt_create_opts(
    config_manager.option_definitions
)
try:
    if ignore_mismatches:
        fn = ValueSource.getopt_with_ignore
    else:
        fn = getopt.gnu_getopt
    # here getopt looks through the command line arguments and
    # consumes the defined switches. The things that are not
    # consumed are then offered as the 'args' variable of the
    # parent configuration_manager
    getopt_options, config_manager.args = fn(
        self.argv_source,
        short_options_str,
        long_options_list
    )
except getopt.GetoptError as x:
    raise NotAnOptionError(str(x))
command_line_values = obj_hook()
for opt_name, opt_val in getopt_options:
    if opt_name.startswith('--'):
        name = opt_name[2:]
    else:
        name = self.find_name_with_short_form(
            opt_name[1:],
            config_manager.option_definitions,
            ''
        )
        if not name:
            raise NotAnOptionError(
                '%s is not a valid short form option' % opt_name[1:]
            )
    option_ = config_manager._get_option(name)
    if option_.from_string_converter == boolean_converter:
        command_line_values[name] = not option_.default
    else:
        command_line_values[name] = opt_val
for name, value in zip(
    self._get_arguments(
        config_manager.option_definitions,
        command_line_values
    ),
    config_manager.args
):
    command_line_values[name] = value
return command_line_values
def get_values(self, config_manager, ignore_mismatches, obj_hook=DotDict)
This is the black sheep of the crowd of ValueSource implementations. It needs to know ahead of time all of the parameters that it will need, but we cannot give them to it. We may not know all the parameters because not all classes may have been expanded yet. The two parameters allow this ValueSource implementation to know what parameters have already been defined. The 'ignore_mismatches' parameter tells the implementation whether or not it can ignore extraneous commandline options. The last time this function is called, it will be required to test for illegal commandline options and respond accordingly. Unlike many of the value sources, this method cannot be "memoized": the return result depends on an internal state within the parameter 'config_manager', and any memoize decorator for this method would require capturing that internal state in the memoize cache key.
4.007024
3.994838
1.00305
opts = []
prog_args = []
if isinstance(longopts, str):
    longopts = [longopts]
else:
    longopts = list(longopts)
while args:
    if args[0] == '--':
        prog_args += args[1:]
        break
    if args[0].startswith('--'):
        try:
            opts, args = getopt.do_longs(
                opts,
                args[0][2:],
                longopts,
                args[1:]
            )
        except getopt.GetoptError:
            args = args[1:]
    elif args[0][0] == '-':
        try:
            opts, args = getopt.do_shorts(
                opts,
                args[0][1:],
                shortopts,
                args[1:]
            )
        except getopt.GetoptError:
            args = args[1:]
    else:
        prog_args.append(args[0])
        args = args[1:]
return opts, prog_args
def getopt_with_ignore(args, shortopts, longopts=[])
getopt_with_ignore(args, shortopts[, longopts]) -> opts, args This function works like gnu_getopt(), except that unknown parameters are ignored rather than raising an error.
2.037348
2.058907
0.989529
if isinstance(name, Option):
    an_option = name
    name = an_option.name
else:
    an_option = Option(name, *args, **kwargs)
current_namespace = self
name_parts = name.split('.')
for a_path_component in name_parts[:-1]:
    if a_path_component not in current_namespace:
        current_namespace[a_path_component] = Namespace()
    current_namespace = current_namespace[a_path_component]
an_option.name = name_parts[-1]
setattr(current_namespace, an_option.name, an_option)
return an_option
def add_option(self, name, *args, **kwargs)
add an option to the namespace. This can take two forms: 'name' is a string representing the name of an option and the kwargs are its parameters, or 'name' is an instance of an Option object
2.111186
2.164821
0.975224
if y.ndim == 1:
    y = y.reshape(-1, 1)
if x.ndim == 1:
    x = x.reshape(-1, 1)
xscaled = self.x_scaler.fit_transform(x)
yscaled = self.y_scaler.fit_transform(y)

# Obtain residual sum of squares for whole data set and per component
SSX = np.sum(xscaled ** 2)
SSY = np.sum(yscaled ** 2)
ssx_comp = list()
ssy_comp = list()

for curr_comp in range(1, self.ncomps + 1):
    model = self._reduce_ncomps(curr_comp)
    ypred = self.y_scaler.transform(model.predict(x, y=None))
    xpred = self.x_scaler.transform(model.predict(x=None, y=y))
    rssy = np.sum((yscaled - ypred) ** 2)
    rssx = np.sum((xscaled - xpred) ** 2)
    ssx_comp.append(rssx)
    ssy_comp.append(rssy)

cumulative_fit = {
    'SSX': SSX,
    'SSY': SSY,
    'SSXcomp': np.array(ssx_comp),
    'SSYcomp': np.array(ssy_comp)
}
return cumulative_fit
def _cummulativefit(self, x, y)
Measure the cumulative Regression sum of Squares for each individual component. :param x: Data matrix to fit the PLS model. :type x: numpy.ndarray, shape [n_samples, n_features] :param y: Data matrix to fit the PLS model. :type y: numpy.ndarray, shape [n_samples, n_features] :return: dictionary object containing the total Regression Sum of Squares and the Sum of Squares per components, for both the X and Y data blocks. :rtype: dict
2.30857
2.233251
1.033726
plt.figure()
lev = self.leverages()
plt.xlabel('Sample Index')
plt.ylabel('Leverage')
plt.bar(left=range(lev.size), height=lev)
plt.hlines(y=1 / lev.size, xmin=0, xmax=lev.size, colors='r', linestyles='--')
plt.show()
return None
def plot_leverages(self)
Leverage (h) per observation, with a red line plotted at y = 1/(number of samples), the expected average leverage. :return: Plot with observation leverages (h)
3.150679
2.869222
1.098095
# if we are fitting on 1D arrays, scale might be a scalar
if numpy.isscalar(scale):
    if scale == .0:
        scale = 1.
    return scale
elif isinstance(scale, numpy.ndarray):
    if copy:
        # New array to avoid side-effects
        scale = scale.copy()
    scale[scale == 0.0] = 1.0
    return scale
def _handle_zeros_in_scale(scale, copy=True)
Makes sure that whenever scale is zero, we handle it correctly. This happens in most scalers when we have constant features.
3.64846
3.4364
1.06171
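A runnable demo of the zero-scale guard above: constant features get a scale of 1.0, so the later division is a no-op for them.

import numpy

def _handle_zeros_in_scale(scale, copy=True):
    if numpy.isscalar(scale):
        return 1.0 if scale == 0.0 else scale
    if isinstance(scale, numpy.ndarray):
        if copy:
            scale = scale.copy()   # new array to avoid side-effects
        scale[scale == 0.0] = 1.0
        return scale

print(_handle_zeros_in_scale(numpy.array([2.0, 0.0, 0.5])))  # [2.  1.  0.5]
print(_handle_zeros_in_scale(0.0))                           # 1.0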
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
    del self.scale_
    del self.n_samples_seen_
    del self.mean_
    del self.var_
def _reset(self)
Reset internal data-dependent state of the scaler, if necessary. __init__ parameters are not touched.
7.073109
4.664693
1.516307
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def fit(self, X, y=None)
Compute the mean and standard deviation from a dataset to use in future scaling operations. :param X: Data matrix to scale. :type X: numpy.ndarray, shape [n_samples, n_features] :param y: Passthrough for Scikit-learn ``Pipeline`` compatibility. :type y: None :return: Fitted object. :rtype: pyChemometrics.ChemometricsScaler
5.817584
8.295847
0.701265
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES)

# Even in the case of `with_mean=False`, we update the mean anyway
# This is needed for the incremental computation of the var
# See incr_mean_variance_axis and _incremental_mean_variance_axis

if sparse.issparse(X):
    if self.with_mean:
        raise ValueError(
            "Cannot center sparse matrices: pass `with_mean=False` "
            "instead. See docstring for motivation and alternatives.")
    if self.with_std:
        # First pass
        if not hasattr(self, 'n_samples_seen_'):
            self.mean_, self.var_ = mean_variance_axis(X, axis=0)
            self.n_samples_seen_ = X.shape[0]
        # Next passes
        else:
            self.mean_, self.var_, self.n_samples_seen_ = \
                incr_mean_variance_axis(X, axis=0,
                                        last_mean=self.mean_,
                                        last_var=self.var_,
                                        last_n=self.n_samples_seen_)
    else:
        self.mean_ = None
        self.var_ = None
else:
    # First pass
    if not hasattr(self, 'n_samples_seen_'):
        self.mean_ = .0
        self.n_samples_seen_ = 0
        if self.with_std:
            self.var_ = .0
        else:
            self.var_ = None

    self.mean_, self.var_, self.n_samples_seen_ = \
        _incremental_mean_and_var(X, self.mean_, self.var_,
                                  self.n_samples_seen_)

if self.with_std:
    self.scale_ = _handle_zeros_in_scale(numpy.sqrt(self.var_)) ** self.scale_power
else:
    self.scale_ = None

return self
def partial_fit(self, X, y=None)
Performs online computation of mean and standard deviation on X for later scaling. All of X is processed as a single batch. This is intended for cases when `fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. The algorithm for incremental mean and std is given in Equation 1.5a,b in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms for computing the sample variance: Analysis and recommendations." The American Statistician 37.3 (1983): 242-247 :param X: Data matrix to scale. :type X: numpy.ndarray, shape [n_samples, n_features] :param y: Passthrough for Scikit-learn ``Pipeline`` compatibility. :type y: None :return: Fitted object. :rtype: pyChemometrics.ChemometricsScaler
2.317875
2.362975
0.980914
check_is_fitted(self, 'scale_')

copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy, warn_on_dtype=True,
                estimator=self, dtype=FLOAT_DTYPES)

if sparse.issparse(X):
    if self.with_mean:
        raise ValueError(
            "Cannot center sparse matrices: pass `with_mean=False` "
            "instead. See docstring for motivation and alternatives.")
    if self.scale_ is not None:
        inplace_column_scale(X, 1 / self.scale_)
else:
    if self.with_mean:
        X -= self.mean_
    if self.with_std:
        X /= self.scale_
return X
def transform(self, X, y=None, copy=None)
Perform standardization by centering and scaling using the parameters. :param X: Data matrix to scale. :type X: numpy.ndarray, shape [n_samples, n_features] :param y: Passthrough for scikit-learn ``Pipeline`` compatibility. :type y: None :param bool copy: Copy the X matrix. :return: Scaled version of the X data matrix. :rtype: numpy.ndarray, shape [n_samples, n_features]
2.231431
2.273724
0.981399
check_is_fitted(self, 'scale_')

copy = copy if copy is not None else self.copy
if sparse.issparse(X):
    if self.with_mean:
        raise ValueError(
            "Cannot uncenter sparse matrices: pass `with_mean=False` "
            "instead. See docstring for motivation and alternatives.")
    if not sparse.isspmatrix_csr(X):
        X = X.tocsr()
        copy = False
    if copy:
        X = X.copy()
    if self.scale_ is not None:
        inplace_column_scale(X, self.scale_)
else:
    X = numpy.asarray(X)
    if copy:
        X = X.copy()
    if self.with_std:
        X *= self.scale_
    if self.with_mean:
        X += self.mean_
return X
def inverse_transform(self, X, copy=None)
Scale back the data to the original representation.

:param X: Scaled data matrix.
:type X: numpy.ndarray, shape [n_samples, n_features]
:param bool copy: Copy the X data matrix.
:return: X data matrix with the scaling operation reverted.
:rtype: numpy.ndarray, shape [n_samples, n_features]
2.609232
2.722942
0.95824
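Taken together, fit, transform, and inverse_transform should round-trip dense data exactly. A minimal sketch, assuming the constructor exposes `scale_power` (1 for unit-variance autoscaling, 0.5 for Pareto scaling, 0 for centring only):

import numpy as np

X = np.random.default_rng(1).normal(loc=5.0, scale=2.0, size=(20, 4))
scaler = ChemometricsScaler(scale_power=0.5)  # hypothetical constructor call
Xs = scaler.fit(X).transform(X)
assert np.allclose(scaler.inverse_transform(Xs), X)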
# TODO check with matlab and simca
if block == 'X':
    scores = self.scores_t
elif block == 'Y':
    scores = self.scores_u
else:
    raise ValueError('block option must be either X or Y')
# Hat matrix H = S (S'S)^-1 S'; the leverages sit on its diagonal
return np.dot(scores, np.dot(np.linalg.inv(np.dot(scores.T, scores)), scores.T))
def leverages(self, block='X')
Calculate the leverages for each observation.

:param str block: Which score block to use, 'X' (scores T) or 'Y' (scores U).
:return: Hat (projection) matrix whose diagonal contains the observation leverages.
:rtype: numpy.ndarray
3.042248
2.979678
1.020999
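The method computes the hat matrix H = S (S'S)^-1 S' of the chosen score block; the per-observation leverages are its diagonal. A standalone numpy check with a made-up scores matrix:

import numpy as np

T = np.random.default_rng(2).normal(size=(10, 3))  # hypothetical scores block
H = T @ np.linalg.inv(T.T @ T) @ T.T
lev = np.diag(H)
assert np.isclose(lev.sum(), 3)  # trace(H) equals the number of components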
if isinstance(obj, list):
    return [_recurse_replace(x, key, new_key, sub, remove) for x in obj]
if isinstance(obj, dict):
    for k, v in list(obj.items()):
        if k == key and v in sub:
            obj[new_key] = sub[v]
            if remove:
                del obj[key]
        else:
            obj[k] = _recurse_replace(v, key, new_key, sub, remove)
# Scalars pass through unchanged
return obj
def _recurse_replace(obj, key, new_key, sub, remove)
Recursive helper for `replace_by_key`
1.571859
1.585512
0.991389
if not new_key:
    new_key = key
    remove = False
orig = pif.as_dictionary()
new = _recurse_replace(orig, to_camel_case(key), to_camel_case(new_key), subs, remove)
return pypif.pif.loads(json.dumps(new))
def replace_by_key(pif, key, subs, new_key=None, remove=False)
Replace values that match a key.

Deeply traverses the pif object, looking for `key` and replacing values in
accordance with `subs`. If `new_key` is set, the replaced values are assigned
to that key. If `remove` is `True`, the old `key` pairs are removed.
5.831777
6.064272
0.961661
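Because the recursive helper operates on plain dicts and lists, its behaviour is easy to demonstrate without a pif object (the example data here is invented):

doc = {"units": "degC", "nested": [{"units": "K"}, {"units": "degC"}]}
out = _recurse_replace(doc, "units", "units_si", {"degC": "celsius"}, remove=True)
# out == {"nested": [{"units": "K"}, {"units_si": "celsius"}], "units_si": "celsius"}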
warn("This method has been deprecated in favor of get_property_by_name")
return next((x for x in pif.properties if x.name == name), None)
def get_propety_by_name(pif, name)
Get a property by name (deprecated; use ``get_property_by_name`` instead)
5.160807
4.582904
1.1261
return next((x for x in pif.properties if x.name == name), None)
def get_property_by_name(pif, name)
Get a property by name
4.481555
4.237411
1.057616
if key in ambig:
    return
if key in unambig and value != unambig[key]:
    ambig.add(key)
    del unambig[key]
    return
unambig[key] = value
return
def new_keypair(key, value, ambig, unambig)
Check a new keypair against the existing unambiguous dict.

:param key: key of the pair
:param value: value of the pair
:param ambig: set of keys with ambiguous decodings
:param unambig: dict of keys with unambiguous decodings
:return: None
3.184354
3.128762
1.017768
for k in child_ambig:
    ambig.add(k)
    if k in unambig:
        del unambig[k]
for k, v in child_unambig.items():
    new_keypair(k, v, ambig, unambig)
return
def add_child_ambig(child_ambig, child_unambig, ambig, unambig)
Add information about decodings of a child object.

:param child_ambig: ambiguous key set from the child
:param child_unambig: unambiguous dict from the child
:param ambig: set of keys storing ambiguous decodings
:param unambig: dictionary storing unambiguous decodings
:return: None
3.021266
3.355937
0.900275
if 'CITRINATION_API_KEY' not in environ:
    raise ValueError("'CITRINATION_API_KEY' is not set as an environment variable")
if not site:
    site = environ.get("CITRINATION_SITE", "https://citrination.com")
return CitrinationClient(environ['CITRINATION_API_KEY'], site)
def get_client(site=None)
Get a citrination client
2.767723
2.163822
1.27909
if not uids:
    uids = [str(hash(dumps(x))) for x in pifs]
for pif, uid in zip(pifs, uids):
    pif.uid = uid
return pifs
def set_uids(pifs, uids=None)
Set the uids in a PIF, explicitly if the list of UIDs is passed in.

:param pifs: pifs to set UIDs in
:param uids: UIDs to set; defaults to a hash of each object
:return: the pifs with uids set
2.89048
3.184905
0.907556
return "{site}/datasets/{dataset}/version/{version}/pif/{uid}".format( uid=pif.uid, version=version, dataset=dataset, site=site )
def get_url(pif, dataset, version=1, site="https://citrination.com")
Construct the URL of a PIF on a site.

:param pif: to construct URL for
:param dataset: the pif will belong to
:param version: of the PIF (default: 1)
:param site: for the dataset (default: https://citrination.com)
:return: the URL as a string
4.47513
4.438865
1.00817
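The two helpers compose naturally: assign stable uids first, then build links. A sketch, where `my_pifs` is an existing list of pif objects and the dataset id 1234 is a placeholder:

my_pifs = set_uids(my_pifs)  # hash-based uids, since none are supplied
urls = [get_url(p, dataset=1234) for p in my_pifs]
# e.g. "https://citrination.com/datasets/1234/version/1/pif/<uid>"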
if not isinstance(pif, ChemicalSystem):
    return pif
if not pif.chemical_formula:
    return pif

expanded_formula_no_special_char = _expand_formula_(pif.chemical_formula)
element_array = _create_emprical_compositional_array_(expanded_formula_no_special_char)
appended_e_array = _add_atomic_percents_(element_array)

for e in appended_e_array:
    # Check if a Composition element describing that element already exists.
    in_pif = _get_element_in_pif_composition_(pif, e["symbol"])
    if in_pif:
        # If it exists, remove the old Composition object and insert a new
        # one with the ideal atomic percent added.
        comp = in_pif[0]
        pif.composition.pop(in_pif[1])
        comp.idealAtomicPercent = e["atomic_percent"]
        pif.composition.append(comp)
    else:
        # Otherwise, create a new Composition object with the element and
        # ideal atomic percent.
        comp = Composition()
        comp.element = e["symbol"]
        comp.idealAtomicPercent = e["atomic_percent"]
        pif.composition.append(comp)
return pif
def calculate_ideal_atomic_percent(pif)
Calculates ideal atomic percents from a chemical formula string from a pif.
Returns an appended pif with composition elements modified or added.

:param pif: a ChemicalSystem pif
:return: modified pif object
3.729179
3.359209
1.110136
if not isinstance(pif, ChemicalSystem):
    return pif
if not pif.chemical_formula:
    return pif

expanded_formula_no_special_char = _expand_formula_(pif.chemical_formula)
element_array = _create_emprical_compositional_array_(expanded_formula_no_special_char)
appended_e_array = _add_ideal_atomic_weights_(element_array)
a_array_with_pcts = _add_ideal_weight_percent_(appended_e_array)

for e in a_array_with_pcts:
    # Check if a Composition element describing that element already exists.
    in_pif = _get_element_in_pif_composition_(pif, e["symbol"])
    if in_pif:
        # If it exists, remove the old Composition object and insert a new
        # one with the ideal weight percent added.
        comp = in_pif[0]
        pif.composition.pop(in_pif[1])
        comp.idealWeightPercent = e["weight_percent"]
        pif.composition.append(comp)
    else:
        # Otherwise, create a new Composition object with the element and
        # ideal weight percent.
        comp = Composition()
        comp.element = e["symbol"]
        comp.idealWeightPercent = e["weight_percent"]
        pif.composition.append(comp)
return pif
def calculate_ideal_weight_percent(pif)
Calculates ideal weight percents from a chemical formula string from a pif.
Returns an appended pif with composition elements modified or added.

:param pif: a ChemicalSystem pif
:return: modified pif object
3.779255
3.427906
1.102497
formula_string = re.sub(r'[^A-Za-z0-9\(\)\[\]\·\.]+', '', formula_string)
hydrate_pos = formula_string.find('·')
if hydrate_pos >= 0:
    formula_string = _expand_hydrate_(hydrate_pos, formula_string)
search_result = re.search(r'(?:[\(\[]([A-Za-z0-9]+)[\)\]](\d*))', formula_string)
if search_result is None:
    return formula_string
this_start = search_result.start()
this_end = search_result.end()
this_string = search_result.group()
this_expansion_array = re.findall(r'(?:[\(\[]([A-Za-z0-9]+)[\)\]](\d*))', this_string)
for a in this_expansion_array:
    if a[1] == "":
        a = (a[0], 1)
    parenth_expanded = ""
    multiplier = float(a[1])
    element_array = re.findall('[A-Z][^A-Z]*', a[0])
    for e in element_array:
        occurance_array = re.findall('[0-9][^0-9]*', e)
        if len(occurance_array) == 0:
            occurance_array.append(1)
        for o in occurance_array:
            symbol = re.findall('[A-Z][a-z]*', e)
            total_num = float(o) * multiplier
            if total_num.is_integer():
                total_num = int(total_num)
            total_str = str(total_num)
            if total_str == "1":
                total_str = ""
            new_string = symbol[0] + total_str
            parenth_expanded += new_string
    formula_string = formula_string[0:this_start] + parenth_expanded + formula_string[this_end:]
return _expand_formula_(formula_string)
def _expand_formula_(formula_string)
Accounts for the many ways a user may write a formula string, and returns an
expanded chemical formula string.

Assumptions:
- The chemical formula string supplied is well-written and has no hanging parentheses
- The number of repeats occurs after the elemental symbol or the ) or ] character,
  EXCEPT in the case of a hydrate, where it is assumed to be in front of the first element
- All hydrates explicitly use the · symbol
- Only (, ), [, ], ., and · are "important" symbols for interpreting the string
- IONS ARE NOT HANDLED

:param formula_string: a messy chemical formula string
:return: a non-empirical but expanded formula string
2.459179
2.37036
1.037471
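Two hand-traced expansions of the recursion above (outputs inferred from the regexes, worth verifying against the real implementation):

_expand_formula_("Ca(OH)2")     # -> "CaO2H2"
_expand_formula_("CuSO4·5H2O")  # -> "CuSO4H10O5"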
hydrate = formula_string[hydrate_pos + 1:]
hydrate_string = ""
multiplier = float(re.search(r'^[\d\.]+', hydrate).group())
element_array = re.findall('[A-Z][^A-Z]*', hydrate)
for e in element_array:
    occurance_array = re.findall('[0-9][^0-9]*', e)
    if len(occurance_array) == 0:
        occurance_array.append(1)
    for o in occurance_array:
        symbol = re.findall('[A-Z][a-z]*', e)
        total_num = float(o) * multiplier
        if total_num.is_integer():
            total_num = int(total_num)
        total_str = str(total_num)
        if total_str == "1":
            total_str = ""
        new_string = symbol[0] + total_str
        hydrate_string += new_string
return formula_string[:hydrate_pos] + hydrate_string
def _expand_hydrate_(hydrate_pos, formula_string)
Handles the expansion of hydrate portions of a chemical formula, expanding the
coefficient out to all elements of the hydrate.

:param hydrate_pos: the index in the formula_string of the · symbol
:param formula_string: the unexpanded formula string
:return: a formula string without the · character, with the hydrate portion expanded out
2.377455
2.429917
0.97841
element_array = re.findall('[A-Z][^A-Z]*', expanded_chemical_formaula_string)
split_element_array = []
for s in element_array:
    m = re.match(r"([a-zA-Z]+)([0-9\.]*)", s, re.I)
    if m:
        items = m.groups()
        if items[1] == "":
            items = (items[0], 1)
        this_e = {"symbol": items[0], "occurances": float(items[1])}
        split_element_array.append(this_e)
return split_element_array
def _create_compositional_array_(expanded_chemical_formaula_string)
Splits an expanded chemical formula string into an array of dictionaries
containing information about each element.

:param expanded_chemical_formaula_string: a clean (not necessarily empirical, but
    without any special characters) chemical formula string, as returned by _expand_formula_()
:return: an array of dictionaries
2.978487
3.020912
0.985956
condensed_array = []
for e in elemental_array:
    exists = False
    for k in condensed_array:
        if k["symbol"] == e["symbol"]:
            exists = True
            k["occurances"] += e["occurances"]
            break
    if not exists:
        condensed_array.append(e)
return condensed_array
def _consolidate_elemental_array_(elemental_array)
Accounts for non-empirical chemical formulas by taking in the compositional array
generated by _create_compositional_array_() and returning a consolidated array of
dictionaries with no repeating elements.

:param elemental_array: an elemental array generated from _create_compositional_array_()
:return: an array of element dictionaries
2.311465
2.328144
0.992836
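For a non-empirical formula such as CH3COOH, the compositional array contains repeated symbols, which the consolidation step merges by summing occurrences:

arr = _create_compositional_array_("CH3COOH")   # C, H3, C, O, O, H as separate entries
_consolidate_elemental_array_(arr)
# -> [{'symbol': 'C', 'occurances': 2.0},
#     {'symbol': 'H', 'occurances': 4.0},
#     {'symbol': 'O', 'occurances': 2.0}]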
for a in elemental_array:
    this_atomic_weight = elements_data[a["symbol"]]["atomic_weight"]
    a["weight"] = a["occurances"] * this_atomic_weight
return elemental_array
def _add_ideal_atomic_weights_(elemental_array)
Uses elements.json to find the molar mass of the element in question, and then
multiplies that by the occurrences of the element. Adds the "weight" property to
each of the dictionaries in elemental_array.

:param elemental_array: an array of dictionaries containing information about the elements in the system
:return: the appended elemental_array
4.359716
3.570675
1.220978
t_mass = _calculate_total_mass_(elemental_array)
for a in elemental_array:
    a["weight_percent"] = a["weight"] / t_mass * 100
return elemental_array
def _add_ideal_weight_percent_(elemental_array)
Adds the "weight_percent" property to each of the dictionaries in elemental_array :param elemental_array: an array of dictionaries containing information about the elements in the system :return: the appended elemental_array
3.056474
3.286702
0.929952
n_atoms = _calculate_n_atoms_(elemental_array)
for e in elemental_array:
    e["atomic_percent"] = e["occurances"] / n_atoms * 100
return elemental_array
def _add_atomic_percents_(elemental_array)
Adds ideal atomic percents to an empirical compositional element array generated
using _create_emprical_compositional_array_().

:param elemental_array: an array of dictionaries containing information about the elements in the system
:return: the elemental_array with the atomic percent of each element added
3.186079
3.257197
0.978166
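A worked example for water: H2O contains three atoms, so the ideal atomic percents are H ≈ 66.7 % and O ≈ 33.3 %, while by mass (H ≈ 1.008 g/mol, O ≈ 15.999 g/mol) the ideal weight percents come out near H ≈ 11.2 % and O ≈ 88.8 %:

arr = _create_compositional_array_("H2O")
_add_atomic_percents_(arr)                                    # H: 66.67, O: 33.33
_add_ideal_weight_percent_(_add_ideal_atomic_weights_(arr))   # H: 11.19, O: 88.81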
if pif.composition is None:
    pif.composition = []
for i, c in enumerate(pif.composition):
    if c.element == elemental_symbol or \
            c.element.lower() == elements_data[elemental_symbol]["name"].lower():
        return [c, i]
return False
def _get_element_in_pif_composition_(pif, elemental_symbol)
If the element in question is in the composition array of the pif, returns that
Composition object and its position in the composition array; otherwise returns False.

:param pif: ChemicalSystem pif in question
:param elemental_symbol: string of the atomic symbol of the element in question
:return: False if not found in the composition, otherwise the Composition object
    along with its index in the composition array of the pif
4.308942
3.5917
1.199694
name = Name()
if "," in full_name:
    toks = full_name.split(",")
    name.family = toks[0]
    name.given = ",".join(toks[1:]).strip()
else:
    toks = full_name.split()
    name.given = toks[0]
    name.family = " ".join(toks[1:]).strip()
return name
def parse_name_string(full_name)
Parse a full name into a Name object.

:param full_name: e.g. "John Smith" or "Smith, John"
:return: Name object
2.01458
2.029478
0.99266
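Both orderings resolve to the same fields:

parse_name_string("John Smith").family   # -> "Smith"
parse_name_string("Smith, John").family  # -> "Smith"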
name = Name()
if "creatorName" in creator:
    name = parse_name_string(creator["creatorName"])
if "familyName" in creator:
    name.family = creator["familyName"]
if "givenName" in creator:
    name.given = creator["givenName"]
person = Person(name=name, tags=creator.get("affiliations"))
return person
def creator_to_person(creator)
Parse the creator block in datacite into a Person.

:param creator: block in datacite format
:return: Person
2.960871
2.965219
0.998533
ref = Reference()
if dc.get('identifier', {}).get('identifierType') == "DOI":
    ref.doi = dc.get('identifier', {}).get('identifier')
ref.title = dc.get('title')
ref.publisher = dc.get('publisher')
ref.year = dc.get('publicationYear')
ref.authors = [creator_to_person(x).name for x in dc.get('creators', [])] or None
return ref
def datacite_to_pif_reference(dc)
Parse a top-level datacite dictionary into a Reference.

:param dc: dictionary containing datacite metadata
:return: Reference corresponding to that datacite entry
2.694265
2.774379
0.971123
if not query and not dataset_id:
    raise ValueError("Either query or dataset_id must be specified")
if query and dataset_id:
    raise ValueError("Both query and dataset_id were specified; pick one or the other.")
if not query:
    query = PifSystemReturningQuery(
        query=DataQuery(
            dataset=DatasetQuery(
                id=Filter(equal=dataset_id)
            )
        ),
        size=10000  # Don't pull down all the results by default
    )

client = get_client()
if not mdf_acl:
    raise ValueError('Access controls (mdf_acl) must be specified. Use ["public"] for public access')

pif_result = client.pif_search(query)
if len(pif_result.hits) == 0:
    return []

example_uid = pif_result.hits[0].system.uid
dataset_query = DatasetReturningQuery(
    query=DataQuery(
        system=PifSystemQuery(
            uid=Filter(equal=example_uid)
        )
    ),
    size=1  # we only expect one dataset to hit
)
dataset_result = client.dataset_search(dataset_query)

records = []
for hit in pif_result.hits:
    records.append(pif_to_mdf_record(hit.system, dataset_result.hits[0], mdf_acl))
return records
def query_to_mdf_records(query=None, dataset_id=None, mdf_acl=None)
Evaluate a query and return a list of MDF records.

If a dataset_id is specified but there is no query, a simple whole-dataset query
is formed for the user.
3.497529
3.545161
0.986564
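A typical call, assuming CITRINATION_API_KEY is exported and using a placeholder dataset id:

records = query_to_mdf_records(dataset_id="150001", mdf_acl=["public"])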
res = {}
res["mdf"] = _to_meta_data(pif_obj, dataset_hit, mdf_acl)
res[res["mdf"]["source_name"]] = _to_user_defined(pif_obj)
return dumps(res)
def pif_to_mdf_record(pif_obj, dataset_hit, mdf_acl)
Convert a PIF into partial MDF record
4.83288
4.696744
1.028985
pif = pif_obj.as_dictionary()
dataset = dataset_hit.as_dictionary()
mdf = {}

try:
    if pif.get("names"):
        mdf["title"] = pif["names"][0]
    else:
        mdf["title"] = "Citrine PIF " + str(pif["uid"])

    if pif.get("chemicalFormula"):
        mdf["composition"] = pif["chemicalFormula"]
    elif pif.get("composition"):
        mdf["composition"] = ''.join([comp["element"] for comp in pif["composition"] if comp["element"]])
        if not mdf["composition"]:
            mdf.pop("composition")

    mdf["acl"] = mdf_acl
    mdf["source_name"] = _construct_new_key(dataset["name"])

    if pif.get("contacts"):
        mdf["data_contact"] = []
        for contact in pif["contacts"]:
            data_c = {
                "given_name": contact["name"]["given"],    # REQ
                "family_name": contact["name"]["family"]   # REQ
            }
            if contact.get("email"):
                data_c["email"] = contact.get("email", "")
            if contact.get("orcid"):
                data_c["orcid"] = contact.get("orcid", "")
            mdf["data_contact"].append(data_c)
        if not mdf["data_contact"]:
            mdf.pop("data_contact")

    mdf["data_contributor"] = [{}]
    if "owner" in dataset:
        name = dataset["owner"].split()
        contributor = {
            "given_name": name[0],
            "family_name": name[1],
            "email": dataset["email"]
        }
        mdf["data_contributor"] = [contributor]

    mdf["links"] = {
        "landing_page": "https://citrination.com/datasets/{}".format(dataset["id"]),
        "publication": []
    }

    if pif.get("references"):
        mdf["author"] = []
        mdf["citation"] = []
        for ref in pif["references"]:
            if ref.get("doi"):
                mdf["citation"].append(ref["doi"])  # TODO: Make actual citation
                mdf["links"]["publication"].append(ref["doi"])
            if ref.get("authors"):
                for author in ref["authors"]:
                    if author.get("given") and author.get("family"):
                        mdf["author"].append({
                            "given_name": author["given"],
                            "family_name": author["family"]
                        })
        # Remove fields if blank
        if not mdf["author"]:
            mdf.pop("author")
        if not mdf["citation"]:
            mdf.pop("citation")
        if not mdf["links"]["publication"]:
            mdf["links"].pop("publication")

    if pif.get("licenses", [{}])[0].get("url"):
        mdf["license"] = pif["licenses"][0]["url"]
    if pif.get("tags"):
        mdf["tags"] = pif["tags"]

# If required MDF metadata is missing from the PIF, abort
except KeyError as e:
    print("Error: Required MDF metadata", str(e), "not found in PIF", pif["uid"])
    return None

return mdf
def _to_meta_data(pif_obj, dataset_hit, mdf_acl)
Convert the meta-data from the PIF into MDF
2.531132
2.468193
1.0255
res = {}

# make a read view to flatten the hierarchy
rv = ReadView(pif_obj)

# Iterate over the keys in the read view
for k in rv.keys():
    name, value = _extract_key_value(rv[k].raw)
    # add any objects that can be extracted
    if name and value is not None:
        res[name] = value

# Grab interesting values not in the ReadView
pif = pif_obj.as_dictionary()
elements = {}
if pif.get("composition"):
    for comp in pif["composition"]:
        if comp.get("actualAtomicPercent"):
            elements[comp["element"]] = float(comp["actualAtomicPercent"]["value"])
        elif comp.get("actualWeightPercent"):
            elements[comp["element"]] = float(comp["actualWeightPercent"]["value"])
    if elements:
        res["elemental_percent"] = elements
elif pif.get("chemicalFormula"):
    symbol = ""
    num = ""
    # Chemical formulae are comprised of letters, numbers, and potentially
    # characters we don't care about
    for char in pif["chemicalFormula"]:
        # Uppercase char indicates beginning of new symbol
        if char.isupper():
            # If there is already a symbol in holding, process it
            if symbol:
                try:
                    elements[symbol] = int(num)  # If num is a float, raises ValueError
                except ValueError:
                    elements[symbol] = float(num) if num else 1
                symbol = ""
                num = ""
            symbol += char
        # Lowercase chars or digits are continuations of a symbol
        elif char.islower():
            symbol += char
        elif char.isdigit():
            num += char
        elif char == ".":
            num += char
        # All other chars are not useful
    # Flush the final symbol once the string ends
    if symbol:
        try:
            elements[symbol] = int(num)
        except ValueError:
            elements[symbol] = float(num) if num else 1
    if elements:
        res["elemental_proportion"] = elements

return res
def _to_user_defined(pif_obj)
Read the systems in the PIF to populate the user-defined portion
4.846842
4.812161
1.007207
to_replace = ["/", "\\", "*", "^", "#", " ", "\n", "\t", ",", ".", ")", "(", "'", "`", "-"]
to_remove = ["$", "{", "}"]
cat = name
if units:
    cat = "_".join([name, units])
for c in to_replace:
    cat = cat.replace(c, "_")
for c in to_remove:
    cat = cat.replace(c, "")
cat = re.sub('_+', '_', cat)
return cat
def _construct_new_key(name, units=None)
Construct an MDF safe key from the name and units
3.995826
3.827089
1.04409
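Examples of the sanitisation (values inferred from the replacement tables above):

_construct_new_key("Band gap", "eV")   # -> "Band_gap_eV"
_construct_new_key("d-spacing")        # -> "d_spacing"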
key = None
value = None

# Parse a Value object, which includes Properties
if isinstance(obj, Value):
    key = _construct_new_key(obj.name, obj.units)
    value = []
    if obj.scalars:
        value = [(val.value if isinstance(val, Scalar) else val) for val in obj.scalars]
    elif obj.vectors and len(obj.vectors) == 1:
        value = [(val.value if isinstance(val, Scalar) else val) for val in obj.vectors[0]]
    if len(value) == 1:
        value = value[0]
    elif len(value) == 0:
        value = None

# If there is a process step, pull out its name as the value
# TODO: resolve duplicates
if isinstance(obj, ProcessStep):
    key = "Processing"
    value = obj.name

return key, value
def _extract_key_value(obj)
Extract the value from the object and make a descriptive key
4.150035
4.095294
1.013367
from polysh.control_commands_helpers import handle_control_command
data = the_stdin_thread.input_buffer.get()
remote_dispatcher.log(b'> ' + data)
if data.startswith(b':'):
    try:
        handle_control_command(data[1:-1].decode())
    except UnicodeDecodeError:
        console_output(b'Could not decode command.')
    return
if data.startswith(b'!'):
    try:
        retcode = subprocess.call(data[1:], shell=True)
    except OSError as e:
        if e.errno == errno.EINTR:
            console_output(b'Child was interrupted\n')
            retcode = 0
        else:
            raise
    if retcode > 128 and retcode <= 192:
        retcode = 128 - retcode
    if retcode > 0:
        console_output('Child returned {:d}\n'.format(retcode).encode())
    elif retcode < 0:
        console_output('Child was terminated by signal {:d}\n'.format(
            -retcode).encode())
    return
for r in dispatchers.all_instances():
    try:
        r.dispatch_command(data)
    except asyncore.ExitNow as e:
        raise e
    except Exception as msg:
        console_output('{} for {}, disconnecting\n'.format(
            str(msg), r.display_name).encode())
        r.disconnect()
    else:
        if r.enabled and r.state is remote_dispatcher.STATE_IDLE:
            r.change_state(remote_dispatcher.STATE_RUNNING)
def process_input_buffer()
Send the content of the input buffer to all remote processes; this must be called in the main thread
4.095895
4.014865
1.020182
the_stdin_thread.socket_write.send(c)
while True:
    try:
        the_stdin_thread.socket_write.recv(1)
    except socket.error as e:
        if e.errno != errno.EINTR:
            raise
    else:
        break
def write_main_socket(c)
Synchronous write to the main socket, wait for ACK
3.723359
3.534795
1.053345
if cached_result is None:
    try:
        tasks = os.listdir('/proc/self/task')
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
        cached_result = os.getpid()
    else:
        tasks.remove(str(os.getpid()))
        assert len(tasks) == 1
        cached_result = int(tasks[0])
return cached_result
def get_stdin_pid(cached_result=None)
Try to get the PID of the stdin thread; otherwise return the whole process's PID
2.454861
2.40247
1.021807
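The Linux-specific trick here is that every thread of the current process shows up as a directory under /proc/self/task, so a two-thread process can identify the non-main thread by elimination:

import os
os.listdir('/proc/self/task')  # e.g. ['12345', '12346'] in a two-thread process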
dupped_stdin = os.dup(0)                      # Backup the stdin fd
assert not the_stdin_thread.interrupt_asked   # Sanity check
the_stdin_thread.interrupt_asked = True       # Not user triggered
os.lseek(tempfile_fd, 0, 0)                   # Rewind in the temp file
os.dup2(tempfile_fd, 0)                       # This will make raw_input() return
pid = get_stdin_pid()
os.kill(pid, signal.SIGWINCH)                 # Try harder to wake up raw_input()
the_stdin_thread.out_of_raw_input.wait()      # Wait for this return
the_stdin_thread.interrupt_asked = False      # Restore sanity
os.dup2(dupped_stdin, 0)                      # Restore stdin
os.close(dupped_stdin)
def interrupt_stdin_thread()
The stdin thread may be in raw_input(); get it out of there
4.975932
4.707263
1.057076