Dataset columns:

  _id               string (length 2–7)
  title             string (length 1–88)
  partition         string (3 classes)
  text              string (length 75–19.8k)
  language          string (1 class)
  meta_information  dict
q10800
Base.to_dict
train
def to_dict(self):
    """
    Convert attributes and properties to a dict, so that it can be
    serialized.
    """
    return {k: getattr(self, k) for k in filter(
        lambda k: not k.startswith('_') and k != 'to_dict', dir(self))}
python
{ "resource": "" }
q10801
_HtmlHeaderNode.to_dict
train
def to_dict(self):
    """Convert self to a dict object for serialization."""
    return {
        'level': self.level,
        'id': self.id,
        'text': self.text,
        'inner_html': self.inner_html,
        'children': [child.to_dict() for child in self.children]
    }
python
{ "resource": "" }
q10802
HtmlTocParser.toc
train
def toc(self, depth=6, lowest_level=6):
    """
    Get the table of contents of the currently fed HTML string.

    :param depth: the depth of the TOC
    :param lowest_level: the allowed lowest level of header tag
    :return: a list representing the TOC
    """
    depth = min(max(depth, 0), 6)
    depth = 6 if depth == 0 else depth
    lowest_level = min(max(lowest_level, 1), 6)
    toc = self._root.to_dict()['children']

    def traverse(curr_toc, dep, lowest_lvl, curr_depth=1):
        if curr_depth > dep:
            # clear all items of this depth and exit the recursion
            curr_toc.clear()
            return
        items_to_remove = []
        for item in curr_toc:
            if item['level'] > lowest_lvl:
                # record item with too low a header level, for removing later
                items_to_remove.append(item)
            else:
                traverse(item['children'], dep, lowest_lvl, curr_depth + 1)
        for item in items_to_remove:
            curr_toc.remove(item)

    traverse(toc, depth, lowest_level)
    return toc
python
{ "resource": "" }
q10803
HtmlTocParser.toc_html
train
def toc_html(self, depth=6, lowest_level=6):
    """
    Get the TOC of the currently fed HTML string in the form of an HTML string.

    :param depth: the depth of the TOC
    :param lowest_level: the allowed lowest level of header tag
    :return: an HTML string
    """
    toc = self.toc(depth=depth, lowest_level=lowest_level)
    if not toc:
        return ''

    def map_toc_list(toc_list):
        result = ''
        if toc_list:
            result += '<ul>\n'
            result += ''.join(
                map(lambda x: '<li>'
                              '<a href="#{}">{}</a>{}'
                              '</li>\n'.format(x['id'],
                                               x['inner_html'],
                                               map_toc_list(x['children'])),
                    toc_list)
            )
            result += '</ul>'
        return result

    return map_toc_list(toc)
python
{ "resource": "" }
q10804
HtmlTocParser._get_level
train
def _get_level(tag):
    """
    Match the header level in the given tag name, or None if it's not
    a header tag.
    """
    m = re.match(r'^h([123456])$', tag, flags=re.IGNORECASE)
    if not m:
        return None
    return int(m.group(1))
python
{ "resource": "" }
q10805
MVisionProcess.requiredGPU_MB
train
def requiredGPU_MB(self, n):
    """Required GPU memory in MBytes
    """
    from darknet.core import darknet_with_cuda
    if darknet_with_cuda():  # its using cuda
        free = getFreeGPU_MB()
        print("Yolo: requiredGPU_MB: required, free", n, free)
        if free == -1:  # could not detect ..
            return True
        return free >= n
    else:
        return True
python
{ "resource": "" }
q10806
Inspect.typename
train
def typename(self):
    '''
    get the type of val

    there are multiple places where we want to know if val is an object,
    or a string, or whatever, this method allows us to find out that
    information

    since -- 7-10-12

    val -- mixed -- the value to check

    return -- string -- the type
    '''
    t = 'DEFAULT'
    # http://docs.python.org/2/library/types.html
    # func_types = (
    #     types.FunctionType,
    #     types.BuiltinFunctionType,
    #     types.MethodType,
    #     types.UnboundMethodType,
    #     types.BuiltinFunctionType,
    #     types.BuiltinMethodType,
    #     classmethod
    # )

    if self.is_primitive():
        t = 'DEFAULT'
    elif self.is_dict():
        t = 'DICT'
    elif self.is_list():
        t = 'LIST'
    elif self.is_array():
        t = 'ARRAY'
    elif self.is_tuple():
        t = 'TUPLE'
    elif self.is_type():
        t = 'TYPE'
    elif self.is_binary():
        t = 'BINARY'
    elif self.is_str():
        t = 'STRING'
    elif self.is_exception():
        t = 'EXCEPTION'
    elif self.is_module():
        # this has to go before the object check since a module will pass
        # the object tests
        t = 'MODULE'
    elif self.is_callable():
        t = 'FUNCTION'

    # not doing this one since it can cause the class instance to do
    # unexpected things just to print it out
    #elif isinstance(val, property):
    #    # uses the @property decorator and the like
    #    t = 'PROPERTY'

    elif self.is_dict_proxy():
        # maybe we have a dict proxy?
        t = 'DICT_PROXY'
    elif self.is_generator():
        t = 'GENERATOR'
    elif self.is_set():
        t = 'SET'
    elif self.is_object():
        t = 'OBJECT'

    # elif isinstance(val, func_types) and hasattr(val, '__call__'):
    #     # this has to go after object because lots of times objects can
    #     # be classified as functions
    #     # http://stackoverflow.com/questions/624926/
    #     t = 'FUNCTION'

    elif self.is_regex():
        t = 'REGEX'
    else:
        t = 'DEFAULT'

    return t
python
{ "resource": "" }
q10807
Inspect.is_primitive
train
def is_primitive(self):
    """is the value a built-in type?"""
    if is_py2:
        return isinstance(
            self.val,
            (
                types.NoneType,
                types.BooleanType,
                types.IntType,
                types.LongType,
                types.FloatType
            )
        )
    else:
        return isinstance(
            self.val,
            (
                type(None),
                bool,
                int,
                float
            )
        )
python
{ "resource": "" }
q10808
Value._str_iterator
train
def _str_iterator(self, iterator, name_callback=None, prefix="\n",
                  left_paren='[', right_paren=']', depth=0):
    '''
    turn an iterable value into a string representation

    iterator -- iterator -- the value to be iterated through
    name_callback -- callback -- if not None, a function that will take
        the key of each iteration
    prefix -- string -- what will be prepended to the generated value
    left_paren -- string -- what will open the generated value
    right_paren -- string -- what will close the generated value
    depth -- integer -- how deep into recursion we are

    return -- string
    '''
    indent = 1 if depth > 0 else 0

    s = []
    s.append('{}{}'.format(prefix, self._add_indent(left_paren, indent)))

    s_body = []
    for k, v in iterator:
        k = k if name_callback is None else name_callback(k)
        v = Value(v, depth + 1)
        try:
            # TODO -- right here we should check some flag or something to
            # see if lists should render objects
            if k is None:
                s_body.append("{}".format(v))
            else:
                s_body.append("{}: {}".format(k, v))
        except RuntimeError as e:
            # I've never gotten this to work
            s_body.append("{}: ... Recursion error ...".format(k))
        except UnicodeError as e:
            print(v.val)
            print(type(v.val))

    s_body = ",\n".join(s_body)
    s_body = self._add_indent(s_body, indent + 1)

    s.append(s_body)
    s.append("{}".format(self._add_indent(right_paren, indent)))

    return "\n".join(s)
python
{ "resource": "" }
q10809
ObjectValue._get_src_file
train
def _get_src_file(self, val, default='Unknown'):
    '''
    return the source file path

    since -- 7-19-12

    val -- mixed -- the value whose path you want

    return -- string -- the path, or something like 'Unknown' if you
        can't find the path
    '''
    path = default
    try:
        # http://stackoverflow.com/questions/6761337/inspect-getfile-vs-inspect-getsourcefile
        # first try and get the actual source file
        source_file = inspect.getsourcefile(val)
        if not source_file:
            # get the raw file since val doesn't have a source file
            # (could be a .pyc or .so file)
            source_file = inspect.getfile(val)
        if source_file:
            path = os.path.realpath(source_file)
    except TypeError as e:
        path = default

    return path
python
{ "resource": "" }
q10810
handle_decode_replace
train
def handle_decode_replace(e):
    """this handles replacing bad characters when printing out

    http://www.programcreek.com/python/example/3643/codecs.register_error
    http://bioportal.weizmann.ac.il/course/python/PyMOTW/PyMOTW/docs/codecs/index.html
    https://pymotw.com/2/codecs/
    """
    count = e.end - e.start
    #return "." * count, e.end
    global ENCODING_REPLACE_CHAR
    return ENCODING_REPLACE_CHAR * count, e.end
python
{ "resource": "" }
q10811
MovementDetector.reset
train
def reset(self):
    """Reset analyzer state
    """
    self.prevframe = None
    self.wasmoving = False
    self.t0 = 0
    self.ismoving = False
python
{ "resource": "" }
q10812
rule
train
def rule(rules, strict_slashes=False, api_func=None, *args, **kwargs):
    """
    Add an API route to the 'api' blueprint.

    :param rules: rule string or string list
    :param strict_slashes: same as Blueprint.route, but the default value is False
    :param api_func: a function that returns a JSON serializable object or
        a Flask Response, or raises ApiException
    :param args: other args that should be passed to Blueprint.route
    :param kwargs: other kwargs that should be passed to Blueprint.route
    :return:
    """
    return url_rule(api_blueprint, rules, strict_slashes=strict_slashes,
                    view_func=json_api(api_func) if api_func else None,
                    *args, **kwargs)
python
{ "resource": "" }
q10813
GPUHandler.findXScreens
train
def findXScreens(self):
    qapp = QtCore.QCoreApplication.instance()
    if not qapp:  # QApplication has not been started
        return
    screens = qapp.screens()
    """
    let's find out which screens are virtual

    screen, siblings:

    One big virtual desktop:

    A [A, B, C]
    B [A, B, C]
    C [A, B, C]

    A & B in one xscreen, C in another:

    A [A, B]
    B [A, B]
    C [C]
    """
    virtual_screens = set()
    for screen in screens:
        # if screen has been deemed as "virtual", don't check its siblings
        if screen not in virtual_screens:
            siblings = screen.virtualSiblings()
            # remove the current screen under scrutiny from the siblings
            # list
            virtual_screens.update(set(siblings).difference(set([screen])))
            # .. the ones left over are virtual
    # print("GPUHandler: findXScreens: virtual screens", virtual_screens)
    true_screens = list(set(screens) - virtual_screens)
    # sort'em
    for screen in true_screens:
        self.true_screens.insert(screens.index(screen), screen)
    print("GPUHandler: findXScreens: true screens:", self.true_screens)
python
{ "resource": "" }
q10814
MyGui.generateMethods
train
def generateMethods(self):
    """Generate some member functions
    """
    for i in range(1, 5):
        # adds member function grid_ixi_slot(self)
        self.make_grid_slot(i, i)

    for cl in self.mvision_classes:
        self.make_mvision_slot(cl)
python
{ "resource": "" }
q10815
MyGui.QCapsulate
train
def QCapsulate(self, widget, name, blocking=False, nude=False):
    """Helper function that encapsulates QWidget into a QMainWindow
    """

    class QuickWindow(QtWidgets.QMainWindow):

        class Signals(QtCore.QObject):
            close = QtCore.Signal()
            show = QtCore.Signal()

        def __init__(self, blocking=False, parent=None, nude=False):
            super().__init__(parent)
            self.propagate = True  # send signals or not
            self.setStyleSheet(style.main_gui)
            if blocking:
                self.setWindowModality(QtCore.Qt.ApplicationModal)
            if nude:
                # http://doc.qt.io/qt-5/qt.html#WindowType-enum
                # TODO: create a widget for a proper splashscreen
                # (omitting X11 and centering manually)
                # self.setWindowFlags(QtCore.Qt.Popup)  # Qt 5.9+ : setFlags()
                # self.setWindowFlags(QtCore.Qt.SplashScreen | QtCore.Qt.WindowStaysOnTopHint)
                self.setWindowFlags(QtCore.Qt.Dialog)
            self.signals = self.Signals()

        def closeEvent(self, e):
            if self.propagate:
                self.signals.close.emit()
            e.accept()

        def showEvent(self, e):
            if self.propagate:
                self.signals.show.emit()
            e.accept()

        def setPropagate(self):
            self.propagate = True

        def unSetPropagate(self):
            self.propagate = False

    win = QuickWindow(blocking=blocking, nude=nude)
    win.setCentralWidget(widget)
    win.setLayout(QtWidgets.QHBoxLayout())
    win.setWindowTitle(name)
    return win
python
{ "resource": "" }
q10816
MyGui.make_grid_slot
train
def make_grid_slot(self, n, m):
    """Create an n x m video grid, show it and add it to the list of
    video containers
    """
    def slot_func():
        cont = container.VideoContainerNxM(
            gpu_handler=self.gpu_handler,
            filterchain_group=self.filterchain_group,
            n_dim=n,
            m_dim=m
        )
        cont.signals.closing.connect(self.rem_container_slot)
        self.containers.append(cont)

    setattr(self, "grid_%ix%i_slot" % (n, m), slot_func)
python
{ "resource": "" }
q10817
main_inject
train
def main_inject(args):
    """
    mapped to pout.inject on the command line, makes it easy to make pout
    global without having to actually import it in your python environment

    .. since:: 2018-08-13

    :param args: Namespace, the parsed CLI arguments passed into the
        application
    :returns: int, the return code of the CLI
    """
    ret = 0
    try:
        filepath = SiteCustomizeFile()
        if filepath.is_injected():
            logger.info("Pout has already been injected into {}".format(filepath))
        else:
            if filepath.inject():
                logger.info("Injected pout into {}".format(filepath))
            else:
                logger.info("Failed to inject pout into {}".format(filepath))
    except IOError as e:
        ret = 1
        logger.info(str(e))

    return ret
python
{ "resource": "" }
q10818
main_info
train
def main_info(args):
    """Just prints out info about the pout installation

    .. since:: 2018-08-20

    :param args: Namespace, the parsed CLI arguments passed into the
        application
    :returns: int, the return code of the CLI
    """
    if args.site_packages:
        logger.info(SitePackagesDir())
    else:
        logger.info("Python executable: {}".format(sys.executable))
        logger.info("Python version: {}".format(platform.python_version()))
        logger.info("Python site-packages: {}".format(SitePackagesDir()))
        logger.info("Python sitecustomize: {}".format(SiteCustomizeFile()))

        # https://stackoverflow.com/questions/4152963/get-the-name-of-current-script-with-python
        #logger.info("Pout executable: {}".format(subprocess.check_output(["which", "pout"])))
        logger.info("Pout executable: {}".format(
            os.path.abspath(os.path.expanduser(str(sys.argv[0])))
        ))
        logger.info("Pout version: {}".format(pout.__version__))

        filepath = SiteCustomizeFile()
        logger.info("Pout injected: {}".format(filepath.is_injected()))
python
{ "resource": "" }
q10819
catch
train
def catch(func, *args, **kwargs):
    """
    Call the supplied function with the supplied arguments,
    catching and returning any exception that it throws.

    Arguments:
        func: the function to run.
        *args: positional arguments to pass into the function.
        **kwargs: keyword arguments to pass into the function.

    Returns:
        If the function throws an exception, return the exception.
        If the function does not throw an exception, return None.
    """
    try:
        func(*args, **kwargs)
    except Exception as e:
        return e
python
{ "resource": "" }
q10820
time
train
def time(func, *args, **kwargs):
    """
    Call the supplied function with the supplied arguments,
    and return the total execution time as a float in seconds.

    The precision of the returned value depends on the precision of
    `time.time()` on your platform.

    Arguments:
        func: the function to run.
        *args: positional arguments to pass into the function.
        **kwargs: keyword arguments to pass into the function.

    Returns:
        Execution time of the function as a float in seconds.
    """
    start_time = time_module.time()
    func(*args, **kwargs)
    end_time = time_module.time()
    return end_time - start_time
python
{ "resource": "" }
q10821
LmomDistrMixin.lmom_fit
train
def lmom_fit(self, data=[], lmom_ratios=[]):
    """
    Fit the distribution function to the given data or given L-moments.

    :param data: Data to use in calculating the distribution parameters
    :type data: array_like
    :param lmom_ratios: L-moments (ratios) l1, l2, t3, t4, .. to use in
        calculating the distribution parameters
    :type lmom_ratios: array_like
    :returns: Distribution parameters in `scipy` order, e.g. scale, loc, shape
    :rtype: :class:`OrderedDict`
    """
    n_min = self.numargs + 2
    if len(data) > 0:
        if len(data) <= n_min:
            raise ValueError("At least {} data points must be provided.".format(n_min))
        lmom_ratios = lm.lmom_ratios(data, nmom=n_min)
    elif not lmom_ratios:
        raise Exception("Either `data` or `lmom_ratios` must be provided.")
    elif len(lmom_ratios) < n_min:
        raise ValueError("At least {} L-moments must be provided.".format(n_min))

    return self._lmom_fit(lmom_ratios)
python
{ "resource": "" }
q10822
run
train
def run():
    """
    Run all the test classes in the main module.

    Returns: exit code as an integer. The default behaviour (which may be
        overridden by plugins) is to return a 0 exit code if the test run
        succeeded, and 1 if it failed.
    """
    plugin_list = load_plugins()
    module = sys.modules['__main__']
    plugin_list.insert(0, ObjectSupplier(module))
    return run_with_plugins(plugin_list)
python
{ "resource": "" }
q10823
run_with_plugins
train
def run_with_plugins(plugin_list):
    """
    Carry out a test run with the supplied list of plugin instances.
    The plugins are expected to identify the object to run.

    Parameters:
        plugin_list: a list of plugin instances (objects which implement
            some subset of PluginInterface)

    Returns: exit code as an integer. The default behaviour (which may be
        overridden by plugins) is to return a 0 exit code if the test run
        succeeded, and 1 if it failed.
    """
    composite = core.PluginComposite(plugin_list)

    to_run = composite.get_object_to_run()
    test_run = core.TestRun(to_run, composite)
    test_run.run()

    return composite.get_exit_code()
python
{ "resource": "" }
q10824
lmom_ratios
train
def lmom_ratios(data, nmom=5):
    """
    Estimate `nmom` number of L-moments from a sample `data`.

    :param data: Sequence of (sample) data
    :type data: list or array-like sequence
    :param nmom: number of L-moments to estimate
    :type nmom: int
    :return: L-moment ratios like this: l1, l2, t3, t4, t5, .. . As in:
        items 3 and higher are L-moment ratios.
    :rtype: list
    """
    if nmom <= 5:
        return _samlmusmall(data, nmom)
    else:
        return _samlmularge(data, nmom)
python
{ "resource": "" }
q10825
POEditorAPI._apiv1_run
train
def _apiv1_run(self, action, headers=None, **kwargs):
    """
    Kept for backwards compatibility of this client.
    See "self.clear_reference_language"
    """
    warnings.warn(
        "POEditor API v1 is deprecated. Use POEditorAPI._run method to call API v2",
        DeprecationWarning,
        stacklevel=2
    )
    url = "https://poeditor.com/api/"
    payload = kwargs
    payload.update({'action': action, 'api_token': self.api_token})
    return self._make_request(url, payload, headers)
python
{ "resource": "" }
q10826
POEditorAPI.list_projects
train
def list_projects(self):
    """
    Returns the list of projects owned by user.
    """
    data = self._run(
        url_path="projects/list"
    )
    projects = data['result'].get('projects', [])
    return [self._project_formatter(item) for item in projects]
python
{ "resource": "" }
q10827
POEditorAPI.view_project_details
train
def view_project_details(self, project_id):
    """
    Returns project's details.
    """
    data = self._run(
        url_path="projects/view",
        id=project_id
    )
    return self._project_formatter(data['result']['project'])
python
{ "resource": "" }
q10828
POEditorAPI.add_language_to_project
train
def add_language_to_project(self, project_id, language_code):
    """
    Adds a new language to project
    """
    self._run(
        url_path="languages/add",
        id=project_id,
        language=language_code
    )
    return True
python
{ "resource": "" }
q10829
POEditorAPI.update_terms
train
def update_terms(self, project_id, data, fuzzy_trigger=None):
    """
    Updates project terms. Lets you change the text, context, reference,
    plural and tags.

    >>> data = [
        {
            "term": "Add new list",
            "context": "",
            "new_term": "Save list",
            "new_context": "",
            "reference": "\/projects",
            "plural": "",
            "comment": "",
            "tags": [
                "first_tag",
                "second_tag"
            ]
        },
        {
            "term": "Display list",
            "context": "",
            "new_term": "Show list",
            "new_context": ""
        }
    ]
    """
    kwargs = {}
    if fuzzy_trigger is not None:
        kwargs['fuzzy_trigger'] = fuzzy_trigger

    data = self._run(
        url_path="terms/update",
        id=project_id,
        data=json.dumps(data),
        **kwargs
    )
    return data['result']['terms']
python
{ "resource": "" }
q10830
POEditorAPI.update_terms_translations
train
def update_terms_translations(self, project_id, file_path=None,
                              language_code=None, overwrite=False,
                              sync_terms=False, tags=None,
                              fuzzy_trigger=None):
    """
    Updates terms translations

    overwrite: set it to True if you want to overwrite translations

    sync_terms: set it to True if you want to sync your terms (terms that
        are not found in the uploaded file will be deleted from the project
        and the new ones added). Ignored if updating = translations

    tags: Add tags to the project terms; available when updating terms or
        terms_translations; you can use the following keys: "all" - for all
        the imported terms, "new" - for the terms which aren't already in
        the project, "obsolete" - for the terms which are in the project
        but not in the imported file, and "overwritten_translations" - for
        the terms for which translations change

    fuzzy_trigger: set it to True to mark corresponding translations from
        the other languages as fuzzy for the updated values
    """
    return self._upload(
        project_id=project_id,
        updating=self.UPDATING_TERMS_TRANSLATIONS,
        file_path=file_path,
        language_code=language_code,
        overwrite=overwrite,
        sync_terms=sync_terms,
        tags=tags,
        fuzzy_trigger=fuzzy_trigger
    )
python
{ "resource": "" }
q10831
POEditorAPI.list_contributors
train
def list_contributors(self, project_id=None, language_code=None):
    """
    Returns the list of contributors
    """
    data = self._run(
        url_path="contributors/list",
        id=project_id,
        language=language_code
    )
    return data['result'].get('contributors', [])
python
{ "resource": "" }
q10832
POEditorAPI.remove_contributor
train
def remove_contributor(self, project_id, email, language):
    """
    Removes a contributor
    """
    self._run(
        url_path="contributors/remove",
        id=project_id,
        email=email,
        language=language
    )
    return True
python
{ "resource": "" }
q10833
parse_stations
train
def parse_stations(html):
    """
    Strips JS code, loads JSON
    """
    html = html.replace('SLs.sls=', '').replace(';SLs.showSuggestion();', '')
    html = json.loads(html)
    return html['suggestions']
python
{ "resource": "" }
q10834
parse_delay
train
def parse_delay(data):
    """
    Parse the delay
    """
    # parse data from the details view
    rsp = requests.get(data['details'])
    soup = BeautifulSoup(rsp.text, "html.parser")

    # get departure delay
    delay_departure_raw = soup.find('div', class_="routeStart") \
        .find('span', class_=["delay", "delayOnTime"])
    if delay_departure_raw:
        delay_departure = calculate_delay(data['departure'],
                                          delay_departure_raw.text)
    else:
        delay_departure = 0

    # get arrival delay
    delay_arrival_raw = soup.find('div', class_=["routeEnd", "routeEndAdditional"]) \
        .find('span', class_=["delay", "delayOnTime"])
    if delay_arrival_raw:
        delay_arrival = calculate_delay(data['arrival'],
                                        delay_arrival_raw.text)
    else:
        delay_arrival = 0

    # save the parsed data
    data['ontime'] = (delay_departure + delay_arrival == 0)
    data['delay'] = {
        'delay_departure': int(delay_departure),
        'delay_arrival': int(delay_arrival)
    }
    # TODO: this should not be hardcoded!
    data['canceled'] = False

    return data
python
{ "resource": "" }
q10835
calculate_delay
train
def calculate_delay(original, delay):
    """
    Calculate the delay
    """
    original = datetime.strptime(original, '%H:%M')
    delayed = datetime.strptime(delay, '%H:%M')
    diff = delayed - original
    return diff.total_seconds() // 60
python
{ "resource": "" }
q10836
Schiene.stations
train
def stations(self, station, limit=10):
    """
    Find stations for given queries

    Args:
        station (str): search query
        limit (int): limit number of results
    """
    query = {
        'start': 1,
        'S': station + '?',
        'REQ0JourneyStopsB': limit
    }
    rsp = requests.get('http://reiseauskunft.bahn.de/bin/ajax-getstop.exe/dn',
                       params=query)
    return parse_stations(rsp.text)
python
{ "resource": "" }
q10837
Schiene.connections
train
def connections(self, origin, destination, dt=None, only_direct=False):
    """
    Find connections between two stations

    Args:
        origin (str): origin station
        destination (str): destination station
        dt (datetime): date and time for query (defaults to now)
        only_direct (bool): only direct connections
    """
    # evaluate "now" at call time; a `dt=datetime.now()` default would be
    # frozen at import time
    if dt is None:
        dt = datetime.now()
    query = {
        'S': origin,
        'Z': destination,
        'date': dt.strftime("%d.%m.%y"),
        'time': dt.strftime("%H:%M"),
        'start': 1,
        'REQ0JourneyProduct_opt0': 1 if only_direct else 0
    }
    rsp = requests.get('http://mobile.bahn.de/bin/mobil/query.exe/dox?',
                       params=query)
    return parse_connections(rsp.text)
python
{ "resource": "" }
q10838
_scatter
train
def _scatter(sequence, n):
    """Scatters elements of ``sequence`` into ``n`` blocks."""
    chunklen = int(math.ceil(float(len(sequence)) / float(n)))
    return [sequence[i * chunklen:(i + 1) * chunklen] for i in range(n)]
python
{ "resource": "" }
q10839
SuperTaskQueue.purge
train
def purge(self):
    """Deletes all tasks in the queue."""
    try:
        return self._api.purge()
    except AttributeError:
        while True:
            lst = self.list()
            if len(lst) == 0:
                break
            for task in lst:
                self.delete(task)
            self.wait()
        return self
python
{ "resource": "" }
q10840
TaskQueue.delete
train
def delete(self, task_id):
    """Deletes a task from a TaskQueue."""
    if isinstance(task_id, RegisteredTask):
        task_id = task_id.id

    def cloud_delete(api):
        api.delete(task_id)

    if len(self._threads):
        self.put(cloud_delete)
    else:
        cloud_delete(self._api)

    return self
python
{ "resource": "" }
q10841
is_upload
train
def is_upload(action):
    """Checks if this should be a user upload

    :param action:
    :return: True if this is a file we intend to upload from the user
    """
    # the None default on getattr keeps defaults without a ``name``
    # attribute from raising AttributeError
    return 'r' in action.type._mode and (
        action.default is None
        or getattr(action.default, 'name', None)
        not in (sys.stderr.name, sys.stdout.name)
    )
python
{ "resource": "" }
q10842
ArgParseNode.to_django
train
def to_django(self):
    """
    This is a debug function to see what equivalent django models are
    being generated
    """
    exclude = {'name', 'model'}  # note: currently unused
    field_module = 'models'
    django_kwargs = {}
    if self.node_attrs['model'] == 'CharField':
        django_kwargs['max_length'] = 255
    django_kwargs['blank'] = not self.node_attrs['required']
    try:
        django_kwargs['default'] = self.node_attrs['value']
    except KeyError:
        pass
    return u'{0} = {1}.{2}({3})'.format(
        self.node_attrs['name'],
        field_module,
        self.node_attrs['model'],
        ', '.join(['{0}={1}'.format(i, v)
                   for i, v in six.iteritems(django_kwargs)]),
    )
python
{ "resource": "" }
q10843
str_dict_keys
train
def str_dict_keys(a_dict):
    """return a modified dict where all the keys that are anything but str
    get converted to str.

    E.g.

      >>> result = str_dict_keys({u'name': u'Peter', u'age': 99, 1: 2})
      >>> # can't compare whole dicts in doctests
      >>> result['name']
      u'Peter'
      >>> result['age']
      99
      >>> result[1]
      2

    The reason for this is that in Python <= 2.6.4 doing
    ``MyClass(**{u'name': u'Peter'})`` would raise a TypeError

    Note that only unicode types are converted to str types.
    The reason for that is you might have a class that looks like this::

        class Option(object):
            def __init__(self, foo=None, bar=None, **kwargs):
                ...

    And it's being used like this::

        Option(**{u'foo':1, u'bar':2, 3:4})

    Then you don't want to change that {3:4} part which becomes part of
    `**kwargs` inside the __init__ method.

    Using integers as parameter keys is a silly example but the point is
    that due to the python 2.6.4 bug only unicode keys are converted to
    str.
    """
    new_dict = {}
    for key in a_dict:
        if six.PY2 and isinstance(key, six.text_type):
            new_dict[str(key)] = a_dict[key]
        else:
            new_dict[key] = a_dict[key]
    return new_dict
python
{ "resource": "" }
q10844
str_to_boolean
train
def str_to_boolean(input_str):
    """ a conversion function for boolean
    """
    if not isinstance(input_str, six.string_types):
        raise ValueError(input_str)
    input_str = str_quote_stripper(input_str)
    return input_str.lower() in ("true", "t", "1", "y", "yes")
python
{ "resource": "" }
q10845
str_to_python_object
train
def str_to_python_object(input_str):
    """ a conversion that will import a module and class name
    """
    if not input_str:
        return None
    if six.PY3 and isinstance(input_str, six.binary_type):
        input_str = to_str(input_str)
    if not isinstance(input_str, six.string_types):
        # gosh, we didn't get a string, we can't convert anything but
        # strings, so we're going to assume that what we got is actually
        # what was wanted as the output
        return input_str
    input_str = str_quote_stripper(input_str)
    if '.' not in input_str and input_str in known_mapping_str_to_type:
        return known_mapping_str_to_type[input_str]
    parts = [x.strip() for x in input_str.split('.') if x.strip()]
    try:
        try:
            # first try as a complete module
            package = __import__(input_str)
        except ImportError:
            # it must be a class from a module
            if len(parts) == 1:
                # since it has only one part, it must be a class from __main__
                parts = ('__main__', input_str)
            package = __import__('.'.join(parts[:-1]), globals(), locals(), [])
        obj = package
        for name in parts[1:]:
            obj = getattr(obj, name)
        return obj
    except AttributeError as x:
        raise CannotConvertError("%s cannot be found" % input_str)
    except ImportError as x:
        raise CannotConvertError(str(x))
python
{ "resource": "" }
q10846
str_to_classes_in_namespaces
train
def str_to_classes_in_namespaces(
    template_for_namespace="cls%d",
    name_of_class_option='cls',
    instantiate_classes=False
):
    """take a comma delimited list of class names, convert each class name
    into an actual class as an option within a numbered namespace.  This
    function creates a closure over a new function.  That new function, in
    turn, creates a class derived from RequiredConfig.

    The inner function, 'class_list_converter', populates the
    InnerClassList with a Namespace for each of the classes in the class
    list.  In addition, it puts each class itself into the subordinate
    Namespace.  The requirement discovery mechanism of configman then reads
    the InnerClassList's required config, pulling in the namespaces and
    associated classes within.

    For example, if we have a class list like this: "Alpha, Beta", then
    this converter will add the following Namespaces and options to the
    configuration:

        "cls0" - the subordinate Namespace for Alpha
        "cls0.cls" - the option containing the class Alpha itself
        "cls1" - the subordinate Namespace for Beta
        "cls1.cls" - the option containing the class Beta itself

    Optionally, the 'class_list_converter' inner function can imbue the
    InnerClassList's subordinate namespaces with aggregates that will
    instantiate classes from the class list.  This is a convenience to the
    programmer who would otherwise have to know ahead of time what the
    namespace names were so that the classes could be instantiated within
    the context of the correct namespace.  Remember the user could
    completely change the list of classes at run time, so prediction could
    be difficult.

        "cls0" - the subordinate Namespace for Alpha
        "cls0.cls" - the option containing the class Alpha itself
        "cls0.cls_instance" - an instance of the class Alpha
        "cls1" - the subordinate Namespace for Beta
        "cls1.cls" - the option containing the class Beta itself
        "cls1.cls_instance" - an instance of the class Beta

    parameters:
        template_for_namespace - a template for the names of the namespaces
            that will contain the classes and their associated required
            config options.  The namespaces will be numbered sequentially.
            By default, they will be "cls1", "cls2", etc.
        class_option_name - the name to be used for the class option within
            the nested namespace.  By default, it will choose:
            "cls1.cls", "cls2.cls", etc.
        instantiate_classes - a boolean to determine if there should be an
            aggregator added to each namespace that instantiates each
            class.  If True, then each Namespace will contain elements for
            the class, as well as an aggregator that will instantiate the
            class.
    """
    # these are only used within this method. No need to pollute the module
    # scope with them and avoid potential circular imports
    from configman.namespace import Namespace
    from configman.required_config import RequiredConfig

    #--------------------------------------------------------------------
    def class_list_converter(class_list_str):
        """This function becomes the actual converter used by configman to
        take a string and convert it into the nested sequence of
        Namespaces, one for each class in the list.  It does this by
        creating a proxy class stuffed with its own 'required_config'
        that's dynamically generated."""
        if isinstance(class_list_str, six.string_types):
            class_list = [x.strip() for x in class_list_str.split(',')]
            if class_list == ['']:
                class_list = []
        else:
            raise TypeError('must be derivative of %s' % six.string_types)

        #==================================================================
        class InnerClassList(RequiredConfig):
            """This nested class is a proxy list for the classes.  It
            collects all the config requirements for the listed classes
            and places them each into their own Namespace.
            """
            # we're dynamically creating a class here.  The following block
            # of code is actually adding class level attributes to this
            # new class
            required_config = Namespace()  # 1st requirement for configman
            subordinate_namespace_names = []  # to help the programmer know
                                              # what Namespaces we added
            namespace_template = template_for_namespace  # save the template
                                                         # for future reference
            class_option_name = name_of_class_option  # save the class's
                                                      # option name for the
                                                      # future
            # for each class in the class list
            for namespace_index, a_class in enumerate(class_list):
                # figure out the Namespace name
                namespace_name = template_for_namespace % namespace_index
                subordinate_namespace_names.append(namespace_name)
                # create the new Namespace
                required_config[namespace_name] = Namespace()
                # add the option for the class itself
                required_config[namespace_name].add_option(
                    name_of_class_option,
                    #doc=a_class.__doc__  # not helpful if too verbose
                    default=a_class,
                    from_string_converter=class_converter
                )
                if instantiate_classes:
                    # add an aggregator to instantiate the class
                    required_config[namespace_name].add_aggregation(
                        "%s_instance" % name_of_class_option,
                        lambda c, lc, a: lc[name_of_class_option](lc)
                    )

            @classmethod
            def to_str(cls):
                """this method takes this inner class object and turns it
                back into the original string of classnames.  This is used
                primarily for the output of the 'help' option"""
                return ', '.join(
                    py_obj_to_str(v[name_of_class_option].value)
                    for v in cls.get_required_config().values()
                    if isinstance(v, Namespace)
                )

        return InnerClassList  # result of class_list_converter
    return class_list_converter
python
{ "resource": "" }
q10847
str_to_list
train
def str_to_list(
    input_str,
    item_converter=lambda x: x,
    item_separator=',',
    list_to_collection_converter=None,
):
    """ a conversion function for list
    """
    if not isinstance(input_str, six.string_types):
        raise ValueError(input_str)
    input_str = str_quote_stripper(input_str)
    result = [
        item_converter(x.strip())
        for x in input_str.split(item_separator)
        if x.strip()
    ]
    if list_to_collection_converter is not None:
        return list_to_collection_converter(result)
    return result
python
{ "resource": "" }
q10848
arbitrary_object_to_string
train
def arbitrary_object_to_string(a_thing):
    """take a python object of some sort, and convert it into a human
    readable string.  this function is used extensively to convert things
    like "subject" into "subject_key, function -> function_key, etc."""
    # is it None?
    if a_thing is None:
        return ''
    # is it already a string?
    if isinstance(a_thing, six.string_types):
        return a_thing
    if six.PY3 and isinstance(a_thing, six.binary_type):
        try:
            return a_thing.decode('utf-8')
        except UnicodeDecodeError:
            pass
    # does it have a to_str function?
    try:
        return a_thing.to_str()
    except (AttributeError, KeyError, TypeError):
        # AttributeError - no to_str function?
        # KeyError - DotDict has no to_str?
        # TypeError - problem converting
        # nope, no to_str function
        pass
    # is this a type proxy?
    try:
        return arbitrary_object_to_string(a_thing.a_type)
    except (AttributeError, KeyError, TypeError):
        # nope, no a_type property
        pass
    # is it a built in?
    try:
        return known_mapping_type_to_str[a_thing]
    except (KeyError, TypeError):
        # nope, not a builtin
        pass
    # is it something from a loaded module?
    try:
        if a_thing.__module__ not in ('__builtin__', 'builtins', 'exceptions'):
            if a_thing.__module__ == "__main__":
                module_name = (
                    sys.modules['__main__']
                    .__file__[:-3]
                    .replace('/', '.')
                    .strip('.')
                )
            else:
                module_name = a_thing.__module__
            return "%s.%s" % (module_name, a_thing.__name__)
    except AttributeError:
        # nope, not one of these
        pass
    # maybe it has a __name__ attribute?
    try:
        return a_thing.__name__
    except AttributeError:
        # nope, not one of these
        pass
    # punt and see what happens if we just cast it to string
    return str(a_thing)
python
{ "resource": "" }
q10849
ConfigObjWithIncludes._expand_files
train
def _expand_files(self, file_name, original_path, indent=""):
    """This recursive function accepts a file name, opens the file and
    then spools the contents of the file into a list, examining each line
    as it does so.  If it detects a line beginning with "+include", it
    assumes the string immediately following is a file name.  Recursing,
    the new file is opened and its contents are spooled into the
    accumulating list."""
    expanded_file_contents = []
    with open(file_name) as f:
        for a_line in f:
            match = ConfigObjWithIncludes._include_re.match(a_line)
            if match:
                include_file = match.group(2)
                include_file = os.path.join(
                    original_path,
                    include_file
                )
                new_lines = self._expand_files(
                    include_file,
                    os.path.dirname(include_file),
                    indent + match.group(1)
                )
                expanded_file_contents.extend(new_lines)
            else:
                expanded_file_contents.append(indent + a_line.rstrip())
    return expanded_file_contents
python
{ "resource": "" }
q10850
ConfigObjWithIncludes._load
train
def _load(self, infile, configspec):
    """this overrides the original ConfigObj method of the same name.  It
    runs through the input file collecting lines into a list.  When
    completed, this method submits the list of lines to the super class'
    function of the same name.  ConfigObj proceeds, completely unaware
    that its input file has been preprocessed."""
    if isinstance(infile, (six.binary_type, six.text_type)):
        infile = to_str(infile)
        original_path = os.path.dirname(infile)
        expanded_file_contents = self._expand_files(infile, original_path)
        super(ConfigObjWithIncludes, self)._load(
            expanded_file_contents,
            configspec
        )
    else:
        super(ConfigObjWithIncludes, self)._load(infile, configspec)
python
{ "resource": "" }
q10851
ValueSource.get_values
train
def get_values(self, config_manager, ignore_mismatches, obj_hook=DotDict):
    """Return a nested dictionary representing the values in the ini file.
    In the case of this ValueSource implementation, both parameters are
    dummies."""
    if self.delayed_parser_instantiation:
        try:
            app = config_manager._get_option('admin.application')
            source = "%s%s" % (app.value.app_name, file_name_extension)
            self.config_obj = configobj.ConfigObj(source)
            self.delayed_parser_instantiation = False
        except AttributeError:
            # we don't have enough information to get the ini file
            # yet.  we'll ignore the error for now
            return obj_hook()  # return empty dict of the obj_hook type
    if isinstance(self.config_obj, obj_hook):
        return self.config_obj
    return obj_hook(initializer=self.config_obj)
python
{ "resource": "" }
q10852
ValueSource._write_ini
train
def _write_ini(source_dict, namespace_name=None, level=0, indent_size=4,
               output_stream=sys.stdout):
    """this function prints the components of a configobj ini file.  It is
    recursive for outputting the nested sections of the ini file."""
    options = [
        value
        for value in source_dict.values()
        if isinstance(value, Option)
    ]
    options.sort(key=lambda x: x.name)
    indent_spacer = " " * (level * indent_size)
    for an_option in options:
        print("%s# %s" % (indent_spacer, an_option.doc),
              file=output_stream)
        option_value = to_str(an_option)
        if an_option.reference_value_from:
            print(
                '%s# see "%s.%s" for the default or override it here' % (
                    indent_spacer,
                    an_option.reference_value_from,
                    an_option.name
                ),
                file=output_stream
            )
        if an_option.likely_to_be_changed or an_option.has_changed:
            option_format = '%s%s=%s\n'
        else:
            option_format = '%s#%s=%s\n'
        if isinstance(option_value, six.string_types) and \
           ',' in option_value:
            # quote lists unless they're already quoted
            if option_value[0] not in '\'"':
                option_value = '"%s"' % option_value
        print(option_format % (indent_spacer, an_option.name, option_value),
              file=output_stream)

    next_level = level + 1
    namespaces = [
        (key, value)
        for key, value in source_dict.items()
        if isinstance(value, Namespace)
    ]
    namespaces.sort(key=ValueSource._namespace_reference_value_from_sort)
    for key, namespace in namespaces:
        next_level_spacer = " " * next_level * indent_size
        print("%s%s%s%s\n" % (indent_spacer, "[" * next_level, key,
                              "]" * next_level),
              file=output_stream)
        if namespace._doc:
            print("%s%s" % (next_level_spacer, namespace._doc),
                  file=output_stream)
        if namespace._reference_value_from:
            print("%s#+include ./common_%s.ini\n" % (next_level_spacer, key),
                  file=output_stream)

        if namespace_name:
            ValueSource._write_ini(
                source_dict=namespace,
                namespace_name="%s.%s" % (namespace_name, key),
                level=level + 1,
                indent_size=indent_size,
                output_stream=output_stream
            )
        else:
            ValueSource._write_ini(
                source_dict=namespace,
                namespace_name=key,
                level=level + 1,
                indent_size=indent_size,
                output_stream=output_stream
            )
python
{ "resource": "" }
q10853
configuration
train
def configuration(*args, **kwargs):
    """this function just instantiates a ConfigurationManager and returns
    the configuration dictionary.  It accepts all the same parameters as
    the constructor for the ConfigurationManager class."""
    try:
        config_kwargs = {'mapping_class': kwargs.pop('mapping_class')}
    except KeyError:
        config_kwargs = {}
    cm = ConfigurationManager(*args, **kwargs)
    return cm.get_config(**config_kwargs)
python
{ "resource": "" }
q10854
iteritems_breadth_first
train
def iteritems_breadth_first(a_mapping, include_dicts=False):
    """a generator that returns all the keys in a set of nested Mapping
    instances.  The keys take the form X.Y.Z"""
    subordinate_mappings = []
    for key, value in six.iteritems(a_mapping):
        if isinstance(value, collections.Mapping):
            subordinate_mappings.append((key, value))
            if include_dicts:
                yield key, value
        else:
            yield key, value
    for key, a_map in subordinate_mappings:
        for sub_key, value in iteritems_breadth_first(a_map, include_dicts):
            yield '%s.%s' % (key, sub_key), value
python
{ "resource": "" }
q10855
DotDict.keys_breadth_first
train
def keys_breadth_first(self, include_dicts=False):
    """a generator that returns all the keys in a set of nested DotDict
    instances.  The keys take the form X.Y.Z"""
    namespaces = []
    for key in self._key_order:
        if isinstance(getattr(self, key), DotDict):
            namespaces.append(key)
            if include_dicts:
                yield key
        else:
            yield key
    for a_namespace in namespaces:
        for key in self[a_namespace].keys_breadth_first(include_dicts):
            yield '%s.%s' % (a_namespace, key)
python
{ "resource": "" }
q10856
DotDict.assign
train
def assign(self, key, value):
    """an alternative method for assigning values to nested DotDict
    instances.  It accepts keys in the form of X.Y.Z.  If any nested
    DotDict instances don't yet exist, they will be created."""
    key_split = key.split('.')
    cur_dict = self
    for k in key_split[:-1]:
        try:
            cur_dict = cur_dict[k]
        except KeyError:
            cur_dict[k] = self.__class__()  # so that derived classes
                                            # remain true to type
            cur_dict = cur_dict[k]
    cur_dict[key_split[-1]] = value
python
{ "resource": "" }
q10857
DotDict.parent
train
def parent(self, key):
    """when given a key of the form X.Y.Z, this method will return the
    parent DotDict of the 'Z' key."""
    parent_key = '.'.join(key.split('.')[:-1])
    if not parent_key:
        return None
    else:
        return self[parent_key]
python
{ "resource": "" }
q10858
Option.set_default
train
def set_default(self, val, force=False):
    """this function allows a default to be set on an option that doesn't
    have one.  It is used when a base class defines an Option for use in
    derived classes but cannot predict what value would be useful to the
    derived classes.  This gives the derived classes the opportunity to
    set a logical default appropriate for the derived class' context.

    For example:

        class A(RequiredConfig):
            required_config = Namespace()
            required_config.add_option(
                'x',
                default=None
            )

        class B(A):
            A.required_config.x.set_default(68)

    parameters:
        val - the value for the default
        force - normally this function only works on Options that have
            not had a default set (default is None).  This boolean allows
            you to override an existing default.
    """
    if self.default is None or force:
        self.default = val
        self.set_value(val)
        self.has_changed = True
    else:
        raise OptionError(
            "cannot override existing default without using the 'force' "
            "option"
        )
python
{ "resource": "" }
q10859
Option.copy
train
def copy(self):
    """return a copy"""
    o = Option(
        name=self.name,
        default=self.default,
        doc=self.doc,
        from_string_converter=self.from_string_converter,
        to_string_converter=self.to_string_converter,
        value=self.value,
        short_form=self.short_form,
        exclude_from_print_conf=self.exclude_from_print_conf,
        exclude_from_dump_conf=self.exclude_from_dump_conf,
        is_argument=self.is_argument,
        likely_to_be_changed=self.likely_to_be_changed,
        not_for_definition=self.not_for_definition,
        reference_value_from=self.reference_value_from,
        secret=self.secret,
        has_changed=self.has_changed,
        foreign_data=self.foreign_data,
    )
    return o
python
{ "resource": "" }
q10860
ConfigurationManager.context
train
def context(self, mapping_class=DotDictWithAcquisition):
    """return a config as a context that calls close on every item when
    it goes out of scope"""
    config = None
    try:
        config = self.get_config(mapping_class=mapping_class)
        yield config
    finally:
        if config:
            self._walk_and_close(config)
python
{ "resource": "" }
q10861
ConfigurationManager.output_summary
train
def output_summary(self, output_stream=sys.stdout):
    """outputs a usage tip and the list of acceptable commands.  This is
    useful as the output of the 'help' option.

    parameters:
        output_stream - an open file-like object suitable for use as the
            target of a print function
    """
    if self.app_name or self.app_description:
        print('Application: ', end='', file=output_stream)
    if self.app_name:
        print(self.app_name, self.app_version, file=output_stream)
    if self.app_description:
        print(self.app_description, file=output_stream)
    if self.app_name or self.app_description:
        print('', file=output_stream)

    names_list = self.get_option_names()
    print(
        "usage:\n%s [OPTIONS]... " % self.app_invocation_name,
        end='',
        file=output_stream
    )
    bracket_count = 0
    # this section prints the non-switch command line arguments
    for key in names_list:
        an_option = self.option_definitions[key]
        if an_option.is_argument:
            if an_option.default is None:
                # there's no option, assume the user must set this
                print(an_option.name, end='', file=output_stream)
            elif (
                inspect.isclass(an_option.value)
                or inspect.ismodule(an_option.value)
            ):
                # this is already set and it could have expanded, most
                # likely this is a case where a sub-command has been
                # loaded and we're looking to show the help for it.
                # display it as a constant already provided rather
                # than as an option the user must provide
                print(an_option.default, end='', file=output_stream)
            else:
                # this is an argument that the user may alternatively
                # provide
                print("[ %s" % an_option.name, end='', file=output_stream)
                bracket_count += 1
    print(']' * bracket_count, '\n', file=output_stream)

    names_list.sort()
    if names_list:
        print('OPTIONS:', file=output_stream)

    pad = ' ' * 4

    for name in names_list:
        if name in self.options_banned_from_help:
            continue
        option = self._get_option(name)

        line = ' ' * 2  # always start with 2 spaces
        if option.short_form:
            line += '-%s, ' % option.short_form
        line += '--%s' % name
        line += '\n'

        doc = option.doc if option.doc is not None else ''
        if doc:
            line += '%s%s\n' % (pad, doc)
        try:
            value = option.value
            type_of_value = type(value)
            converter_function = to_string_converters[type_of_value]
            default = converter_function(value)
        except KeyError:
            default = option.value
        if default is not None:
            if (
                (option.secret or 'password' in name.lower())
                and not self.option_definitions.admin.expose_secrets.default
            ):
                default = '*********'
            if name not in ('help',):
                # don't bother with certain dead obvious ones
                line += '%s(default: %s)\n' % (pad, default)

        print(line, file=output_stream)
python
{ "resource": "" }
q10862
ConfigurationManager.write_conf
train
def write_conf(self, config_file_type, opener, skip_keys=None):
    """write a configuration file to a file-like object.

    parameters:
        config_file_type - a string containing a registered file type OR
            a for_XXX module from the value_source package.  Passing in a
            string that is unregistered will result in a KeyError
        opener - a callable object or function that returns a file like
            object that works as a context in a with statement."""
    blocked_keys = self.keys_blocked_from_output
    if skip_keys:
        blocked_keys.extend(skip_keys)

    if blocked_keys:
        option_defs = self.option_definitions.safe_copy()
        for a_blocked_key in blocked_keys:
            try:
                del option_defs[a_blocked_key]
            except (AttributeError, KeyError):
                # okay, that key isn't here
                pass
        # remove empty namespaces
        all_keys = [
            k for k in option_defs.keys_breadth_first(include_dicts=True)
        ]
        for key in all_keys:
            candidate = option_defs[key]
            if isinstance(candidate, Namespace) and not len(candidate):
                del option_defs[key]
    else:
        option_defs = self.option_definitions

    # find all of the secret options and overwrite their values with
    # '*' * 16
    if not self.option_definitions.admin.expose_secrets.default:
        for a_key in option_defs.keys_breadth_first():
            an_option = option_defs[a_key]
            if (
                (not a_key.startswith('admin'))
                and isinstance(an_option, Option)
                and an_option.secret
            ):
                # force the option to be a string of *
                option_defs[a_key].value = '*' * 16
                option_defs[a_key].from_string_converter = str

    dispatch_request_to_write(config_file_type, option_defs, opener)
python
{ "resource": "" }
q10863
ConfigurationManager.log_config
train
def log_config(self, logger):
    """write out the current configuration to a log-like object.

    parameters:
        logger - an object that implements a method called 'info' with
            the same semantics as the call to 'logger.info'"""
    logger.info("app_name: %s", self.app_name)
    logger.info("app_version: %s", self.app_version)
    logger.info("current configuration:")
    config = [
        (key, self.option_definitions[key].value)
        for key in self.option_definitions.keys_breadth_first()
        if key not in self.keys_blocked_from_output
    ]
    config.sort()
    for key, val in config:
        if (
            self.option_definitions[key].secret
            or 'password' in key.lower()
        ):
            logger.info('%s: *********', key)
        else:
            try:
                # look up the converter by the value's type, not the key's
                logger.info('%s: %s', key,
                            to_string_converters[type(val)](val))
            except KeyError:
                logger.info('%s: %s', key, val)
python
{ "resource": "" }
q10864
ConfigurationManager.get_option_names
train
def get_option_names(self):
    """returns a list of fully qualified option names.

    returns:
        a list of strings representing the Options in the source Namespace
        list.  Each item will be fully qualified with dot delimited
        Namespace names.
    """
    return [
        x for x in self.option_definitions.keys_breadth_first()
        if isinstance(self.option_definitions[x], Option)
    ]
python
{ "resource": "" }
q10865
ConfigurationManager._create_reference_value_options
train
def _create_reference_value_options(self, keys, finished_keys):
    """this method steps through the option definitions looking for alt
    paths.  On finding one, it creates the 'reference_value_from' links
    within the option definitions and populates it with copied options."""
    # a set of known reference_value_from_links
    set_of_reference_value_option_names = set()
    for key in keys:
        if key in finished_keys:
            continue
        an_option = self.option_definitions[key]
        if an_option.reference_value_from:

            fully_qualified_reference_name = '.'.join((
                an_option.reference_value_from,
                an_option.name
            ))
            if fully_qualified_reference_name in keys:
                continue  # this referenced value has already been defined
                          # no need to repeat it - skip on to the next key
            reference_option = an_option.copy()
            reference_option.reference_value_from = None
            reference_option.name = fully_qualified_reference_name
            # wait, aren't we setting a fully qualified dotted name into
            # the name field?  Yes, 'add_option' below sees that full
            # pathname and does the right thing with it to ensure that
            # the reference_option is created within the correct namespace
            set_of_reference_value_option_names.add(
                fully_qualified_reference_name
            )
            self.option_definitions.add_option(reference_option)

    for a_reference_value_option_name in set_of_reference_value_option_names:
        for x in range(a_reference_value_option_name.count('.')):
            namespace_path = \
                a_reference_value_option_name.rsplit('.', x + 1)[0]
            self.option_definitions[namespace_path].ref_value_namespace()

    return set_of_reference_value_option_names
python
{ "resource": "" }
q10866
ConfigurationManager._overlay_expand
train
def _overlay_expand(self):
    """This method overlays each of the value sources onto the default in
    each of the defined options.  It does so using a breadth first
    iteration, overlaying and expanding each level of the tree in turn.
    As soon as no changes were made to any level, the loop breaks and the
    work is done.

    The actual action of the overlay is to take the value from the source
    and copy it into the 'default' member of each Option object.

    "expansion" means converting an option value into its real type from
    a string.  The conversion is accomplished by simply calling the
    'set_value' method of the Option object.  If the resultant type has
    its own configuration options, bring those into the current namespace
    and then proceed to overlay/expand those.
    """
    new_keys_have_been_discovered = True  # loop control, False breaks loop
    finished_keys = set()
    all_reference_values = {}

    while new_keys_have_been_discovered:  # loop until nothing more is done
        # names_of_all_existing_options holds a list of all keys in the
        # option definitions in breadth first order using this form:
        # [ 'x', 'y', 'z', 'x.a', 'x.b', 'z.a', 'z.b', 'x.a.j', 'x.a.k',
        #   'x.b.h']
        names_of_all_existing_options = [
            x for x in self.option_definitions.keys_breadth_first()
            if isinstance(self.option_definitions[x], Option)
        ]
        new_keys_have_been_discovered = False  # setup to break loop

        # create alternate paths options
        set_of_reference_value_option_names = \
            self._create_reference_value_options(
                names_of_all_existing_options,
                finished_keys
            )
        for a_ref_option_name in set_of_reference_value_option_names:
            if a_ref_option_name not in all_reference_values:
                all_reference_values[a_ref_option_name] = []

        all_keys = list(set_of_reference_value_option_names) \
            + names_of_all_existing_options

        # previous versions of this method pulled the values from the
        # value sources deeper within the following nested loops.  that
        # was not necessary and caused a lot of redundant work.  the
        # 'values_from_all_sources' now holds all the values from each
        # of the value sources.
        values_from_all_sources = [
            a_value_source.get_values(
                self,  # pass in the config_manager itself
                True,  # ignore mismatches
                self.value_source_object_hook  # build with this class
            )
            for a_value_source in self.values_source_list
        ]

        # overlay process:
        # fetch all the default values from the value sources before
        # applying the from string conversions
        for key in all_keys:
            if key in finished_keys:
                continue
            #if not isinstance(an_option, Option):
            #    continue  # aggregations and other types are ignored
            # loop through all the value sources looking for values
            # that match this current key.
            if self.option_definitions[key].reference_value_from:
                reference_value_from = (
                    self.option_definitions[key].reference_value_from
                )
                top_key = key.split('.')[-1]
                self.option_definitions[key].default = (
                    self.option_definitions[reference_value_from]
                    [top_key].default
                )
                all_reference_values[
                    '.'.join((reference_value_from, top_key))
                ].append(
                    key
                )

            an_option = self.option_definitions[key]
            if key in all_reference_values:
                # make sure that this value gets propagated to keys
                # even if the keys have already been overlaid
                finished_keys -= set(
                    all_reference_values[key]
                )

            for val_src_dict in values_from_all_sources:
                try:
                    # overlay the default with the new value from the
                    # value source.  This assignment may come via
                    # acquisition, so the key given may not have been an
                    # exact match for what was returned.
                    an_option.has_changed = (
                        an_option.default != val_src_dict[key]
                    )
                    an_option.default = val_src_dict[key]
                    if key in all_reference_values:
                        # make sure that this value gets propagated to
                        # keys even if the keys have already been overlaid
                        finished_keys -= set(
                            all_reference_values[key]
                        )
                except KeyError as x:
                    pass  # okay, that source doesn't have this value

        # expansion process:
        # step through all the keys converting them to their proper
        # types and bringing in any new keys in the process
        for key in all_keys:
            if key in finished_keys:
                continue
            # mark this key as having been seen and processed
            finished_keys.add(key)
            an_option = self.option_definitions[key]
            #if not isinstance(an_option, Option):
            #    continue  # aggregations, namespaces are ignored
            # apply the from string conversion to make the real value
            an_option.set_value(an_option.default)
            # new values have been seen, don't let loop break
            new_keys_have_been_discovered = True
            try:
                try:
                    # try to fetch new requirements from this value
                    new_requirements = \
                        an_option.value.get_required_config()
                except (AttributeError, KeyError):
                    new_requirements = getattr(
                        an_option.value,
                        'required_config',
                        None
                    )
                # make sure what we got as new_requirements is actually
                # a Mapping of some sort
                if not isinstance(new_requirements, collections.Mapping):
                    # we didn't get a mapping, perhaps the option value
                    # was a Mock object - in any case we can't try to
                    # interpret 'new_requirements' as a configman
                    # requirement collection.  We must abandon processing
                    # this option further
                    continue
                if not isinstance(new_requirements, Namespace):
                    new_requirements = Namespace(
                        initializer=new_requirements
                    )
                # get the parent namespace
                current_namespace = self.option_definitions.parent(key)
                if current_namespace is None:
                    # we're at the top level, use the base namespace
                    current_namespace = self.option_definitions
                if current_namespace._reference_value_from:
                    # don't expand things that are in reference value
                    # namespaces, they will be populated by expanding
                    # the targets
                    continue
                # some new Options to be brought in may have already been
                # seen and are in the finished_keys set.  They must be
                # reset as unfinished so that a new default doesn't
                # permanently overwrite any of the values already placed
                # by the overlays.  So we've got to remove those keys
                # from the finished keys list.
                # Before we can do that, however, we need the fully
                # qualified names for the new keys.
                qualified_parent_name_list = key.rsplit('.', 1)
                if len(qualified_parent_name_list) > 1:
                    qualified_parent_name = qualified_parent_name_list[0]
                else:
                    qualified_parent_name = ''
                finished_keys = finished_keys.difference(
                    '.'.join((qualified_parent_name, ref_option_name))
                    for ref_option_name in new_requirements
                )
                # add the new Options to the namespace
                new_namespace = new_requirements.safe_copy(
                    an_option.reference_value_from
                )
                for new_key in new_namespace.keys_breadth_first():
                    if new_key not in current_namespace:
                        current_namespace[new_key] = new_namespace[new_key]
            except AttributeError as x:
                # there are apparently no new Options to bring in from
                # this option's value
                pass

    return finished_keys
python
{ "resource": "" }
q10867
ConfigurationManager._check_for_mismatches
train
def _check_for_mismatches(self, known_keys):
    """check for bad options from value sources"""
    for a_value_source in self.values_source_list:
        try:
            if a_value_source.always_ignore_mismatches:
                continue
        except AttributeError:
            # ok, this values source doesn't have the concept of always
            # ignoring mismatches; we won't tolerate mismatches
            pass
        # we want to fetch the keys from the value sources so that we can
        # check for mismatches.  Command line value sources are different:
        # we never want to allow unmatched keys from the command line.
        # By detecting if this value source is a command line source, we
        # can employ the command line's own mismatch detection.  The
        # boolean 'allow_mismatches' controls application of the
        # tolerance for mismatches.
        if hasattr(a_value_source, 'command_line_value_source'):
            allow_mismatches = False
        else:
            allow_mismatches = True
        # make a set of all the keys from a value source in the form
        # of strings like this: 'x.y.z'
        value_source_mapping = a_value_source.get_values(
            self,
            allow_mismatches,
            self.value_source_object_hook
        )
        value_source_keys_set = set([
            k
            for k in DotDict(value_source_mapping).keys_breadth_first()
        ])
        # make a set of the keys that didn't match any of the known
        # keys in the requirements
        unmatched_keys = value_source_keys_set.difference(known_keys)
        # some of the unmatched keys may actually be ok because they were
        # used during acquisition.
        # remove keys of the form 'y.z' if they match a known key of the
        # form 'x.y.z'
        for key in unmatched_keys.copy():
            key_is_okay = six.moves.reduce(
                lambda x, y: x or y,
                (known_key.endswith(key) for known_key in known_keys)
            )
            if key_is_okay:
                unmatched_keys.remove(key)
        # anything left in the unmatched_key set is a badly formed key.
        # issue a warning
        if unmatched_keys:
            if self.option_definitions.admin.strict.default:
                # raise hell...
                if len(unmatched_keys) > 1:
                    raise NotAnOptionError(
                        "%s are not valid Options" % unmatched_keys
                    )
                elif len(unmatched_keys) == 1:
                    raise NotAnOptionError(
                        "%s is not a valid Option" % unmatched_keys.pop()
                    )
            else:
                warnings.warn(
                    'Invalid options: %s' % ', '.join(sorted(unmatched_keys))
                )
python
{ "resource": "" }
q10868
ConfigurationManager._generate_config
train
def _generate_config(self, mapping_class):
    """This routine generates a copy of the DotDict-based config"""
    config = mapping_class()
    self._walk_config_copy_values(
        self.option_definitions,
        config,
        mapping_class
    )
    return config
python
{ "resource": "" }
q10869
PGPooledTransaction.close
train
def close(self): """close all pooled connections""" print("PGPooledTransaction - shutting down connection pool") for name, conn in self.pool.iteritems(): conn.close() print("PGPooledTransaction - connection %s closed" % name)
python
{ "resource": "" }
q10870
find_action_name_by_value
train
def find_action_name_by_value(registry, target_action_instance):
    """The association of an action class with its human readable name is
    exposed externally only at the time of argument definition.  This
    routine, when given a reference to argparse's internal action
    registry and an action instance, will find that action's class and
    return the name under which it was registered.
    """
    target_type = type(target_action_instance)
    for key, value in six.iteritems(registry['action']):
        if value is target_type:
            if key is None:
                return 'store'
            return key
    return None
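# Hedged usage sketch (editor addition, not part of the original source).
# Assumes this function and the `six` import are available in the same
# module.  Note that `parser._optionals._registries` is private argparse
# API, as used elsewhere in this codebase.
def _demo_find_action_name():
    import argparse
    parser = argparse.ArgumentParser()
    an_action = parser.add_argument('--tag', action='append')
    name = find_action_name_by_value(
        parser._optionals._registries, an_action
    )
    assert name == 'append'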
python
{ "resource": "" }
q10871
get_args_and_values
train
def get_args_and_values(parser, an_action):
    """this routine attempts to reconstruct the kwargs that were used in
    the creation of an action object"""
    args = inspect.getargspec(an_action.__class__.__init__).args
    kwargs = dict(
        (an_attr, getattr(an_action, an_attr))
        for an_attr in args
        if (
            an_attr not in ('self', 'required')
            and getattr(an_action, an_attr) is not None
        )
    )
    action_name = find_action_name_by_value(
        parser._optionals._registries,
        an_action
    )
    if 'required' in kwargs:
        del kwargs['required']
    kwargs['action'] = action_name
    if 'option_strings' in kwargs:
        args = tuple(kwargs['option_strings'])
        del kwargs['option_strings']
    else:
        args = ()
    return args, kwargs
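# Hedged usage sketch (editor addition): shows the round trip from an
# argparse action back to the (args, kwargs) that could recreate it.
# Assumes `inspect` is imported, find_action_name_by_value (from the
# sibling snippet) lives in the same module, and a Python version where
# inspect.getargspec still exists (it was removed in Python 3.11).
def _demo_get_args_and_values():
    import argparse
    parser = argparse.ArgumentParser()
    an_action = parser.add_argument('--count', type=int, default=3)
    args, kwargs = get_args_and_values(parser, an_action)
    # args -> ('--count',); kwargs includes dest='count', type=int,
    # default=3 and action='store'
    assert args == ('--count',)
    assert kwargs['action'] == 'store'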
python
{ "resource": "" }
q10872
SubparserFromStringConverter.add_namespace
train
def add_namespace(self, name, a_namespace):
    """as we build up argparse, the actions that define a subparser are
    translated into configman options.  Each of those options must be
    tagged with the name of the subparser to which it corresponds."""
    # save a local copy of the namespace
    self.namespaces[name] = a_namespace
    # iterate through the namespace, branding each of the options with
    # the name of the subparser to which it belongs
    for k in a_namespace.keys_breadth_first():
        an_option = a_namespace[k]
        if not an_option.foreign_data:
            an_option.foreign_data = DotDict()
        an_option.foreign_data['argparse.owning_subparser_name'] = name
python
{ "resource": "" }
q10873
ConfigmanSubParsersAction.add_parser
train
def add_parser(self, *args, **kwargs): """each time a subparser action is used to create a new parser object we must save the original args & kwargs. In a later phase of configman, we'll need to reproduce the subparsers exactly without resorting to copying. We save the args & kwargs in the 'foreign_data' section of the configman option that corresponds with the subparser action.""" command_name = args[0] new_kwargs = kwargs.copy() new_kwargs['configman_subparsers_option'] = self._configman_option new_kwargs['subparser_name'] = command_name subparsers = self._configman_option.foreign_data.argparse.subparsers a_subparser = super(ConfigmanSubParsersAction, self).add_parser( *args, **new_kwargs ) subparsers[command_name] = DotDict({ "args": args, "kwargs": new_kwargs, "subparser": a_subparser }) return a_subparser
python
{ "resource": "" }
q10874
ArgumentParser.get_required_config
train
def get_required_config(self):
    """because of the existence of subparsers, the configman options that
    correspond with argparse arguments are not constant.  We need to
    produce a copy of the namespace rather than the actual embedded
    namespace."""
    required_config = Namespace()
    # add current options to a copy of required config
    for k, v in iteritems_breadth_first(self.required_config):
        required_config[k] = v
    # get any option found in any subparsers
    try:
        subparser_namespaces = (
            self.configman_subparsers_option.foreign_data
            .argparse.subprocessor_from_string_converter
        )
        subparsers = (
            self._argparse_subparsers._configman_option.foreign_data
            .argparse.subparsers
        )
        # each subparser needs to have its configman options set up
        # in the subparser's configman option.  This routine copies
        # the required_config of each subparser into the
        # SubparserFromStringConverter defined above.
        for subparser_name, subparser_data in six.iteritems(subparsers):
            subparser_namespaces.add_namespace(
                subparser_name,
                subparser_data.subparser.get_required_config()
            )
    except AttributeError:
        # there is no subparser
        pass
    return required_config
python
{ "resource": "" }
q10875
ArgumentParser.add_subparsers
train
def add_subparsers(self, *args, **kwargs):
    """When adding a subparser, we need to ensure that our version of the
    SubparserAction object is returned.  We also need to create the
    corresponding configman Option object for the subparser and pack its
    foreign data section with the original args & kwargs."""
    kwargs['parser_class'] = self.__class__
    kwargs['action'] = ConfigmanSubParsersAction
    subparser_action = super(ArgumentParser, self).add_subparsers(
        *args,
        **kwargs
    )
    self._argparse_subparsers = subparser_action

    if "dest" not in kwargs or kwargs['dest'] is None:
        kwargs['dest'] = 'subcommand'
    configman_name = kwargs['dest']
    configman_default = None
    configman_doc = kwargs.get('help', '')
    subprocessor_from_string_converter = SubparserFromStringConverter()
    configman_to_string = str
    configman_is_argument = True
    configman_not_for_definition = True

    # it's finally time to create the configman Option object and add it
    # to the required_config.
    self.required_config.add_option(
        name=configman_name,
        default=configman_default,
        doc=configman_doc,
        from_string_converter=subprocessor_from_string_converter,
        to_string_converter=configman_to_string,
        is_argument=configman_is_argument,
        not_for_definition=configman_not_for_definition,
        # we're going to save the input parameters that created the
        # argparse Action.  This enables us to perfectly reproduce the
        # original Action object later during the configman overlay
        # process.
        foreign_data=DotDict({
            'argparse.flags.subcommand': subparser_action,
            'argparse.args': args,
            'argparse.kwargs': kwargs,
            'argparse.subparsers': DotDict(),
            'argparse.subprocessor_from_string_converter':
                subprocessor_from_string_converter
        })
    )
    self.configman_subparsers_option = self.required_config[configman_name]
    subparser_action.add_configman_option(self.configman_subparsers_option)

    return subparser_action
python
{ "resource": "" }
q10876
sequence_to_string
train
def sequence_to_string(
    a_list,
    open_bracket_char='[',
    close_bracket_char=']',
    delimiter=", "
):
    """a dedicated function that turns a sequence into a delimited string
    of converted items.  Nested sequences are handled by the item
    converter, so arbitrarily nested lists can be rendered."""
    return "%s%s%s" % (
        open_bracket_char,
        delimiter.join(
            local_to_str(x)
            for x in a_list
        ),
        close_bracket_char
    )
python
{ "resource": "" }
q10877
ValueSource.get_values
train
def get_values(self, config_manager, ignore_mismatches, obj_hook=DotDict):
    """This is the black sheep of the crowd of ValueSource
    implementations.  It needs to know ahead of time all of the
    parameters that it will need, but we cannot give them to it.  We may
    not know all the parameters because not all classes may have been
    expanded yet.  The two parameters allow this ValueSource
    implementation to know which parameters have already been defined.
    The 'ignore_mismatches' parameter tells the implementation if it can
    or cannot ignore extraneous commandline options.  The last time this
    function is called, it will be required to test for illegal
    commandline options and respond accordingly.

    Unlike many of the Value sources, this method cannot be "memoized".
    The return result depends on an internal state within the parameter
    'config_manager'.  Any memoize decorator for this method would
    require capturing that internal state in the memoize cache key.
    """
    short_options_str, long_options_list = self.getopt_create_opts(
        config_manager.option_definitions
    )
    try:
        if ignore_mismatches:
            fn = ValueSource.getopt_with_ignore
        else:
            fn = getopt.gnu_getopt
        # here getopt looks through the command line arguments and
        # consumes the defined switches.  The things that are not
        # consumed are then offered as the 'args' variable of the
        # parent configuration_manager
        getopt_options, config_manager.args = fn(
            self.argv_source,
            short_options_str,
            long_options_list
        )
    except getopt.GetoptError as x:
        raise NotAnOptionError(str(x))
    command_line_values = obj_hook()
    for opt_name, opt_val in getopt_options:
        if opt_name.startswith('--'):
            name = opt_name[2:]
        else:
            name = self.find_name_with_short_form(
                opt_name[1:],
                config_manager.option_definitions,
                ''
            )
            if not name:
                raise NotAnOptionError(
                    '%s is not a valid short form option' % opt_name[1:]
                )
        option_ = config_manager._get_option(name)
        if option_.from_string_converter == boolean_converter:
            command_line_values[name] = not option_.default
        else:
            command_line_values[name] = opt_val
    for name, value in zip(
        self._get_arguments(
            config_manager.option_definitions,
            command_line_values
        ),
        config_manager.args
    ):
        command_line_values[name] = value
    return command_line_values
python
{ "resource": "" }
q10878
ChemometricsPLS_LDA._cummulativefit
train
def _cummulativefit(self, x, y):
    """
    Measure the cumulative regression sum of squares for each individual
    component.

    :param x: Data matrix to fit the PLS model.
    :type x: numpy.ndarray, shape [n_samples, n_features]
    :param y: Data matrix to fit the PLS model.
    :type y: numpy.ndarray, shape [n_samples, n_features]
    :return: dictionary object containing the total regression sum of
        squares and the sum of squares per component, for both the X and
        Y data blocks.
    :rtype: dict
    """
    if y.ndim == 1:
        y = y.reshape(-1, 1)
    if x.ndim == 1:
        x = x.reshape(-1, 1)
    xscaled = self.x_scaler.fit_transform(x)
    yscaled = self.y_scaler.fit_transform(y)

    # Obtain residual sum of squares for whole data set and per component
    SSX = np.sum(xscaled ** 2)
    SSY = np.sum(yscaled ** 2)
    ssx_comp = list()
    ssy_comp = list()

    for curr_comp in range(1, self.ncomps + 1):
        model = self._reduce_ncomps(curr_comp)

        ypred = self.y_scaler.transform(model.predict(x, y=None))
        xpred = self.x_scaler.transform(model.predict(x=None, y=y))

        rssy = np.sum((yscaled - ypred) ** 2)
        rssx = np.sum((xscaled - xpred) ** 2)

        ssx_comp.append(rssx)
        ssy_comp.append(rssy)

    cumulative_fit = {'SSX': SSX, 'SSY': SSY, 'SSXcomp': np.array(ssx_comp),
                      'SSYcomp': np.array(ssy_comp)}

    return cumulative_fit
python
{ "resource": "" }
q10879
_handle_zeros_in_scale
train
def _handle_zeros_in_scale(scale, copy=True): """ Makes sure that whenever scale is zero, we handle it correctly. This happens in most scalers when we have constant features. """ # if we are fitting on 1D arrays, scale might be a scalar if numpy.isscalar(scale): if scale == .0: scale = 1. return scale elif isinstance(scale, numpy.ndarray): if copy: # New array to avoid side-effects scale = scale.copy() scale[scale == 0.0] = 1.0 return scale
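# Hedged usage sketch (editor addition): demonstrates the zero-guard on a
# small array.  Assumes `numpy` and `_handle_zeros_in_scale` are available
# in this module.
def _demo_handle_zeros():
    import numpy
    scale = numpy.array([0.0, 2.0, 0.0])
    safe = _handle_zeros_in_scale(scale)
    # zeros become 1.0 so later divisions are no-ops for constant features;
    # the original array is untouched because copy=True by default
    assert (safe == numpy.array([1.0, 2.0, 1.0])).all()
    assert scale[0] == 0.0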
python
{ "resource": "" }
q10880
ChemometricsScaler.fit
train
def fit(self, X, y=None): """ Compute the mean and standard deviation from a dataset to use in future scaling operations. :param X: Data matrix to scale. :type X: numpy.ndarray, shape [n_samples, n_features] :param y: Passthrough for Scikit-learn ``Pipeline`` compatibility. :type y: None :return: Fitted object. :rtype: pyChemometrics.ChemometricsScaler """ # Reset internal state before fitting self._reset() return self.partial_fit(X, y)
python
{ "resource": "" }
q10881
ChemometricsScaler.partial_fit
train
def partial_fit(self, X, y=None): """ Performs online computation of mean and standard deviation on X for later scaling. All of X is processed as a single batch. This is intended for cases when `fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. The algorithm for incremental mean and std is given in Equation 1.5a,b in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms for computing the sample variance: Analysis and recommendations." The American Statistician 37.3 (1983): 242-247 :param X: Data matrix to scale. :type X: numpy.ndarray, shape [n_samples, n_features] :param y: Passthrough for Scikit-learn ``Pipeline`` compatibility. :type y: None :return: Fitted object. :rtype: pyChemometrics.ChemometricsScaler """ X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) # Even in the case of `with_mean=False`, we update the mean anyway # This is needed for the incremental computation of the var # See incr_mean_variance_axis and _incremental_mean_variance_axis if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` " "instead. See docstring for motivation and alternatives.") if self.with_std: # First pass if not hasattr(self, 'n_samples_seen_'): self.mean_, self.var_ = mean_variance_axis(X, axis=0) self.n_samples_seen_ = X.shape[0] # Next passes else: self.mean_, self.var_, self.n_samples_seen_ = \ incr_mean_variance_axis(X, axis=0, last_mean=self.mean_, last_var=self.var_, last_n=self.n_samples_seen_) else: self.mean_ = None self.var_ = None else: # First pass if not hasattr(self, 'n_samples_seen_'): self.mean_ = .0 self.n_samples_seen_ = 0 if self.with_std: self.var_ = .0 else: self.var_ = None self.mean_, self.var_, self.n_samples_seen_ = \ _incremental_mean_and_var(X, self.mean_, self.var_, self.n_samples_seen_) if self.with_std: self.scale_ = _handle_zeros_in_scale(numpy.sqrt(self.var_)) ** self.scale_power else: self.scale_ = None return self
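# Hedged usage sketch (editor addition): streaming standardization over
# chunks, as described in the docstring above.  The ChemometricsScaler
# constructor is assumed to take its defaults here.
def _demo_partial_fit():
    import numpy
    scaler = ChemometricsScaler()
    rng = numpy.random.RandomState(0)
    for _ in range(3):
        chunk = rng.randn(100, 5)  # one batch from a (possibly endless) stream
        scaler.partial_fit(chunk)
    # mean_ and var_ now reflect all 300 rows seen so far
    scaled = scaler.transform(rng.randn(10, 5))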
python
{ "resource": "" }
q10882
ChemometricsScaler.transform
train
def transform(self, X, y=None, copy=None): """ Perform standardization by centering and scaling using the parameters. :param X: Data matrix to scale. :type X: numpy.ndarray, shape [n_samples, n_features] :param y: Passthrough for scikit-learn ``Pipeline`` compatibility. :type y: None :param bool copy: Copy the X matrix. :return: Scaled version of the X data matrix. :rtype: numpy.ndarray, shape [n_samples, n_features] """ check_is_fitted(self, 'scale_') copy = copy if copy is not None else self.copy X = check_array(X, accept_sparse='csr', copy=copy, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` " "instead. See docstring for motivation and alternatives.") if self.scale_ is not None: inplace_column_scale(X, 1 / self.scale_) else: if self.with_mean: X -= self.mean_ if self.with_std: X /= self.scale_ return X
python
{ "resource": "" }
q10883
ChemometricsScaler.inverse_transform
train
def inverse_transform(self, X, copy=None):
    """
    Scale back the data to the original representation.

    :param X: Scaled data matrix.
    :type X: numpy.ndarray, shape [n_samples, n_features]
    :param bool copy: Copy the X data matrix.
    :return: X data matrix with the scaling operation reverted.
    :rtype: numpy.ndarray, shape [n_samples, n_features]
    """
    check_is_fitted(self, 'scale_')
    copy = copy if copy is not None else self.copy
    if sparse.issparse(X):
        if self.with_mean:
            raise ValueError(
                "Cannot uncenter sparse matrices: pass `with_mean=False` "
                "instead. See docstring for motivation and alternatives.")
        if not sparse.isspmatrix_csr(X):
            X = X.tocsr()
            copy = False
        if copy:
            X = X.copy()
        if self.scale_ is not None:
            inplace_column_scale(X, self.scale_)
    else:
        X = numpy.asarray(X)
        if copy:
            X = X.copy()
        if self.with_std:
            X *= self.scale_
        if self.with_mean:
            X += self.mean_
    return X
python
{ "resource": "" }
q10884
_recurse_replace
train
def _recurse_replace(obj, key, new_key, sub, remove): """Recursive helper for `replace_by_key`""" if isinstance(obj, list): return [_recurse_replace(x, key, new_key, sub, remove) for x in obj] if isinstance(obj, dict): for k, v in list(obj.items()): if k == key and v in sub: obj[new_key] = sub[v] if remove: del obj[key] else: obj[k] = _recurse_replace(v, key, new_key, sub, remove) return obj
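# Hedged usage sketch (editor addition): exercises the recursive traversal
# on plain dicts and lists, independent of any pif object.
def _demo_recurse_replace():
    doc = {"units": "C", "nested": [{"units": "F"}, {"units": "K"}]}
    out = _recurse_replace(doc, "units", "units", {"C": "degC", "F": "degF"}, False)
    # matching values are substituted at every depth; "K" has no entry in
    # the substitution map, so it is left alone
    assert out == {"units": "degC", "nested": [{"units": "degF"}, {"units": "K"}]}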
python
{ "resource": "" }
q10885
replace_by_key
train
def replace_by_key(pif, key, subs, new_key=None, remove=False): """Replace values that match a key Deeply traverses the pif object, looking for `key` and replacing values in accordance with `subs`. If `new_key` is set, the replaced values are assigned to that key. If `remove` is `True`, the old `key` pairs are removed. """ if not new_key: new_key = key remove = False orig = pif.as_dictionary() new = _recurse_replace(orig, to_camel_case(key), to_camel_case(new_key), subs, remove) return pypif.pif.loads(json.dumps(new))
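# Hedged usage sketch (editor addition): end-to-end substitution on a small
# pypif System.  Assumes pypif is installed and that to_camel_case (used by
# replace_by_key) is importable in this module.
def _demo_replace_by_key():
    from pypif.obj import System, Property
    pif = System(properties=[Property(name="T", scalars=[25], units="C")])
    new_pif = replace_by_key(pif, "units", {"C": "degC"})
    # the returned object is a fresh pif with the substituted units
    assert new_pif.properties[0].units == "degC"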
python
{ "resource": "" }
q10886
new_keypair
train
def new_keypair(key, value, ambig, unambig):
    """
    Check a new key/value pair against the existing decodings

    :param key: of pair
    :param value: of pair
    :param ambig: set of keys with ambiguous decodings
    :param unambig: dict mapping keys to their unambiguous decodings
    :return:
    """
    if key in ambig:
        return
    if key in unambig and value != unambig[key]:
        ambig.add(key)
        del unambig[key]
        return
    unambig[key] = value
    return
python
{ "resource": "" }
q10887
add_child_ambig
train
def add_child_ambig(child_ambig, child_unambig, ambig, unambig):
    """
    Add information about the decodings of a child object

    :param child_ambig: set of ambiguous keys from the child
    :param child_unambig: dict of unambiguous decodings from the child
    :param ambig: set of keys with ambiguous decodings
    :param unambig: dict mapping keys to their unambiguous decodings
    :return:
    """
    for k in child_ambig:
        ambig.add(k)
        if k in unambig:
            del unambig[k]
    for k, v in child_unambig.items():
        new_keypair(k, v, ambig, unambig)
    return
python
{ "resource": "" }
q10888
get_client
train
def get_client(site=None): """Get a citrination client""" if 'CITRINATION_API_KEY' not in environ: raise ValueError("'CITRINATION_API_KEY' is not set as an environment variable") if not site: site = environ.get("CITRINATION_SITE", "https://citrination.com") return CitrinationClient(environ['CITRINATION_API_KEY'], site)
python
{ "resource": "" }
q10889
calculate_ideal_atomic_percent
train
def calculate_ideal_atomic_percent(pif):
    """
    Calculates ideal atomic percents from a chemical formula string from a pif.
    Returns an appended pif with composition elements modified or added.

    :param pif: a ChemicalSystem pif
    :return: modified pif object
    """
    if not isinstance(pif, ChemicalSystem):
        return pif
    if not pif.chemical_formula:
        return pif
    else:
        expanded_formula_no_special_char = _expand_formula_(
            pif.chemical_formula)
        element_array = _create_emprical_compositional_array_(
            expanded_formula_no_special_char)
        appended_e_array = _add_atomic_percents_(element_array)
        for e in appended_e_array:
            # Checks if a Composition element describing that element
            # already exists.
            if _get_element_in_pif_composition_(pif, e["symbol"]):
                # If it exists, it removes the old Composition object, and
                # inserts a new one with ideal atomic percent added.
                in_pif = _get_element_in_pif_composition_(pif, e["symbol"])
                comp = in_pif[0]
                pif.composition.pop(in_pif[1])
                comp.idealAtomicPercent = e["atomic_percent"]
                pif.composition.append(comp)
            else:
                # If not, it creates a new Composition object with the
                # element and ideal atomic percent.
                comp = Composition()
                comp.element = e["symbol"]
                comp.idealAtomicPercent = e["atomic_percent"]
                pif.composition.append(comp)
    return pif
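# Hedged usage sketch (editor addition): assumes pypif is installed and the
# helper functions referenced above live in the same module.
def _demo_ideal_atomic_percent():
    from pypif.obj import ChemicalSystem
    pif = ChemicalSystem(chemical_formula="H2O")
    pif = calculate_ideal_atomic_percent(pif)
    # pif.composition now carries Composition objects for H and O with
    # idealAtomicPercent of roughly 66.7 and 33.3 respectively;
    # calculate_ideal_weight_percent (below) works the same way for weights
    percents = {c.element: c.idealAtomicPercent for c in pif.composition}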
python
{ "resource": "" }
q10890
calculate_ideal_weight_percent
train
def calculate_ideal_weight_percent(pif):
    """
    Calculates ideal atomic weight percents from a chemical formula string from a pif.
    Returns an appended pif with composition elements modified or added.

    :param pif: a ChemicalSystem pif
    :return: modified pif object
    """
    if not isinstance(pif, ChemicalSystem):
        return pif
    if not pif.chemical_formula:
        return pif
    else:
        expanded_formula_no_special_char = _expand_formula_(
            pif.chemical_formula)
        element_array = _create_emprical_compositional_array_(
            expanded_formula_no_special_char)
        appended_e_array = _add_ideal_atomic_weights_(element_array)
        a_array_with_pcts = _add_ideal_weight_percent_(appended_e_array)
        for e in a_array_with_pcts:
            # Checks if a Composition element describing that element
            # already exists.
            if _get_element_in_pif_composition_(pif, e["symbol"]):
                # If it exists, it removes the old Composition object, and
                # inserts a new one with ideal atomic weight percent added.
                in_pif = _get_element_in_pif_composition_(pif, e["symbol"])
                comp = in_pif[0]
                pif.composition.pop(in_pif[1])
                comp.idealWeightPercent = e["weight_percent"]
                pif.composition.append(comp)
            else:
                # If not, it creates a new Composition object with the
                # element and ideal atomic weight percent.
                comp = Composition()
                comp.element = e["symbol"]
                comp.idealWeightPercent = e["weight_percent"]
                pif.composition.append(comp)
    return pif
python
{ "resource": "" }
q10891
_expand_hydrate_
train
def _expand_hydrate_(hydrate_pos, formula_string):
    """
    Handles the expansion of the hydrate portion of a chemical formula,
    distributing the hydrate coefficient over all of its elements

    :param hydrate_pos: the index in the formula_string of the · symbol
    :param formula_string: the unexpanded formula string
    :return: a formula string without the · character with the hydrate
        portion expanded out
    """
    hydrate = formula_string[hydrate_pos + 1:]
    hydrate_string = ""
    multiplier = float(re.search(r'^[\d\.]+', hydrate).group())
    element_array = re.findall('[A-Z][^A-Z]*', hydrate)
    for e in element_array:
        occurrence_array = re.findall('[0-9][^0-9]*', e)
        if len(occurrence_array) == 0:
            occurrence_array.append(1)
        for o in occurrence_array:
            symbol = re.findall('[A-Z][a-z]*', e)
            total_num = float(o) * multiplier
            if total_num.is_integer():
                total_num = int(total_num)
            total_str = str(total_num)
            if total_str == "1":
                total_str = ""
            new_string = symbol[0] + total_str
            hydrate_string += new_string
    return formula_string[:hydrate_pos] + hydrate_string
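# Hedged worked example (editor addition): expanding the pentahydrate tail
# of copper(II) sulfate.  Assumes `re` is imported in this module.
def _demo_expand_hydrate():
    formula = "CuSO4·5H2O"
    expanded = _expand_hydrate_(formula.index("·"), formula)
    # the ·5H2O tail is distributed element by element:
    # 5 x H2 -> H10, 5 x O -> O5
    assert expanded == "CuSO4H10O5"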
python
{ "resource": "" }
q10892
_create_compositional_array_
train
def _create_compositional_array_(expanded_chemical_formula_string):
    """
    Splits an expanded chemical formula string into an array of
    dictionaries containing information about each element

    :param expanded_chemical_formula_string: a clean (not necessarily
        empirical, but without any special characters) chemical formula
        string, as returned by _expand_formula_()
    :return: an array of dictionaries
    """
    element_array = re.findall(
        '[A-Z][^A-Z]*', expanded_chemical_formula_string)
    split_element_array = []
    for s in element_array:
        m = re.match(r"([a-zA-Z]+)([0-9\.]*)", s, re.I)
        if m:
            items = m.groups()
            if items[1] == "":
                items = (items[0], 1)
            # NB: the "occurances" key spelling (sic) is preserved because
            # the downstream _add_*_ helpers read it by that name
            this_e = {"symbol": items[0], "occurances": float(items[1])}
            split_element_array.append(this_e)
    return split_element_array
python
{ "resource": "" }
q10893
_add_ideal_atomic_weights_
train
def _add_ideal_atomic_weights_(elemental_array):
    """
    Uses elements.json to find the molar mass of the element in question,
    and then multiplies that by the occurrences of the element.  Adds the
    "weight" property to each of the dictionaries in elemental_array

    :param elemental_array: an array of dictionaries containing information about the elements in the system
    :return: the appended elemental_array
    """
    for a in elemental_array:
        this_atomic_weight = elements_data[a["symbol"]]["atomic_weight"]
        a["weight"] = a["occurances"] * this_atomic_weight
    return elemental_array
python
{ "resource": "" }
q10894
_add_ideal_weight_percent_
train
def _add_ideal_weight_percent_(elemental_array): """ Adds the "weight_percent" property to each of the dictionaries in elemental_array :param elemental_array: an array of dictionaries containing information about the elements in the system :return: the appended elemental_array """ t_mass = _calculate_total_mass_(elemental_array) for a in elemental_array: a["weight_percent"] = a["weight"] / t_mass * 100 return elemental_array
python
{ "resource": "" }
q10895
_get_element_in_pif_composition_
train
def _get_element_in_pif_composition_(pif, elemental_symbol):
    """
    If the element in question is in the composition array of the pif, it
    returns that Composition object and its position in the composition
    array; otherwise it returns False

    :param pif: ChemicalSystem pif in question
    :param elemental_symbol: string of the atomic symbol of the element in question
    :return: either False if not found in the composition, or the
        Composition object along with its index in the composition array
        in the pif
    """
    if pif.composition is None:
        pif.composition = []
    for i, c in enumerate(pif.composition):
        if c.element == elemental_symbol or c.element.lower(
        ) == elements_data[elemental_symbol]["name"].lower():
            return [c, i]
    return False
python
{ "resource": "" }
q10896
parse_name_string
train
def parse_name_string(full_name): """ Parse a full name into a Name object :param full_name: e.g. "John Smith" or "Smith, John" :return: Name object """ name = Name() if "," in full_name: toks = full_name.split(",") name.family = toks[0] name.given = ",".join(toks[1:]).strip() else: toks = full_name.split() name.given = toks[0] name.family = " ".join(toks[1:]).strip() return name
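# Hedged usage sketch (editor addition): both supported orderings parse to
# the same Name fields.
def _demo_parse_name():
    a = parse_name_string("Smith, John")
    b = parse_name_string("John Smith")
    assert a.family == "Smith" and a.given == "John"
    assert b.family == "Smith" and b.given == "John"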
python
{ "resource": "" }
q10897
query_to_mdf_records
train
def query_to_mdf_records(query=None, dataset_id=None, mdf_acl=None):
    """Evaluate a query and return a list of MDF records

    If a dataset_id is specified but there is no query, a simple
    whole-dataset query is constructed for the user
    """
    if not query and not dataset_id:
        raise ValueError("Either query or dataset_id must be specified")
    if query and dataset_id:
        raise ValueError("Both query and dataset_id were specified; pick one or the other.")
    if not query:
        query = PifSystemReturningQuery(
            query=DataQuery(
                dataset=DatasetQuery(
                    id=Filter(equal=dataset_id)
                )
            ),
            size=10000  # Don't pull down all the results by default
        )
    client = get_client()

    if not mdf_acl:
        raise ValueError('Access controls (mdf_acl) must be specified. Use ["public"] for public access')

    pif_result = client.pif_search(query)
    if len(pif_result.hits) == 0:
        return []

    example_uid = pif_result.hits[0].system.uid
    dataset_query = DatasetReturningQuery(
        query=DataQuery(
            system=PifSystemQuery(
                uid=Filter(equal=example_uid)
            )
        ),
        size=1  # we only expect one dataset to hit
    )
    dataset_result = client.dataset_search(dataset_query)

    records = []
    for hit in pif_result.hits:
        records.append(pif_to_mdf_record(hit.system, dataset_result.hits[0], mdf_acl))

    return records
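# Hedged usage sketch (editor addition): pulls one dataset into MDF records.
# Requires CITRINATION_API_KEY in the environment (see get_client above) and
# network access; the dataset id here is a placeholder, not a real dataset.
def _demo_query_to_mdf_records():
    records = query_to_mdf_records(dataset_id="150888", mdf_acl=["public"])
    # each entry is a JSON string with an "mdf" block plus a source-named
    # block of user-defined fields
    return records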
python
{ "resource": "" }
q10898
pif_to_mdf_record
train
def pif_to_mdf_record(pif_obj, dataset_hit, mdf_acl):
    """Convert a PIF into a partial MDF record"""
    res = {}
    res["mdf"] = _to_meta_data(pif_obj, dataset_hit, mdf_acl)
    res[res["mdf"]["source_name"]] = _to_user_defined(pif_obj)

    return dumps(res)
python
{ "resource": "" }
q10899
_to_user_defined
train
def _to_user_defined(pif_obj):
    """Read the systems in the PIF to populate the user-defined portion"""
    res = {}

    # make a read view to flatten the hierarchy
    rv = ReadView(pif_obj)

    # Iterate over the keys in the read view
    for k in rv.keys():
        name, value = _extract_key_value(rv[k].raw)
        # add any objects that can be extracted
        if name and value is not None:
            res[name] = value

    # Grab interesting values not in the ReadView
    pif = pif_obj.as_dictionary()
    elements = {}
    if pif.get("composition"):
        for comp in pif["composition"]:
            if comp.get("actualAtomicPercent"):
                elements[comp["element"]] = float(comp["actualAtomicPercent"]["value"])
            elif comp.get("actualWeightPercent"):
                elements[comp["element"]] = float(comp["actualWeightPercent"]["value"])
        if elements:
            res["elemental_percent"] = elements
    elif pif.get("chemicalFormula"):
        symbol = ""
        num = ""
        # Chemical formulae are composed of letters, numbers, and
        # potentially characters we don't care about
        for char in pif["chemicalFormula"]:
            # Uppercase char indicates beginning of new symbol
            if char.isupper():
                # If there is already a symbol in holding, process it
                if symbol:
                    try:
                        elements[symbol] = int(num)  # If num is a float, raises ValueError
                    except ValueError:
                        elements[symbol] = float(num) if num else 1
                    symbol = ""
                    num = ""
                symbol += char
            # Lowercase chars or digits are continuations of a symbol
            elif char.islower():
                symbol += char
            elif char.isdigit():
                num += char
            elif char == ".":
                num += char
            # All other chars are not useful
        # Flush the final symbol left in holding once the loop ends;
        # without this, the last element of the formula would be dropped
        if symbol:
            try:
                elements[symbol] = int(num)
            except ValueError:
                elements[symbol] = float(num) if num else 1
        if elements:
            res["elemental_proportion"] = elements

    return res
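# Hedged illustration (editor addition): a standalone re-creation of the
# chemicalFormula tokenizer loop above, showing why the final flush after
# the loop is required.
def _demo_formula_tokenizer():
    elements = {}
    symbol, num = "", ""
    for char in "Fe2O3.5":
        if char.isupper():
            if symbol:
                try:
                    elements[symbol] = int(num)
                except ValueError:
                    elements[symbol] = float(num) if num else 1
                symbol, num = "", ""
            symbol += char
        elif char.islower():
            symbol += char
        elif char.isdigit() or char == ".":
            num += char
    # final flush: "O" with num "3.5" is still in holding here
    if symbol:
        try:
            elements[symbol] = int(num)
        except ValueError:
            elements[symbol] = float(num) if num else 1
    assert elements == {"Fe": 2, "O": 3.5}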
python
{ "resource": "" }