code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def build_knowledge_graph(self, json_ontology: dict) -> List:
    """
    Build a knowledge graph from a JSON-like ontology representation, eg:

        kg_object_ontology = {
            "uri": doc.doc_id,
            "country": doc.select_segments("$.Location"),
            "type": [
                "Event",
                doc.extract(self.incomp_decoder, doc.select_segments("$.Incomp")[0]),
                doc.extract(self.int_decoder, doc.select_segments("$.Int")[0])
            ]
        }

    Currently only the JSON ontology representation is supported; other
    formats may be added later.

    Args:
        json_ontology: a JSON ontology representation of a knowledge graph

    Returns:
        a list of the nested Document objects created while flattening
        the ontology
    """
    nested_docs = list()
    if json_ontology:
        for key in list(json_ontology):
            j_values = json_ontology[key]
            # Normalize scalar values to a one-element list so both forms
            # are handled by the same loop.
            if not isinstance(j_values, list):
                j_values = [j_values]
            for j_value in j_values:
                if not isinstance(j_value, dict):
                    if self.kg:
                        # 'doc_id'/'uri' identify the document itself and
                        # are not stored as ordinary KG values.
                        if key not in ['doc_id', 'uri']:
                            self.kg.add_value(key, value=j_value)
                else:
                    # A dict value becomes a nested document: create it,
                    # assign it a doc_id and add that doc_id to the parent
                    # document's knowledge graph.
                    child_doc_id = None
                    if 'uri' in j_value:
                        child_doc_id = j_value['uri']
                    elif 'doc_id' in j_value:
                        child_doc_id = j_value['doc_id']
                    child_doc = Document(self.etk, cdr_document=dict(), mime_type='json', url='')
                    # Recurse: the child may itself contain nested documents.
                    nested_docs.extend(child_doc.build_knowledge_graph(j_value))
                    if not child_doc_id:
                        # No explicit id supplied: derive one from the
                        # child's knowledge-graph content.
                        child_doc_id = Utility.create_doc_id_from_json(child_doc.kg._kg)
                    if self.kg:
                        self.kg.add_value(key, value=child_doc_id)
                    child_doc.cdr_document["doc_id"] = child_doc_id
                    nested_docs.append(child_doc)
    return nested_docs
The idea is to be able to build a json knowledge graph from a json like ontology representation, eg: kg_object_ontology = { "uri": doc.doc_id, "country": doc.select_segments("$.Location"), "type": [ "Event", doc.extract(self.incomp_decoder, doc.select_segments("$.Incomp")[0]), doc.extract(self.int_decoder, doc.select_segments("$.Int")[0]) ] } Currently json ontology representation is supported, might add new ways later Args: json_ontology: a json ontology representation of a knowledge graph Returns: returns a list of nested documents created
def lessgreater(x, y):
    """
    Return True if x < y or x > y and False otherwise.

    This function returns False whenever x and/or y is a NaN.
    """
    # Coerce plain Python numbers to BigFloat before delegating to MPFR.
    x = BigFloat._implicit_convert(x)
    y = BigFloat._implicit_convert(y)
    return mpfr.mpfr_lessgreater_p(x, y)
Return True if x < y or x > y and False otherwise. This function returns False whenever x and/or y is a NaN.
def cudnnSetTensor(handle, srcDesc, srcData, value):
    """ Set all data points of a tensor to a given value: srcDest = value.

    Parameters
    ----------
    handle : cudnnHandle
        Handle to a previously created cuDNN context.
    srcDesc : cudnnTensorDescriptor
        Handle to a previously initialized tensor descriptor.
    srcData : void_p
        Pointer to data of the tensor described by srcDesc descriptor.
    value : float
        Value that all elements of the tensor will be set to.
    """
    dataType, _, _, _, _, _, _, _, _ = cudnnGetTensor4dDescriptor(srcDesc)
    # BUG FIX: the original referenced an undefined name `alpha`; the
    # parameter is named `value`, so calling this always raised NameError.
    # Match the C scalar type to the tensor's data type, as cuDNN requires.
    if dataType == cudnnDataType['CUDNN_DATA_DOUBLE']:
        valueRef = ctypes.byref(ctypes.c_double(value))
    else:
        valueRef = ctypes.byref(ctypes.c_float(value))
    status = _libcudnn.cudnnSetTensor(handle, srcDesc, srcData, valueRef)
    cudnnCheckStatus(status)
Set all data points of a tensor to a given value : srcDest = value. Parameters ---------- handle : cudnnHandle Handle to a previously created cuDNN context. srcDesc : cudnnTensorDescriptor Handle to a previously initialized tensor descriptor. srcData : void_p Pointer to data of the tensor described by srcDesc descriptor. value : float Value that all elements of the tensor will be set to.
def _index_to_ansi_values(self, index):
    ''' Converts a palette index to the corresponding ANSI color.

        Arguments:
            index   - an int (from 0-15)
        Returns:
            index as str in a list for compatibility with values.
    '''
    # The class-name prefix distinguishes Foreground from Background
    # palettes; each has its own low (0-7) and high (8-15) base codes.
    if self.__class__.__name__[0] == 'F':
        low_base, high_base = ANSI_FG_LO_BASE, ANSI_FG_HI_BASE
    else:
        low_base, high_base = ANSI_BG_LO_BASE, ANSI_BG_HI_BASE

    if index < 8:
        ansi_code = index + low_base
    else:
        ansi_code = index + high_base - 8

    return [str(ansi_code)]
Converts a palette index to the corresponding ANSI color. Arguments: index - an int (from 0-15) Returns: index as str in a list for compatibility with values.
def set_max_waypoint_items(self, max_waypoint_items):
    """Set how many waypoint items will be seen for a scaffolded wrong answer.

    :param max_waypoint_items: maximum number of waypoint items (a cardinal)
    :raises NoAccess: if the metadata marks this field read-only
    :raises InvalidArgument: if the value is not a valid cardinal for
        this field's metadata
    """
    if self.get_max_waypoint_items_metadata().is_read_only():
        raise NoAccess()
    if not self.my_osid_object_form._is_valid_cardinal(
            max_waypoint_items,
            self.get_max_waypoint_items_metadata()):
        raise InvalidArgument()
    self.my_osid_object_form._my_map['maxWaypointItems'] = max_waypoint_items
This determines how many waypoint items will be seen for a scaffolded wrong answer
def _run_qmc(self, boot): """ Runs quartet max-cut QMC on the quartets qdump file. """ ## build command self._tmp = os.path.join(self.dirs, ".tmptre") cmd = [ip.bins.qmc, "qrtt="+self.files.qdump, "otre="+self._tmp] ## run it proc = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE) res = proc.communicate() if proc.returncode: raise IPyradWarningExit(res[1]) ## parse tmp file written by qmc into a tree and rename it with open(self._tmp, 'r') as intree: tre = ete3.Tree(intree.read().strip()) names = tre.get_leaves() for name in names: name.name = self.samples[int(name.name)] tmptre = tre.write(format=9) ## save the tree to file if boot: self.trees.boots = os.path.join(self.dirs, self.name+".boots") with open(self.trees.boots, 'a') as outboot: outboot.write(tmptre+"\n") else: self.trees.tree = os.path.join(self.dirs, self.name+".tree") with open(self.trees.tree, 'w') as outtree: outtree.write(tmptre) ## save the file self._save()
Runs quartet max-cut QMC on the quartets qdump file.
def _crop(self, bounds, xsize=None, ysize=None, resampling=Resampling.cubic):
    """Crop raster outside vector (convex hull).

    :param bounds: bounds on image
        -- NOTE(review): the slice below uses bounds[0]:bounds[2] on the
        first axis and bounds[1]:bounds[3] on the second; confirm this
        matches GeoRaster2.__getitem__'s axis ordering.
    :param xsize: output raster width, None for full resolution
    :param ysize: output raster height, None for full resolution
    :param resampling: reprojection resampling method, default `cubic`
    :return: GeoRaster2
    """
    out_raster = self[
        int(bounds[0]): int(bounds[2]),
        int(bounds[1]): int(bounds[3])
    ]

    if xsize and ysize:
        # Only resample when the crop is not already at the requested size.
        if not (xsize == out_raster.width and ysize == out_raster.height):
            out_raster = out_raster.resize(dest_width=xsize, dest_height=ysize,
                                           resampling=resampling)
    return out_raster
Crop raster outside vector (convex hull). :param bounds: bounds on image :param xsize: output raster width, None for full resolution :param ysize: output raster height, None for full resolution :param resampling: reprojection resampling method, default `cubic` :return: GeoRaster2
def iselect(self, tag, limit=0):
    """Iterate the specified tags.

    Yields each element matched by this object's selectors under `tag`,
    up to `limit` matches (0 means no limit).
    """
    matcher = CSSMatch(self.selectors, tag, self.namespaces, self.flags)
    yield from matcher.select(limit)
Iterate the specified tags.
def set_from_json(self, obj, json, models=None, setter=None):
    '''Sets the value of this property from a JSON value.

    Args:
        obj: (HasProps) : instance to set the property value on

        json: (JSON-value) : value to set to the attribute to

        models (dict or None, optional) :
            Mapping of model ids to models (default: None)

            This is needed in cases where the attributes to update also
            have values that have references.

            NOTE(review): `models` is accepted here but not forwarded to
            ``_internal_set`` -- confirm that is intended.

        setter (ClientSession or ServerSession or None, optional) :
            This is used to prevent "boomerang" updates to Bokeh apps.
            (default: None)

            In the context of a Bokeh server application, incoming updates
            to properties will be annotated with the session that is
            doing the updating. This value is propagated through any
            subsequent change notifications that the update triggers.
            The session can compare the event setter to itself, and
            suppress any updates that originate from itself.

    Returns:
        None
    '''
    self._internal_set(obj, json, setter=setter)
Sets the value of this property from a JSON value. Args: obj: (HasProps) : instance to set the property value on json: (JSON-value) : value to set to the attribute to models (dict or None, optional) : Mapping of model ids to models (default: None) This is needed in cases where the attributes to update also have values that have references. setter (ClientSession or ServerSession or None, optional) : This is used to prevent "boomerang" updates to Bokeh apps. (default: None) In the context of a Bokeh server application, incoming updates to properties will be annotated with the session that is doing the updating. This value is propagated through any subsequent change notifications that the update triggers. The session can compare the event setter to itself, and suppress any updates that originate from itself. Returns: None
def check_state(self, state):
    """
    Check if the specific function is reached with certain arguments.

    :param angr.SimState state: The state to check
    :return: True if the function is reached with certain arguments,
             False otherwise.
    :rtype: bool
    """
    # The state must sit exactly at the function's entry address.
    if state.addr != self.function.addr:
        return False
    # Entry reached; accept only if the argument check also passes.
    return bool(self._check_arguments(state.arch, state))
Check if the specific function is reached with certain arguments :param angr.SimState state: The state to check :return: True if the function is reached with certain arguments, False otherwise. :rtype: bool
def unary_operator(op):
    """
    Factory function for making unary operator methods for Filters.

    :param op: operator symbol; only '~' (logical not) is supported
    :return: a method implementing the operator, suitable for attaching
        to a Filter class
    :raises ValueError: if op is not a supported unary operator
    """
    # Only negation is meaningful for boolean Filters.
    valid_ops = {'~'}
    if op not in valid_ops:
        raise ValueError("Invalid unary operator %s." % op)

    def unary_operator(self):
        # This can't be hoisted up a scope because the types returned by
        # unary_op_return_type aren't defined when the top-level function is
        # invoked.
        if isinstance(self, NumericalExpression):
            # Fold the new operator into the existing expression string.
            return NumExprFilter.create(
                "{op}({expr})".format(op=op, expr=self._expr),
                self.inputs,
            )
        else:
            # Wrap a non-expression filter as the sole input x_0.
            return NumExprFilter.create("{op}x_0".format(op=op), (self,))

    unary_operator.__doc__ = "Unary Operator: '%s'" % op
    return unary_operator
Factory function for making unary operator methods for Filters.
def get_item(self, path):
    """
    Get resource item.

    :param path: string blob path inside the container
    :return: PIL Image on success, False if the blob does not exist
    """
    try:
        # FIX: use a context manager so the temp file is always closed
        # (and removed) even if the download or image decode raises.
        with tempfile.NamedTemporaryFile() as f:
            self.blob_service.get_blob_to_path(self.container_name, path, f.name)
            f.seek(0)
            image = Image.open(f.name)
            # FIX: PIL opens lazily; force the pixel data to load before the
            # temp file is deleted, otherwise later access could fail.
            image.load()
            return image
    except AzureMissingResourceHttpError:
        # Preserve the original best-effort contract: missing blob -> False.
        return False
Get resource item :param path: string :return: Image
def save_to_file(destination_filename, append=False):
    """
    Decorator factory: save the decorated function's return value to a file.

    :param destination_filename: path the result text is written to
    :param append: when True open the file in append mode, else overwrite
    :return: a decorator that writes the wrapped function's result to the
        file and still returns it to the caller
    """
    def decorator_fn(f):
        @wraps(f)
        def wrapper_fn(*args, **kwargs):
            res = f(*args, **kwargs)
            # Ensure the destination directory exists before writing.
            # NOTE(review): assumes `makedirs` tolerates an empty dirname
            # (bare filenames) -- confirm against the helper's definition.
            makedirs(os.path.dirname(destination_filename))
            mode = "a" if append else "w"
            with open(destination_filename, mode) as text_file:
                text_file.write(res)
            return res
        return wrapper_fn
    return decorator_fn
Save the output value to file.
def _update_docs(self, file_to_update):
    """
    Update the given documentation file or :code:`README.rst` so that it
    always gives branch related URL and informations.

    .. note::
        This only apply to :code:`dev` and :code:`master` branch.

    :param file_to_update: The file to update.
    :type file_to_update: str

    :raises Exception: when the current branch is neither `dev` nor `master`.
    """
    if self.is_dev_version():
        # The current version is the dev version.
        # We map what we have to replace.
        # Format: {replacement: regex_to_match}
        regexes = {
            "/%s/" % "dev": r"\/%s\/" % "master",
            "=%s" % "dev": "=%s" % "master",
        }
    elif self.is_master_version():
        # The current version is the master version.
        # We map what we have to replace.
        regexes = {
            "/%s/" % "master": r"\/%s\/" % "dev",
            "=%s" % "master": "=%s" % "dev",
        }
    else:
        # The current version is not the master nor the dev version.
        # We raise an exception as the branch we are currently on is not
        # meant for production.
        raise Exception("Please switch to `dev` or `master` branch.")

    # We get the content of the file to fix.
    to_update = File(file_to_update).read()

    for replacement, regex in regexes.items():
        # We loop through each element of the map.
        # We process the replacement.
        to_update = Regex(to_update, regex, replace_with=replacement).replace()

    # We finally overwrite the file to fix with the filtered content.
    File(file_to_update).write(to_update, overwrite=True)
Update the given documentation file or :code:`README.rst` so that it always gives branch related URL and informations. .. note:: This only apply to :code:`dev` and :code:`master` branch. :param file_to_update: The file to update. :type file_to_update: str
def groups_invite(self, room_id, user_id, **kwargs):
    """Adds a user to the private group.

    :param room_id: id of the group to invite into
    :param user_id: id of the user to invite
    :param kwargs: extra arguments forwarded to the API call
    :return: the API response for groups.invite
    """
    return self.__call_api_post('groups.invite', roomId=room_id, userId=user_id, kwargs=kwargs)
Adds a user to the private group.
def withdraw(self, account_id, **params):
    """https://developers.coinbase.com/api/v2#withdraw-funds"""
    # All three fields are mandatory for the withdrawals endpoint; fail
    # fast before making any network call.
    required_keys = ('payment_method', 'amount', 'currency')
    for key in required_keys:
        if key not in params:
            raise ValueError("Missing required parameter: %s" % key)
    response = self._post('v2', 'accounts', account_id, 'withdrawals', data=params)
    return self._make_api_object(response, Withdrawal)
https://developers.coinbase.com/api/v2#withdraw-funds
def validate_config(self, values, argv=None, strict=False):
    """Validate all config values through the command-line parser.

    This takes all supplied options (which could have been retrieved from
    a number of sources (such as CLI, env vars, etc...) and then validates
    them by running them through argparser (and raises SystemExit on
    failure).

    :returns dict: key/values for all config values (from all sources)
    :raises: SystemExit
    """
    options = []
    for option in self._options:
        kwargs = option.kwargs.copy()
        if option.name in values:
            if 'default' in kwargs:
                # Since we're overriding defaults, we need to
                # preserve the default value for the help text:
                help_text = kwargs.get('help')
                if help_text:
                    if '(default: ' not in help_text:
                        kwargs['help'] = '%s (default: %s)' % (
                            help_text, kwargs['default'])
            kwargs['default'] = values[option.name]
            kwargs['required'] = False  # since we have a value
        temp = Option(*option.args, **kwargs)
        options.append(temp)

    parser = self.build_parser(options, formatter_class=argparse.HelpFormatter)
    if argv:
        parsed, extras = parser.parse_known_args(argv[1:])
        if extras:
            # Retry with passthru args stripped out before giving up.
            valid, _ = self.parse_passthru_args(argv[1:])
            parsed, extras = parser.parse_known_args(valid)
            if extras and strict:  # still
                self.build_parser(options)
                parser.parse_args(argv[1:])
    else:
        parsed = parser.parse_args([])
    results = vars(parsed)

    # Collect unmet required options; mutually-exclusive groups are
    # reported together once no member of the group satisfied them.
    raise_for_group = {}
    for option in self._options:
        if option.kwargs.get('required'):
            if option.dest not in results or results[option.dest] is None:
                if getattr(option, '_mutexgroup', None):
                    raise_for_group.setdefault(option._mutexgroup, [])
                    raise_for_group[option._mutexgroup].append(
                        option._action)
                else:
                    raise SystemExit("'%s' is required. See --help "
                                     "for more info." % option.name)
            else:
                if getattr(option, '_mutexgroup', None):
                    raise_for_group.pop(option._mutexgroup, None)
    if raise_for_group:
        # BUG FIX: dict.values() is not subscriptable on Python 3; take
        # the first group's actions explicitly. Also fixed the joined
        # separator from " ," to ", ".
        first_group_actions = next(iter(raise_for_group.values()))
        optstrings = [str(k.option_strings) for k in first_group_actions]
        msg = "One of %s required. " % ", ".join(optstrings)
        raise SystemExit(msg + "See --help for more info.")
    return results
Validate all config values through the command-line parser. This takes all supplied options (which could have been retrieved from a number of sources (such as CLI, env vars, etc...) and then validates them by running them through argparser (and raises SystemExit on failure). :returns dict: key/values for all config values (from all sources) :raises: SystemExit
def from_dict(cls, eas_from_nios):
    """Converts extensible attributes from the NIOS reply.

    :param eas_from_nios: mapping of EA name -> {'value': ...} as returned
        by NIOS; falsy input yields None
    :return: a cls instance wrapping {name: normalized value}, or None
    """
    if not eas_from_nios:
        return
    # Each EA value is normalized via try_value_to_bool before wrapping.
    return cls({name: cls._process_value(ib_utils.try_value_to_bool,
                                         eas_from_nios[name]['value'])
                for name in eas_from_nios})
Converts extensible attributes from the NIOS reply.
def add_service(self, service_name):
    """ Add the given service to the manifest. """
    services = self.manifest['services']
    # Skip names already present so the list stays duplicate-free.
    if service_name not in services:
        services.append(service_name)
Add the given service to the manifest.
def get_pv_args(name, session=None, call=None):
    '''
    Get PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01

    :param name: name of the VM to query
    :param session: an existing XenAPI session, or None to create one
    :param call: salt-cloud dispatch mode; must not be 'function'
    :raises SaltCloudException: when invoked with -f/--function
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    pv_args = session.xenapi.VM.get_PV_args(vm)
    # Normalize falsy results (e.g. empty dict/string) to None.
    if pv_args:
        return pv_args
    return None
Get PV arguments for a VM .. code-block:: bash salt-cloud -a get_pv_args xenvm01
def _compile(self, dirpath, makename, compiler, debug, profile):
    """Compiles the makefile at the specified location with 'compiler'.

    :arg dirpath: the full path to the directory where the makefile lives.
    :arg makename: the name of the make file to compile.
    :arg compiler: one of ['ifort', 'gfortran'].
    :arg debug: when True, pass DEBUG=true to make.
    :arg profile: when True, pass GPROF=true to make.
    :return: the exit code; non-zero when compilation produced errors.
    """
    from os import path
    options = ""
    if debug:
        options += " DEBUG=true"
    if profile:
        options += " GPROF=true"

    from os import system
    # NOTE(review): this builds a shell string from arguments -- fine for
    # trusted local tooling, but unsafe for untrusted input.
    codestr = "cd {}; make -f '{}' F90={} FAM={}" + options
    code = system(codestr.format(dirpath, makename, compiler, compiler[0]))

    #It turns out that the compiler still returns a code of zero, even if the compile
    #failed because the actual compiler didn't fail; it did its job properly. We need to
    #check for the existence of errors in the 'compile.log' file.
    lcount = 0
    errors = []
    log = path.join(dirpath, "compile.log")
    with open(log) as f:
        for line in f:
            lcount += 1
            # Lines 22-31 of the log carry the first error/warning text when
            # something went wrong (a clean log has exactly 21 lines).
            if lcount > 21 and lcount < 32:
                errors.append(line)
            elif lcount > 21:
                break

    if len(errors) > 0:
        #There are 21 lines in the compile.log file when everything runs correctly
        #Overwrite code with a bad exit value since we have some other problems.
        code = 1
        #We want to write the first couple of errors to the console and give them the
        #option to still execute if the compile only generated warnings.
        msg.warn("compile generated some errors or warnings:")
        msg.blank()
        msg.info(''.join(errors))

    return code
Compiles the makefile at the specified location with 'compiler'. :arg dirpath: the full path to the directory where the makefile lives. :arg compiler: one of ['ifort', 'gfortran']. :arg makename: the name of the make file to compile.
def removeComments( self, comment = None ):
    """
    Removes comments from the editor based on the current selection.  If \
    no comment string is supplied, then the comment from the language \
    will be used.

    :param      comment | <str> || None

    :return     <bool> | success
    """
    # Fall back to the current language's line-comment string.
    if ( not comment ):
        lang = self.language()
        if ( lang ):
            comment = lang.lineComment()

    # Without a comment string there is nothing to remove.
    if ( not comment ):
        return False

    startline, startcol, endline, endcol = self.getSelection()
    len_comment = len(comment)
    line, col = self.getCursorPosition()

    for lineno in range(startline, endline+1 ):
        # Only strip a prefix that is exactly the comment string.
        self.setSelection(lineno, 0, lineno, len_comment)
        if ( self.selectedText() == comment ):
            self.removeSelectedText()

    # Restore the selection and cursor position.
    # NOTE(review): columns are restored unadjusted even though removed
    # comment characters shift text left -- confirm this is acceptable.
    self.setSelection(startline, startcol, endline, endcol)
    self.setCursorPosition(line, col)

    return True
Removes comments from the editor based on the current selection.\ If no comment string is supplied, then the comment from the language \ will be used. :param comment | <str> || None :return <bool> | success
def expect_lit(char, buf, pos):
    """Expect a literal character at the current buffer position.

    Returns (char, pos + 1) when `buf[pos]` equals `char`; on a mismatch
    or when `pos` is past the end, returns (None, len(buf)) to signal
    failure and exhaust the buffer.
    """
    in_bounds = pos < len(buf)
    if in_bounds and buf[pos] == char:
        return char, pos + 1
    return None, len(buf)
Expect a literal character at the current buffer position.
def deleteAll(self):
    """
    Deletes the whole Solr index on every configured core. Use with care.
    """
    delete_command = '{"delete": { "query" : "*:*"}}'
    # Issue the wildcard delete to each core's endpoint.
    for endpoint in self.endpoints.values():
        self._send_solr_command(endpoint, delete_command)
Deletes whole Solr index. Use with care.
def results_path(self) -> str:
    """The path where the project results will be written"""
    def candidate_paths():
        # Ordered by priority; later sources are only evaluated when the
        # earlier ones yield None (generator laziness matters here).
        yield self._results_path
        yield self.settings.fetch('path_results')
        yield environ.configs.fetch('results_directory')
        yield environ.paths.results(self.uuid)

    return next(path for path in candidate_paths() if path is not None)
The path where the project results will be written
def extract_variables(href):
    """Return a list of variable names used in a URI template.

    :param href: a URI template string, e.g. '/search{?q,lang}'
    :return: variable names in first-seen order, de-duplicated
    """
    # Capture each {...} expression body (optionally prefixed by an
    # operator), then strip value modifiers: explode '*' and ':N' prefixes.
    patterns = [re.sub(r'\*|:\d+', '', pattern)
                for pattern in re.findall(r'{[\+#\./;\?&]?([^}]+)*}', href)]
    variables = []
    for pattern in patterns:
        for part in pattern.split(","):
            # FIX: idiomatic membership test ('not in' instead of 'not x in')
            # while preserving first-seen order and de-duplication.
            if part not in variables:
                variables.append(part)
    return variables
Return a list of variable names used in a URI template.
def writelines(self, lines):
    """Write a list of strings to file, creating its directory first."""
    # Ensure the containing directory exists before opening for write.
    self.make_dir()
    with open(self.path, "w") as handle:
        return handle.writelines(lines)
Write a list of strings to file.
def add_user_to_group(iam_client, user, group, quiet = False):
    """
    Add an IAM user to an IAM group.

    :param iam_client: boto3 IAM client used to perform the call
    :param user: name of the IAM user to add
    :param group: name of the IAM group to add the user to
    :param quiet: when True, suppress the informational message
    :return: None
    """
    if not quiet:
        printInfo('Adding user to group %s...' % group)
    iam_client.add_user_to_group(GroupName = group, UserName = user)
Add an IAM user to an IAM group :param iam_client: :param group: :param user: :param user_info: :param dry_run: :return:
def attribute(self):
    """ Attribute that serves as a reference getter.

    Finds, in refsDecl, the XML attribute compared against the highest
    '$N' placeholder and returns its name.
    """
    # The number of '$' placeholders identifies the deepest citation level.
    deepest_level = self.refsDecl.count("$")
    pattern = (
        "\@([a-zA-Z:]+)=\\\?[\'\"]\$"
        + str(deepest_level)
        + "\\\?[\'\"]"
    )
    matches = re.findall(pattern, self.refsDecl)
    # The last match is the attribute for the most specific reference.
    return matches[-1]
Attribute that serves as a reference getter
def find_nested_meta_first_bf(d, prop_name):
    """Return the first meta element whose @property (or @rel) matches
    prop_name, or None when there is no 'meta' entry or no match.
    """
    meta = d.get('meta')
    if not meta:
        return None
    # A single meta element may appear bare rather than in a list.
    if not isinstance(meta, list):
        meta = [meta]
    for element in meta:
        if prop_name in (element.get('@property'), element.get('@rel')):
            return element
    return None
Returns the $ value of the first meta element with the @property that matches @prop_name (or None).
def backup():
    """
    Zips into db_backups_dir and uploads to bucket_name/s3_folder.

        fab -f ./fabfile.py backup_dbs
    """
    # NOTE(review): relies on a module-level `parser`; every setting comes
    # from command-line arguments rather than function parameters.
    args = parser.parse_args()
    s3_backup_dir(
        args.datadir,
        args.aws_access_key_id,
        args.aws_secret_access_key,
        args.bucket_name,
        args.zip_backups_dir,
        args.backup_aging_time,
        args.s3_folder,
        args.project)
zips into db_backups_dir and uploads to bucket_name/s3_folder fab -f ./fabfile.py backup_dbs
def add_user_to_group(self, user_name, group_name):
    """Adds a user to a group.

    :param user_name: name of user to be added
    :param group_name: name of group user is to be added to
    :returns: True if user added
    :raises: HTTPResponseError in case an HTTP error status was returned
    """
    res = self._make_ocs_request(
        'POST',
        self.OCS_SERVICE_CLOUD,
        'users/' + user_name + '/groups',
        data={'groupid': group_name}
    )
    # Anything other than HTTP 200 is surfaced to the caller.
    if res.status_code != 200:
        raise HTTPResponseError(res)
    tree = ET.fromstring(res.content)
    # OCS status 100 means success; _check_ocs_status raises otherwise.
    self._check_ocs_status(tree, [100])
    return True
Adds a user to a group. :param user_name: name of user to be added :param group_name: name of group user is to be added to :returns: True if user added :raises: HTTPResponseError in case an HTTP error status was returned
def translate(offset, dtype=None):
    """Translate by an offset (x, y, z).

    Parameters
    ----------
    offset : array-like, shape (3,)
        Translation in x, y, z.
    dtype : dtype | None
        Output type (if None, don't cast).

    Returns
    -------
    M : ndarray
        4x4 transformation matrix describing the translation
        (row-vector convention: translation lives in the last row).
    """
    assert len(offset) == 3
    tx, ty, tz = offset
    translation_matrix = np.array(
        [[1., 0., 0., 0.],
         [0., 1., 0., 0.],
         [0., 0., 1., 0.],
         [tx, ty, tz, 1.0]],
        dtype,
    )
    return translation_matrix
Translate by an offset (x, y, z) . Parameters ---------- offset : array-like, shape (3,) Translation in x, y, z. dtype : dtype | None Output type (if None, don't cast). Returns ------- M : ndarray Transformation matrix describing the translation.
def _set_renamed_columns(self, table_diff, command, column):
    """
    Set the renamed columns on the table diff.

    :param table_diff: the diff being built
    :param command: rename command providing `from_` (old name) and
        `to` (new name)
    :param column: the existing column whose type and options carry over
    :rtype: orator.dbal.TableDiff
    """
    # Recreate the column under its new name, preserving its definition.
    new_column = Column(command.to, column.get_type(), column.to_dict())
    table_diff.renamed_columns = {command.from_: new_column}
    return table_diff
Set the renamed columns on the table diff. :rtype: orator.dbal.TableDiff
def encrypt(self, pkt, seq_num=None, iv=None):
    """
    Encrypt (and encapsulate) an IP(v6) packet with ESP or AH according
    to this SecurityAssociation.

    @param pkt: the packet to encrypt
    @param seq_num: if specified, use this sequence number instead of the
                    generated one
    @param iv: if specified, use this initialization vector for encryption
               instead of a random one (only used for ESP).
    @return: the encrypted/encapsulated packet
    @raise TypeError: if pkt is not one of the supported protocols
    """
    if not isinstance(pkt, self.SUPPORTED_PROTOS):
        raise TypeError('cannot encrypt %s, supported protos are %s'
                        % (pkt.__class__, self.SUPPORTED_PROTOS))
    if self.proto is ESP:
        return self._encrypt_esp(pkt, seq_num=seq_num, iv=iv)
    else:
        # AH authenticates but does not encrypt, so no IV is passed.
        return self._encrypt_ah(pkt, seq_num=seq_num)
Encrypt (and encapsulate) an IP(v6) packet with ESP or AH according to this SecurityAssociation. @param pkt: the packet to encrypt @param seq_num: if specified, use this sequence number instead of the generated one @param iv: if specified, use this initialization vector for encryption instead of a random one. @return: the encrypted/encapsulated packet
def migrate1():
    """Migrate the database schema from version 0 to 1."""
    # Schema changes applied first; cleanup drops the old link tables last.
    initial = [
        "create table Chill (version integer);",
        "insert into Chill (version) values (1);",
        "alter table SelectSQL rename to Query;",
        "alter table Node add column template integer references Template (id) on delete set null;",
        "alter table Node add column query integer references Query (id) on delete set null;"
    ]
    cleanup = [
        "drop table SelectSQL_Node;",
        "drop table Template_Node;"
    ]

    c = db.cursor()

    # If the Chill table exists, the database has already been migrated.
    try:
        c.execute("select version from Chill limit 1;")
    except sqlite3.DatabaseError as err:
        # Table missing: this is a version-0 database; proceed.
        pass
    result = c.fetchone()
    if result:
        version = result[0]
        if version == 1:
            current_app.logger.warn("Migration from version 0 to 1 is not needed.")
        else:
            current_app.logger.warn("Migration from version 0 to {0} is not supported.".format(version))
        return

    # Apply the schema changes.
    try:
        for query in initial:
            c.execute(query)
    except sqlite3.DatabaseError as err:
        current_app.logger.error("DatabaseError: %s", err)

    try:
        c.execute(fetch_query_string('select_all_nodes.sql'))
    except sqlite3.DatabaseError as err:
        current_app.logger.error("DatabaseError: %s", err)
    result = c.fetchall()
    if result:
        (result, col_names) = rowify(result, c.description)
        # Move each node's template/query link from the old join tables
        # onto the new Node columns.
        for kw in result:
            try:
                c.execute("""
                update Node set template = (
                select t.id from Template as t
                join Template_Node as tn on ( tn.template_id = t.id )
                join Node as n on ( n.id = tn.node_id )
                where n.id is :node_id group by t.id)
                where id is :node_id;
                """, {'node_id': kw['id']})
            except sqlite3.DatabaseError as err:
                current_app.logger.error("DatabaseError: %s", err)

            try:
                c.execute("""
                update Node set query = (
                select s.id from Query as s
                join SelectSQL_Node as sn on ( sn.selectsql_id = s.id )
                join Node as n on ( n.id = sn.node_id )
                where n.id is :node_id group by s.id)
                where id is :node_id;
                """, {'node_id': kw['id']})
            except sqlite3.DatabaseError as err:
                current_app.logger.error("DatabaseError: %s", err)

    # Drop the old join tables now that the links have been migrated.
    try:
        for query in cleanup:
            c.execute(query)
    except sqlite3.DatabaseError as err:
        current_app.logger.error("DatabaseError: %s", err)
    db.commit()
Migrate from version 0 to 1
def show(*args, **kwargs):
    r"""Show created figures, alias to ``plt.show()``.

    By default, showing plots does not block the prompt.
    Calling this function will block execution.
    """
    # matplotlib is imported lazily via the helper so importing this
    # module does not require a plotting backend.
    _, plt, _ = _import_plt()
    plt.show(*args, **kwargs)
r"""Show created figures, alias to ``plt.show()``. By default, showing plots does not block the prompt. Calling this function will block execution.
def changed_lines(self):
    """
    A list of dicts in the format:
        {
            'file_name': str,
            'content': str,
            'line_number': int,
            'position': int
        }

    Parses self.body (a unified diff) line by line, tracking the current
    file name, target line number and running patch position.
    """
    lines = []
    file_name = ''
    line_number = 0
    patch_position = -1
    found_first_information_line = False

    for i, content in enumerate(self.body.splitlines()):
        range_information_match = RANGE_INFORMATION_LINE.search(content)
        file_name_line_match = FILE_NAME_LINE.search(content)

        if file_name_line_match:
            # New file section: the next @@ range line restarts positions.
            file_name = file_name_line_match.group('file_name')
            found_first_information_line = False
        elif range_information_match:
            line_number = int(range_information_match.group('line_number'))
            if not found_first_information_line:
                # This is the first information line. Set patch position to 1 and start counting
                patch_position = 0
                found_first_information_line = True
        elif MODIFIED_LINE.search(content):
            line = {
                'file_name': file_name,
                'content': content,
                'line_number': line_number,
                'position': patch_position
            }
            lines.append(line)
            line_number += 1
        elif NOT_REMOVED_OR_NEWLINE_WARNING.search(content) or content == '':
            # Context (unchanged) lines advance the target line number too.
            line_number += 1

        patch_position += 1

    return lines
A list of dicts in the format: { 'file_name': str, 'content': str, 'line_number': int, 'position': int }
def plot_gaussian_cdf(mean=0., variance=1.,
                      ax=None,
                      xlim=None, ylim=(0., 1.),
                      xlabel=None, ylabel=None,
                      label=None):
    """
    Plots a normal distribution CDF with the given mean and variance.
    x-axis contains the mean, the y-axis shows the cumulative probability.

    Parameters
    ----------
    mean : scalar, default 0.
        mean for the normal distribution.
    variance : scalar, default 1.
        variance for the normal distribution.
    ax : matplotlib axes object, optional
        If provided, the axes to draw on, otherwise plt.gca() is used.
    xlim, ylim: (float,float), optional
        specify the limits for the x or y axis as tuple (low,high).
        If not specified, limits will be automatically chosen to be 'nice'
    xlabel : str,optional
        label for the x-axis
    ylabel : str, optional
        label for the y-axis
    label : str, optional
        label for the legend

    Returns
    -------
        axis of plot
    """
    import matplotlib.pyplot as plt
    if ax is None:
        ax = plt.gca()

    sigma = math.sqrt(variance)
    n = norm(mean, sigma)
    if xlim is None:
        # Cover the central 99.8% of the distribution by default.
        xlim = [n.ppf(0.001), n.ppf(0.999)]
    xs = np.arange(xlim[0], xlim[1], (xlim[1] - xlim[0]) / 1000.)
    cdf = n.cdf(xs)
    ax.plot(xs, cdf, label=label)
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    return ax
Plots a normal distribution CDF with the given mean and variance. x-axis contains the mean, the y-axis shows the cumulative probability. Parameters ---------- mean : scalar, default 0. mean for the normal distribution. variance : scalar, default 0. variance for the normal distribution. ax : matplotlib axes object, optional If provided, the axes to draw on, otherwise plt.gca() is used. xlim, ylim: (float,float), optional specify the limits for the x or y axis as tuple (low,high). If not specified, limits will be automatically chosen to be 'nice' xlabel : str,optional label for the x-axis ylabel : str, optional label for the y-axis label : str, optional label for the legend Returns ------- axis of plot
def columnCount(self, parent):
    """Return the number of columns for the children of the given parent.

    :param parent: the parent index
    :type parent: :class:`QtCore.QModelIndex`:
    :returns: the column count
    :rtype: int
    :raises: None
    """
    # An invalid parent index means the root level is being queried.
    node = parent.internalPointer() if parent.isValid() else self._root
    return node.column_count()
Return the number of columns for the children of the given parent. :param parent: the parent index :type parent: :class:`QtCore.QModelIndex`: :returns: the column count :rtype: int :raises: None
def reference(self, install, upgrade):
    """Print a reference list of packages installed and upgraded.

    :param install: list of installed package names
    :param upgrade: list of upgraded package names
    """
    self.template(78)
    print("| Total {0} {1} installed and {2} {3} upgraded".format(
        len(install), self.pkg(len(install)),
        len(upgrade), self.pkg(len(upgrade))))
    self.template(78)
    # BUG FIX: itertools.izip_longest exists only on Python 2; on
    # Python 3 the function is itertools.zip_longest.
    for installed, upgraded in itertools.zip_longest(install, upgrade):
        if upgraded:
            print("| Package {0} upgraded successfully".format(upgraded))
        if installed:
            print("| Package {0} installed successfully".format(installed))
    self.template(78)
    print("")
Reference list with packages installed and upgraded
def getcomponents(self, product, force_refresh=False):
    """
    Return a list of component names for the passed product.

    This can be implemented with Product.get, but behind the scenes it
    uses Bug.legal_values. Reason being that on bugzilla instances with
    tons of components, like bugzilla.redhat.com Product=Fedora for
    example, there's a 10x speed difference even with properly limited
    Product.get calls.

    On first invocation the value is cached, and subsequent calls will
    return the cached data.

    :param force_refresh: Force refreshing the cache, and return
        the new data
    """
    proddict = self._lookup_product_in_cache(product)
    product_id = proddict.get("id", None)

    # Refresh when forced, when the product id was never resolved, or
    # when no component list is cached for that id yet.
    if (force_refresh or product_id is None or
        product_id not in self._cache.component_names):
        self.refresh_products(names=[product],
                              include_fields=["name", "id"])
        proddict = self._lookup_product_in_cache(product)
        if "id" not in proddict:
            raise BugzillaError("Product '%s' not found" % product)
        product_id = proddict["id"]

        opts = {'product_id': product_id, 'field': 'component'}
        names = self._proxy.Bug.legal_values(opts)["values"]
        self._cache.component_names[product_id] = names

    return self._cache.component_names[product_id]
Return a list of component names for the passed product. This can be implemented with Product.get, but behind the scenes it uses Bug.legal_values. Reason being that on bugzilla instances with tons of components, like bugzilla.redhat.com Product=Fedora for example, there's a 10x speed difference even with properly limited Product.get calls. On first invocation the value is cached, and subsequent calls will return the cached data. :param force_refresh: Force refreshing the cache, and return the new data
def winning_abbr(self):
    """
    Returns a ``string`` of the winning team's abbreviation, such as
    'HOU' for the Houston Astros.
    """
    # Pick the raw name of whichever side won, then normalize it.
    name = self._home_name if self.winner == HOME else self._away_name
    return utils._parse_abbreviation(name)
Returns a ``string`` of the winning team's abbreviation, such as 'HOU' for the Houston Astros.
def delete(ids, yes):
    """
    Delete datasets.

    For each id: resolves the data object, validates that it names a real
    dataset, optionally asks for confirmation, then deletes it. Exits with
    status 1 if any id failed.
    """
    failures = False

    for id in ids:
        data_source = get_data_object(id, use_data_config=True)

        if not data_source:
            failures = True
            continue

        data_name = normalize_data_name(data_source.name)
        suffix = data_name.split('/')[-1]
        if not suffix.isdigit():
            failures = True
            floyd_logger.error('%s is not a dataset, skipped.', id)
            if suffix == 'output':
                floyd_logger.error('To delete job output, please delete the job itself.')
            # BUGFIX: actually skip non-dataset ids. Previously only the
            # 'output' case continued, so other invalid ids were logged as
            # "skipped" but still fell through to the delete call below.
            continue

        if not yes and not click.confirm("Delete Data: {}?".format(data_name),
                                         abort=False,
                                         default=False):
            floyd_logger.info("Data %s: Skipped", data_name)
            continue

        if not DataClient().delete(data_source.id):
            failures = True
        else:
            floyd_logger.info("Data %s: Deleted", data_name)

    if failures:
        sys.exit(1)
Delete datasets.
async def add_items(self, *items):
    '''append items to the playlist

    |coro|

    Parameters
    ----------
    items : array_like
        list of items to add(or their ids)

    See Also
    --------
    remove_items :
    '''
    # Resolve every argument (object or bare id) to a server-side id first.
    items = [item.id for item in await self.process(items)]
    if not items:
        # Nothing resolved — avoid an empty POST.
        return
    await self.connector.post('Playlists/{Id}/Items'.format(Id=self.id),
                              data={'Ids': ','.join(items)},
                              remote=False
    )
append items to the playlist |coro| Parameters ---------- items : array_like list of items to add(or their ids) See Also -------- remove_items :
def get_custom_view_details(name, auth, url):
    """
    function requires no input and returns a list of dictionaries of custom views from an HPE
    IMC. Optional name argument will return only the specified view.

    :param name: str containing the name of the desired custom view

    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class

    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass

    :param name: (optional) str of name of specific custom view

    :return: list of dictionaries containing attributes of the custom views

    :rtype: list

    >>> from pyhpeimc.auth import *

    >>> from pyhpeimc.plat.groups import *

    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")

    >>> view_details = get_custom_view_details('My Network View', auth.creds, auth.url)

    >>> assert type(view_details) is list

    >>> assert 'label' in view_details[0]
    """
    # BUGFIX: look the view up once and reuse the result. The original code
    # called get_custom_views() twice (a second network round-trip) and
    # indexed [0] without guarding against an empty result (IndexError).
    views = get_custom_views(auth, url, name=name)
    if not views:
        # None (lookup failed) or [] (no matching view) — pass it through.
        return views
    view_id = views[0]['symbolId']
    get_custom_view_details_url = '/imcrs/plat/res/view/custom/' + str(view_id)
    f_url = url + get_custom_view_details_url
    try:
        # BUGFIX: the GET itself must be inside the try block, otherwise the
        # RequestException handler below can never fire.
        response = requests.get(f_url, auth=auth, headers=HEADERS)
        if response.status_code == 200:
            current_devices = (json.loads(response.text))
            if 'device' in current_devices:
                # A single device comes back as a dict; normalize to a list.
                if isinstance(current_devices['device'], dict):
                    return [current_devices['device']]
                else:
                    return current_devices['device']
            else:
                return []
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + ' get_custom_views: An Error has occured'
function requires no input and returns a list of dictionaries of custom views from an HPE IMC. Optional name argument will return only the specified view. :param name: str containing the name of the desired custom view :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :param name: (optional) str of name of specific custom view :return: list of dictionaries containing attributes of the custom views :rtype: list >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.groups import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> view_details = get_custom_view_details('My Network View', auth.creds, auth.url) >>> assert type(view_details) is list >>> assert 'label' in view_details[0]
def dumps(obj, imports=None, binary=True, sequence_as_stream=False, skipkeys=False, ensure_ascii=True,
          check_circular=True, allow_nan=True, cls=None, indent=None, separators=None, encoding='utf-8',
          default=None, use_decimal=True, namedtuple_as_object=True, tuple_as_array=True,
          bigint_as_string=False, sort_keys=False, item_sort_key=None, for_json=None, ignore_nan=False,
          int_as_string_bitcount=None, iterable_as_array=False, **kw):
    """Serialize ``obj`` as Python ``string`` or ``bytes`` object, using the conversion table used by
    ``dump`` (above).

    Args:
        obj (Any): A python object to serialize according to the above table. Any Python object which
            is neither an instance of nor inherits from one of the types in the above table will raise
            TypeError.
        imports (Optional[Sequence[SymbolTable]]): A sequence of shared symbol tables to be used by by
            the writer.
        binary (Optional[True|False]): When True, outputs binary Ion. When false, outputs text Ion.
        sequence_as_stream (Optional[True|False]): When True, if ``obj`` is a sequence, it will be
            treated as a stream of top-level Ion values (i.e. the resulting Ion data will begin with
            ``obj``'s first element). Default: False.
        skipkeys: NOT IMPLEMENTED
        ensure_ascii: NOT IMPLEMENTED
        check_circular: NOT IMPLEMENTED
        allow_nan: NOT IMPLEMENTED
        cls: NOT IMPLEMENTED
        indent (Str): If binary is False and indent is a string, then members of containers will be
            pretty-printed with a newline followed by that string repeated for each level of nesting.
            None (the default) selects the most compact representation without any newlines. Example: to
            indent with four spaces per level of nesting, use ``'    '``.
        separators: NOT IMPLEMENTED
        encoding: NOT IMPLEMENTED
        default: NOT IMPLEMENTED
        use_decimal: NOT IMPLEMENTED
        namedtuple_as_object: NOT IMPLEMENTED
        tuple_as_array: NOT IMPLEMENTED
        bigint_as_string: NOT IMPLEMENTED
        sort_keys: NOT IMPLEMENTED
        item_sort_key: NOT IMPLEMENTED
        for_json: NOT IMPLEMENTED
        ignore_nan: NOT IMPLEMENTED
        int_as_string_bitcount: NOT IMPLEMENTED
        iterable_as_array: NOT IMPLEMENTED
        **kw: NOT IMPLEMENTED

    Returns:
        Union[str|bytes]: The string or binary representation of the data.  if ``binary=True``, this
            will be a ``bytes`` object, otherwise this will be a ``str`` object (or ``unicode`` in the
            case of Python 2.x)
    """
    # Serialize into an in-memory buffer via dump(), then return its contents.
    # NOTE(review): `imports` is accepted but never forwarded to dump() —
    # confirm whether shared symbol tables are intentionally ignored here.
    ion_buffer = six.BytesIO()

    dump(obj, ion_buffer, sequence_as_stream=sequence_as_stream, binary=binary, skipkeys=skipkeys,
         ensure_ascii=ensure_ascii, check_circular=check_circular, allow_nan=allow_nan, cls=cls,
         indent=indent, separators=separators, encoding=encoding, default=default,
         use_decimal=use_decimal, namedtuple_as_object=namedtuple_as_object,
         tuple_as_array=tuple_as_array, bigint_as_string=bigint_as_string, sort_keys=sort_keys,
         item_sort_key=item_sort_key, for_json=for_json, ignore_nan=ignore_nan,
         int_as_string_bitcount=int_as_string_bitcount, iterable_as_array=iterable_as_array)

    ret_val = ion_buffer.getvalue()
    ion_buffer.close()

    if not binary:
        # Text Ion is produced as UTF-8 bytes; decode to str for the text API.
        ret_val = ret_val.decode('utf-8')

    return ret_val
Serialize ``obj`` as Python ``string`` or ``bytes`` object, using the conversion table used by ``dump`` (above). Args: obj (Any): A python object to serialize according to the above table. Any Python object which is neither an instance of nor inherits from one of the types in the above table will raise TypeError. imports (Optional[Sequence[SymbolTable]]): A sequence of shared symbol tables to be used by by the writer. binary (Optional[True|False]): When True, outputs binary Ion. When false, outputs text Ion. sequence_as_stream (Optional[True|False]): When True, if ``obj`` is a sequence, it will be treated as a stream of top-level Ion values (i.e. the resulting Ion data will begin with ``obj``'s first element). Default: False. skipkeys: NOT IMPLEMENTED ensure_ascii: NOT IMPLEMENTED check_circular: NOT IMPLEMENTED allow_nan: NOT IMPLEMENTED cls: NOT IMPLEMENTED indent (Str): If binary is False and indent is a string, then members of containers will be pretty-printed with a newline followed by that string repeated for each level of nesting. None (the default) selects the most compact representation without any newlines. Example: to indent with four spaces per level of nesting, use ``' '``. separators: NOT IMPLEMENTED encoding: NOT IMPLEMENTED default: NOT IMPLEMENTED use_decimal: NOT IMPLEMENTED namedtuple_as_object: NOT IMPLEMENTED tuple_as_array: NOT IMPLEMENTED bigint_as_string: NOT IMPLEMENTED sort_keys: NOT IMPLEMENTED item_sort_key: NOT IMPLEMENTED for_json: NOT IMPLEMENTED ignore_nan: NOT IMPLEMENTED int_as_string_bitcount: NOT IMPLEMENTED iterable_as_array: NOT IMPLEMENTED **kw: NOT IMPLEMENTED Returns: Union[str|bytes]: The string or binary representation of the data. if ``binary=True``, this will be a ``bytes`` object, otherwise this will be a ``str`` object (or ``unicode`` in the case of Python 2.x)
def json_obj_to_cursor(self, json):
    """(Deprecated) Converts a JSON object to a mongo db cursor

    :param str json: A json string
    :returns: dictionary with ObjectId type
    :rtype: dict
    """
    cursor = json_util.loads(json)
    # BUGFIX: test the parsed document for the key, not the raw JSON text.
    # The substring check `"id" in json` also matched keys such as "uuid"
    # or "_id" and then raised KeyError on cursor["id"].
    if "id" in cursor:
        cursor["_id"] = ObjectId(cursor["id"])
        del cursor["id"]

    return cursor
(Deprecated) Converts a JSON object to a mongo db cursor :param str json: A json string :returns: dictionary with ObjectId type :rtype: dict
def OnPaneClose(self, event):
    """Pane close toggle event handler (via close button)

    Unchecks the corresponding View-menu toggle item so the menu stays in
    sync with the pane's visibility, then lets AUI finish closing the pane.

    :param event: wx.aui pane-close event carrying the pane being closed
    """

    toggle_label = event.GetPane().caption

    # Get menu item to toggle
    menubar = self.main_window.menubar
    toggle_id = menubar.FindMenuItem(_("View"), toggle_label)
    toggle_item = menubar.FindItemById(toggle_id)

    # Adjust toggle to pane visibility
    toggle_item.Check(False)

    menubar.UpdateMenus()

    self.main_window._mgr.Update()

    # Allow default close processing to proceed.
    event.Skip()
Pane close toggle event handler (via close button)
def sort(self, **kwargs):
    """
    Adds a sort stage to aggregation query

    :param kwargs: Specifies the field(s) to sort by and the respective sort order.
    :return: The current object
    """
    # Keyword arguments already map field -> sort order; copy them directly.
    self._q.append({'$sort': dict(kwargs)})
    return self
Adds a sort stage to aggregation query :param kwargs: Specifies the field(s) to sort by and the respective sort order. :return: The current object
def decode_format_timestamp(timestamp):
    """Convert unix timestamp (millis) into date & time we use in logs output.

    :param timestamp: unix timestamp in millis
    :return: date, time in UTC (as '%Y-%m-%d' and '%H:%M:%S' strings)
    """
    from datetime import datetime, timezone
    # Use the stdlib instead of the third-party `maya` package: the maya
    # naive datetime was UTC as well, so the formatted output is identical.
    dt = datetime.fromtimestamp(timestamp / 1000, tz=timezone.utc)
    return dt.strftime('%Y-%m-%d'), dt.strftime('%H:%M:%S')
Convert unix timestamp (millis) into date & time we use in logs output. :param timestamp: unix timestamp in millis :return: date, time in UTC
def _jobs_to_do(self, restrictions):
    """
    :param restrictions: restriction conditions to apply to the key source
    :return: the relation containing the keys to be computed (derived from self.key_source)
    :raises DataJointError: if the table is restricted, key_source is invalid,
        or the target is missing a primary-key attribute of key_source
    """
    if self.restriction:
        raise DataJointError('Cannot call populate on a restricted table. '
                             'Instead, pass conditions to populate() as arguments.')
    todo = self.key_source
    if not isinstance(todo, QueryExpression):
        raise DataJointError('Invalid key_source value')
    # check if target lacks any attributes from the primary key of key_source
    # The raise-inside-try idiom: next() raises StopIteration when no
    # attribute is missing, which is caught and treated as "all present";
    # otherwise the DataJointError is raised with the first missing name.
    try:
        raise DataJointError(
            'The populate target lacks attribute %s from the primary key of key_source' % next(
                name for name in todo.heading.primary_key if name not in self.target.heading))
    except StopIteration:
        pass
    return (todo & AndList(restrictions)).proj()
:return: the relation containing the keys to be computed (derived from self.key_source)
def is_artifact_optional(chain, task_id, path):
    """Tells whether an artifact is flagged as optional or not.

    Args:
        chain (ChainOfTrust): the chain of trust object
        task_id (str): the id of the aforementioned task
        path (str): the artifact path to check

    Returns:
        bool: True if artifact is optional

    """
    upstream = chain.task['payload'].get('upstreamArtifacts', [])
    optional_for_task = get_optional_artifacts_per_task_id(upstream)
    return path in optional_for_task.get(task_id, [])
Tells whether an artifact is flagged as optional or not. Args: chain (ChainOfTrust): the chain of trust object task_id (str): the id of the aforementioned task Returns: bool: True if artifact is optional
def detect_iter(self, det_iter, show_timer=False):
    """
    detect all images in iterator

    Parameters:
    ----------
    det_iter : DetIter
        iterator for all testing images
    show_timer : Boolean
        whether to print out detection exec time

    Returns:
    ----------
    list of detection results
    """
    num_images = det_iter._size
    # Wrap in a prefetching iterator so data loading overlaps with inference.
    if not isinstance(det_iter, mx.io.PrefetchingIter):
        det_iter = mx.io.PrefetchingIter(det_iter)
    start = timer()
    detections = self.mod.predict(det_iter).asnumpy()
    time_elapsed = timer() - start
    if show_timer:
        logging.info("Detection time for {} images: {:.4f} sec".format(
            num_images, time_elapsed))
    # Keep only positive detections (see Detector.filter_positive_detections).
    result = Detector.filter_positive_detections(detections)
    return result
detect all images in iterator Parameters: ---------- det_iter : DetIter iterator for all testing images show_timer : Boolean whether to print out detection exec time Returns: ---------- list of detection results
def validate_output_format(self, default_format):
    """ Prepare output format for later use.

    Resolves the format in this order: CLI option, then ``default_format``;
    custom formats from configuration take precedence over plain field
    lists. The result is stored back into ``self.options.output_format``.

    :param default_format: format string used when no option was given
    """
    output_format = self.options.output_format

    # Use default format if none is given
    if output_format is None:
        output_format = default_format

    # Check if it's a custom output format from configuration
    # (they take precedence over field names, so name them wisely)
    output_format = config.formats.get(output_format, output_format)

    # Expand plain field list to usable form
    # (a bare comma-separated field list becomes tab-separated %()s templates)
    if re.match(r"^[,._0-9a-zA-Z]+$", output_format):
        self.plain_output_format = True
        output_format = "%%(%s)s" % ")s\t%(".join(formatting.validate_field_list(output_format, allow_fmt_specs=True))

    # Replace some escape sequences
    output_format = (output_format
        .replace(r"\\", "\\")
        .replace(r"\n", "\n")
        .replace(r"\t", "\t")
        .replace(r"\$", "\0") # the next 3 allow using $() instead of %()
        .replace("$(", "%(")
        .replace("\0", "$")
        .replace(r"\ ", " ") # to prevent stripping in config file
        #.replace(r"\", "\")
    )

    self.options.output_format = formatting.preparse(output_format)
Prepare output format for later use.
def token_count_pandas(self):
    """ See token counts as pandas dataframe"""
    # Build a one-column frame (word -> count) and order it most-frequent first.
    counts = self.indexer.word_counts
    frame = pd.DataFrame.from_dict(counts, orient='index', columns=['count'])
    return frame.sort_values(by='count', ascending=False)
See token counts as pandas dataframe
def save(self, commit=True, **kwargs):
    '''Register the current user as admin on creation

    :param commit: when True, persist the organization immediately
    :return: the (possibly unsaved) organization instance
    '''
    org = super(OrganizationForm, self).save(commit=False, **kwargs)
    # A missing id means the organization is being created (not updated):
    # make the current user its first admin member.
    if not org.id:
        user = current_user._get_current_object()
        member = Member(user=user, role='admin')
        org.members.append(member)
    if commit:
        org.save()
    return org
Register the current user as admin on creation
def showlist(self, window_name, object_name):
    """
    Show combo box list / menu

    @param window_name: Window name to type in, either full name,
    LDTP's name convention, or a Unix glob.
    @type window_name: string
    @param object_name: Object name to type in, either full name,
    LDTP's name convention, or a Unix glob.
    @type object_name: string

    @return: 1 on success.
    @rtype: integer
    """
    handle = self._get_object_handle(window_name, object_name)
    if not handle.AXEnabled:
        raise LdtpServerException(u"Object %s state disabled" % object_name)
    # Pressing the combo box opens its drop-down list.
    handle.Press()
    return 1
Show combo box list / menu @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @return: 1 on success. @rtype: integer
def find_one(self, **kwargs):
    """Returns future.

    Executes collection's find_one method based on keyword args
    maps result ( dict to instance ) and return future

    Example::

        manager = EntityManager(Product)
        product_saved = yield manager.find_one(_id=object_id)

    :param kwargs: query filters passed straight to the driver's find_one
    :return: future resolving to a mapped entity instance
    """
    future = TracebackFuture()

    def handle_response(result, error):
        # Driver callback: either propagate the error, or map the raw
        # mongo document onto a fresh entity instance.
        if error:
            future.set_exception(error)
        else:
            instance = self.__entity()
            instance.map_dict(result)
            future.set_result(instance)

    self.__collection.find_one(kwargs, callback=handle_response)
    return future
Returns future. Executes collection's find_one method based on keyword args maps result ( dict to instance ) and return future Example:: manager = EntityManager(Product) product_saved = yield manager.find_one(_id=object_id)
def copy(self, filename: Optional[PathLike] = None) -> 'AnnData':
    """Full copy, optionally on disk.

    :param filename: required in backed mode; path of the on-disk copy.
    :return: a new, independent AnnData object.
    :raises ValueError: if backed and no filename is given.
    """
    if not self.isbacked:
        # In-memory: deep-copy every component into a new object.
        return AnnData(self._X.copy() if self._X is not None else None,
                       self._obs.copy(),
                       self._var.copy(),
                       # deepcopy on DictView does not work and is unnecessary
                       # as uns was copied already before
                       self._uns.copy() if isinstance(self._uns, DictView) else deepcopy(self._uns),
                       self._obsm.copy(), self._varm.copy(),
                       raw=None if self._raw is None else self._raw.copy(),
                       layers=self.layers.as_dict(),
                       dtype=self._X.dtype.name if self._X is not None else 'float32')
    else:
        if filename is None:
            raise ValueError(
                'To copy an AnnData object in backed mode, '
                'pass a filename: `.copy(filename=\'myfilename.h5ad\')`.')
        # Backed: a view is materialized by writing; a full object's backing
        # file can simply be cloned on disk.
        if self.isview:
            self.write(filename)
        else:
            from shutil import copyfile
            copyfile(self.filename, filename)
        return AnnData(filename=filename)
Full copy, optionally on disk.
def onAutoIndentTriggered(self):
    """Indent current line or selected lines

    Applies auto-indentation to every block touched by the current
    selection; with no selection, only the cursor's block is indented.
    """
    cursor = self._qpart.textCursor()
    startBlock = self._qpart.document().findBlock(cursor.selectionStart())
    endBlock = self._qpart.document().findBlock(cursor.selectionEnd())

    if startBlock != endBlock:  # indent multiply lines
        stopBlock = endBlock.next()

        block = startBlock

        # Group the per-block edits into a single undo step.
        with self._qpart:
            while block != stopBlock:
                self.autoIndentBlock(block, '')
                block = block.next()
    else:  # indent 1 line
        self.autoIndentBlock(startBlock, '')
Indent current line or selected lines
def _mean_prediction(self,theta,Y,scores,h,t_params):
    """ Creates a h-step ahead mean prediction

    Parameters
    ----------
    theta : np.array
        The past predicted values

    Y : np.array
        The past data

    scores : np.array
        The past scores

    h : int
        How many steps ahead for the prediction

    t_params : np.array
        A vector of (transformed) latent variables

    Returns
    ----------
    Y_exp : np.array
        Vector of past values and predictions
    """
    # Work on copies so the caller's arrays are never mutated.
    Y_exp = Y.copy()
    theta_exp = theta.copy()
    scores_exp = scores.copy()

    #(TODO: vectorize the inner construction here)
    for t in range(0,h):
        # Score-driven recursion: next state = last state + scaled last score.
        new_value = theta_exp[-1] + t_params[0]*scores_exp[-1]

        if self.model_name2 == "Exponential":
            # Exponential family uses the reciprocal of the link.
            Y_exp = np.append(Y_exp, [1.0/self.link(new_value)])
        else:
            Y_exp = np.append(Y_exp, [self.link(new_value)])

        theta_exp = np.append(theta_exp, [new_value]) # For indexing consistency
        scores_exp = np.append(scores_exp, [0]) # expectation of score is zero

    return Y_exp
Creates a h-step ahead mean prediction Parameters ---------- theta : np.array The past predicted values Y : np.array The past data scores : np.array The past scores h : int How many steps ahead for the prediction t_params : np.array A vector of (transformed) latent variables Returns ---------- Y_exp : np.array Vector of past values and predictions
def setDaemon(self, runnable, isdaemon, noregister = False):
    '''
    If a runnable is a daemon, it will not keep the main loop running. The main loop will end when all alived runnables are daemons.

    :param runnable: the runnable to mark
    :param isdaemon: True marks it a daemon; False clears the mark
    :param noregister: skip auto-registration of an unknown runnable
    '''
    if not noregister and runnable not in self.registerIndex:
        self.register((), runnable)
    # Select add/discard on the daemon set according to the flag.
    (self.daemons.add if isdaemon else self.daemons.discard)(runnable)
If a runnable is a daemon, it will not keep the main loop running. The main loop will end when all alived runnables are daemons.
def get_subcols(module_ident, plpy):
    """Get all the sub-collections that the module is part of.

    :param module_ident: id of the module to look up
    :param plpy: PL/Python database access object
    :return: generator of module_ident values of ancestor SubCollections
    """
    # Recursive CTE: walk the tree upwards from the module's node(s),
    # collecting all ancestors, then keep only those that are SubCollections.
    plan = plpy.prepare('''
    WITH RECURSIVE t(node, parent, path, document) AS (
        SELECT tr.nodeid, tr.parent_id, ARRAY[tr.nodeid], tr.documentid
        FROM trees tr
        WHERE tr.documentid = $1 and tr.is_collated = 'False'
    UNION ALL
        SELECT c.nodeid, c.parent_id, path || ARRAY[c.nodeid], c.documentid
        FROM trees c JOIN t ON (c.nodeid = t.parent)
        WHERE not c.nodeid = ANY(t.path)
    )
    SELECT DISTINCT m.module_ident
        FROM t JOIN modules m ON (t.document = m.module_ident)
        WHERE m.portal_type = 'SubCollection'
        ORDER BY m.module_ident
    ''', ('integer',))
    for i in plpy.execute(plan, (module_ident,)):
        yield i['module_ident']
Get all the sub-collections that the module is part of.
def readCell(self, row, col):
    '''read a cell

    :param row: row index of the cell
    :param col: column index of the cell
    :return: the cell value
    :raises UfException: wrapping any error from the underlying sheet
    '''
    try:
        # Lazily open the default sheet on first access.
        if self.__sheet is None:
            self.openSheet(super(ExcelRead, self).DEFAULT_SHEET)
        return self.__sheet.cell(row, col).value
    except Exception as excp:
        # BUGFIX: catch Exception, not BaseException — wrapping
        # KeyboardInterrupt/SystemExit into UfException made the process
        # impossible to interrupt cleanly.
        raise UfException(Errors.UNKNOWN_ERROR, "Unknown Error in Excellib.readCell %s" % excp)
read a cell
def main():
    """Command line interface

    Parses CLI options, reads the ULog file, and writes its parameters
    either as delimiter-separated text or in Octave's textual format.
    """
    parser = argparse.ArgumentParser(description='Extract parameters from an ULog file')
    parser.add_argument('filename', metavar='file.ulg', help='ULog input file')

    parser.add_argument('-d', '--delimiter', dest='delimiter', action='store',
                        help='Use delimiter in CSV (default is \',\')', default=',')

    parser.add_argument('-i', '--initial', dest='initial', action='store_true',
                        help='Only extract initial parameters', default=False)

    parser.add_argument('-o', '--octave', dest='octave', action='store_true',
                        help='Use Octave format', default=False)

    parser.add_argument('-t', '--timestamps', dest='timestamps', action='store_true',
                        help='Extract changed parameters with timestamps', default=False)

    parser.add_argument('output_filename', metavar='params.txt',
                        type=argparse.FileType('w'), nargs='?',
                        help='Output filename (default=stdout)', default=sys.stdout)

    args = parser.parse_args()
    ulog_file_name = args.filename

    message_filter = []
    # NOTE(review): with --initial an empty filter list is passed, otherwise
    # None — presumably None makes ULog parse all messages; confirm against
    # the ULog constructor.
    if not args.initial:
        message_filter = None

    ulog = ULog(ulog_file_name, message_filter)
    param_keys = sorted(ulog.initial_parameters.keys())
    delimiter = args.delimiter
    output_file = args.output_filename

    if not args.octave:
        for param_key in param_keys:
            output_file.write(param_key)
            if args.initial:
                # One value per parameter: its initial value only.
                output_file.write(delimiter)
                output_file.write(str(ulog.initial_parameters[param_key]))
                output_file.write('\n')
            elif args.timestamps:
                # Two rows per parameter: values, then matching timestamps.
                output_file.write(delimiter)
                output_file.write(str(ulog.initial_parameters[param_key]))
                for t, name, value in ulog.changed_parameters:
                    if name == param_key:
                        output_file.write(delimiter)
                        output_file.write(str(value))
                output_file.write('\n')
                output_file.write("timestamp")
                output_file.write(delimiter)
                output_file.write('0')
                for t, name, value in ulog.changed_parameters:
                    if name == param_key:
                        output_file.write(delimiter)
                        output_file.write(str(t))
                output_file.write('\n')
            else:
                # All changed values of the parameter on one row.
                for t, name, value in ulog.changed_parameters:
                    if name == param_key:
                        output_file.write(delimiter)
                        output_file.write(str(value))
                output_file.write('\n')
    else:
        # Octave text format: scalar for a single value, 1xN matrix otherwise.
        for param_key in param_keys:
            output_file.write('# name ')
            output_file.write(param_key)
            values = [ulog.initial_parameters[param_key]]
            if not args.initial:
                for t, name, value in ulog.changed_parameters:
                    if name == param_key:
                        values += [value]
            if len(values) > 1:
                output_file.write('\n# type: matrix\n')
                output_file.write('# rows: 1\n')
                output_file.write('# columns: ')
                output_file.write(str(len(values)) + '\n')
                for value in values:
                    output_file.write(str(value) + ' ')
            else:
                output_file.write('\n# type: scalar\n')
                output_file.write(str(values[0]))
            output_file.write('\n')
Command line interface
def basic_auth(f):
    """
    Injects auth, into requests call over route

    Wraps a routed method so that the connection's JWT (preferred) or
    basic-auth credentials are injected as the ``auth`` keyword argument.

    :param f: the routed function; its first positional argument must be
        the connected router/client instance
    :return: route
    """
    import functools

    # functools.wraps preserves f's name/docstring so introspection and
    # error messages still point at the wrapped route.
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        self = args[0]
        if 'auth' in kwargs:
            raise AttributeError("don't set auth token explicitly")
        assert self.is_connected, "not connected, call router.connect(email, password) first"

        # Prefer the JWT token; fall back to basic credentials.
        if self._jwt_auth:
            kwargs['auth'] = self._jwt_auth
        elif self._auth:
            kwargs['auth'] = self._auth
        else:
            assert False, "no basic token, no JWT, but connected o_O"
        return f(*args, **kwargs)
    return wrapper
Injects auth, into requests call over route :return: route
def export_compound(infile, outfile, format, outcsv, max_rs_peakgroup_qvalue):
    """
    Export Compound TSV/CSV tables
    """
    if format == "score_plots":
        export_score_plots(infile)
        return

    # Derive the output name from the input when none was supplied.
    if outfile is None:
        extension = ".csv" if outcsv else ".tsv"
        outfile = infile.split(".osw")[0] + extension

    export_compound_tsv(infile, outfile, format, outcsv, max_rs_peakgroup_qvalue)
Export Compound TSV/CSV tables
def movementCompute(self, displacement, noiseFactor = 0):
    """
    Shift the current active cells by a vector.

    @param displacement (pair of floats)
    A translation vector [di, dj].

    @param noiseFactor (float)
    Standard deviation of Gaussian noise added to each displacement
    component; 0 disables noise.
    """

    if noiseFactor != 0:
        # Copy before mutating so the caller's vector is untouched.
        displacement = copy.deepcopy(displacement)
        xnoise = np.random.normal(0, noiseFactor)
        ynoise = np.random.normal(0, noiseFactor)
        displacement[0] += xnoise
        displacement[1] += ynoise

    # Calculate delta in the module's coordinates.
    phaseDisplacement = np.matmul(self.A, displacement)

    # Shift the active coordinates.
    np.add(self.bumpPhases, phaseDisplacement[:,np.newaxis],
           out=self.bumpPhases)

    # In Python, (x % 1.0) can return 1.0 because of floating point goofiness.
    # Generally this doesn't cause problems, it's just confusing when you're
    # debugging.
    np.round(self.bumpPhases, decimals=9, out=self.bumpPhases)

    np.mod(self.bumpPhases, 1.0, out=self.bumpPhases)

    self._computeActiveCells()
    self.phaseDisplacement = phaseDisplacement
Shift the current active cells by a vector. @param displacement (pair of floats) A translation vector [di, dj].
def load(self, size):
    '''open and read the file if existent

    :param size: maximum number of characters to read
    :return: the evaluated file content, or None if the file is missing
    '''
    if self.exists() and self.isfile():
        # BUGFIX: use a context manager so the file handle is always closed
        # (the previous open(...).read(...) leaked the descriptor).
        with open(self) as handle:
            # SECURITY: eval() executes arbitrary code from the file — only
            # load trusted files. ast.literal_eval would be safer but limits
            # content to Python literals.
            return eval(handle.read(size))
open and read the file if existent
def _build_credentials_tuple(mech, source, user, passwd, extra, database):
    """Build and return a mechanism specific credentials tuple.

    :param mech: authentication mechanism name (e.g. 'GSSAPI', 'PLAIN')
    :param source: explicit authentication source, or None
    :param user: username (may be None only for MONGODB-X509)
    :param passwd: password, where the mechanism requires one
    :param extra: dict of extra options (e.g. authmechanismproperties)
    :param database: default database used to derive the auth source
    :raises ConfigurationError: on missing username/password or invalid options
    :raises ValueError: when a source other than $external is given for
        mechanisms that require it
    """
    if mech != 'MONGODB-X509' and user is None:
        raise ConfigurationError("%s requires a username." % (mech,))
    if mech == 'GSSAPI':
        if source is not None and source != '$external':
            raise ValueError(
                "authentication source must be $external or None for GSSAPI")
        # Kerberos properties with their documented defaults.
        properties = extra.get('authmechanismproperties', {})
        service_name = properties.get('SERVICE_NAME', 'mongodb')
        canonicalize = properties.get('CANONICALIZE_HOST_NAME', False)
        service_realm = properties.get('SERVICE_REALM')
        props = GSSAPIProperties(service_name=service_name,
                                 canonicalize_host_name=canonicalize,
                                 service_realm=service_realm)
        # Source is always $external.
        return MongoCredential(mech, '$external', user, passwd, props, None)
    elif mech == 'MONGODB-X509':
        if passwd is not None:
            raise ConfigurationError(
                "Passwords are not supported by MONGODB-X509")
        if source is not None and source != '$external':
            raise ValueError(
                "authentication source must be "
                "$external or None for MONGODB-X509")
        # user can be None.
        return MongoCredential(mech, '$external', user, None, None, None)
    elif mech == 'PLAIN':
        source_database = source or database or '$external'
        return MongoCredential(mech, source_database, user, passwd, None, None)
    else:
        # SCRAM-family and other password-based mechanisms.
        source_database = source or database or 'admin'
        if passwd is None:
            raise ConfigurationError("A password is required.")
        return MongoCredential(
            mech, source_database, user, passwd, None, _Cache())
Build and return a mechanism specific credentials tuple.
def copy_random_state(random_state, force_copy=False):
    """
    Creates a copy of a random state.

    Parameters
    ----------
    random_state : numpy.random.RandomState
        The random state to copy.

    force_copy : bool, optional
        If True, this function will always create a copy of every random
        state. If False, it will not copy numpy's default random state,
        but all other random states.

    Returns
    -------
    rs_copy : numpy.random.RandomState
        The copied random state.

    """
    # `is` (identity), not `==`: we want to detect the np.random module
    # singleton itself, not anything that merely compares equal to it.
    if random_state is np.random and not force_copy:
        return random_state
    else:
        rs_copy = dummy_random_state()
        orig_state = random_state.get_state()
        rs_copy.set_state(orig_state)
        return rs_copy
Creates a copy of a random state. Parameters ---------- random_state : numpy.random.RandomState The random state to copy. force_copy : bool, optional If True, this function will always create a copy of every random state. If False, it will not copy numpy's default random state, but all other random states. Returns ------- rs_copy : numpy.random.RandomState The copied random state.
def update_video_image(edx_video_id, course_id, image_data, file_name):
    """
    Update video image for an existing video.

    NOTE: If `image_data` is None then `file_name` value will be used as it is, otherwise
    a new file name is constructed based on uuid and extension from `file_name` value.
    `image_data` will be None in case of course re-run and export.

    Arguments:
        image_data (InMemoryUploadedFile): Image data to be saved for a course video.

    Returns:
        course video image url

    Raises:
        Raises ValVideoNotFoundError if the CourseVideo cannot be retrieved.
    """
    try:
        # select_related pulls the video in the same query as the course link.
        course_video = CourseVideo.objects.select_related('video').get(
            course_id=course_id, video__edx_video_id=edx_video_id
        )
    except ObjectDoesNotExist:
        error_message = u'VAL: CourseVideo not found for edx_video_id: {0} and course_id: {1}'.format(
            edx_video_id,
            course_id
        )
        raise ValVideoNotFoundError(error_message)

    video_image, _ = VideoImage.create_or_update(course_video, file_name, image_data)
    return video_image.image_url()
Update video image for an existing video. NOTE: If `image_data` is None then `file_name` value will be used as it is, otherwise a new file name is constructed based on uuid and extension from `file_name` value. `image_data` will be None in case of course re-run and export. Arguments: image_data (InMemoryUploadedFile): Image data to be saved for a course video. Returns: course video image url Raises: Raises ValVideoNotFoundError if the CourseVideo cannot be retrieved.
def gnpool(name, start, room, lenout=_default_len_out):
    """
    Return names of kernel variables matching a specified template.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gnpool_c.html

    :param name: Template that names should match.
    :type name: str
    :param start: Index of first matching name to retrieve.
    :type start: int
    :param room: The largest number of values to return.
    :type room: int
    :param lenout: Length of strings in output array kvars.
    :type lenout: int
    :return: Kernel pool variables whose names match name,
             and a flag indicating whether any match was found.
    :rtype: tuple of (list of str, bool)
    """
    # Marshal Python values into the ctypes forms the CSPICE C API expects.
    name = stypes.stringToCharP(name)
    start = ctypes.c_int(start)
    # Pre-allocate the output buffer: `room` strings of `lenout` chars each.
    kvars = stypes.emptyCharArray(yLen=room, xLen=lenout)
    room = ctypes.c_int(room)
    lenout = ctypes.c_int(lenout)
    # Out-parameters filled in by the C call: match count and found flag.
    n = ctypes.c_int()
    found = ctypes.c_int()
    libspice.gnpool_c(name, start, room, lenout, ctypes.byref(n), kvars,
                      ctypes.byref(found))
    # Only the first n entries of the buffer are valid matches.
    return stypes.cVectorToPython(kvars)[0:n.value], bool(found.value)
Return names of kernel variables matching a specified template. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gnpool_c.html :param name: Template that names should match. :type name: str :param start: Index of first matching name to retrieve. :type start: int :param room: The largest number of values to return. :type room: int :param lenout: Length of strings in output array kvars. :type lenout: int :return: Kernel pool variables whose names match name. :rtype: list of str
def bounds_window(bounds, affine):
    """Create a full cover rasterio-style window."""
    west, south, east, north = bounds
    # Top-left corner uses rowcol's default op (floor to containing cell).
    first_row, first_col = rowcol(west, north, affine)
    # Bottom-right corner rounds up so the window fully covers the bounds.
    last_row, last_col = rowcol(east, south, affine, op=math.ceil)
    return (first_row, last_row), (first_col, last_col)
Create a full cover rasterio-style window
def network_lpf_contingency(network, snapshots=None, branch_outages=None):
    """
    Computes linear power flow for a selection of branch outages.

    Parameters
    ----------
    snapshots : list-like|single snapshot
        A subset or an element of network.snapshots on which to run
        the power flow, defaults to network.snapshots
        NB: currently this only works for a single snapshot
    branch_outages : list-like
        A list of passive branches which are to be tested for outages.
        If None, it's taken as all network.passive_branches_i()

    Returns
    -------
    p0 : pandas.DataFrame
        num_passive_branch x num_branch_outages DataFrame of new power flows
    """
    if snapshots is None:
        snapshots = network.snapshots

    # Fix: `collections.Iterable` was removed in Python 3.10; the ABC lives
    # in `collections.abc`.
    if isinstance(snapshots, collections.abc.Iterable):
        logger.warning("Apologies LPF contingency, this only works for single snapshots at the moment, taking the first snapshot.")
        snapshot = snapshots[0]
    else:
        snapshot = snapshots

    # Base-case power flow whose branch flows are perturbed per outage.
    network.lpf(snapshot)

    passive_branches = network.passive_branches()

    if branch_outages is None:
        branch_outages = passive_branches.index

    # Store the flows from the base case
    p0_base = pd.Series(index=passive_branches.index)
    for c in network.passive_branch_components:
        pnl = network.pnl(c)
        p0_base[c] = pnl.p0.loc[snapshot]

    # Branch Outage Distribution Factors are needed per sub-network.
    for sn in network.sub_networks.obj:
        sn._branches = sn.branches()
        sn.calculate_BODF()

    p0 = pd.DataFrame(index=passive_branches.index)
    p0["base"] = p0_base

    for branch in branch_outages:
        if type(branch) is not tuple:
            logger.warning("No type given for {}, assuming it is a line".format(branch))
            branch = ("Line", branch)
        sn = network.sub_networks.obj[passive_branches.sub_network[branch]]
        branch_i = sn._branches.index.get_loc(branch)
        # Redistribute the outaged branch's base flow via the BODF column.
        p0_new = p0_base + pd.Series(sn.BODF[:, branch_i] * p0_base[branch], sn._branches.index)
        p0[branch] = p0_new

    return p0
Computes linear power flow for a selection of branch outages. Parameters ---------- snapshots : list-like|single snapshot A subset or an element of network.snapshots on which to run the power flow, defaults to network.snapshots NB: currently this only works for a single snapshot branch_outages : list-like A list of passive branches which are to be tested for outages. If None, it's taken as all network.passive_branches_i() Returns ------- p0 : pandas.DataFrame num_passive_branch x num_branch_outages DataFrame of new power flows
def to_dict(self):
    """
    Return a dictionary of the worker stats.

    Returns:
        dict: Dictionary of the stats.
    """
    stats = {
        'name': self.name,
        'broker': self.broker.to_dict(),
        'pid': self.pid,
        'process_pids': self.process_pids,
        'concurrency': self.concurrency,
        'job_count': self.job_count,
    }
    # Queues serialize themselves; collect their dict forms in order.
    stats['queues'] = [queue.to_dict() for queue in self.queues]
    return stats
Return a dictionary of the worker stats. Returns: dict: Dictionary of the stats.
def process_schema(value):
    """Load schema from JSONSchema registry based on given value.

    :param value: Schema path, relative to the directory when it was
        registered.
    :returns: The schema absolute path.
    """
    schemas = current_app.extensions['invenio-jsonschemas'].schemas
    if value in schemas:
        return schemas[value]
    # Unknown schema: surface the full list of registered paths to the user.
    raise click.BadParameter(
        'Unknown schema {0}. Please use one of:\n {1}'.format(
            value, '\n'.join(schemas.keys())
        )
    )
Load schema from JSONSchema registry based on given value. :param value: Schema path, relative to the directory when it was registered. :returns: The schema absolute path.
def merge_from(self, other):
    """Merge information from another PhoneNumberDesc object into this one."""
    # Only fields actually set (non-None) on `other` overwrite our own.
    for attr in ('national_number_pattern', 'example_number'):
        incoming = getattr(other, attr)
        if incoming is not None:
            setattr(self, attr, incoming)
Merge information from another PhoneNumberDesc object into this one.
def get_repository_nodes(self, repository_id, ancestor_levels, descendant_levels, include_siblings):
    """Gets a portion of the hierarchy for the given repository.

    arg:    repository_id (osid.id.Id): the ``Id`` to query
    arg:    ancestor_levels (cardinal): the maximum number of
            ancestor levels to include. A value of 0 returns no
            parents in the node.
    arg:    descendant_levels (cardinal): the maximum number of
            descendant levels to include. A value of 0 returns no
            children in the node.
    arg:    include_siblings (boolean): ``true`` to include the
            siblings of the given node, ``false`` to omit the
            siblings
    return: (osid.repository.RepositoryNode) - the specified
            repository node
    raise:  NotFound - ``repository_id`` not found
    raise:  NullArgument - ``repository_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.BinHierarchySession.get_bin_nodes
    node_ids = self.get_repository_node_ids(
        repository_id=repository_id,
        ancestor_levels=ancestor_levels,
        descendant_levels=descendant_levels,
        include_siblings=include_siblings)
    # Wrap the id-node map in a full RepositoryNode object.
    return objects.RepositoryNode(
        node_ids._my_map, runtime=self._runtime, proxy=self._proxy)
Gets a portion of the hierarchy for the given repository. arg: repository_id (osid.id.Id): the ``Id`` to query arg: ancestor_levels (cardinal): the maximum number of ancestor levels to include. A value of 0 returns no parents in the node. arg: descendant_levels (cardinal): the maximum number of descendant levels to include. A value of 0 returns no children in the node. arg: include_siblings (boolean): ``true`` to include the siblings of the given node, ``false`` to omit the siblings return: (osid.repository.RepositoryNode) - the specified repository node raise: NotFound - ``repository_id`` not found raise: NullArgument - ``repository_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def urlretrieve(self, url, filename=None, method='GET', body=None, dir=None,
                **kwargs):
    """
    Save result of a request to a file, similarly to
    :func:`urllib.urlretrieve`.

    If an error is encountered may raise any of the scrapelib
    `exceptions`_.

    A filename may be provided or :meth:`urlretrieve` will safely create a
    temporary file. If a directory is provided, a file will be given a
    random name within the specified directory. Either way, it is the
    responsibility of the caller to ensure that the temporary file is deleted
    when it is no longer needed.

    :param url: URL for request
    :param filename: optional name for file
    :param method: any valid HTTP method, but generally GET or POST
    :param body: optional body for request, to turn parameters into
        an appropriate string use :func:`urllib.urlencode()`
    :param dir: optional directory to place file in
    :returns filename, response: tuple with filename for saved
        response (will be same as given filename if one was given,
        otherwise will be a temp file in the OS temp directory) and
        a :class:`Response` object that can be used to inspect the
        response headers.
    """
    result = self.request(method, url, data=body, **kwargs)
    result.code = result.status_code  # backwards compat

    if not filename:
        fd, filename = tempfile.mkstemp(dir=dir)
        f = os.fdopen(fd, 'wb')
    else:
        f = open(filename, 'wb')

    # Fix: the original leaked the file handle if write() raised; the
    # context manager guarantees the handle is closed on any exit path.
    with f:
        f.write(result.content)

    return filename, result
Save result of a request to a file, similarly to :func:`urllib.urlretrieve`. If an error is encountered may raise any of the scrapelib `exceptions`_. A filename may be provided or :meth:`urlretrieve` will safely create a temporary file. If a directory is provided, a file will be given a random name within the specified directory. Either way, it is the responsibility of the caller to ensure that the temporary file is deleted when it is no longer needed. :param url: URL for request :param filename: optional name for file :param method: any valid HTTP method, but generally GET or POST :param body: optional body for request, to turn parameters into an appropriate string use :func:`urllib.urlencode()` :param dir: optional directory to place file in :returns filename, response: tuple with filename for saved response (will be same as given filename if one was given, otherwise will be a temp file in the OS temp directory) and a :class:`Response` object that can be used to inspect the response headers.
def preflight(self):
    """Handles request authentication"""
    # Echo the caller's Origin back (wildcard when absent) so the browser
    # accepts the CORS response.
    self.set_header('Access-Control-Allow-Origin',
                    self.request.headers.get('Origin', '*'))

    requested = self.request.headers.get('Access-Control-Request-Headers')
    if requested:
        self.set_header('Access-Control-Allow-Headers', requested)

    self.set_header('Access-Control-Allow-Credentials', 'true')
Handles request authentication
def _setextra(self, extradata):
    '''
    Set the _extra field in the struct, which stands for the additional
    ("extra") data after the defined fields.

    :param extradata: raw trailing data to attach after the defined fields.
    '''
    # Walk down the chain of nested sub-structs so the extra data is
    # attached to the innermost one.
    current = self
    while hasattr(current, '_sub'):
        current = current._sub
    # NOTE(review): `_set` is a project helper, presumably a raw attribute
    # setter that bypasses any custom __setattr__ -- confirm.
    _set(current, '_extra', extradata)
Set the _extra field in the struct, which stands for the additional ("extra") data after the defined fields.
def _calc_validation_statistics(validation_results):
    """
    Calculate summary statistics for the validation results and
    return ``ExpectationStatistics``.
    """
    n_evaluated = len(validation_results)
    n_successful = sum(result["success"] for result in validation_results)
    n_unsuccessful = n_evaluated - n_successful
    all_succeeded = n_successful == n_evaluated

    # An empty result set yields NaN rather than a division error.
    if n_evaluated:
        pct = n_successful / n_evaluated * 100
    else:
        pct = float("nan")

    return ValidationStatistics(
        successful_expectations=n_successful,
        evaluated_expectations=n_evaluated,
        unsuccessful_expectations=n_unsuccessful,
        success=all_succeeded,
        success_percent=pct,
    )
Calculate summary statistics for the validation results and return ``ExpectationStatistics``.
def imshow_z(data, name):
    """2D color plot of the quasiparticle weight as a function of
    interaction and doping"""
    zmes = pick_flat_z(data)
    doping = data['doping']

    plt.figure()
    # Transpose so doping runs along x and interaction strength along y.
    plt.imshow(zmes.T, origin='lower',
               extent=[doping.min(), doping.max(), 0, data['u_int'].max()],
               aspect=.16)
    plt.colorbar()
    plt.xlabel('$n$', fontsize=20)
    plt.ylabel('$U/D$', fontsize=20)
    plt.savefig(name + '_imshow.png', dpi=300, format='png',
                transparent=False, bbox_inches='tight', pad_inches=0.05)
2D color plot of the quasiparticle weight as a function of interaction and doping
def _backup_dir_item(self, dir_path, process_bar):
    """
    Backup one dir item.

    :param dir_path: filesystem_walk.DirEntryPath() instance
    :param process_bar: progress bar updated while copying file content
    """
    self.path_helper.set_src_filepath(dir_path)
    if self.path_helper.abs_src_filepath is None:
        self.total_errored_items += 1
        log.info("Can't backup %r", dir_path)
        # self.summary(no, dir_path.stat.st_mtime, end=" ")
        # NOTE(review): execution falls through here with an unresolved
        # source path -- confirm whether an early `return` is intended.

    if dir_path.is_symlink:
        self.summary("TODO Symlink: %s" % dir_path)
        return

    if dir_path.resolve_error is not None:
        self.summary("TODO resolve error: %s" % dir_path.resolve_error)
        pprint_path(dir_path)
        return

    if dir_path.different_path:
        self.summary("TODO different path:")
        pprint_path(dir_path)
        return

    if dir_path.is_dir:
        self.summary("TODO dir: %s" % dir_path)
    elif dir_path.is_file:
        # self.summary("Normal file: %s", dir_path)
        file_backup = FileBackup(dir_path, self.path_helper, self.backup_run)
        # Prefer hard-linking against an identical file from an old backup.
        old_backup_entry = self.fast_compare(dir_path)
        if old_backup_entry is not None:
            # We can just link the file from a old backup
            file_backup.fast_deduplication_backup(old_backup_entry, process_bar)
        else:
            file_backup.deduplication_backup(process_bar)
        assert file_backup.fast_backup is not None, dir_path.path
        assert file_backup.file_linked is not None, dir_path.path

        # Bookkeeping: linked files only add to the "stinted" byte counter.
        file_size = dir_path.stat.st_size
        if file_backup.file_linked:
            # os.link() was used
            self.total_file_link_count += 1
            self.total_stined_bytes += file_size
        else:
            self.total_new_file_count += 1
            self.total_new_bytes += file_size
        if file_backup.fast_backup:
            self.total_fast_backup += 1
    else:
        # Fix: "TODO:" % dir_path raised TypeError (no %s placeholder).
        self.summary("TODO: %s" % dir_path)
        pprint_path(dir_path)
Backup one dir item :param dir_path: filesystem_walk.DirEntryPath() instance
def _compute_distance(self, dists, C):
    """
    Compute the second term of the equation described on p. 1144:

    `` c4 * np.log(sqrt(R ** 2. + h ** 2.)
    """
    # Effective distance combines rupture distance with the depth term h.
    effective_distance = np.sqrt(dists.rrup ** 2. + C["h"] ** 2.)
    return C["c4"] * np.log(effective_distance)
Compute the second term of the equation described on p. 1144: `` c4 * np.log(sqrt(R ** 2. + h ** 2.)
def create_language_dataset_from_url(self, file_url, token=None, url=API_CREATE_LANGUAGE_DATASET):
    """
    Creates a dataset from a publicly accessible file stored in the cloud.

    :param file_url: string, in the form of a URL to a file accessible on
        the cloud. Popular options include Dropbox, AWS S3, Google Drive.
        warning: Google Drive by default gives you a link to a web ui that
        allows you to download a file NOT to the file directly. There is a
        way to change the link to point directly to the file as of 2018;
        as this may change, please search google for a solution.

    returns: a request object
    """
    bearer = 'Bearer ' + self.check_for_token(token)
    # multipart form fields: (filename, value) tuples with no filename.
    form_fields = {'type': (None, 'text-intent'), 'path': (None, file_url)}
    headers = {'Authorization': bearer, 'Cache-Control': 'no-cache'}
    return requests.post(url, headers=headers, files=form_fields)
Creates a dataset from a publicly accessible file stored in the cloud. :param file_url: string, in the form of a URL to a file accessible on the cloud. Popular options include Dropbox, AWS S3, Google Drive. warning: Google Drive by default gives you a link to a web ui that allows you to download a file NOT to the file directly. There is a way to change the link to point directly to the file as of 2018 as this may change, please search google for a solution. returns: a request object
def read_csv(self, file: str, table: str, libref: str ="", nosub: bool =False, opts: dict = None) -> '<SASdata object>':
    """
    This method will import a csv file into a SAS Data Set and return the SASdata object referring to it.

    file    - either the OS filesystem path of the file, or HTTP://... for a url accessible file
    table   - the name of the SAS Data Set to create
    libref  - the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned
    nosub   - if True, only print the generated SAS code instead of submitting it
    opts    - a dictionary containing any of the following Proc Import options(datarow, delimiter, getnames, guessingrows)
    """
    opts = opts if opts is not None else {}
    # Build the SAS program: a filename statement followed by PROC IMPORT.
    code = "filename x "

    if file.lower().startswith("http"):
        # URL sources need the `url` device on the filename statement.
        code += "url "

    code += "\""+file+"\";\n"
    code += "proc import datafile=x out="
    if len(libref):
        code += libref+"."
    code += table+" dbms=csv replace; "+self._sb._impopts(opts)+" run;"

    if nosub:
        # Show the generated SAS code only; nothing is executed.
        print(code)
    else:
        # NOTE(review): the submit() result is captured but never returned;
        # despite the annotated return type this method returns None --
        # confirm callers obtain the SASdata object elsewhere.
        ll = self.submit(code, "text")
This method will import a csv file into a SAS Data Set and return the SASdata object referring to it. file - either the OS filesystem path of the file, or HTTP://... for a url accessible file table - the name of the SAS Data Set to create libref - the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned opts - a dictionary containing any of the following Proc Import options(datarow, delimiter, getnames, guessingrows)
def LoadElement(href, only_etag=False):
    """
    Return an instance of a element as a ElementCache dict
    used as a cache.

    :rtype ElementCache
    """
    fetch = SMCRequest(href=href)
    # Raise FetchElementFailed (rather than a generic error) on failure.
    fetch.exception = FetchElementFailed
    response = fetch.read()

    if only_etag:
        return response.etag
    return ElementCache(response.json, etag=response.etag)
Return an instance of a element as a ElementCache dict used as a cache. :rtype ElementCache
def _DetermineOperatingSystem(self, searcher):
    """Tries to determine the underlying operating system.

    Args:
      searcher (dfvfs.FileSystemSearcher): file system searcher.

    Returns:
      str: operating system for example "Windows". This should be one of
          the values in definitions.OPERATING_SYSTEM_FAMILIES.
    """
    # Probe for directories that are characteristic of each OS family.
    find_specs = [
        file_system_searcher.FindSpec(
            location='/etc', case_sensitive=False),
        file_system_searcher.FindSpec(
            location='/System/Library', case_sensitive=False),
        file_system_searcher.FindSpec(
            location='/Windows/System32', case_sensitive=False),
        file_system_searcher.FindSpec(
            location='/WINNT/System32', case_sensitive=False),
        file_system_searcher.FindSpec(
            location='/WINNT35/System32', case_sensitive=False),
        file_system_searcher.FindSpec(
            location='/WTSRV/System32', case_sensitive=False)]

    locations = []
    for path_spec in searcher.Find(find_specs=find_specs):
        relative_path = searcher.GetRelativePath(path_spec)
        if relative_path:
            # Lower-case for the case-insensitive membership tests below.
            locations.append(relative_path.lower())

    # We need to check for both forward and backward slashes since the path
    # spec will be OS dependent, as in running the tool on Windows will return
    # Windows paths (backward slash) vs. forward slash on *NIX systems.
    windows_locations = set([
        '/windows/system32', '\\windows\\system32', '/winnt/system32',
        '\\winnt\\system32', '/winnt35/system32', '\\winnt35\\system32',
        '\\wtsrv\\system32', '/wtsrv/system32'])

    # Precedence: Windows markers win over macOS, which wins over the
    # generic /etc (Linux) marker that also exists on macOS.
    operating_system = definitions.OPERATING_SYSTEM_FAMILY_UNKNOWN
    if windows_locations.intersection(set(locations)):
        operating_system = definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_NT
    elif '/system/library' in locations:
        operating_system = definitions.OPERATING_SYSTEM_FAMILY_MACOS
    elif '/etc' in locations:
        operating_system = definitions.OPERATING_SYSTEM_FAMILY_LINUX

    return operating_system
Tries to determine the underlying operating system. Args: searcher (dfvfs.FileSystemSearcher): file system searcher. Returns: str: operating system for example "Windows". This should be one of the values in definitions.OPERATING_SYSTEM_FAMILIES.
def get_words(self):
    """
    Returns the document words.

    :return: Document words.
    :rtype: list
    """
    words = []
    block = self.document().findBlockByLineNumber(0)
    # Walk the text blocks in order until a past-the-end (invalid) block.
    while block.isValid():
        text = foundations.strings.to_string(block.text())
        block_words = foundations.strings.get_words(text)
        if block_words:
            words.extend(block_words)
        block = block.next()
    return words
Returns the document words. :return: Document words. :rtype: list
def InterpolateValue(self, value, type_info_obj=type_info.String(), default_section=None, context=None): """Interpolate the value and parse it with the appropriate type.""" # It is only possible to interpolate strings... if isinstance(value, Text): try: value = StringInterpolator( value, self, default_section=default_section, parameter=type_info_obj.name, context=context).Parse() except InterpolationError as e: # TODO(hanuszczak): This is a quick hack to not refactor too much while # working on Python 3 compatibility. But this is bad and exceptions # should not be used like this. message = "{cause}: {detail}".format(cause=e, detail=value) raise type(e)(message) # Parse the data from the string. value = type_info_obj.FromString(value) # ... and lists of strings. if isinstance(value, list): value = [ self.InterpolateValue( v, default_section=default_section, context=context) for v in value ] return value
Interpolate the value and parse it with the appropriate type.
async def connect(self):
    """Establish a connection to the chat server.

    Returns when an error has occurred, or :func:`disconnect` has been
    called.
    """
    proxy = os.environ.get('HTTP_PROXY')
    self._session = http_utils.Session(self._cookies, proxy=proxy)
    try:
        self._channel = channel.Channel(
            self._session, self._max_retries, self._retry_backoff_base
        )

        # Forward the Channel events to the Client events.
        self._channel.on_connect.add_observer(self.on_connect.fire)
        self._channel.on_reconnect.add_observer(self.on_reconnect.fire)
        self._channel.on_disconnect.add_observer(self.on_disconnect.fire)
        self._channel.on_receive_array.add_observer(self._on_receive_array)

        # Wrap the coroutine in a Future so it can be cancelled.
        self._listen_future = asyncio.ensure_future(self._channel.listen())
        # Listen for StateUpdate messages from the Channel until it
        # disconnects.
        try:
            await self._listen_future
        except asyncio.CancelledError:
            # If this task is cancelled, we need to cancel our child task
            # as well. We don't need an additional yield because listen
            # cancels immediately.
            self._listen_future.cancel()
        logger.info(
            'Client.connect returning because Channel.listen returned'
        )
    finally:
        # Always close the HTTP session, whether we returned normally,
        # raised, or were cancelled.
        await self._session.close()
Establish a connection to the chat server. Returns when an error has occurred, or :func:`disconnect` has been called.
def rates_for_location(self, postal_code, location_deets=None):
    """Shows the sales tax rates for a given location."""
    response = self._get("rates/" + postal_code, location_deets)
    return self.responder(response)
Shows the sales tax rates for a given location.
def load(self):
    """
    Loads the user's SDB inventory

    Raises parseException
    """
    # Fetch the inventory for the current user and expose its forms.
    inventory = SDBInventory(self.usr)
    self.inventory = inventory
    self.forms = inventory.forms
Loads the user's SDB inventory Raises parseException
def _update_index(self, axis, key, value): """Update the current axis index based on a given key or value This is an internal method designed to set the origin or step for an index, whilst updating existing Index arrays as appropriate Examples -------- >>> self._update_index("x0", 0) >>> self._update_index("dx", 0) To actually set an index array, use `_set_index` """ # delete current value if given None if value is None: return delattr(self, key) _key = "_{}".format(key) index = "{[0]}index".format(axis) unit = "{[0]}unit".format(axis) # convert float to Quantity if not isinstance(value, Quantity): try: value = Quantity(value, getattr(self, unit)) except TypeError: value = Quantity(float(value), getattr(self, unit)) # if value is changing, delete current index try: curr = getattr(self, _key) except AttributeError: delattr(self, index) else: if ( value is None or getattr(self, key) is None or not value.unit.is_equivalent(curr.unit) or value != curr ): delattr(self, index) # set new value setattr(self, _key, value) return value
Update the current axis index based on a given key or value This is an internal method designed to set the origin or step for an index, whilst updating existing Index arrays as appropriate Examples -------- >>> self._update_index("x0", 0) >>> self._update_index("dx", 0) To actually set an index array, use `_set_index`
def set_calibrators(self, parameter, calibrators):
    """
    Apply an ordered set of calibrators for the specified parameter.
    This replaces existing calibrators (if any).

    Each calibrator may have a context, which indicates when it its
    effects may be applied. Only the first matching calibrator is
    applied.

    A calibrator with context ``None`` is the *default* calibrator.
    There can be only one such calibrator, and is always applied at
    the end when no other contextual calibrator was applicable.

    :param str parameter: Either a fully-qualified XTCE name or an alias
                          in the format ``NAMESPACE/NAME``.
    :param .Calibrator[] calibrators: List of calibrators (either
                                      contextual or not)
    """
    req = mdb_pb2.ChangeParameterRequest()
    req.action = mdb_pb2.ChangeParameterRequest.SET_CALIBRATORS
    for c in calibrators:
        if c.context:
            context_calib = req.contextCalibrator.add()
            # Fix: the context comes from the calibrator being iterated
            # (`c`); the previous `rs.context` referenced an undefined name.
            context_calib.context = c.context
            calib_info = context_calib.calibrator
        else:
            calib_info = req.defaultCalibrator
        _add_calib(calib_info, c.type, c.data)

    url = '/mdb/{}/{}/parameters/{}'.format(
        self._instance, self._processor, parameter)
    self._client.post_proto(url, data=req.SerializeToString())
Apply an ordered set of calibrators for the specified parameter. This replaces existing calibrators (if any). Each calibrator may have a context, which indicates when it its effects may be applied. Only the first matching calibrator is applied. A calibrator with context ``None`` is the *default* calibrator. There can be only one such calibrator, and is always applied at the end when no other contextual calibrator was applicable. :param str parameter: Either a fully-qualified XTCE name or an alias in the format ``NAMESPACE/NAME``. :param .Calibrator[] calibrators: List of calibrators (either contextual or not)
def convert_norm(node, **kwargs):
    """Map MXNet's norm operator attributes to onnx's ReduceL1 and ReduceL2
    operators and return the created node.
    """
    name, input_nodes, attrs = get_inputs(node, kwargs)

    mx_axis = attrs.get("axis", None)
    axes = convert_string_to_list(str(mx_axis)) if mx_axis else None

    keepdims = get_boolean_attribute_value(attrs, "keepdims")
    # Norm order selects the ONNX op; renamed to avoid shadowing the
    # builtin `ord`.
    norm_order = int(attrs.get("ord", 2))

    onnx_op_name = "ReduceL1" if norm_order == 1 else "ReduceL2"

    # `axes=None` cannot be passed to make_node, hence the two branches.
    if axes:
        reduce_node = onnx.helper.make_node(
            onnx_op_name,
            input_nodes,
            [name],
            axes=axes,
            keepdims=keepdims,
            name=name
        )
    else:
        reduce_node = onnx.helper.make_node(
            onnx_op_name,
            input_nodes,
            [name],
            keepdims=keepdims,
            name=name
        )
    return [reduce_node]
Map MXNet's norm operator attributes to onnx's ReduceL1 and ReduceL2 operators and return the created node.
def _does_require_deprecation(self):
    """
    Check if we have to put the previous version into the deprecated list.

    Returns True when the current version's major or minor component is
    greater than the corresponding component recorded in version.yaml.
    """
    # Compare the first two components (major, minor) of the current
    # version against version.yaml.
    # NOTE(review): the original comment said "the 2 last elements", but
    # `[:2]` selects the *first* two -- confirm intent.
    for index, version_number in enumerate(self.current_version[0][:2]):
        if version_number > self.version_yaml[index]:
            # The currently read version number is greater than the one we
            # have in the version.yaml, so deprecation is required.
            return True

    # No component is greater: nothing to deprecate.
    return False
Check if we have to put the previous version into the deprecated list.