code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def assemble(input_):
    """Assemble the given source string, leaving the result in the MEMORY global.

    :param input_: source text to assemble.
    :return: the global error flag (``gl.has_errors``).
    """
    global MEMORY

    # Lazily create the global memory object on first use.
    if MEMORY is None:
        MEMORY = Memory()

    parser.parse(input_, lexer=LEXER, debug=OPTIONS.Debug.value > 2)

    # A scope left open after parsing means a PROC was never closed.
    if MEMORY.scopes:
        error(MEMORY.scopes[-1], 'Missing ENDP to close this scope')

    return gl.has_errors
Assembles the input string, and leaves the result in the MEMORY global object
def next(self):
    """Return the next CSV row as a dict mapping header names to cell values.

    Returns:
        object - Object key/value pair representing a row.
        {key1: value1, key2: value2, ...}

    Raises:
        Exception: re-raises whatever the underlying reader raised
            (including ``StopIteration`` at EOF) after closing the file.
    """
    try:
        row = self._csv_reader.next()
        # Pair each cell with its corresponding header by position.
        return {self._headers[i]: row[i] for i in range(len(row))}
    except Exception as e:
        # Done reading (or hit an error): release the file handle.
        self._file.close()
        raise e
Gets next entry as a dictionary. Returns: object - Object key/value pair representing a row. {key1: value1, key2: value2, ...}
def forum_list(context, forum_visibility_contents):
    """Render the considered forum list.

    Renders the given list of forums while respecting the order and the
    depth of each forum in the forums tree.

    Usage::

        {% forum_list my_forums %}
    """
    request = context.get('request')
    handler = TrackingHandler(request=request)
    unread = handler.get_unread_forums_from_list(
        request.user, forum_visibility_contents.forums)
    data = {
        'forum_contents': forum_visibility_contents,
        'unread_forums': unread,
        'user': request.user,
        'request': request,
    }
    root_level = forum_visibility_contents.root_level
    # Pre-compute the nesting levels used by the template when known.
    if root_level is not None:
        data.update(
            root_level=root_level,
            root_level_middle=root_level + 1,
            root_level_sub=root_level + 2,
        )
    return data
Renders the considered forum list. This will render the given list of forums by respecting the order and the depth of each forum in the forums tree. Usage:: {% forum_list my_forums %}
def to_aws_name(self, name):
    """Transliterate *name* into a string that is safe for AWS resource names.

    A relative name is first converted to its absolute form. A single ``'_'``
    separates name components while a literal ``'_'`` inside a component is
    encoded as ``'__'``; the leading ``'/'`` of the absolute name is dropped.
    The mapping is invertible (see ``from_aws_name``) as long as no component
    starts with ``'_'`` -- otherwise ``'/_'`` would become ``'___'``, whose
    inverse is ``'_/'``.

    >>> ctx = Context( 'us-west-1b', namespace='/' )
    >>> ctx.to_aws_name( 'foo' )
    'foo'
    >>> ctx.to_aws_name( '/sub_ns/foo_bar')
    'sub__ns_foo__bar'
    >>> ctx.to_aws_name( 'foo_bar')
    'foo__bar'
    """
    absolute = self.absolute_name(name)
    assert absolute.startswith('/')
    # Encode literal underscores first, then turn path separators into '_'.
    return absolute[1:].replace('_', '__').replace('/', '_')
Returns a transliteration of the name that safe to use for resource names on AWS. If the given name is relative, it converted to its absolute form before the transliteration. The transliteration uses two consequitive '_' to encode a single '_' and a single '_' to separate the name components. AWS-safe names are by definition absolute such that the leading separator can be removed. This leads to fairly readable AWS-safe names, especially for names in the root namespace, where the transliteration is the identity function if the input does not contain any '_'. This scheme only works if name components don't start with '_'. Without that condition, '/_' would become '___' the inverse of which is '_/'. >>> ctx = Context( 'us-west-1b', namespace='/' ) >>> ctx.to_aws_name( 'foo' ) 'foo' >>> ctx.from_aws_name( 'foo' ) 'foo' Illegal paths that would introduce ambiguity need to raise an exception >>> ctx.to_aws_name('/_') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... InvalidPathError: Invalid path '/_' >>> ctx.to_aws_name('/_/') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... InvalidPathError: Invalid path '/_/' >>> ctx.from_aws_name('___') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... 
InvalidPathError: Invalid path '/_/' >>> ctx.to_aws_name( 'foo_bar') 'foo__bar' >>> ctx.from_aws_name( 'foo__bar') 'foo_bar' >>> ctx.to_aws_name( '/sub_ns/foo_bar') 'sub__ns_foo__bar' >>> ctx.to_aws_name( 'sub_ns/foo_bar') 'sub__ns_foo__bar' >>> ctx.from_aws_name( 'sub__ns_foo__bar' ) 'sub_ns/foo_bar' >>> ctx.to_aws_name( 'g_/' ) 'g___' >>> ctx.from_aws_name( 'g___' ) 'g_/' >>> ctx = Context( 'us-west-1b', namespace='/this_ns/' ) >>> ctx.to_aws_name( 'foo' ) 'this__ns_foo' >>> ctx.from_aws_name( 'this__ns_foo' ) 'foo' >>> ctx.to_aws_name( 'foo_bar') 'this__ns_foo__bar' >>> ctx.from_aws_name( 'this__ns_foo__bar') 'foo_bar' >>> ctx.to_aws_name( '/other_ns/foo_bar' ) 'other__ns_foo__bar' >>> ctx.from_aws_name( 'other__ns_foo__bar' ) '/other_ns/foo_bar' >>> ctx.to_aws_name( 'other_ns/foo_bar' ) 'this__ns_other__ns_foo__bar' >>> ctx.from_aws_name( 'this__ns_other__ns_foo__bar' ) 'other_ns/foo_bar' >>> ctx.to_aws_name( '/this_ns/foo_bar' ) 'this__ns_foo__bar' >>> ctx.from_aws_name( 'this__ns_foo__bar' ) 'foo_bar'
def stoichiometry_coefficients(compound, elements):
    """Determine the stoichiometry coefficients of the specified elements in
    the specified chemical compound.

    :param compound: Formula of a chemical compound, e.g. 'SiO2'.
    :param elements: List of elements, e.g. ['Si', 'O', 'C'].

    :returns: List of stoichiometry coefficients.
    """
    # Parse the (whitespace-trimmed) formula once and count every element.
    counts = parse_compound(compound.strip()).count()
    return [counts[element] for element in elements]
Determine the stoichiometry coefficients of the specified elements in the specified chemical compound. :param compound: Formula of a chemical compound, e.g. 'SiO2'. :param elements: List of elements, e.g. ['Si', 'O', 'C']. :returns: List of stoichiometry coefficients.
def replace(self, html):
    """Perform replacements on given HTML fragment.

    Repeatedly applies ``self.pattern`` (one match at a time) to the plain
    text of *html*, rewriting the corresponding span of the underlying
    markup when ``self._is_replacement_allowed`` approves it.
    """
    # NOTE(review): assumes `html` is a project wrapper supporting .text()
    # plus slice get/set of the underlying markup -- confirm with callers.
    self.html = html
    text = html.text()
    # End offsets of all matches handled so far. `text` is re-sliced after
    # each match, so later match positions are relative; summing the stored
    # offsets converts them back to absolute positions in the fragment.
    positions = []

    def perform_replacement(match):
        offset = sum(positions)
        # Absolute span of this match within the original fragment.
        start, stop = match.start() + offset, match.end() + offset
        s = self.html[start:stop]
        if self._is_replacement_allowed(s):
            repl = match.expand(self.replacement)
            self.html[start:stop] = repl
        else:
            repl = match.group()  # no replacement takes place
        positions.append(match.end())
        return repl

    while True:
        # Drop the already-processed prefix before searching again.
        if positions:
            text = text[positions[-1]:]
        text, n = self.pattern.subn(perform_replacement, text, count=1)
        if not n:  # all is already replaced
            break
Perform replacements on given HTML fragment.
def addGene(self, gene_id, gene_label, gene_type=None, gene_description=None):
    """Add a gene to the graph; genes are modeled as classes."""
    # Default to the generic gene type from the global translation table.
    if gene_type is None:
        gene_type = self.globaltt['gene']
    self.model.addClassToGraph(gene_id, gene_label, gene_type, gene_description)
genes are classes
def main():
    """
    Continues to validate patterns until it encounters EOF within a
    pattern file or Ctrl-C is pressed by the user.
    """
    parser = argparse.ArgumentParser(description='Validate STIX Patterns.')
    parser.add_argument('-f', '--file',
                        help="Specify this arg to read patterns from a file.",
                        type=argparse.FileType("r"))
    args = parser.parse_args()

    passed = 0
    failed = 0

    # I tried using a generator (where each iteration would run raw_input()),
    # but raw_input()'s behavior seems to change when called from within a
    # generator: I only get one line, then the generator completes! I don't
    # know why behavior changes...
    import functools
    if args.file:
        read_pattern = args.file.readline
    else:
        read_pattern = functools.partial(
            six.moves.input, "Enter a pattern to validate: ")

    try:
        while True:
            pattern = read_pattern()
            if not pattern:
                break
            ok, err_strings = validate(pattern, True)
            if ok:
                print("\nPASS: %s" % pattern)
                passed += 1
            else:
                for err in err_strings:
                    print(err, '\n')
                failed += 1
    except (EOFError, KeyboardInterrupt):
        # Interactive session ended; fall through to the summary.
        pass
    finally:
        if args.file:
            args.file.close()
        print("\nPASSED:", passed, " patterns")
        print("FAILED:", failed, " patterns")
Continues to validate patterns until it encounters EOF within a pattern file or Ctrl-C is pressed by the user.
def update(self, pid, session, **kwargs):
    '''taobao.fenxiao.product.update -- Update a product.

    - Updates product data on the distribution (fenxiao) platform; the call
      fails if no update data is passed.
    - When adding or removing SKUs, the legacy sku_ids field is ignored;
      use sku_properties and sku_properties_del instead.

    :param pid: product id to update.
    :param session: authorized TOP session.
    :param kwargs: optional product fields (see tuple below).
    :return: self, refreshed with ``pid`` and ``modified`` from the response.
    '''
    request = TOPRequest('taobao.fenxiao.product.update')
    request['pid'] = pid
    for k, v in kwargs.iteritems():
        # Skip unrecognized keys whose value was left as None; everything
        # else is forwarded verbatim.  (Fixed: compare to None with `is`,
        # not `==`, per PEP 8 -- `==` could invoke a custom __eq__.)
        if k not in ('name', 'standard_price', 'cost_price', 'retail_price_low',
                     'retail_price_high', 'outer_id', 'quantity', 'alarm_number',
                     'desc', 'prov', 'city', 'postage_type', 'postage_id',
                     'postage_ordinary', 'postage_fast', 'postage_ems', 'status',
                     'sku_ids', 'sku_cost_prices', 'sku_quantitys', 'sku_outer_ids',
                     'have_invoice', 'have_guarantee', 'discount_id',
                     'sku_standard_prices', 'sku_properties', 'sku_properties_del',
                     'is_authz', 'pic_path', 'image', 'properties', 'property_alias',
                     'input_properties', 'dealer_cost_price',
                     'sku_dealer_cost_prices', 'category_id') and v is None:
            continue
        request[k] = v
    self.create(self.execute(request, session),
                fields=['pid', 'modified'], models={'modified': TOPDate})
    return self
taobao.fenxiao.product.update 更新产品 - 更新分销平台产品数据,不传更新数据返回失败 - 对sku进行增、删操作时,原有的sku_ids字段会被忽略,请使用sku_properties和sku_properties_del。
def _do_setup(self): """Setup basic parameters for this class. `base` is the numeric base which when raised to `power` is equivalent to 1 unit of the corresponding prefix. I.e., base=2, power=10 represents 2^10, which is the NIST Binary Prefix for 1 Kibibyte. Likewise, for the SI prefix classes `base` will be 10, and the `power` for the Kilobyte is 3. """ (self._base, self._power, self._name_singular, self._name_plural) = self._setup() self._unit_value = self._base ** self._power
Setup basic parameters for this class. `base` is the numeric base which when raised to `power` is equivalent to 1 unit of the corresponding prefix. I.e., base=2, power=10 represents 2^10, which is the NIST Binary Prefix for 1 Kibibyte. Likewise, for the SI prefix classes `base` will be 10, and the `power` for the Kilobyte is 3.
def many(cls, filter=None, **kwargs):
    """Return a list of documents matching the filter"""
    from mongoframes.queries import Condition, Group, to_refs

    # Flatten the projection (defaulting to the frame's own projection).
    projection, references, subs = cls._flatten_projection(
        kwargs.get('projection', cls._default_projection))
    kwargs['projection'] = projection

    # Normalize query objects into raw dictionaries.
    if isinstance(filter, (Condition, Group)):
        filter = filter.to_dict()

    documents = list(cls.get_collection().find(to_refs(filter), **kwargs))

    # Dereference the documents (if required).
    if references:
        cls._dereference(documents, references)

    # Add sub-frames to the documents (if required).
    if subs:
        cls._apply_sub_frames(documents, subs)

    return [cls(d) for d in documents]
Return a list of documents matching the filter
def __ProcessHttpResponse(self, method_config, http_response, request):
    """Process the given http response.

    Raises HttpError for non-2xx statuses, substitutes an empty JSON body
    for 204 responses, then decodes and (unless raw JSON was requested)
    deserializes the payload into the configured response type.
    """
    if http_response.status_code not in (http_client.OK,
                                         http_client.CREATED,
                                         http_client.NO_CONTENT):
        raise exceptions.HttpError.FromResponse(
            http_response, method_config=method_config, request=request)
    if http_response.status_code == http_client.NO_CONTENT:
        # TODO(craigcitro): Find out why _replace doesn't seem to work
        # here.
        http_response = http_wrapper.Response(
            info=http_response.info, content='{}',
            request_url=http_response.request_url)

    content = http_response.content
    # NOTE(review): the original mixed `self._client` and `self.__client`
    # for what appears to be the same object; unified on the name-mangled
    # `self.__client` used everywhere else in this method -- confirm no
    # separate `_client` attribute exists on this class.
    if self.__client.response_encoding and isinstance(content, bytes):
        content = content.decode(self.__client.response_encoding)

    if self.__client.response_type_model == 'json':
        # Caller asked for raw JSON; skip message deserialization.
        return content

    response_type = _LoadClass(method_config.response_type_name,
                               self.__client.MESSAGES_MODULE)
    return self.__client.DeserializeMessage(response_type, content)
Process the given http response.
def deltaW(N, m, h):
    """Generate sequence of Wiener increments for m independent Wiener
    processes W_j(t) j=0..m-1 for each of N time intervals of length h.

    Returns:
      dW (array of shape (N, m)): The [n, j] element has the value
      W_j((n+1)*h) - W_j(n*h)
    """
    # Increments over an interval of length h are N(0, h), i.e. std sqrt(h).
    std_dev = np.sqrt(h)
    return np.random.normal(loc=0.0, scale=std_dev, size=(N, m))
Generate sequence of Wiener increments for m independent Wiener processes W_j(t) j=0..m-1 for each of N time intervals of length h. Returns: dW (array of shape (N, m)): The [n, j] element has the value W_j((n+1)*h) - W_j(n*h)
def transitive_subgraph_of_addresses(self, addresses, *vargs, **kwargs):
    """Returns all transitive dependencies of `addresses`.

    Note that this uses `walk_transitive_dependencies_graph` and the
    predicate is passed through, hence it trims graphs rather than just
    filtering out Targets that do not match the predicate. See
    `walk_transitive_dependency_graph` for more detail on `predicate`.

    :API: public

    :param list<Address> addresses: The root addresses to transitively
      close over.
    :param bool postorder: When ``True``, the traversal order is postorder
      (children before parents), else it is preorder (parents before
      children).
    :param function predicate: If this parameter is not given, no Targets
      will be filtered out of the closure.  If it is given, any Target
      which fails the predicate will not be walked, nor will its
      dependencies -- effectively trimming any subgraph reachable only
      through failing Targets.
    :param function dep_predicate: Takes two parameters, the current target
      and the dependency of the current target.  If this parameter is not
      given, no dependencies will be filtered when traversing the closure.
      If it is given, when the predicate fails, the edge to the dependency
      will not be expanded.
    """
    closure = OrderedSet()
    # Delegate the traversal; collecting into an OrderedSet preserves
    # first-visit order while deduplicating.
    self.walk_transitive_dependency_graph(addresses, closure.add,
                                          *vargs, **kwargs)
    return closure
Returns all transitive dependencies of `addresses`. Note that this uses `walk_transitive_dependencies_graph` and the predicate is passed through, hence it trims graphs rather than just filtering out Targets that do not match the predicate. See `walk_transitive_dependency_graph for more detail on `predicate`. :API: public :param list<Address> addresses: The root addresses to transitively close over. :param function predicate: The predicate passed through to `walk_transitive_dependencies_graph`. :param bool postorder: When ``True``, the traversal order is postorder (children before parents), else it is preorder (parents before children). :param function predicate: If this parameter is not given, no Targets will be filtered out of the closure. If it is given, any Target which fails the predicate will not be walked, nor will its dependencies. Thus predicate effectively trims out any subgraph that would only be reachable through Targets that fail the predicate. :param function dep_predicate: Takes two parameters, the current target and the dependency of the current target. If this parameter is not given, no dependencies will be filtered when traversing the closure. If it is given, when the predicate fails, the edge to the dependency will not be expanded.
def _bit_is_one(self, n, hash_bytes): """ Check if the n (index) of hash_bytes is 1 or 0. """ scale = 16 # hexadecimal if not hash_bytes[int(n / (scale / 2))] >> int( (scale / 2) - ((n % (scale / 2)) + 1)) & 1 == 1: return False return True
Check if the n (index) of hash_bytes is 1 or 0.
def validate(
    message,
    get_certificate=lambda url: urlopen(url).read(),
    certificate_url_regex=DEFAULT_CERTIFICATE_URL_REGEX,
    max_age=DEFAULT_MAX_AGE
):
    """Validate a decoded SNS message.

    Parameters:
        message:
            Decoded SNS message.
        get_certificate:
            Function that receives a URL, and returns the certificate from
            that URL as a string.  The default doesn't implement caching.
        certificate_url_regex:
            Regex that validates the signing certificate URL.  Default
            value checks it's hosted on an AWS-controlled domain, in the
            format "https://sns.<data-center>.amazonaws.com/"
        max_age:
            Maximum age of an SNS message before it fails validation,
            expressed as a `datetime.timedelta`.  Defaults to one hour,
            the max. lifetime of an SNS message.
    """
    # First gate: the signing certificate must come from a trusted URL.
    SigningCertURLValidator(certificate_url_regex).validate(message)

    # Second gate: reject stale messages.
    if not isinstance(max_age, datetime.timedelta):
        raise ValueError("max_age must be None or a timedelta object")
    MessageAgeValidator(max_age).validate(message)

    # Basic checks passed: fetch the certificate.  The URL was validated
    # above, so a malicious server is not a concern here.
    certificate = get_certificate(message["SigningCertURL"])

    # Final gate: verify the cryptographic signature.
    SignatureValidator(certificate).validate(message)
Validate a decoded SNS message. Parameters: message: Decoded SNS message. get_certificate: Function that receives a URL, and returns the certificate from that URL as a string. The default doesn't implement caching. certificate_url_regex: Regex that validates the signing certificate URL. Default value checks it's hosted on an AWS-controlled domain, in the format "https://sns.<data-center>.amazonaws.com/" max_age: Maximum age of an SNS message before it fails validation, expressed as a `datetime.timedelta`. Defaults to one hour, the max. lifetime of an SNS message.
def handleButtonClick(self, button):
    """
    Handles the button click for this widget.  If the Reset button was
    clicked, then the resetRequested signal will be emitted.  All buttons
    will emit the buttonClicked signal.

    :param      button | <QAbstractButton>
    """
    # Respect Qt's signal blocking: emit nothing while blocked.
    if self.signalsBlocked():
        return

    reset_button = self._buttonBox.button(QDialogButtonBox.Reset)
    if button == reset_button:
        self.resetRequested.emit()

    self.buttonClicked.emit(button)
Handles the button click for this widget. If the Reset button was clicked, then the resetRequested signal will be emitted. All buttons will emit the buttonClicked signal. :param button | <QAbstractButton>
def add_package_origins(self, modpath):
    """Whenever you 'import a.b.c', Python automatically binds 'b' in a to
    the a.b module and binds 'c' in a.b to the a.b.c module -- record those
    implicit bindings for every prefix of *modpath*."""
    segments = modpath.split('.')
    prefix = segments[0]
    for segment in segments[1:]:
        qualified = '%s.%s' % (prefix, segment)
        # Only record the binding when the submodule can actually be found.
        if self.find_module(qualified):
            self.add(prefix, segment, qualified)
        prefix = qualified
Whenever you 'import a.b.c', Python automatically binds 'b' in a to the a.b module and binds 'c' in a.b to the a.b.c module.
def bm3_g(p, v0, g0, g0p, k0, k0p):
    """Calculate shear modulus at given pressure.

    Not fully tested with mdaap.

    :param p: pressure
    :param v0: volume at reference condition
    :param g0: shear modulus at reference condition
    :param g0p: pressure derivative of shear modulus at reference condition
    :param k0: bulk modulus at reference condition
    :param k0p: pressure derivative of bulk modulus at reference condition
    :return: shear modulus at high pressure
    """
    # Split parameters into shear-related and equation-of-state groups.
    shear_params = [g0, g0p]
    eos_params = [v0, k0, k0p]
    return cal_g_bm3(p, shear_params, eos_params)
calculate shear modulus at given pressure. not fully tested with mdaap. :param p: pressure :param v0: volume at reference condition :param g0: shear modulus at reference condition :param g0p: pressure derivative of shear modulus at reference condition :param k0: bulk modulus at reference condition :param k0p: pressure derivative of bulk modulus at reference condition :return: shear modulus at high pressure
def get_feed_list(opml_obj: OPML) -> List[str]:
    """Walk an OPML document and return the feed URLs it contains."""
    feeds = []

    def walk(node):
        for outline in node.outlines:
            # An 'rss' outline carrying a feed URL is a leaf we keep.
            if outline.type == 'rss' and outline.xml_url:
                feeds.append(outline.xml_url)
            # Recurse into nested outlines (folders).
            if outline.outlines:
                walk(outline)

    walk(opml_obj)
    return feeds
Walk an OPML document to extract the list of feeds it contains.
def QA_indicator_DMI(DataFrame, M1=14, M2=6):
    """DMI -- Directional Movement Index (trend indicator).

    :param DataFrame: OHLC dataframe exposing high/low/close columns.
    :param M1: smoothing period for the TR / directional-movement sums.
    :param M2: smoothing period for ADX and the ADXR lookback.
    :return: pd.DataFrame with columns DI1, DI2, ADX and ADXR.
    """
    HIGH = DataFrame.high
    LOW = DataFrame.low
    CLOSE = DataFrame.close
    # (Fixed: removed unused local OPEN = DataFrame.open.)
    # True range, summed over M1 periods.
    TR = SUM(MAX(MAX(HIGH - LOW, ABS(HIGH - REF(CLOSE, 1))),
                 ABS(LOW - REF(CLOSE, 1))), M1)
    HD = HIGH - REF(HIGH, 1)
    LD = REF(LOW, 1) - LOW
    # Positive / negative directional movement, summed over M1 periods.
    DMP = SUM(IFAND(HD > 0, HD > LD, HD, 0), M1)
    DMM = SUM(IFAND(LD > 0, LD > HD, LD, 0), M1)
    # Directional indicators as a percentage of true range.
    DI1 = DMP * 100 / TR
    DI2 = DMM * 100 / TR
    # Average directional index and its M2-period smoothed variant.
    ADX = MA(ABS(DI2 - DI1) / (DI1 + DI2) * 100, M2)
    ADXR = (ADX + REF(ADX, M2)) / 2
    return pd.DataFrame({
        'DI1': DI1, 'DI2': DI2, 'ADX': ADX, 'ADXR': ADXR
    })
趋向指标 DMI
def letternum(letter):
    """Get the number corresponding to a letter.

    :param letter: a single-character string.
    :return: 1-based position of the letter in the alphabet
        ('a'/'A' -> 1, 'z'/'Z' -> 26), or None for non-letter characters
        (preserving the original behavior).
    :raises TypeError: if `letter` is not a string.
    :raises ValueError: if `letter` is not exactly one character long.
    """
    if not isinstance(letter, str):
        raise TypeError("Invalid letter provided.")
    if not len(letter) == 1:
        raise ValueError("Invalid letter length provided.")
    # str.find replaces the manual O(26) scan: 0-based index or -1.
    index = string.ascii_lowercase.find(letter.lower())
    return index + 1 if index != -1 else None
Get The Number Corresponding To A Letter
def stringpatterns(table, field):
    """
    Profile string patterns in the given field, returning a table of
    patterns, counts and frequencies. E.g.::

        >>> import petl as etl
        >>> table = [['foo', 'bar'],
        ...          ['Mr. Foo', '123-1254'],
        ...          ['Mrs. Bar', '234-1123'],
        ...          ['Mr. Spo', '123-1254'],
        ...          [u'Mr. Baz', u'321 1434'],
        ...          [u'Mrs. Baz', u'321 1434'],
        ...          ['Mr. Quux', '123-1254-XX']]
        >>> etl.stringpatterns(table, 'foo')
        +------------+-------+---------------------+
        | pattern    | count | frequency           |
        +============+=======+=====================+
        | 'Aa. Aaa'  |     3 |                 0.5 |
        +------------+-------+---------------------+
        | 'Aaa. Aaa' |     2 |  0.3333333333333333 |
        +------------+-------+---------------------+
        | 'Aa. Aaaa' |     1 | 0.16666666666666666 |
        +------------+-------+---------------------+

    """
    # Count each distinct pattern, most common first.
    ranked = stringpatterncounter(table, field).most_common()
    total = sum(count for _, count in ranked)
    rows = [('pattern', 'count', 'frequency')]
    for pattern, count in ranked:
        rows.append((pattern, count, float(count) / total))
    return wrap(rows)
Profile string patterns in the given field, returning a table of patterns, counts and frequencies. E.g.:: >>> import petl as etl >>> table = [['foo', 'bar'], ... ['Mr. Foo', '123-1254'], ... ['Mrs. Bar', '234-1123'], ... ['Mr. Spo', '123-1254'], ... [u'Mr. Baz', u'321 1434'], ... [u'Mrs. Baz', u'321 1434'], ... ['Mr. Quux', '123-1254-XX']] >>> etl.stringpatterns(table, 'foo') +------------+-------+---------------------+ | pattern | count | frequency | +============+=======+=====================+ | 'Aa. Aaa' | 3 | 0.5 | +------------+-------+---------------------+ | 'Aaa. Aaa' | 2 | 0.3333333333333333 | +------------+-------+---------------------+ | 'Aa. Aaaa' | 1 | 0.16666666666666666 | +------------+-------+---------------------+ >>> etl.stringpatterns(table, 'bar') +---------------+-------+---------------------+ | pattern | count | frequency | +===============+=======+=====================+ | '999-9999' | 3 | 0.5 | +---------------+-------+---------------------+ | '999 9999' | 2 | 0.3333333333333333 | +---------------+-------+---------------------+ | '999-9999-AA' | 1 | 0.16666666666666666 | +---------------+-------+---------------------+
def return_port(port):
    """Return a port that is no longer being used so it can be reused."""
    if port in _random_ports:
        # Randomly-found ports are simply forgotten; they are not pooled
        # for reuse the way owned ports are.
        _random_ports.remove(port)
    elif port in _owned_ports:
        # Owned ports go back into the free pool for future pick_* calls.
        _owned_ports.remove(port)
        _free_ports.add(port)
    elif port in _free_ports:
        logging.info("Returning a port that was already returned: %s", port)
    else:
        # Port was never handed out by this module; log and ignore.
        logging.info("Returning a port that wasn't given by portpicker: %s",
                     port)
Return a port that is no longer being used so it can be reused.
def delete_credential(self, identifier, credential_id=None):
    """Delete the object storage credential.

    :param int identifier: The object storage account identifier.
    :param int credential_id: The credential id to be deleted.
    """
    # The API expects the credential wrapped in an object carrying its id.
    credential = {'id': credential_id}
    return self.client.call(
        'SoftLayer_Network_Storage_Hub_Cleversafe_Account',
        'credentialDelete', credential, id=identifier)
Delete the object storage credential. :param int id: The object storage account identifier. :param int credential_id: The credential id to be deleted.
def extract_data(self):
    """Extracts data from archive.

    Returns:
        PackageData object containing the extracted data.
    """
    # Prefer an explicit rpm name, falling back to the converted name.
    rpm_pkg_name = self.rpm_name or self.name_convertor.rpm_name(
        self.name, pkg_name=True)
    data = PackageData(
        local_file=self.local_file,
        name=self.name,
        pkg_name=rpm_pkg_name,
        version=self.version,
        srcname=self.srcname)

    with self.archive:
        data.set_from(self.data_from_archive)

    # For example nose has a `packages` attribute but uses a function to
    # find the packages instead of naming them, which leaves data.packages
    # as an empty list when virtualenv extraction is disabled.
    if self.venv_extraction_disabled and getattr(data, "packages") == []:
        data.packages = [data.name]

    return data
Extracts data from archive. Returns: PackageData object containing the extracted data.
def import_committees(src):
    """Read the committees from the YAML files under *src* into DataFrames.

    Intended for importing new data.

    :param src: root directory of the legislators data checkout.
    :returns: ``[committees_df, subcommittees_df]`` -- two DataFrames.
    """
    committees = []
    subcommittees = []

    # NOTE(review): yaml.load without an explicit Loader is deprecated and
    # can construct arbitrary objects; fine for this trusted local dataset,
    # but consider yaml.safe_load.
    with open("{0}/{1}/committees-current.yaml".format(src, LEGISLATOR_DIR), 'r') as stream:
        committees += yaml.load(stream)
    with open("{0}/{1}/committees-historical.yaml".format(src, LEGISLATOR_DIR), 'r') as stream:
        committees += yaml.load(stream)

    # Sub Committees are not Committees
    # And unfortunately the good folk at thomas thought modeling data with duplicate id's was a good idea.
    # you can have two subcommittees with the ID 12. Makes a simple membership map impossible.
    for com in committees:
        com['committee_id'] = com['thomas_id']
        if 'subcommittees' in com:
            # process sub committees into separate DataFrame
            for subcom in com.get('subcommittees'):
                subcom['committee_id'] = com[
                    'thomas_id'
                ]  # we use committee_id so we can easily merge dataframes
                # Disambiguate duplicate thomas_ids by prefixing the parent.
                subcom['subcommittee_id'] = "{0}-{1}".format(
                    subcom['committee_id'], subcom['thomas_id'])
                subcommittees.append(subcom)
            del com['subcommittees']

    committees_df = pd.DataFrame(committees)
    subcommittees_df = pd.DataFrame(subcommittees)
    return [committees_df, subcommittees_df]
Read the committees from the YAML files into DataFrames (committees and subcommittees). Intended for importing new data.
def to_uint(self):
    """Convert vector to an unsigned integer, if possible.

    This is only useful for arrays filled with zero/one entries.
    """
    value = 0
    for position, item in enumerate(self._items):
        if item.is_zero():
            continue
        if item.is_one():
            # Item at `position` contributes bit 2**position.
            value += 1 << position
        else:
            fstr = "expected all functions to be a constant (0 or 1) form"
            raise ValueError(fstr)
    return value
Convert vector to an unsigned integer, if possible. This is only useful for arrays filled with zero/one entries.
def _connect(self):
    """Connect to PostgreSQL, either by reusing a connection from the pool
    if possible, or by creating the new connection.

    :rtype: psycopg2.extensions.connection
    :raises: pool.NoIdleConnectionsError

    NOTE(review): despite the ``:rtype:`` above, this method actually
    returns a Future that resolves to the connection -- confirm and fix
    the annotation upstream.
    """
    future = concurrent.Future()

    # Attempt to get a cached connection from the connection pool
    try:
        connection = self._pool_manager.get(self.pid, self)
        # Track the live connection by file descriptor for event dispatch.
        self._connections[connection.fileno()] = connection
        future.set_result(connection)

        # Add the connection to the IOLoop
        self._ioloop.add_handler(connection.fileno(),
                                 self._on_io_events,
                                 ioloop.IOLoop.WRITE)
    except pool.NoIdleConnectionsError:
        # No pooled connection available: create one asynchronously; the
        # helper is responsible for resolving `future`.
        self._create_connection(future)

    return future
Connect to PostgreSQL, either by reusing a connection from the pool if possible, or by creating the new connection. :rtype: psycopg2.extensions.connection :raises: pool.NoIdleConnectionsError
def finish(self):
    """ Creates block of content with lines belonging to fragment. """
    # Lines were accumulated in reverse order; restore original order.
    # NOTE: this reverses the list in place, so any external reference to
    # the same list observes the reversal.
    self.lines.reverse()
    self._content = '\n'.join(self.lines)
    # Drop the working buffer now that the content block is frozen.
    self.lines = None
Creates block of content with lines belonging to fragment.
def _map_arg(arg):
    """ Return `arg` appropriately parsed or mapped to a usable value. """
    # Literal string nodes map to the repr of their value; numbers map to
    # the value itself.
    if isinstance(arg, _ast.Str):
        return repr(arg.s)
    if isinstance(arg, _ast.Num):
        return arg.n
    if isinstance(arg, _ast.Name):
        # Older ASTs spell the singleton constants as plain names.
        constants = {'True': True, 'False': False, 'None': None}
        if arg.id in constants:
            return constants[arg.id]
        return arg.id
    # Everything else we don't bother with
    return Unparseable()
Return `arg` appropriately parsed or mapped to a usable value.
def closest_point(mesh, points):
    """
    Given a mesh and a list of points, find the closest point
    on any triangle.

    Parameters
    ----------
    mesh        : Trimesh object
    points      : (m,3) float, points in space

    Returns
    ----------
    closest     : (m,3) float, closest point on triangles for each point
    distance    : (m,)  float, distance
    triangle_id : (m,)  int, index of triangle containing closest point
    """
    points = np.asanyarray(points, dtype=np.float64)
    if not util.is_shape(points, (-1, 3)):
        raise ValueError('points must be (n,3)!')

    # do a tree- based query for faces near each point
    candidates = nearby_faces(mesh, points)
    # view triangles as an ndarray so we don't have to recompute
    # the MD5 during all of the subsequent advanced indexing
    triangles = mesh.triangles.view(np.ndarray)

    # create the corresponding list of triangles
    # and query points to send to the closest_point function
    # (each point is duplicated once per candidate triangle)
    query_point = deque()
    query_tri = deque()
    for triangle_ids, point in zip(candidates, points):
        query_point.append(np.tile(point, (len(triangle_ids), 1)))
        query_tri.append(triangles[triangle_ids])

    # stack points into an (n,3) array
    query_point = np.vstack(query_point)
    # stack triangles into an (n,3,3) array
    query_tri = np.vstack(query_tri)

    # do the computation for closest point
    query_close = closest_point_corresponding(query_tri, query_point)
    # split offsets so per-point candidate groups can be recovered below
    query_group = np.cumsum(np.array([len(i) for i in candidates]))[:-1]

    # squared distances (cheaper; square root is deferred to the end)
    distance_2 = ((query_close - query_point) ** 2).sum(axis=1)

    # find the single closest point for each group of candidates
    result_close = np.zeros((len(points), 3), dtype=np.float64)
    result_tid = np.zeros(len(points), dtype=np.int64)
    result_distance = np.zeros(len(points), dtype=np.float64)

    # go through results to get minimum distance result
    for i, close_points, distance, candidate in zip(
            np.arange(len(points)),
            np.array_split(query_close, query_group),
            np.array_split(distance_2, query_group),
            candidates):

        # unless some other check is true use the smallest distance
        idx = distance.argmin()

        # if we have multiple candidates check them
        if len(candidate) > 1:
            # (2, ) int, list of 2 closest candidate indices
            idxs = distance.argsort()[:2]
            # make sure the two distances are identical
            check_distance = distance[idxs].ptp() < tol.merge
            # make sure the magnitude of both distances are nonzero
            check_magnitude = (np.abs(distance[idxs]) > tol.merge).all()

            # check if query-points are actually off-surface
            if check_distance and check_magnitude:
                # tie-break between two equidistant triangles by picking
                # the one whose normal best aligns with the query direction
                # get face normals for two points
                normals = mesh.face_normals[np.array(candidate)[idxs]]
                # compute normalized surface-point to query-point vectors
                vectors = ((points[i] - close_points[idxs]) /
                           distance[idxs, np.newaxis] ** 0.5)
                # compare enclosed angle for both face normals
                dots = util.diagonal_dot(normals, vectors)
                # take the idx with the most positive angle
                idx = idxs[dots.argmax()]

        # take the single closest value from the group of values
        result_close[i] = close_points[idx]
        result_tid[i] = candidate[idx]
        result_distance[i] = distance[idx]

    # we were comparing the distance squared so
    # now take the square root in one vectorized operation
    result_distance **= .5

    return result_close, result_distance, result_tid
Given a mesh and a list of points, find the closest point on any triangle. Parameters ---------- mesh : Trimesh object points : (m,3) float, points in space Returns ---------- closest : (m,3) float, closest point on triangles for each point distance : (m,) float, distance triangle_id : (m,) int, index of triangle containing closest point
async def start(self):
    """
    Open the TCP connection to the configured IP device, storing the
    resulting stream reader/writer pair on the instance.

    :return: None (exits the process on connection failure)
    """
    try:
        connection = await asyncio.open_connection(
            self.ip_address, self.port, loop=self.loop)
    except OSError:
        print("Can't open connection to " + self.ip_address)
        sys.exit(0)
    else:
        self.reader, self.writer = connection
This method opens an IP connection on the IP device :return: None
def resolvefaults(self, definitions, op):
    """
    Resolve soap fault I{message} references by
    cross-referencing with operation defined in port type.
    @param definitions: A definitions object.
    @type definitions: L{Definitions}
    @param op: An I{operation} object.
    @type op: I{operation}
    @raise Exception: when the operation or one of its faults is not
        defined in the port type.
    """
    ptop = self.type.operation(op.name)
    if ptop is None:
        # bugfix: replaced Python-2-only `raise Exception, msg` statement
        # syntax with the call form, valid on both Python 2 and 3
        raise Exception(
            "operation '%s' not defined in portType" % op.name)
    soap = op.soap
    for fault in soap.faults:
        # copy the message parts from the matching portType fault
        for f in ptop.faults:
            if f.name == fault.name:
                fault.parts = f.message.parts
                break
        if hasattr(fault, 'parts'):
            continue
        raise Exception(
            "fault '%s' not defined in portType '%s'"
            % (fault.name, self.type.name))
Resolve soap fault I{message} references by cross-referencing with operation defined in port type. @param definitions: A definitions object. @type definitions: L{Definitions} @param op: An I{operation} object. @type op: I{operation}
def _source_is_newer(src_fs, src_path, dst_fs, dst_path):
    # type: (FS, Text, FS, Text) -> bool
    """Decide whether the source file should replace the destination file.

    Arguments:
        src_fs (FS): Source filesystem (instance or URL).
        src_path (str): Path to a file on the source filesystem.
        dst_fs (FS): Destination filesystem (instance or URL).
        dst_path (str): Path to a file on the destination filesystem.

    Returns:
        bool: `True` when the source is newer, when the destination is
        missing, or when either modification time cannot be determined;
        `False` otherwise.

    """
    info_ns = ("details", "modified")
    try:
        if not dst_fs.exists(dst_path):
            # nothing at the destination yet: always copy
            return True
        src_mtime = src_fs.getinfo(src_path, info_ns).modified
        if src_mtime is None:
            # source age unknown: copy to be safe
            return True
        dst_mtime = dst_fs.getinfo(dst_path, info_ns).modified
        return dst_mtime is None or src_mtime > dst_mtime
    except FSError:  # pragma: no cover
        # todo: should log something here
        return True
Determine if source file is newer than destination file. Arguments: src_fs (FS): Source filesystem (instance or URL). src_path (str): Path to a file on the source filesystem. dst_fs (FS): Destination filesystem (instance or URL). dst_path (str): Path to a file on the destination filesystem. Returns: bool: `True` if the source file is newer than the destination file or file modification time cannot be determined, `False` otherwise.
def forwardSlash(listOfFiles):
    """Normalize Windows-style paths (``C:\\like\\this.txt``) to
    forward-slash form, rewriting the given list in place and
    returning it."""
    for index in range(len(listOfFiles)):
        listOfFiles[index] = listOfFiles[index].replace("\\", "/")
    return listOfFiles
convert silly C:\\names\\like\\this.txt to c:/names/like/this.txt
def eval_advs(self, x, y, preds_adv, X_test, Y_test, att_type):
    """
    Evaluate the accuracy of the model on adversarial examples

    :param x: symbolic input to model.
    :param y: symbolic variable for the label.
    :param preds_adv: symbolic variable for the prediction on an
                      adversarial example.
    :param X_test: NumPy array of test set inputs.
    :param Y_test: NumPy array of test set labels.
    :param att_type: name of the attack.
    :return: accuracy (float) on the evaluated slice of the test set.
    """
    # evaluate only whole batches; drop the trailing partial batch
    end = (len(X_test) // self.batch_size) * self.batch_size
    if self.hparams.fast_tests:
        # smoke-test mode: evaluate just the first 10 batches
        end = 10 * self.batch_size
    acc = model_eval(self.sess, x, y, preds_adv, X_test[:end],
                     Y_test[:end], args=self.eval_params)
    # record the result under a per-attack metric name
    self.log_value('test_accuracy_%s' % att_type, acc,
                   'Test accuracy on adversarial examples')
    return acc
Evaluate the accuracy of the model on adversarial examples :param x: symbolic input to model. :param y: symbolic variable for the label. :param preds_adv: symbolic variable for the prediction on an adversarial example. :param X_test: NumPy array of test set inputs. :param Y_test: NumPy array of test set labels. :param att_type: name of the attack.
def mzminus(df, minus=0, noise=10000):
    """
    The abundances of ions which are `minus` m/z units below the
    molecular ion.

    Parameters
    ----------
    df : DataFrame of abundances, columns are m/z values, rows are scans
    minus : offset (in m/z units) below the molecular ion to sum
    noise : abundance threshold below which a peak is ignored
    """
    # per scan: largest m/z whose abundance clears the noise floor,
    # shifted down by `minus`
    mol_ions = ((df.values > noise) * df.columns).max(axis=1) - minus
    # clamp negative m/z values to zero
    # (bugfix: the original tested `np.abs(mol_ions) < 0`, which is
    # never true, so negative values were never clamped)
    mol_ions[mol_ions < 0] = 0
    # select columns within 1 m/z unit of the target ion for each scan
    d = np.abs(np.ones(df.shape) * df.columns -
               (mol_ions[np.newaxis].T * np.ones(df.shape))) < 1
    d = (df.values * d).sum(axis=1)
    return Trace(d, df.index, name='m-' + str(minus))
The abundances of ions which are minus below the molecular ion.
def raise_http_error(cls, response):
    """Read the failed `http_client.HTTPResponse` body and raise the
    `ResponseError` subclass matching its HTTP status code."""
    body = response.read()
    logging.getLogger('recurly.http.response').debug(body)
    raise recurly.errors.error_class_for_http_status(response.status)(body)
Raise a `ResponseError` of the appropriate subclass in reaction to the given `http_client.HTTPResponse`.
def daemonize_posix(self):
    """
    do the UNIX double-fork magic, see Stevens' "Advanced
    Programming in the UNIX Environment" for details (ISBN 0201563177)

    http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16

    Returns the first child's pid in the original parent process;
    the daemonized (grand)child continues past the forks, redirects
    its standard streams and writes the pid file.
    """
    logger.info('daemonize_posix')
    try:
        pid = os.fork()
        if pid > 0:
            # original parent: report the child pid and return
            logger.debug('forked first child, pid = %d' % (pid,))
            return pid
        logger.debug('in child after first fork, pid = %d' % (pid, ))
    except OSError as error:
        logger.exception('fork #1')
        sys.stderr.write("fork #1 failed: %d (%s)\n" % (error.errno, error.strerror))
        sys.exit(1)

    # decouple from parent environment
    os.chdir("/")
    os.setsid()
    os.umask(0)

    # do second fork
    try:
        pid = os.fork()
        if pid > 0:
            # exit from second parent
            logger.debug('forked second child, pid = %d, exiting' % (pid,))
            sys.exit(0)
    except OSError as error:
        logger.exception('fork #2')
        sys.stderr.write("fork #2 failed: %d (%s)\n" % (error.errno, error.strerror))
        sys.exit(1)

    # redirect standard file descriptors
    # (note: here pid is the value fork returned in this child, i.e. 0)
    logger.info('daemonized, pid = %d' % (pid, ))
    sys.stdin.flush()
    sys.stdout.flush()
    sys.stderr.flush()
    os.dup2(self.stdin.fileno(), sys.stdin.fileno())
    os.dup2(self.stdout.fileno(), sys.stdout.fileno())
    os.dup2(self.stderr.fileno(), sys.stderr.fileno())

    # write pidfile, and remove it again at interpreter exit
    atexit.register(self.delpid)
    pid = str(os.getpid())
    with open(self.pidfile, 'w+') as fd:
        fd.write("%s\n" % pid)
do the UNIX double-fork magic, see Stevens' "Advanced Programming in the UNIX Environment" for details (ISBN 0201563177) http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
def to_utf8(text): """ Enforce UTF8 encoding. """ # return empty/false stuff unaltered if not text: if isinstance(text, string_types): text = "" return text try: # Is it a unicode string, or pure ascii? return text.encode("utf8") except UnicodeDecodeError: try: # Is it a utf8 byte string? if text.startswith(codecs.BOM_UTF8): text = text[len(codecs.BOM_UTF8):] return text.decode("utf8").encode("utf8") except UnicodeDecodeError: # Check BOM if text.startswith(codecs.BOM_UTF16_LE): encoding = "utf-16le" text = text[len(codecs.BOM_UTF16_LE):] elif text.startswith(codecs.BOM_UTF16_BE): encoding = "utf-16be" text = text[len(codecs.BOM_UTF16_BE):] else: # Assume CP-1252 encoding = "cp1252" try: return text.decode(encoding).encode("utf8") except UnicodeDecodeError as exc: for line in text.splitlines(): try: line.decode(encoding).encode("utf8") except UnicodeDecodeError: log.warn("Cannot transcode the following into UTF8 cause of %s: %r" % (exc, line)) break return text
Enforce UTF8 encoding.
def setattr_context(obj, **kwargs):
    """
    Temporarily override attributes of *obj* for the duration of the
    wrapped code, restoring the original values afterwards (even on
    error). Generator-based; presumably wrapped by
    ``contextlib.contextmanager`` at its definition site — confirm.
    """
    saved = {name: getattr(obj, name) for name in kwargs}
    for name, value in kwargs.items():
        setattr(obj, name, value)
    try:
        yield
    finally:
        # always restore the originals, even if the body raised
        for name, value in saved.items():
            setattr(obj, name, value)
Context manager to temporarily change the values of object attributes while executing a function. Example ------- >>> class Foo: pass >>> f = Foo(); f.attr = 'hello' >>> with setattr_context(f, attr='goodbye'): ... print(f.attr) goodbye >>> print(f.attr) hello
def commit(
        self,
        message: str,
        files_to_add: typing.Optional[typing.Union[typing.List[str], str]] = None,
        allow_empty: bool = False,
):
    """
    Commits changes to the repo

    :param message: first line of the message
    :type message: str
    :param files_to_add: files to commit; when None, stages everything
    :type files_to_add: optional list of str
    :param allow_empty: allow dummy commit
    :type allow_empty: bool

    Exits the process (-1) on an empty message or an empty index.
    """
    message = str(message)
    LOGGER.debug('message: %s', message)
    files_to_add = self._sanitize_files_to_add(files_to_add)
    LOGGER.debug('files to add: %s', files_to_add)
    if not message:
        LOGGER.error('empty commit message')
        sys.exit(-1)
    if os.getenv('APPVEYOR'):
        # avoid re-triggering CI from a CI-made commit
        LOGGER.info('committing on AV, adding skip_ci tag')
        message = self.add_skip_ci_to_commit_msg(message)
    if files_to_add is None:
        self.stage_all()
    else:
        # stage only the requested subset, starting from a clean index
        self.reset_index()
        self.stage_subset(*files_to_add)
    if self.index_is_empty() and not allow_empty:
        LOGGER.error('empty commit')
        sys.exit(-1)
    self.repo.index.commit(message=message)
Commits changes to the repo :param message: first line of the message :type message: str :param files_to_add: files to commit :type files_to_add: optional list of str :param allow_empty: allow dummy commit :type allow_empty: bool
def load_prefix(s3_loc, success_only=None, recent_versions=None, exclude_regex=None,
                just_sql=False):
    """Build one bash command that loads every dataset found under an S3
    prefix.

    Each dataset must live at
    `s3://$BUCKET_NAME/$PREFIX/$DATASET_NAME/v$VERSION/$PARTITIONS`;
    anything else is skipped (with a note on stderr).

    :param s3_loc: `s3://bucket/prefix` location to scan
    """
    bucket_name, prefix = _get_bucket_and_prefix(s3_loc)
    fragments = []
    for dataset in _get_common_prefixes(bucket_name, prefix):
        dataset = _remove_trailing_backslash(dataset)
        try:
            fragments.append(get_bash_cmd(
                's3://{}/{}'.format(bucket_name, dataset),
                success_only=success_only,
                recent_versions=recent_versions,
                exclude_regex=exclude_regex,
                just_sql=just_sql))
        except Exception as e:
            # a malformed dataset should not abort the whole listing
            sys.stderr.write('Failed to process {}, {}\n'.format(dataset, str(e)))
    return ''.join(fragments)
Get a bash command which will load every dataset in a bucket at a prefix. For this to work, all datasets must be of the form `s3://$BUCKET_NAME/$PREFIX/$DATASET_NAME/v$VERSION/$PARTITIONS`. Any other formats will be ignored. :param bucket_name :param prefix
def decipher(self, string):
    """Decipher string using Delastelle cipher according to initialised
    key. Digits are consumed three at a time and mapped back to letters,
    so the plaintext is one third the length of the ciphertext.

    :param string: The string to decipher.
    :returns: The deciphered string.
    """
    string = self.remove_punctuation(string, filter='[^' + self.chars + ']')
    letters = []
    for start in range(0, len(string), 3):
        # each consecutive digit triple indexes one plaintext letter
        triple = tuple(int(string[start + k]) for k in (0, 1, 2))
        letters.append(IND2L[triple])
    return ''.join(letters)
Decipher string using Delastelle cipher according to initialised key. Example:: plaintext = Delastelle('APCZ WRLFBDKOTYUQGENHXMIVS').decipher(ciphertext) :param string: The string to decipher. :returns: The deciphered string. The plaintext will be 1/3 the length of the ciphertext.
def generate_random_perovskite(lat=None):
    '''
    This generates a random valid perovskite structure in ASE format.
    Useful for testing. Binary and organic perovskites
    are not considered.

    :param lat: lattice parameter (Angstrom); randomized within a
        physically sensible range when omitted.
    :returns: an ASE Atoms crystal (orthorhombic, spacegroup 62).
    '''
    if not lat:
        lat = round(random.uniform(3.5, Perovskite_tilting.OCTAHEDRON_BOND_LENGTH_LIMIT*2), 3)
    A_site = random.choice(Perovskite_Structure.A)
    B_site = random.choice(Perovskite_Structure.B)
    Ci_site = random.choice(Perovskite_Structure.C)
    Cii_site = random.choice(Perovskite_Structure.C)

    # re-draw A/B until their covalent radii differ by 0.05..0.5 A,
    # so the pair is geometrically plausible for a perovskite
    while covalent_radii[chemical_symbols.index(A_site)] - \
        covalent_radii[chemical_symbols.index(B_site)] < 0.05 or \
        covalent_radii[chemical_symbols.index(A_site)] - \
        covalent_radii[chemical_symbols.index(B_site)] > 0.5:
        A_site = random.choice(Perovskite_Structure.A)
        B_site = random.choice(Perovskite_Structure.B)

    return crystal(
        [A_site, B_site, Ci_site, Cii_site],
        [(0.5, 0.25, 0.0), (0.0, 0.0, 0.0), (0.0, 0.25, 0.0), (0.25, 0.0, 0.75)],
        spacegroup=62, cellpar=[lat*math.sqrt(2), 2*lat, lat*math.sqrt(2), 90, 90, 90]
    )
This generates a random valid perovskite structure in ASE format. Useful for testing. Binary and organic perovskites are not considered.
def check_grad(f_df, xref, stepsize=1e-6, tol=1e-6, width=15, style='round', out=sys.stdout):
    """
    Compares the numerical gradient to the analytic gradient

    Parameters
    ----------
    f_df : function
        The analytic objective and gradient function to check
    x0 : array_like
        Parameter values to check the gradient at
    stepsize : float, optional
        Stepsize for the numerical gradient. Too big and this will poorly estimate the gradient.
        Too small and you will run into precision issues (default: 1e-6)
    tol : float, optional
        Tolerance to use when coloring correct/incorrect gradients (default: 1e-6)
    width : int, optional
        Width of the table columns (default: 15)
    style : string, optional
        Style of the printed table, see tableprint for a list of styles (default: 'round')

    Returns
    -------
    int : number of dimensions whose relative error exceeded ``tol``
    """
    CORRECT = u'\x1b[32m\N{CHECK MARK}\x1b[0m'
    INCORRECT = u'\x1b[31m\N{BALLOT X}\x1b[0m'

    obj, grad = wrap(f_df, xref, size=0)
    x0 = destruct(xref)
    df = grad(x0)

    # header
    out.write(tp.header(["Numerical", "Analytic", "Error"],
                        width=width, style=style) + "\n")
    out.flush()

    # helper function to color a relative error by severity
    # (bugfix: the original took a `number` parameter but formatted the
    # closed-over loop variable `error` instead; use the argument)
    def parse_error(error):
        # colors
        failure = "\033[91m"
        passing = "\033[92m"
        warning = "\033[93m"
        end = "\033[0m"
        base = "{}{:0.3e}{}"

        # correct
        if error < 0.1 * tol:
            return base.format(passing, error, end)

        # warning
        elif error < tol:
            return base.format(warning, error, end)

        # failure
        else:
            return base.format(failure, error, end)

    # check each dimension
    num_errors = 0
    for j in range(x0.size):

        # take a small step in one dimension
        dx = np.zeros(x0.size)
        dx[j] = stepsize

        # compute the centered difference formula
        df_approx = (obj(x0 + dx) - obj(x0 - dx)) / (2 * stepsize)
        df_analytic = df[j]

        # absolute error
        abs_error = np.linalg.norm(df_approx - df_analytic)

        # relative error (0 when both gradients are exactly zero)
        error = abs_error if np.allclose(abs_error, 0) else abs_error / \
            (np.linalg.norm(df_analytic) + np.linalg.norm(df_approx))

        num_errors += error >= tol
        errstr = CORRECT if error < tol else INCORRECT
        out.write(tp.row([df_approx, df_analytic, parse_error(error) + ' ' + errstr],
                         width=width, style=style) + "\n")
        out.flush()

    out.write(tp.bottom(3, width=width, style=style) + "\n")
    return num_errors
Compares the numerical gradient to the analytic gradient Parameters ---------- f_df : function The analytic objective and gradient function to check x0 : array_like Parameter values to check the gradient at stepsize : float, optional Stepsize for the numerical gradient. Too big and this will poorly estimate the gradient. Too small and you will run into precision issues (default: 1e-6) tol : float, optional Tolerance to use when coloring correct/incorrect gradients (default: 1e-5) width : int, optional Width of the table columns (default: 15) style : string, optional Style of the printed table, see tableprint for a list of styles (default: 'round')
def _match_real(filename, include, exclude, follow, symlinks):
    """Match real filename includes and excludes."""
    sep = '\\' if util.platform() == "windows" else '/'
    if isinstance(filename, bytes):
        sep = os.fsencode(sep)

    # directories are matched with a trailing separator appended
    if not filename.endswith(sep) and os.path.isdir(filename):
        filename += sep

    # must hit at least one include pattern...
    if not any(_fs_match(pattern, filename, sep, follow, symlinks)
               for pattern in include):
        return False
    # ...and none of the exclude patterns
    if exclude and any(_fs_match(pattern, filename, sep, follow, symlinks)
                       for pattern in exclude):
        return False
    return True
Match real filename includes and excludes.
def cublasSsyr2(handle, uplo, n, alpha, x, incx, y, incy, A, lda):
    """
    Rank-2 operation on real symmetric matrix.

    Thin ctypes wrapper over `cublasSsyr2_v2` (single-precision
    symmetric rank-2 update). `uplo` selects which triangle of A is
    referenced; `x`, `y` and `A` are device pointers passed as ints.
    Raises via cublasCheckStatus on a non-success status code.
    """
    status = _libcublas.cublasSsyr2_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       n, ctypes.byref(ctypes.c_float(alpha)),
                                       int(x), incx, int(y), incy,
                                       int(A), lda)
    cublasCheckStatus(status)
Rank-2 operation on real symmetric matrix.
def update_resource_assignments(self, id_or_uri, resource_assignments, timeout=-1):
    """
    Modifies scope membership by adding or removing resource assignments.

    Args:
        id_or_uri: Can be either the resource ID or the resource URI.
        resource_assignments (dict):
            A dict object with a list of resource URIs to be added and a
            list of resource URIs to be removed.
        timeout:
            Timeout in seconds. Waits for task completion by default;
            the timeout only stops the wait, it does not abort the
            OneView operation.

    Returns:
        dict: Updated resource.
    """
    target_uri = "{}/resource-assignments".format(self._client.build_uri(id_or_uri))
    custom_headers = {'Content-Type': 'application/json'}
    return self._client.patch_request(target_uri,
                                      resource_assignments,
                                      timeout=timeout,
                                      custom_headers=custom_headers)
Modifies scope membership by adding or removing resource assignments. Args: id_or_uri: Can be either the resource ID or the resource URI. resource_assignments (dict): A dict object with a list of resource URIs to be added and a list of resource URIs to be removed. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: dict: Updated resource.
def get_pyxb(self):
    """Generate a DataONE Exception PyXB object.

    The PyXB object supports directly reading and writing the individual
    values that may be included in a DataONE Exception.

    Required fields (name, errorCode, detailCode, traceInformation) are
    always copied; optional fields are set only when present.
    """
    dataone_exception_pyxb = dataoneErrors.error()
    # the exception's class name doubles as the DataONE error name
    dataone_exception_pyxb.name = self.__class__.__name__
    dataone_exception_pyxb.errorCode = self.errorCode
    dataone_exception_pyxb.detailCode = self.detailCode
    if self.description is not None:
        dataone_exception_pyxb.description = self.description
    dataone_exception_pyxb.traceInformation = self.traceInformation
    if self.identifier is not None:
        dataone_exception_pyxb.identifier = self.identifier
    if self.nodeId is not None:
        dataone_exception_pyxb.nodeId = self.nodeId
    return dataone_exception_pyxb
Generate a DataONE Exception PyXB object. The PyXB object supports directly reading and writing the individual values that may be included in a DataONE Exception.
def _run_cmd(self, command):
    """Run a DQL command.

    Executes the command through the engine (applying the configured
    rate limiter, if any), renders the result, and prints the consumed
    capacity per statement plus a total when more than one statement
    consumed capacity.
    """
    if self.throttle:
        tables = self.engine.describe_all(False)
        limiter = self.throttle.get_limiter(tables)
    else:
        limiter = None
    self.engine.rate_limit = limiter
    results = self.engine.execute(command)
    if results is None:
        pass
    elif isinstance(results, basestring):
        print(results)
    else:
        with self.display() as ostream:
            formatter = FORMATTERS[self.conf["format"]](
                results,
                ostream,
                pagesize=self.conf["pagesize"],
                width=self.conf["width"],
            )
            formatter.display()

    print_count = 0
    total = None
    for (cmd_fragment, capacity) in self.engine.consumed_capacities:
        # bugfix: `total` starts as None, so `total += capacity` raised
        # TypeError on the first iteration; seed with the first value
        total = capacity if total is None else total + capacity
        print(cmd_fragment)
        print(indent(str(capacity)))
        print_count += 1
    if print_count > 1:
        print("TOTAL")
        print(indent(str(total)))
Run a DQL command
def normalize(url, strip=False):
    """Normalize *url* per RFC 3986. When *strip* is true, the URL is
    first rebuilt from scheme, host and path only, dropping any
    query/fragment string."""
    if strip:
        parsed = _urltools.parse(url)
        url = parsed.scheme + '://' + parsed.subdomain + parsed.domain + parsed.path
    return _urltools.normalize(url)
RFC3986 normalize URL & Optionally removing url-query/fragment string
def configure_specials_key(self, keyboard):
    """Configures specials key if needed.

    Packs the special keys (back, uppercase, special-chars) into the
    existing rows from the bottom up where space remains; whatever does
    not fit goes into an extra special row, together with the space bar
    when enabled.

    :param keyboard: Keyboard instance this layout belong.
    """
    special_row = VKeyRow()
    max_length = self.max_length
    i = len(self.rows) - 1
    current_row = self.rows[i]
    special_keys = [VBackKey()]
    if self.allow_uppercase:
        special_keys.append(VUppercaseKey(keyboard))
    if self.allow_special_chars:
        special_keys.append(VSpecialCharKey(keyboard))
    # fill existing rows bottom-up, alternating the `first` flag so keys
    # are distributed on both ends of a row
    while len(special_keys) > 0:
        first = False
        while len(special_keys) > 0 and len(current_row) < max_length:
            current_row.add_key(special_keys.pop(0), first=first)
            first = not first
        if i > 0:
            i -= 1
            current_row = self.rows[i]
        else:
            break
    if self.allow_space:
        # size the space bar to balance the remaining special keys
        space_length = len(current_row) - len(special_keys)
        special_row.add_key(VSpaceKey(space_length))
    first = True
    # Adding left to the special bar.
    while len(special_keys) > 0:
        special_row.add_key(special_keys.pop(0), first=first)
        first = not first
    if len(special_row) > 0:
        self.rows.append(special_row)
Configures specials key if needed. :param keyboard: Keyboard instance this layout belong.
def unget_service(self, reference):
    # type: (ServiceReference) -> bool
    """
    Disables a reference to the service, releasing this bundle's usage
    of it via the framework's service registry.

    :param reference: the ServiceReference to release
    :return: True if the bundle was using this reference, else False
    """
    # Lose the dependency
    return self.__framework._registry.unget_service(
        self.__bundle, reference
    )
Disables a reference to the service :return: True if the bundle was using this reference, else False
def _normalize_lang_attrs(self, text, strip): """Remove embedded bracketed attributes. This (potentially) bitwise-ands bracketed attributes together and adds to the end. This is applied to a single alternative at a time -- not to a parenthesized list. It removes all embedded bracketed attributes, logically-ands them together, and places them at the end. However if strip is true, this can indeed remove embedded bracketed attributes from a parenthesized list. Parameters ---------- text : str A Beider-Morse phonetic encoding (in progress) strip : bool Remove the bracketed attributes (and throw away) Returns ------- str A Beider-Morse phonetic code Raises ------ ValueError No closing square bracket """ uninitialized = -1 # all 1's attrib = uninitialized while '[' in text: bracket_start = text.find('[') bracket_end = text.find(']', bracket_start) if bracket_end == -1: raise ValueError( 'No closing square bracket: text=(' + text + ') strip=(' + text_type(strip) + ')' ) attrib &= int(text[bracket_start + 1 : bracket_end]) text = text[:bracket_start] + text[bracket_end + 1 :] if attrib == uninitialized or strip: return text elif attrib == 0: # means that the attributes were incompatible and there is no # alternative here return '[0]' return text + '[' + str(attrib) + ']'
Remove embedded bracketed attributes. This (potentially) bitwise-ands bracketed attributes together and adds to the end. This is applied to a single alternative at a time -- not to a parenthesized list. It removes all embedded bracketed attributes, logically-ands them together, and places them at the end. However if strip is true, this can indeed remove embedded bracketed attributes from a parenthesized list. Parameters ---------- text : str A Beider-Morse phonetic encoding (in progress) strip : bool Remove the bracketed attributes (and throw away) Returns ------- str A Beider-Morse phonetic code Raises ------ ValueError No closing square bracket
def _CheckGitkitError(self, raw_response):
    """Raise the appropriate error when an API invocation failed.

    Args:
      raw_response: string, the http response.

    Raises:
      GitkitClientError: if the error code is 4xx.
      GitkitServerError: if the response is malformed or carries a
        non-client error.

    Returns:
      Successful response as dict.
    """
    try:
        parsed = simplejson.loads(raw_response)
        if 'error' not in parsed:
            return parsed
        error = parsed['error']
        if 'code' in error:
            # 4xx codes are the caller's fault, everything else is ours
            if str(error['code']).startswith('4'):
                raise errors.GitkitClientError(error['message'])
            raise errors.GitkitServerError(error['message'])
    except simplejson.JSONDecodeError:
        pass
    raise errors.GitkitServerError('null error code from Gitkit server')
Raises error if API invocation failed.

    Args:
      raw_response: string, the http response.

    Raises:
      GitkitClientError: if the error code is 4xx.
      GitkitServerError: if the response is malformed.

    Returns:
      Successful response as dict.
def unprotect_response(self, response, **kwargs):
    """
    Removes protection from the specified response

    :param response: response from the key vault service
    :return: unprotected response with any security protocol encryption
        removed
    """
    body = response.content

    # if the current message security doesn't support message protection, the body is empty, or the request failed
    # skip protection and return the original response
    if not self.supports_protection() or len(response.content) == 0 or response.status_code != 200:
        return response

    # ensure the content-type is application/jose+json
    if 'application/jose+json' not in response.headers.get('content-type', '').lower():
        raise ValueError('Invalid protected response')

    # deserialize the response into a JwsObject, using response.text so requests handles the encoding
    jws = _JwsObject().deserialize(body)

    # deserialize the protected header
    jws_header = _JwsHeader.from_compact_header(jws.protected)

    # ensure the jws signature kid matches the key from original challenge
    # and the alg matches expected signature alg
    if jws_header.kid != self.server_signature_key.kid \
            or jws_header.alg != 'RS256':
        raise ValueError('Invalid protected response')

    # validate the signature of the jws over the protected header + payload
    data = (jws.protected + '.' + jws.payload).encode('ascii')

    # verify will raise an InvalidSignature exception if the signature doesn't match
    self.server_signature_key.verify(signature=_b64_to_bstr(jws.signature), data=data)

    # get the unprotected response body and swap it into the response
    # in place, resetting the content type to plain JSON
    decrypted = self._unprotect_payload(jws.payload)
    response._content = decrypted
    response.headers['Content-Type'] = 'application/json'

    return response
Removes protection from the specified response

        :param response: response from the key vault service
        :return: unprotected response with any security protocol encryption removed
def write_recording(recording, save_path):
    '''
    Save recording extractor to MEArec format.

    Parameters
    ----------
    recording: RecordingExtractor
        Recording extractor object to be saved
    save_path: str
        .h5 or .hdf5 path (a directory is also accepted; the file is
        then written as <dir>/recording.h5)
    '''
    assert HAVE_MREX, "To use the MEArec extractors, install MEArec: \n\n pip install MEArec\n\n"
    save_path = Path(save_path)
    if save_path.is_dir():
        print("The file will be saved as recording.h5 in the provided folder")
        save_path = save_path / 'recording.h5'
    if save_path.suffix == '.h5' or save_path.suffix == '.hdf5':
        # minimal MEArec recording dict: traces + sampling frequency
        info = {'recordings': {'fs': recording.get_sampling_frequency()}}
        rec_dict = {'recordings': recording.get_traces()}
        if 'location' in recording.get_channel_property_names():
            # carry the channel geometry over when available
            positions = np.array([recording.get_channel_property(chan, 'location')
                                  for chan in recording.get_channel_ids()])
            rec_dict['channel_positions'] = positions
        recgen = mr.RecordingGenerator(rec_dict=rec_dict, info=info)
        mr.save_recording_generator(recgen, str(save_path), verbose=False)
    else:
        raise Exception("Provide a folder or an .h5/.hdf5 as 'save_path'")
Save recording extractor to MEArec format. Parameters ---------- recording: RecordingExtractor Recording extractor object to be saved save_path: str .h5 or .hdf5 path
def record_evaluation(eval_result):
    """Create a callback that appends each round's evaluation results
    into ``eval_result``.

    Parameters
    ----------
    eval_result : dict
        A dictionary to store the evaluation results.

    Returns
    -------
    callback : function
        The callback that records the evaluation history into the passed
        dictionary.
    """
    if not isinstance(eval_result, dict):
        raise TypeError('Eval_result should be a dictionary')
    eval_result.clear()

    def _callback(env):
        # lazily build the per-dataset containers on first invocation
        if not eval_result:
            for dataset, _, _, _ in env.evaluation_result_list:
                eval_result.setdefault(dataset, collections.defaultdict(list))
        for dataset, metric, value, _ in env.evaluation_result_list:
            eval_result[dataset][metric].append(value)

    _callback.order = 20
    return _callback
Create a callback that records the evaluation history into ``eval_result``. Parameters ---------- eval_result : dict A dictionary to store the evaluation results. Returns ------- callback : function The callback that records the evaluation history into the passed dictionary.
def resizeEvent( self, event ):
    """
    Resizes the current widget and its parts widget, keeping the
    scroll area 4px inside the edit and scrolling so the rightmost
    parts stay visible when they overflow.

    :param      event | <QResizeEvent>
    """
    super(XNavigationEdit, self).resizeEvent(event)

    w = self.width()
    h = self.height()

    # keep a 2px margin on every side of the scroll area
    self._scrollWidget.resize(w - 4, h - 4)

    if ( self._scrollWidget.width() < self._partsWidget.width() ):
        # parts overflow: scroll right by the overflow amount
        self.scrollParts( self._partsWidget.width() - self._scrollWidget.width() )
Resizes the current widget and its parts widget. :param event | <QResizeEvent>
def _save_pys(self, filepath):
    """Saves file as pys file and returns True if save success

    Parameters
    ----------
    filepath: String
    \tTarget file path for pys file

    Returns ``None`` when saving failed (after posting a status-bar
    message when the main window still exists).
    """
    try:
        with Bz2AOpen(filepath, "wb",
                      main_window=self.main_window) as outfile:
            interface = Pys(self.grid.code_array, outfile)
            interface.from_code_array()

    # bugfix: Python-2-only `except (...), err` syntax replaced with the
    # `as err` form, which is valid on Python 2.6+ and Python 3
    except (IOError, ValueError) as err:
        try:
            post_command_event(self.main_window, self.StatusBarMsg,
                               text=err)
            return
        except TypeError:
            # The main window does not exist any more
            pass

    return not outfile.aborted
Saves file as pys file and returns True if save success Parameters ---------- filepath: String \tTarget file path for xls file
def accept(self, message=None, expires_at=None):
    """Accept request.

    Transitions the request from PENDING to ACCEPTED inside a nested
    transaction and emits the ``request_accepted`` signal.

    :raises InvalidRequestStateError: if the request is not PENDING.
    """
    with db.session.begin_nested():
        if self.status != RequestStatus.PENDING:
            raise InvalidRequestStateError(RequestStatus.PENDING)
        self.status = RequestStatus.ACCEPTED
    # notify listeners only after the state change has been applied
    request_accepted.send(self, message=message, expires_at=expires_at)
Accept request.
def dict_merge(a, b, path=None):
    """Merge every key of ``b`` into ``a`` and return the result."""
    all_keys = b.keys()
    return dict_selective_merge(a, b, all_keys, path)
merges b into a
def iter_query_indexes(self):
    """
    Yield a :class:`~dql.models.QueryIndex` for every queryable index:
    a synthetic 'TABLE' entry for the table's own hash/range key,
    followed by all local and global secondary indexes.
    """
    range_attr = self._table.range_key
    range_name = None if range_attr is None else range_attr.name
    yield QueryIndex("TABLE", True, self._table.hash_key.name, range_name)
    for local_index in self._table.indexes:
        yield QueryIndex.from_table_index(self._table, local_index)
    for global_index in self._table.global_indexes:
        yield QueryIndex.from_table_index(self._table, global_index)
Iterator that constructs :class:`~dql.models.QueryIndex` for all global and local indexes, and a special one for the default table hash & range key with the name 'TABLE'
def settings(self):
    """
    Returns the account settings data for this account.  This is not
    a listing endpoint.

    :raises UnexpectedResponseError: when the API response lacks the
        expected 'managed' field.
    """
    result = self.client.get('/account/settings')

    if 'managed' not in result:
        raise UnexpectedResponseError('Unexpected response when getting account settings!',
                                      json=result)

    return AccountSettings(self.client, result['managed'], result)
Returns the account settings data for this account.  This is not a
        listing endpoint.
def set_doc(self, doc: str):
    """Attach *doc* as this property's docstring; when an owning module
    is known, also register it under that module's `__test__` dict so
    doctest picks it up."""
    self.__doc__ = doc
    if hasattr(self, 'module'):
        key = f'{self.objtype.__name__}.{self.name}'
        self.module.__dict__['__test__'][key] = doc
Assign the given docstring to the property instance and, if possible, to the `__test__` dictionary of the module of its owner class.
def save_screenshot(driver, name):
    """
    Save a screenshot of the browser.

    The location of the screenshot can be configured
    by the environment variable `SCREENSHOT_DIR`.  If
    that variable is not set, a warning is logged and
    no screenshot is taken.

    Args:
        driver (selenium.webdriver): The Selenium-controlled browser.
        name (str): A name for the screenshot, which will be used in the output file name.

    Returns:
        None
    """
    if hasattr(driver, 'save_screenshot'):
        screenshot_dir = os.environ.get('SCREENSHOT_DIR')
        if not screenshot_dir:
            LOGGER.warning('The SCREENSHOT_DIR environment variable was not set; not saving a screenshot')
            return
        elif not os.path.exists(screenshot_dir):
            # create the target directory on first use
            os.makedirs(screenshot_dir)
        image_name = os.path.join(screenshot_dir, name + '.png')
        driver.save_screenshot(image_name)

    else:
        # e.g. headless or remote drivers without screenshot support
        msg = (
            u"Browser does not support screenshots. "
            u"Could not save screenshot '{name}'"
        ).format(name=name)

        LOGGER.warning(msg)
Save a screenshot of the browser. The location of the screenshot can be configured by the environment variable `SCREENSHOT_DIR`. If not set, this defaults to the current working directory. Args: driver (selenium.webdriver): The Selenium-controlled browser. name (str): A name for the screenshot, which will be used in the output file name. Returns: None
def dataframe(self):
    """
    Return a ``pandas.DataFrame`` of the relevant class properties with
    one row per season (plus the career line), or ``None`` when no
    seasons are present.  ``self._index`` is restored afterwards.
    """
    saved_index = self._index
    if not self._season:
        return None
    frames = []
    labels = []
    for year in self._season:
        self._index = self._season.index(year)
        frames.append(self._dataframe_fields())
        labels.append(year)
    self._index = saved_index
    return pd.DataFrame(frames, index=[labels])
Returns a ``pandas DataFrame`` containing all other relevant class properties and values where each index is a different season plus the career stats.
def build_result(data):
    """Create a dictionary with the contents of result.json.

    Every key is copied through unchanged except 'elements', whose items
    are converted to key/value pairs via ``process_node``.
    """
    result = {}
    for key, value in data.items():
        if key == 'elements':
            result[key] = dict(process_node(element) for element in value)
        else:
            result[key] = value
    return result
Create a dictionary with the contents of result.json
def cache_clear(self):
    """Clear the local cache: delete every downloaded file and
    sub-directory under the cache directory and empty the index."""
    # Remove everything inside the cache directory (not the directory
    # itself).
    for entry in os.listdir(self.directory):
        path = os.path.join(self.directory, entry)
        if os.path.isdir(path):
            shutil.rmtree(path)
        elif os.path.isfile(path):
            os.remove(path)
    # Reset the in-memory cache index.
    self.cache = {}
Clear local cache by deleting all cached resources and their downloaded files.
def read_cifar10(filename_queue):
  """Reads and parses examples from CIFAR10 data files.

  Recommendation: if you want N-way read parallelism, call this function
  N times.  This will give you N independent Readers reading different
  files & positions within those files, which will give better mixing of
  examples.

  Args:
    filename_queue: A queue of strings with the filenames to read from.

  Returns:
    An object representing a single example, with the following fields:
      height: number of rows in the result (32)
      width: number of columns in the result (32)
      depth: number of color channels in the result (3)
      key: a scalar string Tensor describing the filename & record number
        for this example.
      label: an int32 Tensor with the label in the range 0..9.
      uint8image: a [height, width, depth] uint8 Tensor with the image data
  """

  class CIFAR10Record(object):
    pass

  example = CIFAR10Record()

  # Fixed CIFAR-10 binary layout: a 1-byte label followed by a 3x32x32
  # image stored depth-major.  See
  # http://www.cs.toronto.edu/~kriz/cifar.html for the format.
  label_bytes = 1  # 2 for CIFAR-100
  example.height = 32
  example.width = 32
  example.depth = 3
  image_bytes = example.height * example.width * example.depth
  record_len = label_bytes + image_bytes

  # FixedLengthRecordReader slices the files into equally sized records;
  # the CIFAR-10 binaries carry no header or footer bytes, so the
  # defaults of 0 for header_bytes/footer_bytes apply.
  reader = tf.FixedLengthRecordReader(record_bytes=record_len)
  example.key, raw_value = reader.read(filename_queue)

  # Reinterpret the raw record string as a flat uint8 vector.
  record = tf.decode_raw(raw_value, tf.uint8)

  # The first byte is the label (uint8 -> int32).
  example.label = tf.cast(
      tf.strided_slice(record, [0], [label_bytes]), tf.int32)

  # The remaining bytes are the image, reshaped from
  # [depth * height * width] to [depth, height, width] ...
  depth_major = tf.reshape(
      tf.strided_slice(record, [label_bytes], [label_bytes + image_bytes]),
      [example.depth, example.height, example.width])
  # ... and transposed to the conventional [height, width, depth].
  example.uint8image = tf.transpose(depth_major, [1, 2, 0])

  return example
Reads and parses examples from CIFAR10 data files. Recommendation: if you want N-way read parallelism, call this function N times. This will give you N independent Readers reading different files & positions within those files, which will give better mixing of examples. Args: filename_queue: A queue of strings with the filenames to read from. Returns: An object representing a single example, with the following fields: height: number of rows in the result (32) width: number of columns in the result (32) depth: number of color channels in the result (3) key: a scalar string Tensor describing the filename & record number for this example. label: an int32 Tensor with the label in the range 0..9. uint8image: a [height, width, depth] uint8 Tensor with the image data
def create_group(self, attrs, members, folder_id=None, tags=None):
    """Create a contact group.

    XML example:

        <cn l="7">  ## ContactSpec
            <a n="lastName">MARTIN</a>
            <a n="firstName">Pierre</a>
            <a n="email">pmartin@example.com</a>
        </cn>

    which in zimsoap terms is:

        attrs = {
            'lastname': 'MARTIN',
            'firstname': 'Pierre',
            'email': 'pmartin@example.com'
        }
        folder_id = 7

    :param attrs: a dictionary of attributes to set ({key:value,...}).
        At least one attr is required.
    :param members: list of dicts, each member with its type, e.g.
        {'type': 'I', 'value': 'manual_addresse@example.com'}.
    :param folder_id: ID of the folder in which to create the contact
        (defaults to the server-side default folder when omitted).
    :param tags: comma-separated list of tag names.
    :returns: the created zobjects.Contact
    """
    contact_attrs = [{'n': key, '_content': value}
                     for key, value in attrs.items()]
    contact_attrs.append({'n': 'type', '_content': 'group'})

    # Preserve the original payload key order: m, l, tn, a.
    cn = {'m': members}
    if folder_id:
        cn['l'] = str(folder_id)
    if tags:
        cn['tn'] = tags
    cn['a'] = contact_attrs

    resp = self.request_single('CreateContact', {'cn': cn})
    return zobjects.Contact.from_dict(resp)
Create a contact group XML example : <cn l="7> ## ContactSpec <a n="lastName">MARTIN</a> <a n="firstName">Pierre</a> <a n="email">pmartin@example.com</a> </cn> Which would be in zimsoap : attrs = { 'lastname': 'MARTIN', 'firstname': 'Pierre', 'email': 'pmartin@example.com' } folder_id = 7 :param folder_id: a string of the ID's folder where to create contact. Default '7' :param tags: comma-separated list of tag names :param members: list of dict. Members with their type. Example {'type': 'I', 'value': 'manual_addresse@example.com'}. :param attrs: a dictionary of attributes to set ({key:value,...}). At least one attr is required :returns: the created zobjects.Contact
def to_dict(self):
    """ Return the task context content as a dictionary. """
    keys = ('task_name', 'dag_name', 'workflow_name',
            'workflow_id', 'worker_hostname')
    return {key: getattr(self, key) for key in keys}
Return the task context content as a dictionary.
def _find_pair(self, protocol, remote_candidate): """ Find a candidate pair in the check list. """ for pair in self._check_list: if (pair.protocol == protocol and pair.remote_candidate == remote_candidate): return pair return None
Find a candidate pair in the check list.
def copytree(source_directory, destination_directory, ignore=None):
    """
    Recursively copy the contents of a source directory into a destination
    directory.  Both directories must exist.

    Unlike ``shutil.copytree(src, dst)`` (which requires ``dst`` not to
    exist), this copies only the *contents* of ``source_directory``, not
    the root directory itself.  Code adapted from
    http://stackoverflow.com/a/12686557

    :param string source_directory: the source directory, already existing
    :param string destination_directory: the destination directory,
        already existing
    :param ignore: optional callable with the ``shutil.ignore_patterns``
        signature, returning the names to skip
    """
    if not os.path.isdir(source_directory):
        # Recursion leaf: a plain file is copied directly.
        shutil.copyfile(source_directory, destination_directory)
        return
    if not os.path.isdir(destination_directory):
        os.makedirs(destination_directory)
    entries = os.listdir(source_directory)
    ignored = ignore(source_directory, entries) if ignore is not None else set()
    for entry in entries:
        if entry in ignored:
            continue
        copytree(
            os.path.join(source_directory, entry),
            os.path.join(destination_directory, entry),
            ignore,
        )
Recursively copy the contents of a source directory into a destination directory. Both directories must exist. This function does not copy the root directory ``source_directory`` into ``destination_directory``. Since ``shutil.copytree(src, dst)`` requires ``dst`` not to exist, we cannot use for our purposes. Code adapted from http://stackoverflow.com/a/12686557 :param string source_directory: the source directory, already existing :param string destination_directory: the destination directory, already existing
def _calculate(cls):
    """
    Calculate the percentage of each status.

    Reads the per-status counters and writes the integer percentage of
    each status (relative to the number of tested subjects) back into
    the percentage counter.
    """
    counters = PyFunceble.INTERN["counter"]["number"]
    tested = counters["tested"]

    for status in ("up", "down", "invalid"):
        # Integer percentage of this status over everything tested.
        PyFunceble.INTERN["counter"]["percentage"].update(
            {status: counters[status] * 100 // tested}
        )
Calculate the percentage of each status.
def _get_paging_controls(request):
    """Parses start and/or limit queries into a paging controls dict.

    Raises errors.CountInvalid when `limit` is present but is not a
    positive integer.
    """
    controls = {}
    limit = request.url.query.get('limit')
    start = request.url.query.get('start')

    if limit is not None:
        try:
            parsed_limit = int(limit)
        except ValueError:
            LOGGER.debug('Request query had an invalid limit: %s', limit)
            raise errors.CountInvalid()
        if parsed_limit <= 0:
            LOGGER.debug('Request query had an invalid limit: %s', limit)
            raise errors.CountInvalid()
        controls['limit'] = parsed_limit

    if start is not None:
        controls['start'] = start

    return controls
Parses start and/or limit queries into a paging controls dict.
def _check_raising_stopiteration_in_generator_next_call(self, node):
    """Check if a StopIteration exception is raised by the call to next function

    If the next value has a default value, then do not add message.

    :param node: Check to see if this Call node is a next function
    :type node: :class:`astroid.node_classes.Call`
    """

    def _looks_like_infinite_iterator(param):
        # Infinite iterators (itertools.count, cycle, ...) can never
        # raise StopIteration, so next() on them is safe.
        inferred = utils.safe_infer(param)
        if inferred:
            return inferred.qname() in KNOWN_INFINITE_ITERATORS
        return False

    if isinstance(node.func, astroid.Attribute):
        # A next() method, which is not what we want.
        return
    inferred = utils.safe_infer(node.func)
    if getattr(inferred, "name", "") == "next":
        frame = node.frame()
        # The next builtin can only have up to two
        # positional arguments and no keyword arguments
        has_sentinel_value = len(node.args) > 1
        if (
            isinstance(frame, astroid.FunctionDef)
            and frame.is_generator()
            and not has_sentinel_value
            and not utils.node_ignores_exception(node, StopIteration)
            # BUGFIX: a bare `next()` call has no arguments, so indexing
            # node.args[0] below raised IndexError; skip such calls.
            and node.args
            and not _looks_like_infinite_iterator(node.args[0])
        ):
            self.add_message("stop-iteration-return", node=node)
Check if a StopIteration exception is raised by the call to next function If the next value has a default value, then do not add message. :param node: Check to see if this Call node is a next function :type node: :class:`astroid.node_classes.Call`
def is_open(location, now=None):
    """
    Is the company currently open? Pass "now" to test with a specific
    timestamp. Can be used stand-alone or as a helper.

    :param location: company whose OpeningHours are checked; when falsy,
        the first Company's opening hours are used instead.
    :param now: optional datetime to test against; defaults to get_now().
    :returns: the matching OpeningHours instance when open, else False.
    """
    if now is None:
        now = get_now()
    # Closing rules (e.g. holidays/exceptional closures) override the
    # regular opening hours entirely.
    if has_closing_rule_for_now(location):
        return False
    now_time = datetime.time(now.hour, now.minute, now.second)
    if location:
        ohs = OpeningHours.objects.filter(company=location)
    else:
        ohs = Company.objects.first().openinghours_set.all()
    for oh in ohs:
        is_open = False
        # start and end is on the same day
        if (oh.weekday == now.isoweekday() and oh.from_hour <= now_time and now_time <= oh.to_hour):
            is_open = oh
        # start and end are not on the same day and we test on the start day
        if (oh.weekday == now.isoweekday() and oh.from_hour <= now_time and ((oh.to_hour < oh.from_hour) and (now_time < datetime.time(23, 59, 59)))):
            is_open = oh
        # start and end are not on the same day and we test on the end day
        # NOTE(review): (now.isoweekday() - 1) % 7 yields 0 on Monday,
        # which can never equal an ISO weekday (1..7) — confirm whether
        # Sunday->Monday spans after midnight are handled correctly.
        if (oh.weekday == (now.isoweekday() - 1) % 7 and oh.from_hour >= now_time and oh.to_hour >= now_time and oh.to_hour < oh.from_hour):
            is_open = oh
            # print " 'Special' case after midnight", oh
        if is_open is not False:
            return oh
    return False
Is the company currently open? Pass "now" to test with a specific timestamp. Can be used stand-alone or as a helper.
def _add_request_parameters(func): """Adds the ratelimit and request timeout parameters to a function.""" # The function the decorator returns async def decorated_func(*args, handle_ratelimit=None, max_tries=None, request_timeout=None, **kwargs): return await func(*args, handle_ratelimit=handle_ratelimit, max_tries=max_tries, request_timeout=request_timeout, **kwargs) # We return the decorated func return decorated_func
Adds the ratelimit and request timeout parameters to a function.
def reduce_annotations(self, annotations, options):
    """Reduce annotations to ones used to identify enrichment
    (normally exclude ND and NOT)."""
    keep = options.getfnc_qual_ev()
    return [annotation for annotation in annotations
            if keep(annotation.Qualifier, annotation.Evidence_Code)]
Reduce annotations to ones used to identify enrichment (normally exclude ND and NOT).
def load_obj(self, jref, getter=None, parser=None):
    """ load a object(those in spec._version_.objects) from a JSON reference.

    :param jref: JSON reference to resolve and parse
    :param getter: optional getter forwarded to the resolver
    :param parser: optional context class used when the swagger version
        cannot be detected from the document itself
    :return: tuple (parsed object, detected swagger version)
    :raises NotImplementedError: for unsupported swagger versions
    :raises Exception: when nothing could be parsed from jref
    """
    obj = self.__resolver.resolve(jref, getter)

    # get root document to check its swagger version.
    # The contexts assign the parsed object into tmp['_tmp_'].
    tmp = {'_tmp_': {}}
    version = utils.get_swagger_version(obj)
    if version == '1.2':
        # swagger 1.2
        with ResourceListContext(tmp, '_tmp_') as ctx:
            ctx.parse(obj, jref, self.__resolver, getter)
    elif version == '2.0':
        # swagger 2.0
        with SwaggerContext(tmp, '_tmp_') as ctx:
            ctx.parse(obj)
    elif version is None and parser:
        # Version undetectable: fall back to the caller-supplied parser,
        # which may announce the version on the parsed object.
        with parser(tmp, '_tmp_') as ctx:
            ctx.parse(obj)
        version = tmp['_tmp_'].__swagger_version__ if hasattr(tmp['_tmp_'], '__swagger_version__') else version
    else:
        raise NotImplementedError('Unsupported Swagger Version: {0} from {1}'.format(version, jref))

    if not tmp['_tmp_']:
        raise Exception('Unable to parse object from {0}'.format(jref))

    logger.info('version: {0}'.format(version))

    return tmp['_tmp_'], version
load a object(those in spec._version_.objects) from a JSON reference.
def process_alias_create_namespace(namespace):
    """
    Validate input arguments when the user invokes 'az alias create'.

    Runs the validation pipeline in order: filters the namespace, then
    validates the alias name, the alias command, the command level, and
    the positional-argument syntax.  Each validator raises on invalid
    input.

    Args:
        namespace: argparse namespace object.
    """
    # Filter first so the validators below see the normalized namespace.
    namespace = filter_alias_create_namespace(namespace)
    _validate_alias_name(namespace.alias_name)
    _validate_alias_command(namespace.alias_command)
    _validate_alias_command_level(namespace.alias_name, namespace.alias_command)
    _validate_pos_args_syntax(namespace.alias_name, namespace.alias_command)
Validate input arguments when the user invokes 'az alias create'. Args: namespace: argparse namespace object.
def verify(self):
    """Raise a |ValueError| if the dates or the step size of the time
    frame are inconsistent."""
    if self.firstdate >= self.lastdate:
        # BUGFIX: the original message was garbled ("The first given
        # date X, the second given date is Y.") — state the actual
        # problem.
        raise ValueError(
            f'Unplausible timegrid. The first given date '
            f'{self.firstdate} is not earlier than the second given '
            f'date {self.lastdate}.')
    if (self.lastdate-self.firstdate) % self.stepsize:
        raise ValueError(
            f'Unplausible timegrid. The period span between the given '
            f'dates {self.firstdate} and {self.lastdate} is not '
            f'a multiple of the given step size {self.stepsize}.')
Raise an |ValueError| if the dates or the step size of the time frame are inconsistent.
def set_entry(self, filename, obj):
    """Store *obj* under *filename* and mark the container as dirty."""
    self.entries[filename] = obj
    self.dirty = True
Set the entry.
def add(self, event, pk, ts=None, ttl=None):
    """Add an event to event store.

    All events were stored in a sorted set in redis with timestamp as
    rank score.

    :param event: the event to be added, format should be ``table_action``
    :param pk: the primary key of event
    :param ts: timestamp of the event, default to redis_server's current
        timestamp
    :param ttl: the expiration time of event since the last update
    :return: True on success, False when the redis server is unreachable
    """
    key = self._keygen(event, ts)
    try:
        self._zadd(key, pk, ts, ttl)
    except redis.ConnectionError as e:
        # A connection error typically means the redis server can't be
        # reached or timed out; swallow it with an error log and report
        # failure instead of propagating.
        self.logger.error(
            "redis event store failed with connection error %r" % e)
        return False
    return True
Add an event to event store. All events were stored in a sorted set in redis with timestamp as rank score. :param event: the event to be added, format should be ``table_action`` :param pk: the primary key of event :param ts: timestamp of the event, default to redis_server's current timestamp :param ttl: the expiration time of event since the last update :return: bool
def estimator_cov(self, method):
    """ Creates covariance matrix for the estimators

    Parameters
    ----------
    method : str
        Estimation method

    Returns
    ----------
    A Covariance Matrix
    """
    # Stack the lag-trimmed data and build the regressor matrix.
    Y = np.array([reg[self.lags:] for reg in self.data])
    Z = self._create_Z(Y)

    if method == 'OLS':
        sigma = self.ols_covariance()
    else:
        sigma = self.custom_covariance(self.latent_variables.get_z_values())

    zzt_inv = np.linalg.inv(np.dot(Z, np.transpose(Z)))
    return np.kron(zzt_inv, sigma)
Creates covariance matrix for the estimators Parameters ---------- method : str Estimation method Returns ---------- A Covariance Matrix
def group_info(name):
    '''
    .. versionadded:: 2016.11.0

    Lists all packages in the specified group

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.group_info 'xorg'
    '''
    pkgtypes = ('mandatory', 'optional', 'default', 'conditional')
    ret = {}
    for pkgtype in pkgtypes:
        ret[pkgtype] = set()

    cmd = ['pacman', '-Sgg', name]
    out = __salt__['cmd.run'](cmd,
                              output_loglevel='trace',
                              python_shell=False)

    for line in salt.utils.itertools.split(out, '\n'):
        if not line:
            continue
        try:
            # Lines look like "<group> <package>"; take the package.
            pkg = line.split()[1]
        except IndexError:
            # BUGFIX: a line with fewer than two fields raises
            # IndexError, not ValueError, so the original handler never
            # caught the parsing failure.
            log.error('Problem parsing pacman -Sgg: Unexpected formatting in '
                      'line: \'%s\'', line)
        else:
            # pacman does not distinguish pkgtypes; everything lands in
            # 'default'.
            ret['default'].add(pkg)

    for pkgtype in pkgtypes:
        ret[pkgtype] = sorted(ret[pkgtype])

    return ret
.. versionadded:: 2016.11.0 Lists all packages in the specified group CLI Example: .. code-block:: bash salt '*' pkg.group_info 'xorg'
def _run_psql(cmd, runas=None, password=None, host=None, port=None, user=None):
    '''
    Helper function to call psql, because the password requirement makes this
    too much code to be repeated in each function below

    cmd
        the psql command to run (list form, executed without a shell)
    runas
        system user to run psql as; auto-detected per OS family for
        local (unix-socket) connections when not given
    password
        password written to a temporary PGPASSFILE; falls back to the
        ``postgres.pass`` config option
    host / port / user
        connection parameters used to build the PGPASSFILE entry
    '''
    kwargs = {
        'reset_system_locale': False,
        'clean_env': True,
    }
    if runas is None:
        if not host:
            host = __salt__['config.option']('postgres.host')
        if not host or host.startswith('/'):
            # Unix-socket connection: run psql as the OS-specific
            # postgres system account so peer auth succeeds.
            if 'FreeBSD' in __grains__['os_family']:
                runas = 'pgsql'
            elif 'OpenBSD' in __grains__['os_family']:
                runas = '_postgresql'
            else:
                runas = 'postgres'

    if user is None:
        user = runas

    if runas:
        kwargs['runas'] = runas

    if password is None:
        password = __salt__['config.option']('postgres.pass')
    if password is not None:
        # psql has no password CLI flag; hand it over via a throw-away
        # PGPASSFILE (host:port:db:user:password) owned by runas.
        pgpassfile = salt.utils.files.mkstemp(text=True)
        with salt.utils.files.fopen(pgpassfile, 'w') as fp_:
            fp_.write(salt.utils.stringutils.to_str('{0}:{1}:*:{2}:{3}'.format(
                'localhost' if not host or host.startswith('/') else host,
                port if port else '*',
                user if user else '*',
                password,
            )))
        __salt__['file.chown'](pgpassfile, runas, '')
        kwargs['env'] = {'PGPASSFILE': pgpassfile}

    ret = __salt__['cmd.run_all'](cmd, python_shell=False, **kwargs)

    if ret.get('retcode', 0) != 0:
        log.error('Error connecting to Postgresql server')
    # Always clean up the temporary password file.
    if password is not None and not __salt__['file.remove'](pgpassfile):
        log.warning('Remove PGPASSFILE failed')
    return ret
Helper function to call psql, because the password requirement makes this too much code to be repeated in each function below
def x_axis_properties(self, title_size=None, title_offset=None,
                      label_angle=None, label_align=None, color=None):
    """Change x-axis title font size and label angle.

    Parameters
    ----------
    title_size: int, default None
        Title size, in px
    title_offset: int, default None
        Pixel offset from given axis
    label_angle: int, default None
        label angle in degrees
    label_align: str, default None
        Label alignment
    color: str, default None
        Hex color

    Returns
    -------
    self, to allow chaining.
    """
    # Delegate to the shared axis helper for the 'x' axis.
    self._axis_properties('x', title_size, title_offset,
                          label_angle, label_align, color)
    return self
Change x-axis title font size and label angle Parameters ---------- title_size: int, default None Title size, in px title_offset: int, default None Pixel offset from given axis label_angle: int, default None label angle in degrees label_align: str, default None Label alignment color: str, default None Hex color
def to_deeper_graph(graph):
    ''' deeper graph

    Randomly pick one weighted layer and insert a deeper layer after it.
    Returns None when the graph already reached the maximum layer count.
    '''
    candidate_ids = graph.deep_layer_ids()
    if len(candidate_ids) >= Constant.MAX_LAYERS:
        return None

    for layer_id in sample(candidate_ids, 1):
        target_layer = graph.layer_list[layer_id]
        graph.to_deeper_model(layer_id,
                              create_new_layer(target_layer, graph.n_dim))

    return graph
deeper graph
def subscribe(self):
    """
    Subscribe to contact and conversation events.  These are accessible
    through :meth:`getEvents`.
    """
    resources = ["/v1/threads/ALL",
                 "/v1/users/ME/contacts/ALL",
                 "/v1/users/ME/conversations/ALL/messages",
                 "/v1/users/ME/conversations/ALL/properties"]
    url = "{0}/users/ME/endpoints/{1}/subscriptions".format(self.conn.msgsHost, self.id)
    self.conn("POST", url,
              auth=SkypeConnection.Auth.RegToken,
              json={"interestedResources": resources,
                    "template": "raw",
                    "channelType": "httpLongPoll"})
    self.subscribed = True
Subscribe to contact and conversation events. These are accessible through :meth:`getEvents`.
def get_resource_children(raml_resource):
    """ Get children of :raml_resource:.

    A child is any resource in the root resource list whose parent's
    path equals this resource's path.

    :param raml_resource: Instance of ramlfications.raml.ResourceNode.
    """
    parent_path = raml_resource.path
    children = []
    for resource in raml_resource.root.resources:
        if resource.parent and resource.parent.path == parent_path:
            children.append(resource)
    return children
Get children of :raml_resource:. :param raml_resource: Instance of ramlfications.raml.ResourceNode.
def get(self, sent_id, **kwargs):
    ''' Return the sentence object associated with sent_id.

    Non-int IDs are coerced with int().  When sent_id is unknown (or
    None), return the provided default instead; if no default was
    supplied, raise a KeyError.'''
    if sent_id is not None and not isinstance(sent_id, int):
        sent_id = int(sent_id)
    if sent_id is not None and self.has_id(sent_id):
        return self.__sent_map[sent_id]
    if 'default' in kwargs:
        return kwargs['default']
    raise KeyError("Invalid sentence ID ({})".format(sent_id))
If sent_id exists, return the associated sentence object else return default. If no default is provided, KeyError will be raised.
def record(self):
    # type: () -> bytes
    '''
    Record this Extended Attribute Record.

    Parameters:
     None.
    Returns:
     A string representing this Extended Attribute Record.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('This XARecord is not yet initialized!')

    # Pack the fixed-layout record: owner ids, attributes, the 'XA'
    # signature, the file number, and five reserved zero bytes.
    fields = (self._group_id, self._user_id, self._attributes,
              b'XA', self._filenum, b'\x00' * 5)
    return struct.pack(self.FMT, *fields)
Record this Extended Attribute Record. Parameters: None. Returns: A string representing this Extended Attribute Record.
def supported_auth_methods(self) -> List[str]:
    """
    Get all AUTH methods supported by both the server and by us,
    preserving our preference order.
    """
    server_supported = set(self.server_auth_methods)
    return [method for method in self.AUTH_METHODS
            if method in server_supported]
Get all AUTH methods supported by both the server and by us.
def _get_all_eip_addresses(addresses=None, allocation_ids=None, region=None,
                           key=None, keyid=None, profile=None):
    '''
    Get all EIP's associated with the current credentials.

    addresses
        (list) - Optional list of addresses.  If provided, only those in
        the list will be returned.
    allocation_ids
        (list) - Optional list of allocation IDs.  If provided, only the
        addresses associated with the given allocation IDs will be
        returned.

    returns
        (list) - The requested Addresses as a list of
        :class:`boto.ec2.address.Address`
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        return conn.get_all_addresses(addresses=addresses,
                                      allocation_ids=allocation_ids)
    except boto.exception.BotoServerError as e:
        # Best-effort: log the AWS-side failure and return no addresses.
        log.error(e)
        return []
Get all EIP's associated with the current credentials. addresses (list) - Optional list of addresses. If provided, only those in the list will be returned. allocation_ids (list) - Optional list of allocation IDs. If provided, only the addresses associated with the given allocation IDs will be returned. returns (list) - The requested Addresses as a list of :class:`boto.ec2.address.Address`
def set_simple_fault_geometry_3D(w, src):
    """
    Builds a 3D polygon from a node instance.

    :param w: writer the polygon is rendered into
    :param src: source node; must be a simpleFaultSource
    """
    assert "simpleFaultSource" in src.tag
    taglist = get_taglist(src)
    geometry_node = src.nodes[taglist.index("simpleFaultGeometry")]
    build_polygon_from_fault_attrs(
        w, parse_simple_fault_geometry(geometry_node))
Builds a 3D polygon from a node instance
def _extract_nn_info(self, structure, nns): """Given Voronoi NNs, extract the NN info in the form needed by NearestNeighbors Args: structure (Structure): Structure being evaluated nns ([dicts]): Nearest neighbor information for a structure Returns: (list of tuples (Site, array, float)): See nn_info """ # Get the target information if self.targets is None: targets = structure.composition.elements else: targets = self.targets # Extract the NN info siw = [] max_weight = max(nn[self.weight] for nn in nns.values()) for nstats in nns.values(): site = nstats['site'] if nstats[self.weight] > self.tol * max_weight \ and self._is_in_targets(site, targets): nn_info = {'site': site, 'image': self._get_image(structure, site), 'weight': nstats[self.weight] / max_weight, 'site_index': self._get_original_site( structure, site)} if self.extra_nn_info: # Add all the information about the site poly_info = nstats del poly_info['site'] nn_info['poly_info'] = poly_info siw.append(nn_info) return siw
Given Voronoi NNs, extract the NN info in the form needed by NearestNeighbors Args: structure (Structure): Structure being evaluated nns ([dicts]): Nearest neighbor information for a structure Returns: (list of tuples (Site, array, float)): See nn_info