code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def ToCsv(self, columns_order=None, order_by=(), separator=','):
    """Write the data table as a CSV string.

    Output is encoded in UTF-8 because the Python 2 ``csv`` module cannot
    handle Unicode directly.

    Args:
        columns_order: Optional. List of ALL column IDs in the desired
            output order. Defaults to the table's own column order.
        order_by: Optional. Column name(s) to sort by; passed through
            unchanged to _PreparedData.
        separator: Optional. Delimiter placed between values.

    Returns:
        A CSV string representing the table.

    Raises:
        DataTableException: The data does not match the declared type
            (raised from CoerceValue).
    """
    csv_buffer = six.StringIO()
    writer = csv.writer(csv_buffer, delimiter=separator)
    if (columns_order is None):
        columns_order = [col['id'] for col in self.__columns]
    col_dict = dict([(col['id'], col) for col in self.__columns])

    def ensure_str(s):
        'Compatibility function. Ensures using of str rather than unicode.'
        if isinstance(s, str):
            return s
        # NOTE(review): under Python 3 this returns bytes, which csv would
        # render as "b'...'" — presumably this path only runs on Python 2.
        return s.encode('utf-8')

    # Header row uses the column labels, not the column ids.
    writer.writerow([ensure_str(col_dict[col]['label'])
                     for col in columns_order])
    for (row, unused_cp) in self._PreparedData(order_by):
        cells_list = []
        for col in columns_order:
            value = ''
            if ((col in row) and (row[col] is not None)):
                value = self.CoerceValue(row[col], col_dict[col]['type'])
            if isinstance(value, tuple):
                # Coerced date-like values carry a display string in
                # value[1]; other tuple values keep the raw value in value[0].
                if (col_dict[col]['type'] in ['date', 'datetime', 'timeofday']):
                    cells_list.append(ensure_str(self.ToString(value[1])))
                else:
                    cells_list.append(ensure_str(self.ToString(value[0])))
            else:
                cells_list.append(ensure_str(self.ToString(value)))
        writer.writerow(cells_list)
    return csv_buffer.getvalue()
Writes the data table as a CSV string. Output is encoded in UTF-8 because the Python "csv" module can't handle Unicode properly according to its documentation. Args: columns_order: Optional. Specifies the order of columns in the output table. Specify a list of all column IDs in the order in which you want the table created. Note that you must list all column IDs in this parameter, if you use it. order_by: Optional. Specifies the name of the column(s) to sort by. Passed as is to _PreparedData. separator: Optional. The separator to use between the values. Returns: A CSV string representing the table. Example result: 'a','b','c' 1,'z',2 3,'w','' Raises: DataTableException: The data does not match the type.
codesearchnet
def __get__(self, inst, cls):
    """Return an observable method for this descriptor access.

    Accessed through the class (``inst`` is None) this returns the
    unbound wrapper; accessed through an instance it returns an
    ObservableBoundMethod sharing the instance's observer registry.

    Args:
        inst: The instance through which the attribute was accessed, or
            None when accessed through the class.
        cls: The class through which the attribute was accessed.
    """
    if inst is None:
        return self._unbound_method
    # Lazily create the per-instance observer registry on first access.
    if not hasattr(inst, INSTANCE_OBSERVER_ATTR):
        setattr(inst, INSTANCE_OBSERVER_ATTR, {})
    registry = getattr(inst, INSTANCE_OBSERVER_ATTR)
    observers = registry.setdefault(self._func.__name__, {})
    return ObservableBoundMethod(self._func, inst, observers)
Return an ObservableBoundMethod or ObservableUnboundMethod. If accessed by instance, I return an ObservableBoundMethod which handles that instance. If accessed by class I return an ObservableUnboundMethod. Args: inst: The instance through which I was accessed. This will be None if I was accessed through the class, i.e. as an unbound method. cls: The class through which I was accessed.
juraj-google-style
def updateFeatureService(self, efs_config):
    """Update one or more feature services (delete and/or add features).

    Args:
        efs_config (list or dict): One feature-service config dict, or a
            list of them. Recognized keys: 'ItemId'+'LayerName' or 'URL',
            optional 'ChunkSize', 'DeleteInfo' ({'Delete', 'DeleteSQL'}),
            and 'FeatureClass'.

    Returns:
        list: One {"DeleteDetails", "AddDetails"} result dict per service,
        or None if a single-config lookup fails.
    """
    if self.securityhandler is None:
        print ("Security handler required")
        return
    fsRes = None
    fst = None
    fURL = None
    resItm = None
    try:
        fsRes = []
        fst = featureservicetools.featureservicetools(securityinfo=self)
        if isinstance(efs_config, list):
            # Batch mode: process each service config independently.
            for ext_service in efs_config:
                fURL = None
                cs = 0
                try:
                    if 'ChunkSize' in ext_service:
                        if common.is_number(ext_service['ChunkSize']):
                            cs = ext_service['ChunkSize']
                except Exception as e:
                    # Best effort: a malformed ChunkSize falls back to 0.
                    pass
                resItm = {"DeleteDetails": None, "AddDetails": None}
                if 'ItemId' in ext_service and 'LayerName' in ext_service:
                    fs = fst.GetFeatureService(
                        itemId=ext_service['ItemId'], returnURLOnly=False)
                    if not fs is None:
                        fURL = fst.GetLayerFromFeatureService(
                            fs=fs, layerName=ext_service['LayerName'],
                            returnURLOnly=True)
                # Fall back to an explicit URL when item lookup fails.
                if fURL is None and 'URL' in ext_service:
                    fURL = ext_service['URL']
                if fURL is None:
                    print("Item and layer not found or URL not in config")
                    continue
                if 'DeleteInfo' in ext_service:
                    if str(ext_service['DeleteInfo']['Delete']).upper() == "TRUE":
                        resItm['DeleteDetails'] = fst.DeleteFeaturesFromFeatureLayer(
                            url=fURL, sql=ext_service['DeleteInfo']['DeleteSQL'],
                            chunksize=cs)
                        if not 'error' in resItm['DeleteDetails']:
                            print ("Delete Successful: %s" % fURL)
                        else:
                            print (str(resItm['DeleteDetails']))
                resItm['AddDetails'] = fst.AddFeaturesToFeatureLayer(
                    url=fURL, pathToFeatureClass=ext_service['FeatureClass'],
                    chunksize=cs)
                fsRes.append(resItm)
                if not 'error' in resItm['AddDetails']:
                    print ("Add Successful: %s " % fURL)
                else:
                    print (str(resItm['AddDetails']))
        else:
            # Single-config mode: same flow, but abort (return None) when
            # no target URL can be resolved.
            resItm = {"DeleteDetails": None, "AddDetails": None}
            fURL = efs_config['URL']
            cs = 0
            try:
                if 'ChunkSize' in efs_config:
                    if common.is_number(efs_config['ChunkSize']):
                        cs = efs_config['ChunkSize']
            except Exception as e:
                pass
            if 'ItemId' in efs_config and 'LayerName' in efs_config:
                fs = fst.GetFeatureService(
                    itemId=efs_config['ItemId'], returnURLOnly=False)
                if not fs is None:
                    fURL = fst.GetLayerFromFeatureService(
                        fs=fs, layerName=efs_config['LayerName'],
                        returnURLOnly=True)
            if fURL is None and 'URL' in efs_config:
                fURL = efs_config['URL']
            if fURL is None:
                print("Item and layer not found or URL not in config")
                return None
            if 'DeleteInfo' in efs_config:
                if str(efs_config['DeleteInfo']['Delete']).upper() == "TRUE":
                    resItm['DeleteDetails'] = fst.DeleteFeaturesFromFeatureLayer(
                        url=fURL, sql=efs_config['DeleteInfo']['DeleteSQL'],
                        chunksize=cs)
                    if not 'error' in resItm['DeleteDetails']:
                        print (" Delete Successful: %s" % fURL)
                    else:
                        print (" " + str(resItm['DeleteDetails']))
            resItm['AddDetails'] = fst.AddFeaturesToFeatureLayer(
                url=fURL, pathToFeatureClass=efs_config['FeatureClass'],
                chunksize=cs)
            fsRes.append(resItm)
            if not 'error' in resItm['AddDetails']:
                print (" Add Successful: %s " % fURL)
            else:
                print (" " + str(resItm['AddDetails']))
        return fsRes
    except common.ArcRestHelperError as e:
        raise e
    except Exception as e:
        line, filename, synerror = trace()
        raise common.ArcRestHelperError({
            "function": "updateFeatureService",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
    finally:
        fst = None
        fURL = None
        resItm = None
        del fst
        del fURL
        del resItm
        gc.collect()
Updates a feature service. Args: efs_config (list): A list of JSON configuration feature service details to update. Returns: dict: A dictionary of results objects.
juraj-google-style
def __init__(self, tcex, name, description, data_type, interval, keyed=False):
    """Initialize metric properties and ensure the metric exists remotely.

    Args:
        tcex: The TcEx framework instance used for API access.
        name (str): The name for the metric.
        description (str): The description of the metric.
        data_type (str): The metric type: Sum, Count, Min, Max, First,
            Last, or Average.
        interval (str): The metric interval: Hourly, Daily, Weekly,
            Monthly, or Yearly.
        keyed (bool): Whether the data will have a keyed value.
    """
    self.tcex = tcex
    self._metric_name = name
    self._metric_description = description
    self._metric_data_type = data_type
    self._metric_interval = interval
    self._metric_keyed = keyed
    self._metric_id = None
    # Create the metric on the server if it does not already exist.
    if not self.metric_find():
        self.metric_create()
Initialize the Class properties. Args: tcex: The TcEx framework instance used for API access. name (str): The name for the metric. description (str): The description of the metric. data_type (str): The type of metric: Sum, Count, Min, Max, First, Last, and Average. interval (str): The metric interval: Hourly, Daily, Weekly, Monthly, and Yearly. keyed (bool, default:False): Indicates whether the data will have a keyed value.
juraj-google-style
def lchmod(self, path, mode):
    """Change the permissions of a file without following symlinks.

    If the file is a link, the permissions of the link itself are changed.

    Args:
        path: (str) Path to the file.
        mode: (int) Permission bits to apply.

    Raises:
        NameError: On a Windows fake filesystem, mirroring the real ``os``
            module, which does not define ``lchmod`` on Windows.
    """
    if self.filesystem.is_windows_fs:
        # Bug fix: the original `raise (NameError, "...")` raised a tuple,
        # which is a TypeError under Python 3 instead of the intended
        # NameError.
        raise NameError("name 'lchmod' is not defined")
    self.filesystem.chmod(path, mode, follow_symlinks=False)
Change the permissions of a file as encoded in integer mode. If the file is a link, the permissions of the link are changed. Args: path: (str) Path to the file. mode: (int) Permissions.
codesearchnet
def apply_inverse(self, y):
    """Apply the inverse of the covariance matrix to a vector or matrix.

    Solves ``K.x = y`` for ``x``, where ``K`` is the GP covariance matrix
    including the white-noise/yerr diagonal terms.

    Args:
        y (array[n] or array[n, nrhs]): Right-hand side vector or matrix.

    Returns:
        array[n] or array[n, nrhs]: The solution, with the same shape
        as ``y``.

    Raises:
        ValueError: For mismatched dimensions.
    """
    # Make sure the factorized solver reflects the current parameters.
    self._recompute()
    rhs = self._process_input(y)
    return self.solver.solve(rhs)
Apply the inverse of the covariance matrix to a vector or matrix Solve ``K.x = y`` for ``x`` where ``K`` is the covariance matrix of the GP with the white noise and ``yerr`` components included on the diagonal. Args: y (array[n] or array[n, nrhs]): The vector or matrix ``y`` described above. Returns: array[n] or array[n, nrhs]: The solution to the linear system. This will have the same shape as ``y``. Raises: ValueError: For mismatched dimensions.
codesearchnet
def get_last_next(self, date):
    """Provide the last and next leap-second events relative to a date.

    Args:
        date (float): Date in MJD.

    Returns:
        tuple: ``(past, future)`` where each element is an
        ``(mjd, value)`` pair, or ``(None, None)`` when there is no event
        on that side of ``date``.
    """
    past = (None, None)
    future = (None, None)
    # Walk the table newest-first: the first entry at or before `date` is
    # the last event; the entry inspected just before it is the next one.
    for event_mjd, event_value in reversed(self.data):
        if event_mjd <= date:
            past = (event_mjd, event_value)
            break
        future = (event_mjd, event_value)
    return past, future
Provide the last and next leap-second events relative to a date Args: date (float): Date in MJD Return: tuple: ``(past, future)``, where each element is an ``(mjd, value)`` pair, or ``(None, None)`` when no event exists on that side of the date
juraj-google-style
def ExpandSignature(sig):
    """Expand a signature with union-typed parameters into all combinations.

    For argument lists containing disjunctions, generates every combination
    right to left: from (a or b, c or d) this yields (a, c), (a, d),
    (b, c), (b, d), in that order.

    Args:
        sig: A pytd.Signature instance.

    Returns:
        A list of pytd.Signature objects, one per parameter combination,
        for further processing by VisitFunction.
    """
    def alternatives(param):
        # A union parameter contributes one alternative per member type;
        # any other parameter contributes itself.
        if isinstance(param.type, pytd.UnionType):
            return [param.Replace(type=t) for t in param.type.type_list]
        return [param]

    option_lists = [alternatives(p) for p in sig.params]
    return [sig.Replace(params=tuple(combo))
            for combo in itertools.product(*option_lists)]
Expand a single signature. For argument lists that contain disjunctions, generates all combinations of arguments. The expansion will be done right to left. E.g., from (a or b, c or d), this will generate the signatures (a, c), (a, d), (b, c), (b, d). (In that order) Arguments: sig: A pytd.Signature instance. Returns: A list. The visit function of the parent of this node (VisitFunction) will process this list further.
github-repos
def CaptureFrameLocals(self, frame):
    """Capture local variables and arguments of the specified frame.

    Args:
        frame: frame to capture locals and arguments from.

    Returns:
        (arguments, locals) tuple, each a list of captured variables.
    """
    captured = {
        name: self.CaptureNamedVariable(name, value, 1,
                                        self.default_capture_limits)
        for name, value in six.viewitems(frame.f_locals)}

    # Count the declared arguments, including *args/**kwargs slots.
    code = frame.f_code
    arg_count = code.co_argcount
    if code.co_flags & inspect.CO_VARARGS:
        arg_count += 1
    if code.co_flags & inspect.CO_VARKEYWORDS:
        arg_count += 1

    arguments = []
    for arg_name in code.co_varnames[:arg_count]:
        if arg_name in captured:
            # Move arguments out of the locals dict so they are not listed twice.
            arguments.append(captured.pop(arg_name))
    return (arguments, list(six.viewvalues(captured)))
Captures local variables and arguments of the specified frame. Args: frame: frame to capture locals and arguments. Returns: (arguments, locals) tuple.
juraj-google-style
def _transform_filter_to_sql(filter_block, node, context):
    """Transform a Filter block into its corresponding SQLAlchemy expression.

    Args:
        filter_block: Filter, the Filter block to transform.
        node: SqlNode, the node the Filter block applies to.
        context: CompilationContext, global compilation state and metadata.

    Returns:
        Expression, SQLAlchemy equivalent of ``filter_block.predicate``.
    """
    return _expression_to_sql(filter_block.predicate, node, context)
Transform a Filter block to its corresponding SQLAlchemy expression. Args: filter_block: Filter, the Filter block to transform. node: SqlNode, the node Filter block applies to. context: CompilationContext, global compilation state and metadata. Returns: Expression, SQLAlchemy expression equivalent to the Filter.predicate expression.
codesearchnet
def _MaxPoolAlongCols(self, input_matrix, col_seq, overlapping): input_matrix = input_matrix.transpose() output_matrix = self._MaxPoolAlongRows(input_matrix, col_seq, overlapping) return output_matrix.transpose()
Perform max pool along column of a 2-D matrix based on col_seq. Args: input_matrix: A 2-D matrix. col_seq: Cumulative pooling sequence along column. overlapping: Whether or not use overlapping when pooling. Returns: A 2-D matrix, with * num_rows = input_matrix.num_rows * num_cols = len(col_seq)-1.
github-repos
def HasColumn(self, table_name, column_name):
    """Determine if a specific column exists.

    Column names are cached per table on first lookup, so the database is
    queried at most once per table.

    Args:
        table_name (str): name of the table.
        column_name (str): name of the column.

    Returns:
        bool: True if the column exists.

    Raises:
        IOError: if the database file is not opened.
    """
    if not self._connection:
        raise IOError('Not opened.')

    if not column_name:
        return False

    table_name = table_name.lower()
    column_names = self._column_names_per_table.get(table_name, None)
    if column_names is None:
        # Cache miss: query the schema and remember the column names.
        column_names = []
        self._cursor.execute(self._HAS_COLUMN_QUERY.format(table_name))
        for schema_row in self._cursor.fetchall():
            name_value = schema_row[1]
            if not name_value:
                continue
            if isinstance(name_value, bytes):
                name_value = name_value.decode('utf-8')
            column_names.append(name_value.lower())
        self._column_names_per_table[table_name] = column_names

    return column_name.lower() in column_names
Determines if a specific column exists. Args: table_name (str): name of the table. column_name (str): name of the column. Returns: bool: True if the column exists. Raises: IOError: if the database file is not opened. OSError: if the database file is not opened.
juraj-google-style
def perform(self, agent_indices, observ):
    """Compute a batch of actions and a summary for a batch of observations.

    Args:
        agent_indices: Tensor containing current batch indices.
        observ: Tensor of a batch of observations for all agents.

    Returns:
        Tuple of action batch tensor and summary tensor.
    """
    with tf.name_scope('perform/'):
        observ = self._observ_filter.transform(observ)
        if (self._last_state is None):
            state = None
        else:
            # Keep only the recurrent state of the agents in this batch.
            state = tools.nested.map(
                (lambda x: tf.gather(x, agent_indices)), self._last_state)
        with tf.device(('/gpu:0' if self._use_gpu else '/cpu:0')):
            # observ[:, None] adds a time axis of length 1.
            output = self._network(
                observ[:, None], tf.ones(observ.shape[0]), state)
        # Sample the policy during training; use its mode during evaluation.
        action = tf.cond(
            self._is_training, output.policy.sample, output.policy.mode)
        logprob = output.policy.log_prob(action)[:, 0]
        # `str` is a cheap no-op branch producing an empty summary string.
        summary = tf.cond(
            self._should_log,
            (lambda: tf.summary.merge([
                tf.summary.histogram('mode', output.policy.mode()[:, 0]),
                tf.summary.histogram('action', action[:, 0]),
                tf.summary.histogram('logprob', logprob)])),
            str)
        if (self._last_state is None):
            assign_state = tf.no_op()
        else:
            assign_state = utility.assign_nested_vars(
                self._last_state, output.state, agent_indices)
        remember_last_action = tf.scatter_update(
            self._last_action, agent_indices, action[:, 0])
        policy_params = tools.nested.filter(
            (lambda x: isinstance(x, tf.Tensor)), output.policy.parameters)
        assert policy_params, 'Policy has no parameters to store.'
        remember_last_policy = tools.nested.map(
            (lambda var, val: tf.scatter_update(var, agent_indices, val[:, 0])),
            self._last_policy, policy_params, flatten=True)
        # Force the bookkeeping updates to run before returning the action.
        with tf.control_dependencies(
                ((assign_state, remember_last_action) + remember_last_policy)):
            return (action[:, 0], tf.identity(summary))
Compute batch of actions and a summary for a batch of observation. Args: agent_indices: Tensor containing current batch indices. observ: Tensor of a batch of observations for all agents. Returns: Tuple of action batch tensor and summary tensor.
codesearchnet
def AddEventTag(self, event_tag):
    """Add an event tag to the storage writer.

    Args:
        event_tag (EventTag): event tag.

    Raises:
        IOError: when the storage writer is closed, or when the event tag
            identifier is not a FakeIdentifier.
    """
    self._RaiseIfNotWritable()

    event_identifier = event_tag.GetEventIdentifier()
    if not isinstance(event_identifier, identifiers.FakeIdentifier):
        raise IOError('Unsupported event identifier type: {0:s}'.format(
            type(event_identifier)))

    prepared_tag = self._PrepareAttributeContainer(event_tag)
    self._event_tags.append(prepared_tag)
    self.number_of_event_tags += 1
Adds an event tag. Args: event_tag (EventTag): event tag. Raises: IOError: when the storage writer is closed. OSError: when the storage writer is closed.
juraj-google-style
def get_assignee(self, main_type, sub_type, unique_id, assignee_id, params=None):
    """Retrieve a single assignee via the ``assignee`` endpoint helper.

    Args:
        main_type: Main resource type.
        sub_type: Resource sub type.
        unique_id: Unique ID of the resource.
        assignee_id: ID of the assignee.
        params: Optional query parameters; defaults to an empty dict.

    Returns:
        Whatever ``self.assignee`` returns for the request.
    """
    if params is None:
        params = {}
    return self.assignee(
        main_type, sub_type, unique_id, assignee_id, params=params)
Args: main_type: sub_type: unique_id: assignee_id: params: Return:
juraj-google-style
def is_test_executed(self, test_name):
    """Check if a specific test has been executed.

    Args:
        test_name: string, the name of the test to check.

    Returns:
        True if a record with this test name exists in the executed
        results, False otherwise.
    """
    return any(record.test_name == test_name for record in self.executed)
Checks if a specific test has been executed. Args: test_name: string, the name of the test to check. Returns: True if the test has been executed according to the test result, False otherwise.
github-repos
def check_time(timer_id):
    """Register a named timer on first use; report elapsed time afterwards.

    Suitable for adding checkpoints with a single line when iterating over
    a list of items.

    Args:
        timer_id (str): Timer identifier.

    Returns:
        0 on the first call for ``timer_id`` (the timer is registered),
        otherwise the time since the previous check for that timer.
    """
    if timer_id in _g_timers:
        return _g_timers[timer_id].since_last_check()
    # First call with this id: start a new timer.
    _g_timers[timer_id] = Timer()
    return 0
Add check points in a single line. This method is suitable for running a task on a list of items. A timer will be registered when the method is called for the first time. :Example: >>> import time >>> import mmcv >>> for i in range(1, 6): >>> # simulate a code block >>> time.sleep(i) >>> mmcv.check_time('task1') 2.000 3.000 4.000 5.000 Args: timer_id (str): Timer identifier.
codesearchnet
def broker_metadata(self, broker_id):
    """Get BrokerMetadata for a node.

    Args:
        broker_id (int): node_id of the broker to look up.

    Returns:
        BrokerMetadata, or None if the id is unknown. Bootstrap brokers
        serve as a fallback when the id is absent from cluster metadata.
    """
    metadata = self._brokers.get(broker_id)
    if metadata:
        return metadata
    return self._bootstrap_brokers.get(broker_id)
Get BrokerMetadata Arguments: broker_id (int): node_id for a broker to check Returns: BrokerMetadata or None if not found
juraj-google-style
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """Read the encoded CreateKeyPair request payload and decode it.

    Pre-2.0 encodings carry TemplateAttribute structures; KMIP 2.0+
    encodings carry Attributes structures, which are converted to
    template attributes for a uniform internal representation.

    Args:
        input_buffer (stream): A data buffer containing encoded object
            data, supporting a read method.
        kmip_version (KMIPVersion): The KMIP version with which the
            object will be decoded. Optional, defaults to KMIP 1.0.
    """
    super(CreateKeyPairRequestPayload, self).read(
        input_buffer, kmip_version=kmip_version)
    local_buffer = utils.BytearrayStream(input_buffer.read(self.length))

    # Common attributes shared by both generated keys.
    if (kmip_version < enums.KMIPVersion.KMIP_2_0):
        if self.is_tag_next(enums.Tags.COMMON_TEMPLATE_ATTRIBUTE,
                            local_buffer):
            self._common_template_attribute = objects.TemplateAttribute(
                tag=enums.Tags.COMMON_TEMPLATE_ATTRIBUTE)
            self._common_template_attribute.read(
                local_buffer, kmip_version=kmip_version)
    elif self.is_tag_next(enums.Tags.COMMON_ATTRIBUTES, local_buffer):
        attributes = objects.Attributes(tag=enums.Tags.COMMON_ATTRIBUTES)
        attributes.read(local_buffer, kmip_version=kmip_version)
        self._common_template_attribute = \
            objects.convert_attributes_to_template_attribute(attributes)

    # Attributes specific to the private key.
    if (kmip_version < enums.KMIPVersion.KMIP_2_0):
        if self.is_tag_next(enums.Tags.PRIVATE_KEY_TEMPLATE_ATTRIBUTE,
                            local_buffer):
            self._private_key_template_attribute = objects.TemplateAttribute(
                tag=enums.Tags.PRIVATE_KEY_TEMPLATE_ATTRIBUTE)
            self._private_key_template_attribute.read(
                local_buffer, kmip_version=kmip_version)
    elif self.is_tag_next(enums.Tags.PRIVATE_KEY_ATTRIBUTES, local_buffer):
        attributes = objects.Attributes(tag=enums.Tags.PRIVATE_KEY_ATTRIBUTES)
        attributes.read(local_buffer, kmip_version=kmip_version)
        self._private_key_template_attribute = \
            objects.convert_attributes_to_template_attribute(attributes)

    # Attributes specific to the public key.
    if (kmip_version < enums.KMIPVersion.KMIP_2_0):
        if self.is_tag_next(enums.Tags.PUBLIC_KEY_TEMPLATE_ATTRIBUTE,
                            local_buffer):
            self._public_key_template_attribute = objects.TemplateAttribute(
                tag=enums.Tags.PUBLIC_KEY_TEMPLATE_ATTRIBUTE)
            self._public_key_template_attribute.read(
                local_buffer, kmip_version=kmip_version)
    elif self.is_tag_next(enums.Tags.PUBLIC_KEY_ATTRIBUTES, local_buffer):
        attributes = objects.Attributes(tag=enums.Tags.PUBLIC_KEY_ATTRIBUTES)
        attributes.read(local_buffer, kmip_version=kmip_version)
        self._public_key_template_attribute = \
            objects.convert_attributes_to_template_attribute(attributes)

    self.is_oversized(local_buffer)
Read the data encoding the CreateKeyPair request payload and decode it into its constituent parts. Args: input_buffer (stream): A data buffer containing encoded object data, supporting a read method. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0.
codesearchnet
def intersect(self, other, strategy=_STRATEGY.GEOMETRIC, _verify=True):
    """Find the common intersection with another surface.

    Args:
        other (Surface): Other surface to intersect with.
        strategy (Optional[IntersectionStrategy]): The intersection
            algorithm to use. Defaults to geometric.
        _verify (Optional[bool]): Whether to verify assumptions as the
            algorithm proceeds. Defaults to True.

    Returns:
        List[Union[CurvedPolygon, Surface]]: The intersections
        (possibly empty).

    Raises:
        TypeError: If ``other`` is not a surface (and ``_verify=True``).
        NotImplementedError: If either surface is not two-dimensional
            (and ``_verify=True``).
        ValueError: If ``strategy`` is not a valid IntersectionStrategy.
    """
    if _verify:
        if (not isinstance(other, Surface)):
            raise TypeError('Can only intersect with another surface',
                            'Received', other)
        if ((self._dimension != 2) or (other._dimension != 2)):
            raise NotImplementedError('Intersection only implemented in 2D')
    if (strategy == _STRATEGY.GEOMETRIC):
        do_intersect = _surface_intersection.geometric_intersect
    elif (strategy == _STRATEGY.ALGEBRAIC):
        do_intersect = _surface_intersection.algebraic_intersect
    else:
        raise ValueError('Unexpected strategy.', strategy)
    (edge_infos, contained, all_edge_nodes) = do_intersect(
        self._nodes, self._degree, other._nodes, other._degree, _verify)
    if (edge_infos is None):
        # A `None` edge list signals full containment: one surface lies
        # entirely inside the other.
        if contained:
            return [self]
        else:
            return [other]
    else:
        return [_make_intersection(edge_info, all_edge_nodes)
                for edge_info in edge_infos]
Find the common intersection with another surface. Args: other (Surface): Other surface to intersect with. strategy (Optional[~bezier.curve.IntersectionStrategy]): The intersection algorithm to use. Defaults to geometric. _verify (Optional[bool]): Indicates if extra caution should be used to verify assumptions about the algorithm as it proceeds. Can be disabled to speed up execution time. Defaults to :data:`True`. Returns: List[Union[~bezier.curved_polygon.CurvedPolygon, \ ~bezier.surface.Surface]]: List of intersections (possibly empty). Raises: TypeError: If ``other`` is not a surface (and ``_verify=True``). NotImplementedError: If at least one of the surfaces isn't two-dimensional (and ``_verify=True``). ValueError: If ``strategy`` is not a valid :class:`.IntersectionStrategy`.
codesearchnet
def seat_button_count(self):
    """The total number of buttons pressed on all devices on this seat.

    Counted after the event was triggered.

    Raises:
        AttributeError: If this event is not of type
            :attr:`~libinput.constant.EventType.TABLET_TOOL_BUTTON`.

    Returns:
        int: The seat-wide pressed button count for the key of this event.
    """
    if self.type != EventType.TABLET_TOOL_BUTTON:
        raise AttributeError(_wrong_prop.format(self.type))
    count_getter = self._libinput.libinput_event_tablet_tool_get_seat_button_count
    return count_getter(self._handle)
The total number of buttons pressed on all devices on the associated seat after the event was triggered. For events that are not of type :attr:`~libinput.constant.EventType.TABLET_TOOL_BUTTON`, this property raises :exc:`AttributeError`. Returns: int: The seat wide pressed button count for the key of this event.
codesearchnet
def load_resource(path):
    """Load the resource at the given path, relative to tensorflow/.

    Args:
        path: a string resource path relative to tensorflow/.

    Returns:
        The contents of that resource, as bytes.

    Raises:
        IOError: If the path is not found, or the resource can't be opened.
    """
    resource_path = get_path_to_datafile(path)
    with open(resource_path, 'rb') as resource_file:
        return resource_file.read()
Load the resource at given path, where path is relative to tensorflow/. Args: path: a string resource path relative to tensorflow/. Returns: The contents of that resource. Raises: IOError: If the path is not found, or the resource can't be opened.
github-repos
def get(self: 'Option[Mapping[K,V]]', key: K, default=None) -> 'Option[V]':
    """Get a mapping value by key from the contained value.

    Args:
        key: The mapping key.
        default: Value used when the key is missing.

    Returns:
        ``Some`` of the mapping value if the key exists and the value is
        not None; ``Some(default)`` for a non-None default; otherwise
        ``NONE``.

    Examples:
        >>> Some({'hi': 1}).get('hi')
        Some(1)
        >>> NONE.get('hi', 12)
        Some(12)
        >>> NONE.get('hi')
        NONE
    """
    if not self._is_some:
        return self._type.maybe(default)
    return self._type.maybe(self._val.get(key, default))
Gets a mapping value by key in the contained value or returns ``default`` if the key doesn't exist. Args: key: The mapping key. default: The default value. Returns: * ``Some`` variant of the mapping value if the key exists and the value is not None. * ``Some(default)`` if ``default`` is not None. * :py:data:`NONE` if ``default`` is None. Examples: >>> Some({'hi': 1}).get('hi') Some(1) >>> Some({}).get('hi', 12) Some(12) >>> NONE.get('hi', 12) Some(12) >>> NONE.get('hi') NONE
codesearchnet
def default(self, obj):
    """The required ``default`` method for ``JSONEncoder`` subclasses.

    Args:
        obj (obj): The object to encode. Anything not specifically
            handled here is passed on to ``transform_python_types``.
    """
    from ..model import Model
    from ..colors import Color
    from .has_props import HasProps

    if pd and isinstance(obj, (pd.Series, pd.Index)):
        return transform_series(obj, force_list=True)
    if isinstance(obj, np.ndarray):
        return transform_array(obj, force_list=True)
    if isinstance(obj, collections.deque):
        # Encode each element recursively.
        return list(map(self.default, obj))
    if isinstance(obj, Model):
        # Models are serialized by reference, not by value.
        return obj.ref
    if isinstance(obj, HasProps):
        return obj.properties_with_values(include_defaults=False)
    if isinstance(obj, Color):
        return obj.to_css()
    return self.transform_python_types(obj)
The required ``default`` method for ``JSONEncoder`` subclasses. Args: obj (obj) : The object to encode. Anything not specifically handled in this method is passed on to the default system JSON encoder.
codesearchnet
def choose_branch(exclude=None):
    """Show the user a menu to pick a branch from the existing ones.

    Args:
        exclude (list[str]): Branch names to exclude from the menu. By
            default the configured master and develop branches are
            excluded. Pass an empty collection to show all branches.

    Returns:
        str: The name of the branch chosen by the user. Invalid input is
        re-prompted until a valid branch number is entered.
    """
    if exclude is None:
        master = conf.get('git.master_branch', 'master')
        develop = conf.get('git.devel_branch', 'develop')
        exclude = {master, develop}
    # Bug fix: `exclude` is documented as list[str], but `set - list` raises
    # TypeError; coerce to a set so any iterable works.
    branches = list(set(git.branches()) - set(exclude))
    for i, branch_name in enumerate(branches):
        shell.cprint('<90>[{}] <33>{}'.format(i + 1, branch_name))
    choice = 0
    while choice < 1 or choice > len(branches):
        prompt = "Pick a base branch from the above [1-{}]".format(
            len(branches)
        )
        choice = click.prompt(prompt, value_proc=int)
        if not (1 <= choice <= len(branches)):
            fmt = "Invalid choice {}, you must pick a number between {} and {}"
            log.err(fmt.format(choice, 1, len(branches)))
    return branches[choice - 1]
Show the user a menu to pick a branch from the existing ones. Args: exclude (list[str]): List of branch names to exclude from the menu. By default it will exclude master and develop branches. To show all branches pass an empty array here. Returns: str: The name of the branch chosen by the user. If the user inputs an invalid choice, he will be asked again (and again) until he picks a valid branch.
juraj-google-style
def do_batch(args):
    """Run the batch list, show, status or submit command.

    Output is printed to the console by the subcommand handlers.

    Args:
        args: The parsed arguments sent to the command at runtime.
    """
    handlers = {
        'list': do_batch_list,
        'show': do_batch_show,
        'status': do_batch_status,
        'submit': do_batch_submit,
    }
    handler = handlers.get(args.subcommand)
    if handler is not None:
        handler(args)
Runs the batch list, batch show or batch status command, printing output to the console Args: args: The parsed arguments sent to the command at runtime
juraj-google-style
def _WriteFile(output_path, name, content): path = os.path.join(output_path, name) with open(path, 'wb') as f: f.write(content) return path
Write given content to a file in a given directory. Args: output_path: The directory to store the file in. name: The name of the file to store the content in. content: The content to write to the file.close Returns: The full path to the written file.
codesearchnet
def valueWritePreprocessor(valueString, replaceParamsFile=None):
    """Look up the replacement variable for a negative-id value.

    Negative integer values are ids into the replace param file; they are
    substituted with the matching target variable name on write.

    Args:
        valueString (str): String representing the value to be preprocessed.
        replaceParamsFile (gsshapy.orm.ReplaceParamFile, optional): Instance
            of the replace param file. Required if replacement variables are
            included in the project.

    Returns:
        str: Processed value as a string.
    """
    # Bug fix: use isinstance instead of `type(...) is bool`, and report
    # the correct function name in the warning (was "valueReadPreprocessor").
    if isinstance(valueString, bool):
        log.warning("Only numerical variable types can be handled by the "
                    "valueWritePreprocessor function.")
        return valueString

    variableString = valueString
    if replaceParamsFile is not None:
        if variableString == REPLACE_NO_VALUE:
            variableString = '[NO_VARIABLE]'
        else:
            try:
                number = int(valueString)
                if number < 0:
                    parameterID = number * -1
                    for targetParam in replaceParamsFile.targetParameters:
                        if targetParam.id == parameterID:
                            variableString = targetParam.targetVariable
                            break
            except (TypeError, ValueError):
                # Bug fix: narrowed from a bare `except` — non-numeric
                # values pass through unchanged, but real errors propagate.
                pass
    return variableString
Look up variable name in replace param file for the negative id given and return it. Args: valueString (str): String representing the value to be preprocessed. replaceParamsFile (gsshapy.orm.ReplaceParamFile, optional): Instance of the replace param file. Required if replacement variables are included in the project. Returns: str: Processed value as a string
juraj-google-style
def write_compounds(self, stream, compounds, properties=None):
    """Write an iterable of compounds as a YAML object to the stream.

    Args:
        stream: File-like object.
        compounds: Iterable of compound entries.
        properties: Set of compound properties to output, or None to
            output all of them.
    """
    self._write_entries(
        stream, compounds, self.convert_compound_entry, properties)
Write iterable of compounds as YAML object to stream. Args: stream: File-like object. compounds: Iterable of compound entries. properties: Set of compound properties to output (or None to output all).
juraj-google-style
def profile_graph(self, options):
    """Profile graph-node statistics, organized by the dataflow graph.

    Args:
        options: A dict of options. See core/profiler/g3doc/options.md.

    Returns:
        A GraphNodeProto recording the results; empty if the returned
        proto could not be parsed.
    """
    opts = _build_options(options)
    tfprof_node = tfprof_output_pb2.GraphNodeProto()
    try:
        serialized = print_mdl.Profile('graph'.encode('utf-8'),
                                       opts.SerializeToString())
        tfprof_node.ParseFromString(serialized)
    except message.DecodeError as e:
        # Report the failure but still return the (empty) proto.
        sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
    return tfprof_node
Profile the statistics of graph nodes, organized by dataflow graph. Args: options: A dict of options. See core/profiler/g3doc/options.md. Returns: a GraphNodeProto that records the results.
github-repos
def validate(self, proxy_scanner, expected_num=20, queue_timeout=3, val_timeout=5):
    """Target function of validation threads.

    Pulls candidate proxies from the scanner's queue and keeps the valid
    ones until ``expected_num`` proxies have been collected or the
    scanner stops producing candidates.

    Args:
        proxy_scanner: A ProxyScanner object.
        expected_num: Max number of valid proxies to be scanned.
        queue_timeout: Timeout for getting a proxy from the queue.
        val_timeout: Timeout passed to ``is_valid``.
    """
    while self.proxy_num() < expected_num:
        try:
            candidate_proxy = proxy_scanner.proxy_queue.get(
                timeout=queue_timeout)
        except queue.Empty:
            # Keep waiting while the scanner is still producing
            # candidates; otherwise there is nothing more to validate.
            if proxy_scanner.is_scanning():
                continue
            else:
                break
        addr = candidate_proxy['addr']
        protocol = candidate_proxy['protocol']
        ret = self.is_valid(addr, protocol, val_timeout)
        # Re-check after the (slow) validation: another thread may have
        # collected enough proxies in the meantime.
        if self.proxy_num() >= expected_num:
            self.logger.info('Enough valid proxies, thread {} exit.'
                             .format(threading.current_thread().name))
            break
        if ret['valid']:
            self.add_proxy(Proxy(addr, protocol))
            self.logger.info('{} ok, {:.2f}s'.format(addr,
                                                     ret['response_time']))
        else:
            self.logger.info('{} invalid, {}'.format(addr, ret['msg']))
Target function of validation threads Args: proxy_scanner: A ProxyScanner object. expected_num: Max number of valid proxies to be scanned. queue_timeout: Timeout for getting a proxy from the queue. val_timeout: An integer passed to `is_valid` as argument `timeout`.
juraj-google-style
def __make_request(self, url, method, data, auth, cookies, headers, proxies, timeout, verify):
    """Execute an HTTP request with the given data.

    Args:
        url (str): The URL to call.
        method (str): The method (e.g. ``get`` or ``post``).
        data (str): The data to call the URL with.
        auth (obj): The authentication class.
        cookies (obj): The cookie dict.
        headers (obj): The header dict.
        proxies (obj): The proxies dict.
        timeout (int): The request timeout in seconds.
        verify (mixed): SSL verification.

    Returns:
        obj: The response object.
    """
    # Resolve e.g. requests.get / requests.post by method name.
    requester = getattr(requests, method)
    return requester(
        url=url, data=data, auth=auth, cookies=cookies, headers=headers,
        proxies=proxies, timeout=timeout, verify=verify,
        allow_redirects=True, stream=False)
Execute a request with the given data. Args: url (str): The URL to call. method (str): The method (e.g. `get` or `post`). data (str): The data to call the URL with. auth (obj): The authentication class. cookies (obj): The cookie dict. headers (obj): The header dict. proxies (obj): The proxies dict. timeout (int): The request timeout in seconds. verify (mixed): SSL verification. Returns: obj: The response object.
codesearchnet
def is_layouts_same(self, embedding_layouts) -> bool:
    """Return True if all embedding and checkpoint layouts match.

    Args:
        embedding_layouts: dict of layouts for embedding tables.

    Raises:
        ValueError: if the embedding layouts and checkpoint layouts do
            not have the same keys.

    Returns:
        bool: whether every embedding layout equals its checkpoint layout.
    """
    if self._checkpoint_layouts.keys() != embedding_layouts.keys():
        raise ValueError(
            'Layouts in checkpoint and embedding must have the same keys. '
            'found {} and {}'.format(
                self._checkpoint_layouts.keys(), embedding_layouts.keys()))
    for key, checkpoint_layout in self._checkpoint_layouts.items():
        candidate_layout = embedding_layouts[key]
        if not compare.ProtoEq(checkpoint_layout, candidate_layout):
            # A mismatch means the checkpoint needs resharding.
            logging.info(
                'Layouts do not match for %s this will require resharding; '
                '%s vs %s', key, checkpoint_layout, candidate_layout)
            return False
    return True
Returns True if the all the embedding and checkpoint layouts are the same. Args: embedding_layouts: dict of layouts for embedding tables. Raises: ValueError if the embedding layouts and checkpoint layouts do not have the same keys. Returns: Bool representing if the embedding layouts match the layouts in checkpoint.
github-repos
def VerifyCipherSignature(self, remote_public_key):
    """Verify the signature on the encrypted cipher block.

    Args:
        remote_public_key: The remote public key to verify against.

    Returns:
        True when both a signature and a key are present and verification
        succeeds; None when either is missing (no verification attempted).

    Raises:
        rdf_crypto.VerificationError: If a signature and key were both
            given but verification fails.
    """
    if not self.cipher_metadata.signature or not remote_public_key:
        return None
    stats_collector_instance.Get().IncrementCounter('grr_rsa_operations')
    remote_public_key.Verify(self.serialized_cipher,
                             self.cipher_metadata.signature)
    return True
Verifies the signature on the encrypted cipher block. This method returns True if the signature verifies correctly with the key given. Args: remote_public_key: The remote public key. Returns: None Raises: rdf_crypto.VerificationError: A signature and a key were both given but verification fails.
codesearchnet
def emit_completion(self, completion_percent):
    """Emit a completion event through the Completion API.

    Unlike the grading API, completion may go down: emitting 0.0 on a
    previously completed block marks it as no longer complete.

    Args:
        completion_percent (float): Completion in [0.0; 1.0] inclusive,
            where 0.0 means not completed and 1.0 fully completed.

    Raises:
        AttributeError: if the block does not declare custom completion
            support or is not in 'completable' mode.
        ValueError: if completion_percent is None or outside [0.0; 1.0].

    Returns:
        None
    """
    completion_mode = XBlockCompletionMode.get_mode(self)
    if not self.has_custom_completion or completion_mode != XBlockCompletionMode.COMPLETABLE:
        raise AttributeError(
            "Using `emit_completion` requires `has_custom_completion == True` (was {}) "
            "and `completion_mode == 'completable'` (was {})".format(
                self.has_custom_completion, completion_mode,
            )
        )
    # Note: the None check must come first — comparing None would raise.
    if completion_percent is None or not 0.0 <= completion_percent <= 1.0:
        raise ValueError("Completion percent must be in [0.0; 1.0] interval, {} given".format(completion_percent))
    self.runtime.publish(
        self,
        'completion',
        {'completion': completion_percent},
    )
Emits completion event through Completion API. Unlike grading API, calling this method allows completion to go down - i.e. emitting a value of 0.0 on a previously completed block indicates that it is no longer considered complete. Arguments: completion_percent (float): Completion in range [0.0; 1.0] (inclusive), where 0.0 means the block is not completed, 1.0 means the block is fully completed. Returns: None
juraj-google-style
def filter(self, items=None, like=None, regex=None, axis=None):
    """Subset rows or columns based on their labels.

    Exactly one of ``items``, ``like`` or ``regex`` must be provided.

    Args:
        items (list): list of labels to keep as-is.
        like (string): retain labels where ``like in label``.
        regex (string): retain labels matching the regex.
        axis: axis to filter on; defaults to the columns axis.

    Returns:
        A new DataFrame with the filter applied.
    """
    nkw = count_not_none(items, like, regex)
    if nkw > 1:
        raise TypeError(
            "Keyword arguments `items`, `like`, or `regex` are mutually exclusive"
        )
    if nkw == 0:
        raise TypeError("Must pass either `items`, `like`, or `regex`")
    if axis is None:
        axis = "columns"
    axis = self._get_axis_number(axis)
    # axis 1 filters column labels; axis 0 filters the index.
    labels = self.columns if axis else self.index
    if items is not None:
        bool_arr = labels.isin(items)
    elif like is not None:

        def f(x):
            return like in to_str(x)

        bool_arr = labels.map(f).tolist()
    else:

        def f(x):
            # `matcher` is bound below; Python closures resolve names at
            # call time, so this is safe.
            return matcher.search(to_str(x)) is not None

        matcher = re.compile(regex)
        bool_arr = labels.map(f).tolist()
    if not axis:
        return self[bool_arr]
    return self[self.columns[bool_arr]]
Subset rows or columns based on their labels Args: items (list): list of labels to subset like (string): retain labels where `arg in label == True` regex (string): retain labels matching regex input axis: axis to filter on Returns: A new DataFrame with the filter applied.
juraj-google-style
def from_dict(d: Dict[(str, Any)]) -> 'CoverageInstructions':
    """Load a set of coverage instructions from the given dictionary.

    The 'type' key selects the concrete instructions class, which then
    parses the rest of the dictionary.

    Raises:
        BadCoverageInstructions: if the given coverage instructions
            are illegal.
    """
    instructions_cls = _NAME_TO_INSTRUCTIONS[d['type']]
    return instructions_cls.from_dict(d)
Loads a set of coverage instructions from a given dictionary. Raises: BadCoverageInstructions: if the given coverage instructions are illegal.
codesearchnet
def set_python_graph(self, python_graph):
    """Provide the Python ``Graph`` object to the wrapper.

    Unlike partition graphs (protobuf ``GraphDef`` objects), the Python
    ``Graph`` carries extra information such as the construction
    traceback of each node, which is indexed here by op name.

    Args:
        python_graph: (ops.Graph) The Python Graph object, or None.
    """
    self._python_graph = python_graph
    self._node_traceback = {}
    if not python_graph:
        return
    for op in python_graph.get_operations():
        # Freeze each traceback into a tuple of frame tuples.
        self._node_traceback[op.name] = tuple(map(tuple, op.traceback))
Provide Python `Graph` object to the wrapper. Unlike the partition graphs, which are protobuf `GraphDef` objects, `Graph` is a Python object and carries additional information such as the traceback of the construction of the nodes in the graph. Args: python_graph: (ops.Graph) The Python Graph object.
github-repos
def load_from_dict(self, conf_dict=None):
    """Load the configuration from a dictionary.

    Resets the configuration to defaults first, then overlays the given
    values and refreshes the Python paths.

    Args:
        conf_dict (dict): Dictionary with the configuration.
    """
    self.set_to_default()
    self._update_dict(self._config, conf_dict)
    self._update_python_paths()
Load the configuration from a dictionary. Args: conf_dict (dict): Dictionary with the configuration.
juraj-google-style
def __init__(self, pb_id):
    """Create a PB (Processing Block) object.

    Args:
        pb_id (str): Processing Block identifier.

    Raises:
        KeyError: if the specified PB does not exist.
    """
    SchedulingObject.__init__(self, PB_KEY, pb_id)
    # Fail fast if the identifier is unknown.
    self._check_object_exists()
Create a PB object. Args: pb_id (str): Processing Block Identifier Raises: KeyError, if the specified PB does not exist
juraj-google-style
def list_file_extensions(path: str, reportevery: int = 1) -> List[str]:
    """Return a sorted list of every file extension found under *path*.

    Walks the directory tree recursively.

    Args:
        path: root directory to scan.
        reportevery: log directory progress after every *n* directories.

    Returns:
        Sorted list of the distinct extensions found (with their leading
        dot; files without an extension contribute the empty string).
    """
    extensions = set()
    count = 0
    for root, _dirs, files in os.walk(path):
        count += 1
        if count % reportevery == 0:
            log.debug("Walking directory {}: {!r}", count, root)
        for file in files:
            # Only the extension matters; the stem is discarded.
            extensions.add(os.path.splitext(file)[1])
    return sorted(extensions)
Returns a sorted list of every file extension found in a directory and its subdirectories. Args: path: path to scan reportevery: report directory progress after every *n* steps Returns: sorted list of every file extension found
juraj-google-style
def populate_settings_dir(force: bool=False) -> bool:
    """Populate the settings directory with the default settings files.

    Args:
        force: when ``True``, overwrite existing settings files with the
            defaults.

    Returns:
        ``True`` if any files were copied, ``False`` otherwise.
    """
    copied_any = False
    if _default_settings_path == _settings_path:
        # Defaults already live in the settings directory; nothing to do.
        return copied_any
    for src in _default_settings_path.glob('**/*.json'):
        dest = _settings_path / src.relative_to(_default_settings_path)
        if dest.exists() and not force:
            continue
        copied_any = True
        dest.parent.mkdir(parents=True, exist_ok=True)
        shutil.copy(src, dest)
    return copied_any
Populate settings directory with default settings files Args: force: if ``True``, replace existing settings files with default ones Returns: ``True`` if any files were copied and ``False`` otherwise
codesearchnet
def __call__(self, text):
    """Score text by the number of words absent from the corpus.

    Example:
        >>> fitness = Corpus(["example"])
        >>> fitness("example")
        0
        >>> fitness("different")
        -2.0

    Args:
        text (str): the text to score.

    Returns:
        Number of out-of-corpus words multiplied by ``self.floor``.
    """
    stripped = remove(text, string.punctuation)
    unknown = [
        word for word in stripped.split()
        if word and word.lower() not in self.words
    ]
    return len(unknown) * self.floor
Score based on number of words not in the corpus. Example: >>> fitness = Corpus(["example"]) >>> fitness("example") 0 >>> fitness("different") -2.0 Args: text (str): The text to score Returns: Corpus score for text
juraj-google-style
def simulate_values(cls, num_events, lr_scheduler, **kwargs):
    """Simulate the scheduled parameter values over ``num_events`` events.

    The provided scheduler is replicated first, so the caller's scheduler
    state is left untouched.

    Args:
        num_events (int): number of events to simulate.
        lr_scheduler (subclass of `torch.optim.lr_scheduler._LRScheduler`):
            lr_scheduler object to wrap.

    Returns:
        list of pairs: [event_index, value].
    """
    scheduler_copy = LRScheduler._replicate_lr_scheduler(lr_scheduler)
    wrapper = cls(save_history=False, lr_scheduler=scheduler_copy)
    values = []
    for event_index in range(num_events):
        wrapper(engine=None)
        values.append(
            [event_index, wrapper.optimizer_param_groups[0][wrapper.param_name]])
    return values
Method to simulate scheduled values during num_events events. Args: num_events (int): number of events during the simulation. lr_scheduler (subclass of `torch.optim.lr_scheduler._LRScheduler`): lr_scheduler object to wrap. Returns: list of pairs: [event_index, value]
juraj-google-style
def __render_config_block(self, config_block):
    """Render a list of config items into their textual form.

    Args:
        config_block: iterable of config line objects (config.Option,
            config.Config, config.Server, config.Bind, config.Acl,
            config.UseBackend, config.User, config.Group).

    Returns:
        str: the concatenated rendered lines. Items of an unrecognized
        type are skipped; previously such items reused the preceding
        line's text, or raised UnboundLocalError when they came first.
    """
    rendered = []
    for line in config_block:
        if isinstance(line, config.Option):
            line_str = self.__render_option(line)
        elif isinstance(line, config.Config):
            line_str = self.__render_config(line)
        elif isinstance(line, config.Server):
            line_str = self.__render_server(line)
        elif isinstance(line, config.Bind):
            line_str = self.__render_bind(line)
        elif isinstance(line, config.Acl):
            line_str = self.__render_acl(line)
        elif isinstance(line, config.UseBackend):
            line_str = self.__render_usebackend(line)
        elif isinstance(line, config.User):
            line_str = self.__render_user(line)
        elif isinstance(line, config.Group):
            line_str = self.__render_group(line)
        else:
            # Bug fix: unknown item types used to leave line_str stale.
            continue
        rendered.append(line_str)
    return ''.join(rendered)
Render a block of config lines to a string.

Args:
    config_block ([config.Item, ...]): config line objects to render.

Returns:
    str: concatenated textual representation of the config block.
juraj-google-style
def LoadConfig(config_obj, config_file=None, config_fd=None, secondary_configs=None, contexts=None, reset=False, parser=ConfigFileParser):
    """Initialize a ConfigManager with the specified options.

    Args:
        config_obj: the ConfigManager to use and update; a fresh one is
            created when ``None`` or when ``reset`` is set.
        config_file: filename to read the config from.
        config_fd: a file-like object to read config data from.
        secondary_configs: a list of secondary config URLs to load.
        contexts: contexts to add to the config object.
        reset: completely wipe the previous config before loading.
        parser: the parser class to use.

    Returns:
        The resulting config object (the one passed in, unless it was
        ``None`` or ``reset`` was requested).
    """
    if config_obj is None or reset:
        config_obj = _CONFIG.MakeNewConfig()
    if config_file is not None:
        config_obj.Initialize(filename=config_file, must_exist=True, parser=parser)
    elif config_fd is not None:
        config_obj.Initialize(fd=config_fd, parser=parser)
    for secondary in secondary_configs or []:
        config_obj.LoadSecondaryConfig(secondary)
    for context in contexts or []:
        config_obj.AddContext(context)
    return config_obj
Initialize a ConfigManager with the specified options. Args: config_obj: The ConfigManager object to use and update. If None, one will be created. config_file: Filename to read the config from. config_fd: A file-like object to read config data from. secondary_configs: A list of secondary config URLs to load. contexts: Add these contexts to the config object. reset: Completely wipe previous config before doing the load. parser: Specify which parser to use. Returns: The resulting config object. The one passed in, unless None was specified.
codesearchnet
def cancel(batch_fn, cancel_fn, ops):
    """Cancel operations, batching at most 256 per request.

    Args:
        batch_fn: API-specific batch function.
        cancel_fn: API-specific cancel function.
        ops: a list of operations to cancel.

    Returns:
        A list of operations canceled and a list of error messages.
    """
    max_batch = 256
    canceled = []
    errors = []
    for start in range(0, len(ops), max_batch):
        chunk = ops[start:start + max_batch]
        chunk_canceled, chunk_errors = _cancel_batch(batch_fn, cancel_fn, chunk)
        canceled.extend(chunk_canceled)
        errors.extend(chunk_errors)
    return canceled, errors
Cancel operations. Args: batch_fn: API-specific batch function. cancel_fn: API-specific cancel function. ops: A list of operations to cancel. Returns: A list of operations canceled and a list of error messages.
juraj-google-style
def add_range_headers(self, range_header):
    """Handle a single-range HTTP Range request for a streaming file.

    Sets the headers required for a partial-content response (needed,
    e.g., for Safari to play audio files) and switches the status code
    to 206, or to 416 when the requested range starts past the end of
    the file. Unparseable or multi-range headers leave the response
    untouched apart from ``Accept-Ranges``.

    Args:
        range_header (str): browser HTTP_RANGE request header.
    """
    self['Accept-Ranges'] = 'bytes'
    size = self.ranged_file.size
    try:
        parsed = self.ranged_file.parse_range_header(range_header, size)
    except ValueError:
        parsed = None
    # Only a single byte range is supported.
    if parsed is None or len(parsed) != 1:
        return
    start, stop = parsed[0]
    if start >= size:
        # Requested range lies entirely past end of file.
        self.status_code = 416
        return
    stop = min(stop, size)
    self.ranged_file.start = start
    self.ranged_file.stop = stop
    self['Content-Range'] = 'bytes %d-%d/%d' % (start, stop - 1, size)
    self['Content-Length'] = stop - start
    self.status_code = 206
Adds several headers that are necessary for a streaming file response, in order for Safari to play audio files. Also sets the HTTP status_code to 206 (partial content). Args: range_header (str): Browser HTTP_RANGE request header.
juraj-google-style
def clause(self, *args, **kwargs):
    """Add a `lunr.Clause` to this query.

    Unless the clause names the fields to match, all fields are matched.
    If the first positional argument is already a `lunr.Clause` it is
    mutated and added; otherwise args/kwargs are forwarded to the Clause
    constructor. Wildcard flags prepend/append the wildcard character to
    the term when it is not already present.

    Returns:
        lunr.Query: the query itself (for chaining).
    """
    if args and isinstance(args[0], Clause):
        new_clause = args[0]
    else:
        new_clause = Clause(*args, **kwargs)
    if not new_clause.fields:
        new_clause.fields = self.all_fields
    wants_leading = bool(new_clause.wildcard & Query.WILDCARD_LEADING)
    if wants_leading and new_clause.term[0] != Query.WILDCARD:
        new_clause.term = Query.WILDCARD + new_clause.term
    wants_trailing = bool(new_clause.wildcard & Query.WILDCARD_TRAILING)
    if wants_trailing and new_clause.term[-1] != Query.WILDCARD:
        new_clause.term = new_clause.term + Query.WILDCARD
    self.clauses.append(new_clause)
    return self
Adds a `lunr.Clause` to this query. Unless the clause contains the fields to be matched all fields will be matched. In addition a default boost of 1 is applied to the clause. If the first argument is a `lunr.Clause` it will be mutated and added, otherwise args and kwargs will be used in the constructor. Returns: lunr.Query: The Query itself.
codesearchnet
def serialize_to_nested(self, name, datas):
    """Serialize datas to a nested structure.

    Each key creates an item; every other variable is stored as a
    subitem whose value is taken at the matching key index.

    Arguments:
        name (string): name only used inside possible exception messages.
        datas (dict): datas to serialize.

    Returns:
        dict: nested dictionary of serialized reference datas.
    """
    raw_keys = datas.get('keys', None)
    splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
    if not raw_keys:
        msg = ("Nested reference '{}' lacks of required 'keys' variable "
               "or is empty")
        raise SerializerError(msg.format(name))
    keys = self.value_splitter(name, 'keys', raw_keys, mode=splitter)

    context = OrderedDict((key, OrderedDict()) for key in keys)

    for variable, raw_values in datas.items():
        # Reserved variables are not part of the serialized payload.
        if variable in ('keys', 'structure', 'splitter'):
            continue
        values = self.value_splitter(name, 'values', raw_values, mode=splitter)
        if len(values) != len(keys):
            msg = ("Nested reference '{}' has different length for "
                   "values of '{}' and 'keys'")
            raise SerializerError(msg.format(name, variable))
        for position, value in enumerate(values):
            context[keys[position]][variable] = value

    return context
Serialize given datas to a nested structure where each key creates an
item and each other variable is stored as a subitem with corresponding
value (according to key index position).

Arguments:
    name (string): Name only used inside possible exception message.
    datas (dict): Datas to serialize.

Returns:
    dict: Nested dictionary of serialized reference datas.
juraj-google-style
def merge_default_values(resource_list, default_values):
    """Return a new list where every item is merged with default_values.

    Properties already present on an item win over the defaults.

    Args:
        resource_list: list with items to be merged.
        default_values: properties to merge into each item.

    Returns:
        list: each item merged with default_values.
    """
    return lmap(
        lambda resource: merge_resources(default_values, resource),
        resource_list)
Generate a new list where each item of original resource_list will be merged with the default_values. Args: resource_list: list with items to be merged default_values: properties to be merged with each item list. If the item already contains some property the original value will be maintained. Returns: list: list containing each item merged with default_values
juraj-google-style
def get_iterator_spec_from_dataset(strategy, dataset):
    """Returns the type spec for the iterator of `dataset` under `strategy`.

    Args:
        strategy: a `tf.distribute.Strategy` object.
        dataset: a `tf.data.Dataset` instance (when using a dataset
            function, pass ``dataset_fn.structured_outputs``).

    Returns:
        A type_spec for ``iter(dataset)``.
    """
    output_element_spec = dataset.element_spec
    if isinstance(dataset._type_spec, (DistributedDatasetSpec, DistributedDatasetsFromFunctionSpec)):
        # Distributed datasets get an iterator spec tied to the strategy's
        # input workers.
        iterator_type_spec = DistributedIteratorSpec(strategy.extended._input_workers_with_options(), output_element_spec, strategy.extended._container_strategy(), options=None, cardinality=dataset.cardinality, enable_get_next_as_optional=True)
    else:
        if strategy.extended._num_gpus_per_worker:
            # A plain (non-distributed) dataset cannot make use of the
            # per-worker GPUs; warn the user.
            logging.warning(f'{strategy.extended._num_gpus_per_worker} GPUs are allocated per worker. Please use DistributedDataset by calling strategy.experimental_distribute_dataset or strategy.distribute_datasets_from_function to make best use of GPU resources')
        iterator_type_spec = iterator_ops.IteratorSpec(output_element_spec)
    return iterator_type_spec
Returns an iterator spec from dataset function. This function constructs type spec for iterator obtained from iter(dataset). Args: strategy: a `tf.distribute.Strategy` object, used to run all-reduce to handle last partial batch. dataset: A tf.data.Dataset instance. If using a function that returns a tf.data.Dataset instance, pass dataset_fn.structured_outputs. Returns: A type_spec for iterator for dataset instance.
github-repos
def run(self, dag):
    """Run one pass of the lookahead mapper on the provided DAG.

    Args:
        dag (DAGCircuit): the directed acyclic graph to be mapped.

    Returns:
        DAGCircuit: a dag mapped to be compatible with the coupling_map
        in the property_set.

    Raises:
        TranspilerError: if the coupling map or the layout are not
            compatible with the DAG.
    """
    coupling_map = self._coupling_map
    ordered_virtual_gates = list(dag.serial_layers())
    # Fall back to a layout from the property set, then to a trivial
    # layout, when none was supplied explicitly.
    if self.initial_layout is None:
        if self.property_set["layout"]:
            self.initial_layout = self.property_set["layout"]
        else:
            self.initial_layout = Layout.generate_trivial_layout(*dag.qregs.values())
    if len(dag.qubits()) != len(self.initial_layout):
        raise TranspilerError('The layout does not match the amount of qubits in the DAG')
    if len(self._coupling_map.physical_qubits) != len(self.initial_layout):
        raise TranspilerError(
            "Mappers require to have the layout to be the same size as the coupling map")
    mapped_gates = []
    layout = self.initial_layout.copy()
    gates_remaining = ordered_virtual_gates.copy()
    # Repeatedly pick the best forward step (searching a few swaps ahead)
    # until every gate has been mapped.
    while gates_remaining:
        best_step = _search_forward_n_swaps(layout, gates_remaining, coupling_map)
        layout = best_step['layout']
        gates_mapped = best_step['gates_mapped']
        gates_remaining = best_step['gates_remaining']
        mapped_gates.extend(gates_mapped)
    # Rebuild the dag via the metadata-copying helper, then replay the
    # mapped gates onto it.
    mapped_dag = _copy_circuit_metadata(dag, coupling_map)
    for node in mapped_gates:
        mapped_dag.apply_operation_back(op=node.op, qargs=node.qargs, cargs=node.cargs)
    return mapped_dag
Run one pass of the lookahead mapper on the provided DAG. Args: dag (DAGCircuit): the directed acyclic graph to be mapped Returns: DAGCircuit: A dag mapped to be compatible with the coupling_map in the property_set. Raises: TranspilerError: if the coupling map or the layout are not compatible with the DAG
juraj-google-style
def merge_sketches(outdir, sketch_paths):
    """Merge new Mash sketches with the bundled Mash sketch file.

    Runs ``mash paste`` over the input sketches plus MASH_SKETCH_FILE.

    Args:
        outdir (str): output directory to write the merged sketch file.
        sketch_paths (list of str): Mash sketch file paths for the input
            fasta files.

    Returns:
        str: output path of the merged Mash sketch file.
    """
    merged_path = os.path.join(outdir, 'sistr.msh')
    cmd = ['mash', 'paste', merged_path] + list(sketch_paths) + [MASH_SKETCH_FILE]
    logging.info('Running Mash paste with command: %s', ' '.join(cmd))
    proc = Popen(cmd)
    proc.wait()
    assert os.path.exists(merged_path), 'Merged sketch was not created at {}'.format(merged_path)
    return merged_path
Merge new Mash sketches with current Mash sketches Args: outdir (str): output directory to write merged Mash sketch file sketch_paths (list of str): Mash sketch file paths for input fasta files Returns: str: output path for Mash sketch file with new and old sketches
juraj-google-style
def get_typed_value_descriptor(obj): if isinstance(obj, (bytes, str)): type_name = 'Text' elif isinstance(obj, bool): type_name = 'Boolean' elif isinstance(obj, int): type_name = 'Integer' elif isinstance(obj, float): type_name = 'Float' else: raise TypeError('Cannot get a type descriptor for %s.' % repr(obj)) return {'@type': 'http:
For internal use only; no backwards-compatibility guarantees. Converts a basic type into a @type/value dictionary. Args: obj: A bytes, unicode, bool, int, or float to be converted. Returns: A dictionary containing the keys ``@type`` and ``value`` with the value for the ``@type`` of appropriate type. Raises: TypeError: if the Python object has a type that is not supported.
github-repos
def _RegisterDebuggee(self, service):
    """Single attempt to register the debuggee.

    On success, stores the registered debuggee ID in ``self._debuggee_id``
    (and the project number, when the response includes one).

    Args:
        service: client to use for API calls.

    Returns:
        (registration_required, delay) tuple.
    """
    try:
        request = {'debuggee': self._GetDebuggee()}
        try:
            response = service.debuggees().register(body=request).execute()
            # The project may be absent from the response; keep the
            # previously known value in that case.
            project_number = response['debuggee'].get('project')
            self._project_number = project_number or self._project_number
            self._debuggee_id = response['debuggee']['id']
            native.LogInfo('Debuggee registered successfully, ID: %s' % (
                self._debuggee_id))
            self.register_backoff.Succeeded()
            return (False, 0)
        except BaseException:
            # Deliberately broad: any failure during registration means
            # "retry later"; never crash the debugged process.
            native.LogInfo('Failed to register debuggee: %s, %s' % (request, traceback.format_exc()))
    except BaseException:
        # Even building the request may fail; log and back off.
        native.LogWarning('Debuggee information not available: ' + traceback.format_exc())
    return (True, self.register_backoff.Failed())
Single attempt to register the debuggee. If the registration succeeds, sets self._debuggee_id to the registered debuggee ID. Args: service: client to use for API calls Returns: (registration_required, delay) tuple
juraj-google-style
def var(x, axis=None, keepdims=False):
    """Compute the variance along the specified axes.

    Args:
        x: Input tensor.
        axis: Axis or axes along which the variance is computed. The
            default computes the variance of the flattened tensor.
        keepdims: If ``True``, the reduced axes are kept in the result as
            dimensions with size one.

    Returns:
        Output tensor containing the variance.
    """
    # Defer to the symbolic op during graph construction; otherwise
    # evaluate eagerly through the backend.
    if any_symbolic_tensors((x,)):
        return Var(axis=axis, keepdims=keepdims).symbolic_call(x)
    return backend.numpy.var(x, axis=axis, keepdims=keepdims)
Compute the variance along the specified axes. Args: x: Input tensor. axis: Axis or axes along which the variance is computed. The default is to compute the variance of the flattened tensor. keepdims: If this is set to `True`, the axes which are reduced are left in the result as dimensions with size one. Returns: Output tensor containing the variance.
github-repos
def route(self, dst=None, verbose=conf.verb):
    """Returns the IPv4 route to a host.

    Args:
        dst: the destination host IPv4 (possibly with mask and/or
            wildcards); defaults to "0.0.0.0".
        verbose: verbosity level controlling the no-route warning.

    Returns:
        (iface, output_ip, gateway_ip) tuple:
            - iface: interface used to connect to the host
            - output_ip: outgoing IP that will be used
            - gateway_ip: gateway IP that will be used
    """
    dst = dst or "0.0.0.0"
    if isinstance(dst, bytes):
        try:
            dst = plain_str(dst)
        except UnicodeDecodeError:
            raise TypeError("Unknown IP address input (bytes)")
    # Route lookups are memoized per destination string.
    if dst in self.cache:
        return self.cache[dst]
    # Strip any "/mask" suffix and substitute 0 for '*' wildcards.
    _dst = dst.split("/")[0].replace("*", "0")
    # Collapse "a-b" range components, keeping only the low end.
    while True:
        idx = _dst.find("-")
        if idx < 0:
            break
        m = (_dst[idx:] + ".").find(".")
        _dst = _dst[:idx] + _dst[idx + m:]
    atol_dst = atol(_dst)
    paths = []
    for d, m, gw, i, a, me in self.routes:
        if not a:
            # Interface has no address configured; unusable.
            continue
        aa = atol(a)
        if aa == atol_dst:
            # Destination is one of our own addresses: loopback route,
            # ranked with a full mask so it wins the sort below.
            paths.append(
                (0xffffffff, 1, (scapy.consts.LOOPBACK_INTERFACE, a, "0.0.0.0"))
            )
        if (atol_dst & m) == (d & m):
            paths.append((m, me, (i, a, gw)))
    if not paths:
        if verbose:
            warning("No route found (no default route?)")
        return scapy.consts.LOOPBACK_INTERFACE, "0.0.0.0", "0.0.0.0"
    # Prefer the longest prefix (largest mask), then the lowest metric.
    paths.sort(key=lambda x: (-x[0], x[1]))
    ret = paths[0][2]
    self.cache[dst] = ret
    return ret
Returns the IPv4 routes to a host. parameters: - dst: the IPv4 of the destination host returns: (iface, output_ip, gateway_ip) - iface: the interface used to connect to the host - output_ip: the outgoing IP that will be used - gateway_ip: the gateway IP that will be used
juraj-google-style
def _time_delta_from_info(info): delta_seconds = (int(time.time()) - info.start_time) return str(datetime.timedelta(seconds=delta_seconds))
Format the elapsed time for the given TensorBoardInfo. Args: info: A TensorBoardInfo value. Returns: A human-readable string describing the time since the server described by `info` started: e.g., "2 days, 0:48:58".
codesearchnet
def union(self, *others):
    r"""Return a new multiset holding every element at its maximal multiplicity.

    >>> ms = Multiset('aab')
    >>> sorted(ms.union('bc'))
    ['a', 'a', 'b', 'c']

    The ``|`` operator has the same effect but only accepts sets, to
    avoid errors. For the in-place variant see :meth:`union_update`.

    Args:
        *others: the other sets (or iterables/mappings convertible to
            :class:`Multiset`) to union the multiset with.

    Returns:
        The multiset resulting from the union.
    """
    result = self.__copy__()
    elements = result._elements
    total = result._total
    for mapping in map(self._as_mapping, others):
        for element, multiplicity in mapping.items():
            current = elements.get(element, 0)
            if multiplicity > current:
                # Raise to the larger multiplicity, tracking the total.
                elements[element] = multiplicity
                total += multiplicity - current
    result._total = total
    return result
r"""Return a new multiset with all elements from the multiset and the others with maximal multiplicities. >>> ms = Multiset('aab') >>> sorted(ms.union('bc')) ['a', 'a', 'b', 'c'] You can also use the ``|`` operator for the same effect. However, the operator version will only accept a set as other operator, not any iterable, to avoid errors. >>> ms = Multiset('aab') >>> sorted(ms | Multiset('aaa')) ['a', 'a', 'a', 'b'] For a variant of the operation which modifies the multiset in place see :meth:`union_update`. Args: *others: The other sets to union the multiset with. Can also be any :class:`~typing.Iterable`\[~T] or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T]. Returns: The multiset resulting from the union.
codesearchnet
def restructure(modality_sizes: ModalitySizeType, inputs: torch.Tensor) -> Mapping[str, torch.Tensor]:
    """Partition a [B, N, C] tensor into per-modality tensors.

    Modalities are laid out along dimension 1 in sorted-name order.

    Args:
        modality_sizes: dict specifying the size of each modality.
        inputs: input tensor.

    Returns:
        dict mapping modality name to its associated tensor slice.
    """
    outputs = {}
    offset = 0
    for modality in sorted(modality_sizes):
        size = modality_sizes[modality]
        outputs[modality] = inputs[:, offset:offset + size]
        offset += size
    return outputs
Partitions a [B, N, C] tensor into tensors for each modality. Args: modality_sizes dict specifying the size of the modality inputs: input tensor Returns: dict mapping name of modality to its associated tensor.
github-repos
def test_error(self, e=None):
    """Mark the test as an error in this record.

    Args:
        e: optional exception object that caused the error.
    """
    self._test_end(TestResultEnums.TEST_RESULT_ERROR, e)
To mark the test as error in this record. Args: e: An exception object.
github-repos
def has_event_handler(self, handler, event_name=None):
    """Check if the specified event has the specified handler.

    Args:
        handler (callable): the callable event handler to look for.
        event_name: the event the handler is attached to; ``None``
            searches every registered event.

    Returns:
        bool: whether the handler was found.
    """
    if event_name is None:
        candidates = self._event_handlers
    elif event_name in self._event_handlers:
        candidates = [event_name]
    else:
        return False
    return any(
        registered == handler
        for name in candidates
        for registered, _, _ in self._event_handlers[name]
    )
Check if the specified event has the specified handler. Args: handler (callable): the callable event handler. event_name: The event the handler attached to. Set this to ``None`` to search all events.
juraj-google-style
def get_value(self, field, quick):
    """Ask the user for this field's value, looping until it parses.

    Args:
        field (Field): the field the user must provide a value for.
        quick (bool): quick mode — when the field has a default, return
            it immediately without prompting.

    Returns:
        The user response converted with ``field.type``.
    """
    default = field.default(self) if callable(field.default) else field.default
    if quick and default is not None:
        return default
    shell.cprint('<90>{}', field.help)
    while True:
        try:
            answer = click.prompt(field.pretty_prompt, default=default)
            return field.type(answer)
        except ValueError:
            shell.cprint('<31>Unsupported value')
Ask user the question represented by this instance. Args: field (Field): The field we're asking the user to provide the value for. quick (bool): Enable quick mode. In quick mode, the form will reduce the number of question asked by using defaults wherever possible. This can greatly reduce the number of interactions required on the user part, but will obviously limit the user choices. This should probably be enabled only by a specific user action (like passing a ``--quick`` flag etc.). Returns: The user response converted to a python type using the :py:attr:`cliform.core.Field.type` converter.
codesearchnet
def with_env_recursive(cmd, **envvars):
    """Recursively updates the environment of cmd and all its subcommands.

    Args:
        cmd: a plumbum command-like object.
        **envvars: the environment variables to update.

    Returns:
        The updated command (mutated in place).
    """
    # Imported lazily so importing this module does not require plumbum.
    from plumbum.commands.base import BoundCommand, BoundEnvCommand
    if isinstance(cmd, BoundCommand):
        cmd.cmd = with_env_recursive(cmd.cmd, **envvars)
    elif isinstance(cmd, BoundEnvCommand):
        # Only BoundEnvCommand carries its own envvars mapping to update.
        cmd.envvars.update(envvars)
        cmd.cmd = with_env_recursive(cmd.cmd, **envvars)
    return cmd
Recursively updates the environment of cmd and all its subcommands. Args: cmd - A plumbum command-like object **envvars - The environment variables to update Returns: The updated command.
juraj-google-style
def prepare_headers(headers: list[str], srcs_dir: str) -> None:
    """Copy and rearrange header files into the target source directory.

    Headers whose path matches an exclusion prefix are skipped; a handful
    of external paths are remapped into the TensorFlow tree. Afterwards
    the python config, CUDA component trees, and the xla/tsl trees are
    copied into their conventional locations.

    Args:
        headers: a list of paths to header files.
        srcs_dir: target directory where headers are copied to.
    """
    path_to_exclude = ['cuda_cccl/_virtual_includes', 'cuda_cublas/_virtual_includes', 'cuda_cudart/_virtual_includes', 'cuda_cudnn/_virtual_includes', 'cuda_cufft/_virtual_includes', 'cuda_cupti/_virtual_includes', 'cuda_curand/_virtual_includes', 'cuda_cusolver/_virtual_includes', 'cuda_cusparse/_virtual_includes', 'cuda_nccl/_virtual_includes', 'cuda_nvcc/_virtual_includes', 'cuda_nvjitlink/_virtual_includes', 'cuda_nvml/_virtual_includes', 'cuda_nvrtc/_virtual_includes', 'cuda_nvtx/_virtual_includes', 'external/pypi', 'external/jsoncpp_git/src', 'local_config_cuda/cuda/_virtual_includes', 'local_config_tensorrt', 'python_x86_64', 'python_aarch64', 'llvm-project/llvm/', 'external/cpuinfo', 'external/FXdiv', 'external/net_zstd', 'external/org_brotli/c', 'external/org_brotli/_virtual_includes', 'external/pthreadpool', 'external/riegeli/riegeli', 'external/XNNPACK/src/']
    path_to_replace = {'external/com_google_absl/': '', 'external/eigen_archive/': '', 'external/jsoncpp_git/': '', 'external/com_google_protobuf/src/': '', 'external/local_xla/': 'tensorflow/compiler', 'external/local_tsl/': 'tensorflow'}
    for file in headers:
        if file.endswith('cc.inc'):
            continue
        if any(excluded in file for excluded in path_to_exclude):
            continue
        # Remap the first matching external prefix; otherwise copy as-is.
        for path, val in path_to_replace.items():
            if path in file:
                copy_file(file, os.path.join(srcs_dir, val), path)
                break
        else:
            copy_file(file, srcs_dir)
    create_local_config_python(os.path.join(srcs_dir, 'external/local_config_python'))
    shutil.copytree(os.path.join(srcs_dir, 'external/local_config_cuda/cuda'), os.path.join(srcs_dir, 'third_party/gpus'))
    # Copy each bundled CUDA component into its third_party location
    # (previously 14 near-identical call sites).
    cuda_trees = [
        ('external/cuda_cccl', 'third_party/gpus/cuda'),
        ('external/cuda_cublas', 'third_party/gpus/cuda'),
        ('external/cuda_cudart', 'third_party/gpus/cuda'),
        ('external/cuda_cudnn', 'third_party/gpus/cudnn'),
        ('external/cuda_cufft', 'third_party/gpus/cuda'),
        ('external/cuda_cupti', 'third_party/gpus/cuda/extras/CUPTI'),
        ('external/cuda_curand', 'third_party/gpus/cuda'),
        ('external/cuda_cusolver', 'third_party/gpus/cuda'),
        ('external/cuda_cusparse', 'third_party/gpus/cuda'),
        ('external/cuda_nvcc', 'third_party/gpus/cuda'),
        ('external/cuda_nvjitlink', 'third_party/gpus/cuda'),
        ('external/cuda_nvml', 'third_party/gpus/cuda/nvml'),
        ('external/cuda_nvrtc', 'third_party/gpus/cuda'),
        ('external/cuda_nvtx', 'third_party/gpus/cuda'),
    ]
    for src, dst in cuda_trees:
        _copy_cuda_tree(srcs_dir, src, dst)
    shutil.copytree(os.path.join(srcs_dir, 'tensorflow/compiler/xla'), os.path.join(srcs_dir, 'xla'))
    shutil.copytree(os.path.join(srcs_dir, 'tensorflow/tsl'), os.path.join(srcs_dir, 'tsl'))
Copy and rearrange header files in the target directory. Filter out headers by their path and replace paths for some of them. Args: headers: a list of paths to header files. srcs_dir: target directory where headers are copied to.
github-repos
def make_config_get(conf_path):
    """Return a getter for a specific project's configuration options.

    Args:
        conf_path (path-like): path to the project's conf file (i.e. the
            foo.conf module).

    Returns:
        ``config_get`` partially applied to the project's loaded config.
    """
    root = _get_project_root_from_conf_path(conf_path)
    return partial(config_get, load_config_in_dir(root))
Return a function to get configuration options for a specific project Args: conf_path (path-like): path to project's conf file (i.e. foo.conf module)
juraj-google-style
def verify(self, obj):
    """Verify that the object conforms to this verifier's schema.

    Args:
        obj (object): a python object to verify.

    Returns:
        The verified object, unchanged.

    Raises:
        ValidationError: if obj is not a bool, or does not equal the
            configured literal value.
    """
    if not isinstance(obj, bool):
        raise ValidationError("Object is not a bool", reason='object is not a bool', object=obj)
    required = self._require_value
    if required is not None and obj != required:
        raise ValidationError("Boolean is not equal to specified literal",
                              reason='boolean value %s should be %s' % (str(obj), str(required)))
    return obj
Verify that the object conforms to this verifier's schema Args: obj (object): A python object to verify Raises: ValidationError: If there is a problem verifying the dictionary, a ValidationError is thrown with at least the reason key set indicating the reason for the lack of validation.
juraj-google-style
def neighborhood_probability(self, threshold, radius):
    """Probability that a disk neighborhood contains threshold exceedances.

    For each time step, grid points meeting the threshold are binarized,
    dilated over a disk, and averaged over the same disk using FFT
    convolutions.

    Args:
        threshold: value a grid point must equal or exceed to count.
        radius: radius (in grid points) of the disk neighborhood.

    Returns:
        numpy float32 array with the same shape as ``self.data``
        containing the neighborhood probabilities.
    """
    weights = disk(radius, dtype=np.uint8)
    thresh_data = np.zeros(self.data.shape[1:], dtype=np.uint8)
    neighbor_prob = np.zeros(self.data.shape, dtype=np.float32)
    for t in np.arange(self.data.shape[0]):
        thresh_data[(self.data[t] >= threshold)] = 1
        # Dilate: any neighborhood containing a hit becomes 1.
        maximized = fftconvolve(thresh_data, weights, mode='same')
        maximized[(maximized > 1)] = 1
        maximized[(maximized < 1)] = 0
        # Count dilated hits within the neighborhood of each point.
        neighbor_prob[t] = fftconvolve(maximized, weights, mode='same')
        # Reset the scratch buffer for the next time step.
        thresh_data[:] = 0
    # Clamp FFT round-off noise below 1 to zero, then normalize by the
    # neighborhood size to obtain probabilities.
    neighbor_prob[(neighbor_prob < 1)] = 0
    neighbor_prob /= weights.sum()
    return neighbor_prob
Calculate a probability based on the number of grid points in an area that
exceed a threshold.

Args:
    threshold: value a grid point must equal or exceed to count as a hit.
    radius: radius (in grid points) of the neighborhood disk.

Returns:
    Array of neighborhood probabilities with the same shape as the data.
codesearchnet
def remove_feature(feature, remove_payload=False, image=None, restart=False):
    """Disable (uninstall) a Windows feature via DISM.

    Args:
        feature (str): the feature to uninstall.
        remove_payload (Optional[bool]): also remove the feature's
            payload; a source must then be supplied to re-enable it.
        image (Optional[str]): root directory of an offline Windows
            image; ``None`` targets the running operating system.
        restart (Optional[bool]): allow a reboot if the operation
            requires one.

    Returns:
        dict: the results of the command.

    CLI Example:

    .. code-block:: bash

        salt '*' dism.remove_feature NetFx3
    """
    cmd = [
        'DISM',
        '/Quiet',
        '/Image:{0}'.format(image) if image else '/Online',
        '/Disable-Feature',
        '/FeatureName:{0}'.format(feature),
    ]
    if remove_payload:
        cmd.append('/Remove')
    if not restart:
        cmd.append('/NoRestart')
    return __salt__['cmd.run_all'](cmd)
Disables the feature. Args: feature (str): The feature to uninstall remove_payload (Optional[bool]): Remove the feature's payload. Must supply source when enabling in the future. image (Optional[str]): The path to the root directory of an offline Windows image. If `None` is passed, the running operating system is targeted. Default is None. restart (Optional[bool]): Reboot the machine if required by the install Returns: dict: A dictionary containing the results of the command CLI Example: .. code-block:: bash salt '*' dism.remove_feature NetFx3
codesearchnet
def log_estimator_evaluation_result(self, eval_results):
    """Log the evaluation result for an estimator.

    `eval_results` is the dictionary returned by `Estimator.evaluate()`:
    the metrics defined in model_fn plus an entry for the global step at
    which evaluation was performed.

    Args:
        eval_results: dict, the result of evaluate() from an estimator.
    """
    if not isinstance(eval_results, dict):
        # Fixed message: "directory" -> "a dictionary".
        tf.logging.warning("eval_results should be a dictionary for logging. Got %s",
                           type(eval_results))
        return
    global_step = eval_results[tf.GraphKeys.GLOBAL_STEP]
    for key in sorted(eval_results):
        if key != tf.GraphKeys.GLOBAL_STEP:
            self.log_metric(key, eval_results[key], global_step=global_step)
Log the evaluation result for an estimator.

The evaluate result is a dictionary that contains metrics defined in
model_fn. It also contains an entry for global_step which contains the value
of the global step when evaluation was performed.

Args:
  eval_results: dict, the result of evaluate() from an estimator.
juraj-google-style
def shift(self, time: int) -> 'Timeslot':
    """Return a new Timeslot on the same channel, shifted by `time`.

    Args:
        time: amount to shift the timeslot's interval by.
    """
    return Timeslot(self.interval.shift(time), self.channel)
Return a new Timeslot shifted by `time`. Args: time: time to be shifted
juraj-google-style
def _WriteData(self, target, entry): sshkey_entry = '%s:%s' % (entry.name, entry.sshkey) target.write(sshkey_entry.encode() + b'\n') return len(sshkey_entry) + 1
Write a SshekeyMapEntry to the target cache. Args: target: A file-like object. entry: A SshkeyMapEntry. Returns: Number of bytes written to the target.
github-repos
def default_get_arg_names_from_class_name(class_name):
    """Convert a CamelCase class name into likely arg names.

    A single optional leading underscore is stripped, then runs of one
    capital letter followed by lowercase letters are lowercased and
    joined with underscores.

    Args:
        class_name: a class name, e.g., "FooBar" or "_FooBar".

    Returns:
        All likely corresponding arg names, e.g., ["foo_bar"]; empty if
        the name does not look like CamelCase.
    """
    remainder = class_name[1:] if class_name.startswith('_') else class_name
    words = []
    while True:
        match = re.match(r'([A-Z][a-z]+)(.*)', remainder)
        if match is None:
            break
        words.append(match.group(1).lower())
        remainder = match.group(2)
    return ['_'.join(words)] if words else []
Converts normal class names into normal arg names. Normal class names are assumed to be CamelCase with an optional leading underscore. Normal arg names are assumed to be lower_with_underscores. Args: class_name: a class name, e.g., "FooBar" or "_FooBar" Returns: all likely corresponding arg names, e.g., ["foo_bar"]
juraj-google-style
def remove(path, follow_symlink=False):
    """Delete a file, a symlink (tree), or a directory tree.

    1. A plain file is removed with ``os.remove``.
    2. A symlink is unlinked; with ``follow_symlink`` the link target is
       first removed recursively.
    3. Anything else (a directory) is removed with ``shutil.rmtree``.

    Args:
        path (str): path to remove.
        follow_symlink (bool): also remove whatever a symlink points at.
    """
    if os.path.isfile(path):
        os.remove(path)
        return
    if os.path.islink(path):
        if follow_symlink:
            remove(os.readlink(path))
        os.unlink(path)
        return
    shutil.rmtree(path)
Implements a remove function that will delete files, folder trees and
symlink trees

1.) Remove a file
2.) Remove a symlink and follow into with a recursive rm if follow_symlink
3.) Remove directory with rmtree

Args:
    path (str): path to remove
    follow_symlink(bool): follow symlinks and removes whatever is in them
codesearchnet
def convert(self, calibration_inputs=None, num_runs=1) -> None:
    """Convert all wrapped models with TensorRT, calibrating for INT8.

    Args:
        calibration_inputs: mapping from input names to ndarrays in TF1,
            or a sequence of tensors in TF2; used as calibration data.
        num_runs: number of calibration runs.
    """
    for trt_model in self._trt_models:
        trt_model.convert(calibration_inputs, num_runs)
Converts models with TensorRT and calibrates if using INT8 precision mode. Args: calibration_inputs: Mapping from input names to ndarrays in TF1. Or a sequence of tensors in TF2. Used as calibration data. num_runs: Number of calibration runs.
github-repos
def segment(self, text):
    """Call the segmenter service to split text into sentences.

    Args:
        text (str): text to be segmented.

    Returns:
        dict, int: the decoded response (containing per-sentence offset
        data) and the HTTP response code.
    """
    response, status_code = self.post(self.segmentation_service,
                                      files={'text': text})
    if status_code != 200:
        logger.debug('Segmentation failed.')
    return self.decode(response), status_code
Call the segmenter in order to split text in sentences. Args: text (str): Text to be segmented. Returns: dict, int: A dict containing a list of dicts with the offsets of each sentence; an integer representing the response code.
juraj-google-style
def DeregisterFormatter(cls, formatter_class):
    """Deregister a formatter class, keyed by its lower-cased data type.

    Args:
        formatter_class (type): class of the formatter.

    Raises:
        KeyError: if no formatter class is set for the corresponding
            data type.
    """
    data_type = formatter_class.DATA_TYPE.lower()
    if data_type not in cls._formatter_classes:
        raise KeyError('Formatter class not set for data type: {0:s}.'.format(formatter_class.DATA_TYPE))
    del cls._formatter_classes[data_type]
Deregisters a formatter class. The formatter classes are identified based on their lower case data type. Args: formatter_class (type): class of the formatter. Raises: KeyError: if formatter class is not set for the corresponding data type.
codesearchnet
def __init__(self, sess):
    """Creates a `_WrappedSession`.

    Args:
        sess: A `tf.compat.v1.Session` or `_WrappedSession` object, the
            session being wrapped.
    """
    self._sess = sess
    # Remember whether the wrapped object is itself a wrapper, so that
    # stop handling can be forwarded down the chain.
    self._wrapped_is_stoppable = isinstance(self._sess, _WrappedSession)
Creates a `_WrappedSession`. Args: sess: A `tf.compat.v1.Session` or `_WrappedSession` object. The wrapped session.
github-repos
def _ass_refresh_attrs(self, cached_ass, file_ass):
    """Completely refresh a cached assistant from its source file.

    Loads and validates the assistant's YAML, then rewrites the cached
    entry's source, ctime, attrs, and snippet ctimes.

    Args:
        cached_ass: an assistant from the cache hierarchy (for the
            format see the Cache class docstring).
        file_ass: the respective assistant from the filesystem hierarchy
            (for the format see what refresh_role accepts).
    """
    loaded_ass = yaml_loader.YamlLoader.load_yaml_by_path(file_ass['source'], log_debug=True)
    attrs = loaded_ass
    # Validate the loaded YAML before touching the cache entry.
    yaml_checker.check(file_ass['source'], attrs)
    cached_ass['source'] = file_ass['source']
    cached_ass['ctime'] = os.path.getctime(file_ass['source'])
    cached_ass['attrs'] = {}
    cached_ass['snippets'] = {}
    # Copy the simple scalar attributes when present.
    for a in ['fullname', 'description', 'icon_path']:
        if (a in attrs):
            cached_ass['attrs'][a] = attrs.get(a)
    if ('args' in attrs):
        cached_ass['attrs']['args'] = {}
        for (argname, argparams) in attrs.get('args', {}).items():
            if (('use' in argparams) or ('snippet' in argparams)):
                # Argument is defined via a snippet reference: start from
                # the snippet's definition and overlay local params.
                snippet_name = (argparams.pop('use', None) or argparams.pop('snippet'))
                snippet = yaml_snippet_loader.YamlSnippetLoader.get_snippet_by_name(snippet_name)
                cached_ass['attrs']['args'][argname] = snippet.get_arg_by_name(argname)
                cached_ass['attrs']['args'][argname].update(argparams)
                # Record the snippet's ctime for cache invalidation.
                cached_ass['snippets'][snippet.name] = self._get_snippet_ctime(snippet.name)
            else:
                cached_ass['attrs']['args'][argname] = argparams
Completely refreshes cached assistant from file. Args: cached_ass: an assistant from cache hierarchy (for format see Cache class docstring) file_ass: the respective assistant from filesystem hierarchy (for format see what refresh_role accepts)
codesearchnet
def energies(self, samples_like, dtype=float):
    """The energies of the given samples.

    Args:
        samples_like (samples_like): A collection of raw samples, in any
            format accepted by :func:`.as_samples`.
        dtype (:class:`numpy.dtype`, optional): The data type of the
            returned energies. Defaults to float. (The former default
            ``np.float`` alias was removed in NumPy 1.24.)

    Returns:
        :obj:`numpy.ndarray`: The energy of each sample.
    """
    samples, labels = as_samples(samples_like)
    # Map each variable label to its column index in the samples array.
    labeldict = {v: i for i, v in enumerate(labels)} if labels else {}
    num_samples = samples.shape[0]
    energies = np.zeros(num_samples, dtype=dtype)
    for term, bias in self.items():
        if len(term) == 0:
            # Constant offset applies uniformly to every sample.
            energies += bias
        else:
            # Product of the involved variables' columns, scaled by bias.
            # (Fixed: the previous indexing `samples[(:, ...)]` was a
            # syntax error.)
            energies += np.prod([samples[:, labeldict[v]] for v in term], axis=0) * bias
    return energies
The energies of the given samples. Args: samples_like (samples_like): A collection of raw samples. `samples_like` is an extension of NumPy's array_like structure. See :func:`.as_samples`. dtype (:class:`numpy.dtype`, optional): The data type of the returned energies. Defaults to float. Returns: :obj:`numpy.ndarray`: The energies.
codesearchnet
def nmf_ensemble(data, k, n_runs=10, W_list=None, **nmf_params):
    """Ensemble NMF: consense the W factors of several NMF runs.

    Args:
        data: genes x cells array (should be log + cell-normalized).
        k: number of classes.
        n_runs (optional): number of random NMF initializations.
        W_list (optional): pre-computed W matrices; when omitted (or
            empty), ``n_runs`` NMF fits of `data` are performed.

    Returns:
        (W_new, H_new): consensus W and the corresponding H.
    """
    nmf = NMF(k)
    # Fixed: the previous signature used a mutable default (W_list=[]),
    # which accumulated W matrices across calls.
    if W_list is None or len(W_list) == 0:
        W_list = [nmf.fit_transform(data) for _ in range(n_runs)]
    W_stacked = np.hstack(W_list)
    nmf_w = nmf.fit_transform(W_stacked)
    H_new = data.T.dot(nmf_w).T
    nmf2 = NMF(k, init='custom')
    nmf_w = nmf2.fit_transform(data, W=nmf_w, H=H_new)
    H_new = nmf2.components_
    return nmf_w, H_new
Runs an ensemble method on the list of NMF W matrices... Args: data: genes x cells array (should be log + cell-normalized) k: number of classes n_runs (optional): number of random initializations of state estimation M_list (optional): list of M arrays from state estimation se_params (optional): optional poisson_estimate_state params Returns: W_new H_new
juraj-google-style
def delete(self, paths):
    """Delete files or directories at the provided paths.

    Paths ending in '/' are treated as directories and removed recursively;
    other paths are matched (glob expansion) and deleted in a batch.

    Args:
        paths: list of path strings identifying the objects to delete.

    Raises:
        BeamIOError: if any individual deletion failed; the error carries a
            mapping of failed path -> exception.
    """
    failures = {}
    for path in paths:
        if path.endswith('/'):
            # Trailing slash marks a directory: remove the whole subtree.
            self._gcsIO().delete(path, recursive=True)
            continue
        # Expand the path to the concrete objects it refers to.
        match_result = self.match([path])[0]
        statuses = self._gcsIO().delete_batch(
            [metadata.path for metadata in match_result.metadata_list])
        for target, exception in statuses:
            if exception:
                failures[target] = exception
    if failures:
        raise BeamIOError('Delete operation failed', failures)
Deletes files or directories at the provided paths. Directories will be deleted recursively. Args: paths: list of paths that give the file objects to be deleted
github-repos
def convert_idx_to_name(self, y, lens):
    """Convert label indices to label names.

    Args:
        y (list): list of rows of label indices.
        lens (list): true (unpadded) length of each row in y.

    Returns:
        list: rows of label names, each truncated to its true length.

    Examples:
        >>> # assumes that id2label = {1: 'B-LOC', 2: 'I-LOC'}
        >>> self.convert_idx_to_name([[1, 0, 0], [1, 2, 0]], [1, 2])
        [['B-LOC'], ['B-LOC', 'I-LOC']]
    """
    named_rows = []
    for row, true_len in zip(y, lens):
        # Slice away padding before mapping indices to names.
        named_rows.append([self.id2label[idx] for idx in row[:true_len]])
    return named_rows
Convert label index to name. Args: y (list): label index list. lens (list): true length of y. Returns: y: label name list. Examples: >>> # assumes that id2label = {1: 'B-LOC', 2: 'I-LOC'} >>> y = [[1, 0, 0], [1, 2, 0], [1, 1, 1]] >>> lens = [1, 2, 3] >>> self.convert_idx_to_name(y, lens) [['B-LOC'], ['B-LOC', 'I-LOC'], ['B-LOC', 'B-LOC', 'B-LOC']]
codesearchnet
def _FormatSubjectExOrProcessExToken(self, token_data): if token_data.net_type == 4: ip_address = self._FormatPackedIPv4Address(token_data.ip_address) elif token_data.net_type == 16: ip_address = self._FormatPackedIPv6Address(token_data.ip_address) else: ip_address = 'unknown' return { 'aid': token_data.audit_user_identifier, 'euid': token_data.effective_user_identifier, 'egid': token_data.effective_group_identifier, 'uid': token_data.real_user_identifier, 'gid': token_data.real_group_identifier, 'pid': token_data.process_identifier, 'session_id': token_data.session_identifier, 'terminal_port': token_data.terminal_port, 'terminal_ip': ip_address}
Formats a subject or process token as a dictionary of values. Args: token_data (bsm_token_data_subject32_ex|bsm_token_data_subject64_ex): AUT_SUBJECT32_EX, AUT_PROCESS32_EX, AUT_SUBJECT64_EX or AUT_PROCESS64_EX token data. Returns: dict[str, str]: token values.
juraj-google-style
def with_dependencies(dependencies, output_tensor, name=None):
    """Produces the content of `output_tensor` only after `dependencies`.

    Returns `output_tensor`, but only after all operations in `dependencies`
    have run. This does NOT guarantee `output_tensor` is evaluated after any
    `dependencies` have run -- see also `tf.tuple` and `tf.group`.

    Args:
        dependencies: Iterable of operations to run before this op finishes.
            `None` entries are permitted and skipped.
        output_tensor: A `Tensor` or `IndexedSlices` that will be returned.
        name: (Optional) A name for this operation.

    Returns:
        Same as `output_tensor` but with embedded dependencies.

    Raises:
        TypeError: if `output_tensor` is not a `Tensor` or `IndexedSlices`.
    """
    # Eager mode executes ops immediately, so ordering constraints are moot.
    if tf.executing_eagerly():
        return output_tensor
    with tf.name_scope(name or 'control_dependency') as name:
        real_deps = (dep for dep in dependencies if dep is not None)
        with tf.control_dependencies(real_deps):
            output_tensor = tf.convert_to_tensor(value=output_tensor)
            if isinstance(output_tensor, tf.Tensor):
                return tf.identity(output_tensor, name=name)
            # IndexedSlices: gate the values tensor, then rebuild the slices.
            return tf.IndexedSlices(
                tf.identity(output_tensor.values, name=name),
                output_tensor.indices,
                output_tensor.dense_shape)
Produces the content of `output_tensor` only after `dependencies`. In some cases, a user may want the output of an operation to be consumed externally only after some other dependencies have run first. This function returns `output_tensor`, but only after all operations in `dependencies` have run. Note that this means that there is no guarantee that `output_tensor` will be evaluated after any `dependencies` have run. See also `tf.tuple` and `tf.group`. Args: dependencies: Iterable of operations to run before this op finishes. output_tensor: A `Tensor` or `IndexedSlices` that will be returned. name: (Optional) A name for this operation. Returns: output_with_deps: Same as `output_tensor` but with embedded dependencies. Raises: TypeError: if `output_tensor` is not a `Tensor` or `IndexedSlices`.
codesearchnet
def parse_arguments(argv):
    """Parse command line arguments.

    Args:
        argv: includes the script's name as argv[0].

    Returns:
        argparse.Namespace with the parsed, validated arguments.

    Raises:
        ValueError: if --cloud is given without --project-id, or any of the
            required paths is not a GCS (gs://) path when running on cloud.
    """
    parser = argparse.ArgumentParser(
        description='Runs Prediction inside a beam or Dataflow job.')
    parser.add_argument('--project-id',
                        help='The project to which the job will be submitted.')
    parser.add_argument('--cloud', action='store_true',
                        help='Run preprocessing on the cloud.')
    parser.add_argument('--job-name',
                        default=('mltoolbox-batch-prediction-' +
                                 datetime.datetime.now().strftime('%Y%m%d%H%M%S')),
                        help='Dataflow job name. Must be unique over all jobs.')
    parser.add_argument('--extra-package', default=[], action='append',
                        help=('If using --cloud, also installs these packages on '
                              'each dataflow worker'))
    parser.add_argument('--predict-data', required=True,
                        help='Data to run prediction on')
    parser.add_argument('--trained-model-dir', required=True,
                        help='Usually train_output_path/model.')
    parser.add_argument('--output-dir', required=True,
                        help='Location to save output.')
    parser.add_argument('--batch-size', required=False, default=1000, type=int,
                        help=('Batch size. Larger values consumes more memrory '
                              'but takes less time to finish.'))
    parser.add_argument('--shard-files', dest='shard_files', action='store_true',
                        help='Shard files')
    parser.add_argument('--no-shard-files', dest='shard_files', action='store_false',
                        help='Don\'t shard files')
    parser.set_defaults(shard_files=True)
    # NOTE(review): the original long help text for --output-format was lost
    # in extraction; this one-line summary is a reconstruction.
    parser.add_argument('--output-format', choices=['csv', 'json'], default='csv',
                        help='Format of the prediction result file(s).')
    args, _ = parser.parse_known_args(args=argv[1:])
    if args.cloud:
        # Cloud (Dataflow) runs require a project and GCS paths throughout.
        # (The 'gs://' literals below were truncated to 'gs:' in extraction
        # and have been restored.)
        if not args.project_id:
            raise ValueError('--project-id needed with --cloud')
        if not args.trained_model_dir.startswith('gs://'):
            raise ValueError('--trained-model-dir needs to be a GCS path,')
        if not args.output_dir.startswith('gs://'):
            raise ValueError('--output-dir needs to be a GCS path.')
        if not args.predict_data.startswith('gs://'):
            raise ValueError('--predict-data needs to be a GCS path.')
    return args
Parse command line arguments. Args: argv: includes the script's name. Returns: argparse object
juraj-google-style
def _get_dict_of_block_index(self, axis, indices, ordered=False): all_partitions_and_idx = [self._get_blocks_containing_index(axis, i) for i in indices] if ordered: partitions_dict = [] last_part = (- 1) for (part_idx, internal_idx) in all_partitions_and_idx: if (part_idx == last_part): partitions_dict[(- 1)][(- 1)].append(internal_idx) else: partitions_dict.append((part_idx, [internal_idx])) last_part = part_idx else: partitions_dict = {} for (part_idx, internal_idx) in all_partitions_and_idx: if (part_idx not in partitions_dict): partitions_dict[part_idx] = [internal_idx] else: partitions_dict[part_idx].append(internal_idx) return partitions_dict
Convert indices to a dict of block index to internal index mapping. Note: See `_get_blocks_containing_index` for primary usage. This method accepts a list of indices rather than just a single value, and uses `_get_blocks_containing_index`. Args: axis: The axis along which to get the indices (0 - columns, 1 - rows) indices: A list of global indices to convert. Returns For unordered: a dictionary of {block index: list of local indices}. For ordered: a list of tuples mapping block index: list of local indices.
codesearchnet
def _configure_common(self, prefix, fallback_level, fallback_format, handler_name, handler, custom_args=''): log_level = self.config.get_option('LOGGING', (prefix + 'log_level'), None, fallback_level) log_format_name = self.config.get_option('LOGGING', (prefix + 'log_format'), None, None) log_format = (ReportingFormats[log_format_name].value if log_format_name else fallback_format) log_format = log_format.format(custom_args=custom_args) formatter = logging.Formatter(log_format) handler.setFormatter(formatter) handler.setLevel(log_level) self.logger.addHandler(handler) if (not self.logger.isEnabledFor(logging.getLevelName(log_level))): self.logger.setLevel(log_level) self.log_info.append(((handler_name + ' @ ') + str(log_level))) self.log_handlers.append(handler)
commom configuration code Args: prefix (str): A prefix for the `log_level` and `log_format` keys to use with the config. #FIXME: Hacky, add separate sections for each logger config? fallback_level (str): Fallback/minimum log level, for if config does not have one. fallback_format (str): Fallback format for if it's not in the config. handler_name (str): Handler used in debug messages. handler (str): The handler to configure and use. custom_args (str): special ID to include in messages
codesearchnet
def set_speech_ssml(self, ssml):
    """Set the response output speech to SSML type.

    Args:
        ssml: str. Speech Synthesis Markup Language text used when the
            output type is 'SSML'. Cannot exceed 8,000 characters.
    """
    speech = self.response.outputSpeech
    speech.type = 'SSML'
    speech.ssml = ssml
Set response output speech as SSML type. Args: ssml: str. Response speech used when type is 'SSML', should be formatted with Speech Synthesis Markup Language. Cannot exceed 8,000 characters.
codesearchnet
def chosen_probabs(probab_observations, actions):
    """Picks out per-timestep probabilities of the chosen actions.

    Args:
        probab_observations: ndarray of shape `[B, T+1, A]`, where
            probab_observations[b, t, i] contains the log-probability of
            action = i at the t^th time-step in the b^th trajectory.
        actions: ndarray of shape `[B, T]`, each entry in [0, A) denoting
            which action was chosen at that trajectory/time-step.

    Returns:
        `[B, T]` ndarray with the log-probabilities of the chosen actions.
    """
    B, T = actions.shape
    assert (B, T + 1) == probab_observations.shape[:2]
    # Advanced integer indexing: broadcast the batch indices over the T axis,
    # then select the chosen action along the last axis. (Fixes the garbled
    # `np.arange(B)[(:, None)]` slice syntax from the original.)
    return probab_observations[np.arange(B)[:, None], np.arange(T), actions]
Picks out the probabilities of the actions along batch and time-steps. Args: probab_observations: ndarray of shape `[B, T+1, A]`, where probab_observations[b, t, i] contains the log-probability of action = i at the t^th time-step in the b^th trajectory. actions: ndarray of shape `[B, T]`, with each entry in [0, A) denoting which action was chosen in the b^th trajectory's t^th time-step. Returns: `[B, T]` ndarray with the log-probabilities of the chosen actions.
codesearchnet
def sendto(self, transport, addr):
    """Send this request to a given address via the given transport.

    Args:
        transport (asyncio.DatagramTransport): write transport to send the
            message on.
        addr (Tuple[str, int]): IP address and port pair to send to.
    """
    # Wire format is the serialized message terminated by CRLF.
    payload = bytes(self) + b'\r\n'
    logger.debug("%s:%s < %s", *(addr + (self,)))
    transport.sendto(payload, addr)
Send request to a given address via given transport. Args: transport (asyncio.DatagramTransport): Write transport to send the message on. addr (Tuple[str, int]): IP address and port pair to send the message to.
juraj-google-style
def zip(self, second_iterable, result_selector=(lambda x, y: (x, y))):
    """Elementwise combination of two sequences (deferred execution).

    Merges the source sequence with `second_iterable` element-by-element
    via `result_selector`; the result is as long as the shorter input.

    Args:
        second_iterable: The second sequence to combine with the source.
        result_selector: Optional binary function combining corresponding
            elements into a result element; defaults to 2-tuple pairing.

    Returns:
        A Queryable over the merged elements.

    Raises:
        ValueError: If the Queryable is closed.
        TypeError: If second_iterable is not iterable, or result_selector
            is not callable.
    """
    # Guard clauses: validate state and arguments before building anything.
    if self.closed():
        raise ValueError('Attempt to call zip() on a closed Queryable.')
    if not is_iterable(second_iterable):
        raise TypeError(
            'Cannot compute zip() with second_iterable of non-iterable {0}'.format(
                str(type(second_iterable))[7:(- 1)]))
    if not is_callable(result_selector):
        raise TypeError(
            'zip() parameter result_selector={0} is not callable'.format(
                repr(result_selector)))
    pairs = izip(self, second_iterable)
    return self._create(result_selector(*pair) for pair in pairs)
Elementwise combination of two sequences. The source sequence and the second iterable are merged element-by- element using a function to combine them into the single corresponding element of the result sequence. The length of the result sequence is equal to the length of the shorter of the two input sequences. Note: This method uses deferred execution. Args: second_iterable: The second sequence to be combined with the source sequence. result_selector: An optional binary function for combining corresponding elements of the source sequences into an element of the result sequence. The first and second positional arguments are the elements from the source sequences. The result should be the result sequence element. If omitted, the result sequence will consist of 2-tuple pairs of corresponding elements from the source sequences. Returns: A Queryable over the merged elements. Raises: ValueError: If the Queryable is closed. TypeError: If result_selector is not callable.
codesearchnet
def _ReadSources(self, artifact_definition_values, artifact_definition, name):
    """Reads the artifact definition sources.

    Args:
        artifact_definition_values (dict[str, object]): artifact definition
            values.
        artifact_definition (ArtifactDefinition): an artifact definition.
        name (str): name of the artifact definition.

    Raises:
        FormatError: if the type indicator is not set or unsupported, or if
            required attributes are missing.
    """
    sources = artifact_definition_values.get('sources')
    if not sources:
        raise errors.FormatError(
            'Invalid artifact definition: {0:s} missing sources.'.format(name))
    for source in sources:
        type_indicator = source.get('type', None)
        if not type_indicator:
            raise errors.FormatError(
                'Invalid artifact definition: {0:s} source type.'.format(name))
        attributes = source.get('attributes', None)
        # AppendSource validates the type indicator and attributes; re-raise
        # its FormatError with the artifact name added for context.
        try:
            source_type = artifact_definition.AppendSource(
                type_indicator, attributes)
        except errors.FormatError as exception:
            raise errors.FormatError(
                'Invalid artifact definition: {0:s}, with error: {1!s}'.format(
                    name, exception))
        if source_type:
            # 'returned_types' was dropped from the definition format and is
            # rejected outright.
            if source.get('returned_types', None):
                raise errors.FormatError((
                    'Invalid artifact definition: {0:s} returned_types no longer '
                    'supported.').format(name))
            source_type.conditions = source.get('conditions', [])
            self._ReadSupportedOS(source, source_type, name)
            # A source may not claim an OS the artifact itself does not list.
            if set(source_type.supported_os) - set(
                artifact_definition.supported_os):
                raise errors.FormatError((
                    'Invalid artifact definition: {0:s} missing '
                    'supported_os.').format(name))
Reads the artifact definition sources. Args: artifact_definition_values (dict[str, object]): artifact definition values. artifact_definition (ArtifactDefinition): an artifact definition. name (str): name of the artifact definition. Raises: FormatError: if the type indicator is not set or unsupported, or if required attributes are missing.
juraj-google-style
def my_sum(x, y, *args, **kwargs):
    """Returns the sum of two integers.

    Examples:
        ```
        ret = my_sum(1, 2)
        print(ret)
        ```

    Args:
        x: An integer.
        y: Another integer.
        *args: Extra positional args, accepted but unused.
        **kwargs: Extra keyword args, accepted but unused.

    Returns:
        The sum of x and y.

    Raises:
        ValueError: when either `x` and `y` is not an integer.
    """
    del args, kwargs  # intentionally ignored; kept for signature compatibility
    total = x + y
    return total
Returns the sum of two integers. This function will return the sum of two integers. Examples: ``` ret = sum(1, 2) print(ret) ``` Args: x: An integer. y: Another integer. *args: Variable positional args. **kwargs: Variable keyword args. Returns: The sum of both. Raises: ValueError: when either `x` and `y` is not an integer.
github-repos
def __init__(self, options):
    """Initialize application with command line options.

    Args:
        options (ApplicationOptions): given command line options.
    """
    # Application event channel, created under this module's name.
    self.event = Event.create(__name__)
    self.options = options
    # Default verbosity; presumably adjusted later by setup_logging/config.
    self.logging_level = logging.DEBUG
    # Logging must be configured before the logger is fetched below.
    self.setup_logging()
    self.logger = Logger.get_logger(__name__)
Initialize application with command line options. Args: options (ApplicationOptions): given command line options.
juraj-google-style
def sentencecase(string):
    """Convert a string into sentence case.

    The first letter is capitalized; separator/punctuation characters
    (hyphen, underscore, dot, whitespace) become single spaces, and each
    uppercase letter is lowercased and preceded by a space.

    Args:
        string: String to convert.

    Returns:
        string: Sentence-cased string.
    """
    joiner = ' '
    normalized = re.sub('[\\-_\\.\\s]', joiner, str(string))
    if not normalized:
        return normalized
    # Break camelCase boundaries apart, then trim and capitalize the result.
    spaced = re.sub(
        '[A-Z]',
        lambda matched: joiner + lowercase(matched.group(0)),
        normalized)
    return capitalcase(trimcase(spaced))
Convert string into sentence case. The first letter is capitalized and
punctuation/separator characters are replaced with spaces.

Args:
    string: String to convert.

Returns:
    string: Sentence-cased string.
codesearchnet
def __init__(self, dsn, echo=False, foreign_keys=True, engine_kwargs=None, application_prefix='ambry'):
    """Initializes the database wrapper.

    Args:
        dsn (str): database connect string, 'sqlite://' for example.
        echo (bool): echo parameter of create_engine.
        foreign_keys (bool): whether foreign keys are enforced.
        engine_kwargs (dict): parameters to pass to SQLAlchemy's
            create_engine.
        application_prefix (str): prefix for application naming.
    """
    self.dsn = dsn
    d = parse_url_to_dict(self.dsn)
    # NOTE(review): the replace() arguments were truncated in extraction;
    # collapsing doubled slashes is the usual normalization here -- confirm
    # against the upstream source.
    self.path = d['path'].replace('//', '/')
    self.driver = d['scheme']
    self.engine_kwargs = engine_kwargs or {}
    # Lazily-created SQLAlchemy session/engine/connection handles.
    self.Session = None
    self._session = None
    self._engine = None
    self._connection = None
    self._echo = echo
    self._foreign_keys = foreign_keys
    self._raise_on_commit = False
    # Normalize every postgres-family driver to one name; only postgres
    # installations use a dedicated schema.
    if self.driver in ['postgres', 'postgresql', 'postgresql+psycopg2', 'postgis']:
        self.driver = 'postgres'
        self._schema = POSTGRES_SCHEMA_NAME
    else:
        self._schema = None
    self.logger = logger
    self.library = None
    self._application_prefix = application_prefix
Initializes database. Args: dsn (str): database connect string, 'sqlite://' for example. echo (boolean): echo parameter of the create_engine. engine_kwargs (dict): parameters to pass to the create_engine method of the Sqlalchemy.
juraj-google-style
def move(self, delta):
    """Move the tree: shift its bounding position and every node by delta.

    Args:
        delta (tuple): (dx, dy) adjustment of the position.
    """
    dx, dy = delta[0], delta[1]
    x0, y0, x1, y1 = self.pos[0], self.pos[1], self.pos[2], self.pos[3]
    # pos is (x0, y0, x1, y1); both corners shift by the same offset.
    self.pos = (x0 + dx, y0 + dy, x1 + dx, y1 + dy)
    for age in self.nodes:
        for node in age:
            node.move(delta)
Move the tree.

Args:
    delta (tuple): The adjustment of the position.
codesearchnet
def check_rank(player, platform="steam"):
    """Gets the Rocket League stats, name and display picture for a player.

    Args:
        player (str): the UserID of the player to rank-check.
        platform (str): the platform to check, one of 'steam', 'ps', 'xbox'.

    Returns:
        success (bool): whether the rank check was successful.
        package (tuple): on success, (stats, name, platform_display, dp);
            on failure, an empty tuple.
    """
    # NOTE(review): several string literals in this function were destroyed
    # during extraction -- the tracker URLs below are truncated to "https:"
    # and the end-delimiter arguments to .index() are missing entirely.
    # The code is NOT runnable as-is and must be restored from upstream.
    webpage = requests.get(
        "https:
    ).text
    try:
        playerid_index = webpage.index("/live?ids=") + len("/live?ids=")
        # NOTE(review): first argument (the end-delimiter literal) lost in
        # extraction.
        playerid_end_index = webpage.index(, playerid_index)
        playerid = webpage[playerid_index:playerid_end_index]
        name_index = webpage.index("Stats Profile : ") + len("Stats Profile : ")
        # NOTE(review): first argument lost in extraction here as well.
        name_end_index = webpage.index(, name_index)
        name = webpage[name_index:name_end_index]
    except (ValueError, IndexError):
        # Page layout did not match: treat as player not found.
        return False, ()
    # NOTE(review): truncated POST URL below.
    livedata = json.loads(
        requests.post(
            "https:
            json={"playerIds": [playerid]}
        ).text
    )
    stats = []
    try:
        # Each statpack carries a label, display value and optional percentile.
        for statpack in livedata['players'][0]['Stats']:
            field = statpack['Value']['Label']
            value = str(statpack['Value']['DisplayValue'])
            if statpack['Value']['Percentile']:
                percentile = str(statpack['Value']['Percentile'])
            else:
                percentile = None
            stats.append((field, value, percentile))
    except (IndexError, KeyError):
        return False, ()
    # NOTE(review): truncated display-picture URL.
    dp = "https:
    platform_display = platform
    if platform == "steam":
        platform_display = "Steam"
    elif platform == "ps":
        platform_display = "PlayStation"
    elif platform == "xbox":
        platform_display = "Xbox"
    return True, (stats, name, platform_display, dp)
Gets the Rocket League stats and name and dp of a UserID Args: player (str): The UserID of the player we want to rank check platform (str): The platform to check for, can be 'steam', 'ps', or 'xbox' Returns: success (bool): Whether the rank check was successful package (tuple): If successful, the retrieved stats, in order (stats, name, dp)
juraj-google-style