Dataset columns:
  code (string, 20 to 4.93k characters)
  docstring (string, 33 to 1.27k characters)
  source (3 classes: codesearchnet, juraj-google-style, github-repos)
def clips(self, **kwargs):
    path = self._get_id_path('clips')
    response = self._GET(path, kwargs)
    self._set_attrs_to_values(response)
    return response
Get related clips and trailers for a movie specified by id from the API. Returns: A dict representation of the JSON returned from the API.
codesearchnet
def parse(self, line):
    tree = list(self.parser.raw_parse(line))[0]
    tree = tree[0]
    return tree
Returns a tree object from a sentence. Args: line: Sentence to be parsed into a tree Returns: Tree object representing the parsed sentence; None if the parse fails
codesearchnet
def splitext(path):
    (parent_path, pathname) = split(path)
    if (pathname.startswith('.') and (pathname.count('.') == 1)):
        return (path, '')
    if ('.' not in pathname):
        return (path, '')
    (pathname, ext) = pathname.rsplit('.', 1)
    path = join(parent_path, pathname)
    return (path, ('.' + ext))
Split the extension from the path. Arguments: path (str): A path to split. Returns: (str, str): A tuple containing the path and the extension. Example: >>> splitext('baz.txt') ('baz', '.txt') >>> splitext('foo/bar/baz.txt') ('foo/bar/baz', '.txt') >>> splitext('foo/bar/.foo') ('foo/bar/.foo', '')
codesearchnet
def fill_wildcards(self, field=None, value=0):
    if field in [None, 'wildcards'] or isinstance(value, Pad):
        return
    default_value = getattr(Match, field)
    if isinstance(default_value, IPAddress):
        if field == 'nw_dst':
            shift = FlowWildCards.OFPFW_NW_DST_SHIFT
            base_mask = FlowWildCards.OFPFW_NW_DST_MASK
        else:
            shift = FlowWildCards.OFPFW_NW_SRC_SHIFT
            base_mask = FlowWildCards.OFPFW_NW_SRC_MASK
        self.wildcards &= FlowWildCards.OFPFW_ALL ^ base_mask
        wildcard = (value.max_prefix - value.netmask) << shift
        self.wildcards |= wildcard
    else:
        wildcard_field = "OFPFW_{}".format(field.upper())
        wildcard = getattr(FlowWildCards, wildcard_field)
        if value == default_value and not (self.wildcards & wildcard) or \
                value != default_value and (self.wildcards & wildcard):
            self.wildcards ^= wildcard
Update wildcards attribute. This method updates the wildcards considering the attributes of the current instance. Args: field (str): Name of the updated field. value (GenericType): New value used in the field.
juraj-google-style
def size(self, name=None):
    if name is None:
        name = '%s_size' % self._name
    return gen_data_flow_ops.stage_size(name=name, shared_name=self._name,
                                        dtypes=self._dtypes, capacity=self._capacity,
                                        memory_limit=self._memory_limit)
Returns the number of elements in the staging area. Args: name: A name for the operation (optional) Returns: The created op
github-repos
def report_fhir_path_error(self, element_path: str, fhir_path_constraint: str, msg: str) -> None:
Reports a FHIRPath constraint error during validation and/or encoding. The base implementation logs to the `error` context and raises `e` by default. Subclasses should override this behavior as necessary. Args: element_path: The path to the field that the constraint is defined on. fhir_path_constraint: The FHIRPath constraint expression. msg: The error message produced.
github-repos
def result_to_dict(raw_result):
    result = {}
    for (channel_index, channel) in enumerate(raw_result):
        (channel_id, channel_name) = (channel[0], channel[1])
        channel_result = {'id': channel_id, 'name': channel_name, 'movies': []}
        for movie in channel[2]:
            channel_result['movies'].append({
                'title': movie[1],
                'start_time': datetime.fromtimestamp(movie[2]),
                'end_time': datetime.fromtimestamp((movie[2] + movie[3])),
                'inf': (True if movie[3] else False),
            })
        result[channel_id] = channel_result
    return result
Parse the raw result from the fetcher into a readable dictionary. Args: raw_result (dict) - raw data from `fetcher` Returns: dict - readable dictionary
codesearchnet
def filter_by_moys(self, moys):
    t_s = 60 / self.header.analysis_period.timestep
    st_ind = self.header.analysis_period.st_time.moy / t_s
    if self.header.analysis_period.is_reversed is False:
        _filt_indices = [int(moy / t_s - st_ind) for moy in moys]
    else:
        if self.header.analysis_period.is_leap_year is False:
            eoy_ind = 8759 * self.header.analysis_period.timestep - st_ind
        else:
            eoy_ind = 8783 * self.header.analysis_period.timestep - st_ind
        _filt_indices = []
        for moy in moys:
            ind = moy / t_s
            if ind > st_ind:
                _filt_indices.append(int(ind - st_ind))
            else:
                _filt_indices.append(int(ind + eoy_ind))
    _filt_values = [self._values[i] for i in _filt_indices]
    _filt_datetimes = [self.datetimes[i] for i in _filt_indices]
    _filt_header = self.header.duplicate()
    coll = HourlyDiscontinuousCollection(_filt_header, _filt_values, _filt_datetimes)
    coll._validated_a_period = True
    return coll
Filter the Data Collection based on a list of minutes of the year. Args: moys: A List of minutes of the year [0..8759 * 60] Return: A new Data Collection with filtered data
juraj-google-style
def CsvToTable(self, buf, header=True, separator=','):
    self.Reset()
    header_row = self.row_class()
    if header:
        line = buf.readline()
        header_str = ''
        while (not header_str):
            # Strip comments: everything after '#' is discarded (the '#'
            # literals were lost in extraction; restored per the docstring).
            header_str = line.split('#')[0].strip()
            if (not header_str):
                line = buf.readline()
        header_list = header_str.split(separator)
        header_length = len(header_list)
        for entry in header_list:
            entry = entry.strip()
            if (entry in header_row):
                raise TableError(('Duplicate header entry %r.' % entry))
            header_row[entry] = entry
        header_row.row = 0
        self._table[0] = header_row
    for line in buf:
        # Skip comment lines (restored '#' literal, see above).
        if line.startswith('#'):
            continue
        lst = line.split(separator)
        lst = [l.strip() for l in lst]
        if (header and (len(lst) != header_length)):
            continue
        if (not header):
            header_row = self.row_class()
            header_length = len(lst)
            header_row.values = dict(zip(range(header_length), range(header_length)))
            self._table[0] = header_row
            header = True
            continue
        new_row = self.NewRow()
        new_row.values = lst
        header_row.row = (self.size + 1)
        self._table.append(new_row)
    return self.size
Parses buffer into tabular format. Strips off comments (preceded by '#'). Optionally parses and indexes by first line (header). Args: buf: String file buffer containing CSV data. header: Is the first line of buffer a header. separator: String that CSV is separated by. Returns: int, the size of the table created. Raises: TableError: A parsing error occurred.
codesearchnet
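A minimal driving sketch for the parser above. The containing class name and import path are assumptions (the method clearly belongs to a text-table class); only buf, header, and separator come from the code itself:

    import io

    from texttable import TextTable  # hypothetical import path

    csv_data = io.StringIO('name,age\n# comment lines are stripped\nalice,30\nbob,25\n')
    table = TextTable()
    num_rows = table.CsvToTable(csv_data, header=True, separator=',')
    print(num_rows)  # 2 data rows parsed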
def are_equivalent_xml(a_xml, b_xml):
    return are_equivalent_pyxb(
        d1_common.xml.deserialize(a_xml),
        d1_common.xml.deserialize(b_xml),
    )
Check if two ReplicationPolicy XML docs are semantically equivalent. The ReplicationPolicy XML docs are normalized before comparison. Args: a_xml, b_xml: ReplicationPolicy XML docs to compare Returns: bool: ``True`` if the resulting policies for the two objects are semantically equivalent.
codesearchnet
def add_prefix(self, ns_uri, prefix, set_as_preferred=False):
    assert prefix
    ni = self.__lookup_uri(ns_uri)
    self.__check_prefix_conflict(ni, prefix)
    ni.prefixes.add(prefix)
    self.__prefix_map[prefix] = ni
    if set_as_preferred:
        ni.preferred_prefix = prefix
Adds prefix for the given namespace URI. The namespace must already exist in this set. If set_as_preferred is True, also set this namespace as the preferred one. ``prefix`` must be non-None; a default preference can't be set this way. See :meth:`set_preferred_prefix_for_namespace` for that. Args: ns_uri (str): The namespace URI to add the prefix to prefix (str): The prefix to add (not None) set_as_preferred (bool): Whether to set the new prefix as preferred Raises: NamespaceNotFoundError: If namespace ``ns_uri`` isn't in this set
codesearchnet
def offsets(self, group=None):
    if (not group):
        return {
            'fetch': self.offsets('fetch'),
            'commit': self.offsets('commit'),
            'task_done': self.offsets('task_done'),
            'highwater': self.offsets('highwater'),
        }
    else:
        return dict(deepcopy(getattr(self._offsets, group)))
Get internal consumer offset values Keyword Arguments: group: Either "fetch", "commit", "task_done", or "highwater". If no group specified, returns all groups. Returns: A copy of internal offsets struct
codesearchnet
def _get_dtype_and_weakness(x):
    if isinstance(x, weak_tensor.WeakTensor):
        return (x.dtype, True)
    if isinstance(x, dtypes.DType):
        return (x, False)
    tf_dtype = getattr(x, 'dtype', None)
    if isinstance(tf_dtype, dtypes.DType):
        return (tf_dtype, False)
    if isinstance(x, (np.ndarray, np.generic)) or isinstance(tf_dtype, np.dtype):
        infer_dtype = dtypes.as_dtype(tf_dtype)
        return (infer_dtype, False)
    if isinstance(x, (bytes, str)) or tf_dtype in _all_str_dtypes:
        return _str
    try:
        if x in _NP_TO_TF:
            return (_NP_TO_TF[x], False)
    except TypeError:
        pass
    if isinstance(x, bool) or x == bool:
        return _b8
    if isinstance(x, _pi):
        if x < np.iinfo(np.int32).min or x > np.iinfo(np.int32).max:
            raise OverflowError(f'Python int {x} too large to convert to np.int32')
        return _i32w
    if x == int:
        return _i32w
    if isinstance(x, _pf) or x == float:
        return _f32w
    if isinstance(x, _pc) or x == complex:
        return _c128w
    if isinstance(x, tensor_shape.TensorShape):
        return _i32
    if isinstance(x, np.dtype):
        try:
            np_dtype = dtypes.as_dtype(x)
            return (np_dtype, False)
        except TypeError as exc:
            raise NotImplementedError(
                f'Auto dtype conversion semantics does not support {x}. Try using a '
                'NumPy built-in dtype objects or cast them explicitly.') from exc
    raise NotImplementedError(
        f'Auto dtype conversion semantics does not support {type(x)} type.')
Returns a TF type and weak type information from x. Args: x: an input scalar, array or a NumPy/TF/Python dtype. Raises: OverflowError: if Python int x is too large to convert to int32. NotImplementedError: when x is an unsupported input type. Returns: TF type and weak type information inferred from x in the form of (dtype, bool).
github-repos
def napoleon_to_sphinx(docstring, **config_params):
    if ('napoleon_use_param' not in config_params):
        config_params['napoleon_use_param'] = False
    if ('napoleon_use_rtype' not in config_params):
        config_params['napoleon_use_rtype'] = False
    config = Config(**config_params)
    return str(GoogleDocstring(docstring, config))
Convert napoleon docstring to plain sphinx string. Args: docstring (str): Docstring in napoleon format. **config_params (dict): Whatever napoleon doc configuration you want. Returns: str: Sphinx string.
codesearchnet
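A short usage sketch; Config and GoogleDocstring are the sphinx.ext.napoleon classes the function above relies on:

    from sphinx.ext.napoleon import Config
    from sphinx.ext.napoleon.docstring import GoogleDocstring

    doc = """Add two numbers.

    Args:
        x (int): First operand.

    Returns:
        int: The sum.
    """
    # Prints the docstring converted to plain reST field lists
    # instead of napoleon-style sections.
    print(napoleon_to_sphinx(doc))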
def enter_cond_section(self, section_id):
    assert section_id not in self.cond_entry
    assert section_id not in self.cond_leaves
    self.cond_leaves[section_id] = []
Enters a conditional section. Conditional sections define an entry node, and one or more branches. Args: section_id: Hashable, the same node that will be used in calls to the section_id arg passed to new_cond_branch
github-repos
def run(self, text):
    for regex in self.regexes:
        text = regex.sub(self.repl, text)
    return text
Run each regex substitution on ``text``. Args: text (string): the input text. Returns: string: text after all substitutions have been sequentially applied.
codesearchnet
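The method assumes the host object carries regexes (compiled patterns) and repl (a replacement string or callable). A minimal hypothetical host to exercise it:

    import re

    class Substitutor:
        """Hypothetical container matching the attributes run() expects."""

        def __init__(self, patterns, repl):
            self.regexes = [re.compile(p) for p in patterns]
            self.repl = repl

        def run(self, text):
            for regex in self.regexes:
                text = regex.sub(self.repl, text)
            return text

    s = Substitutor([r'\d{3}-\d{4}', r'\bsecret\b'], '[redacted]')
    print(s.run('call 555-1234 about the secret plan'))
    # -> 'call [redacted] about the [redacted] plan'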
def emit_tid(self, name, pid, tid):
    event = {}
    event['name'] = 'thread_name'
    event['ph'] = 'M'
    event['pid'] = pid
    event['tid'] = tid
    event['args'] = {'name': name}
    self._metadata.append(event)
Adds a thread metadata event to the trace. Args: name: The thread name as a string. pid: Identifier of the process as an integer. tid: Identifier of the thread as an integer.
github-repos
def make_df_from_batch(batch_name, batch_col='b01', reader=None, reader_label=None):
    batch_name = batch_name
    batch_col = batch_col
    logger.debug(f'batch_name, batch_col: {batch_name}, {batch_col}')
    if (reader is None):
        reader_obj = get_db_reader(reader_label)
        reader = reader_obj()
    srnos = reader.select_batch(batch_name, batch_col)
    logger.debug(('srnos:' + str(srnos)))
    info_dict = _create_info_dict(reader, srnos)
    info_df = pd.DataFrame(info_dict)
    info_df = info_df.sort_values(['groups', 'filenames'])
    info_df = _make_unique_groups(info_df)
    info_df['labels'] = info_df['filenames'].apply(create_labels)
    info_df.set_index('filenames', inplace=True)
    return info_df
Create a pandas DataFrame with the info needed for ``cellpy`` to load the runs. Args: batch_name (str): Name of the batch. batch_col (str): The column where the batch name is in the db. reader (method): the db-loader method. reader_label (str): the label for the db-loader (if db-loader method is not given) Returns: info_df (pandas DataFrame)
codesearchnet
def from_file(cls, filename):
    with open(filename, 'r') as stream:
        data = yaml.load(stream, Loader=yaml.SafeLoader)
        notes = data.get('notes')
        v_type = data.get('type')
        track = data.get('track')
        xargs = {}
        if track:
            if (type(track) is str):
                track = [track]
            xargs['track'] = track
        vaspmeta = VASPMeta(data['title'], data['description'], data['status'],
                            notes=notes, type=v_type, **xargs)
    return vaspmeta
Create a VASPMeta object by reading a `vaspmeta.yaml` file Args: filename (Str): filename to read in. Returns: (vasppy.VASPMeta): the VASPMeta object
codesearchnet
def getsize(self, path):
    try:
        file_obj = self.filesystem.resolve(path)
        if (self.filesystem.ends_with_path_separator(path) and
                S_IFMT(file_obj.st_mode) != S_IFDIR):
            error_nr = (errno.EINVAL if self.filesystem.is_windows_fs
                        else errno.ENOTDIR)
            self.filesystem.raise_os_error(error_nr, path)
        return file_obj.st_size
    except IOError as exc:
        raise os.error(exc.errno, exc.strerror)
Return the file object size in bytes. Args: path: path to the file object. Returns: file size in bytes.
juraj-google-style
def from_config(cls, config, custom_objects=None):
    if 'initial_accumulator_value' not in config:
        config['initial_accumulator_value'] = 0.1
    if 'lr' in config:
        config['learning_rate'] = config.pop('lr')
    return cls(**config)
Creates an optimizer from its config. This method is the reverse of `get_config`, capable of instantiating the same optimizer from the config dictionary. Args: config: A Python dictionary, typically the output of get_config. custom_objects: A Python dictionary mapping names to additional Python objects used to create this optimizer, such as a function used for a hyperparameter. Returns: An optimizer instance.
github-repos
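A roundtrip sketch of how this hook is exercised. The initial_accumulator_value default and the legacy 'lr' key strongly suggest a Keras Adagrad-style optimizer, but the concrete class here is an assumption:

    import tensorflow as tf

    opt = tf.keras.optimizers.Adagrad(learning_rate=0.01)
    config = opt.get_config()
    config['lr'] = config.pop('learning_rate')  # simulate a legacy config key
    restored = tf.keras.optimizers.Adagrad.from_config(config)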
def fit_freq_std_dev(self, training_signal):
    window_length = len(self.window)
    window_weight = sum(self.window)
    num_of_windows = len(training_signal) - window_length - 1
    mean = np.zeros(int(window_length / 2) + 1)
    pow = np.zeros(int(window_length / 2) + 1)
    temp = np.zeros(int(window_length / 2) + 1)
    rfft = np.fft.rfft(training_signal[0:0 + window_length] * self.window)
    max = np.abs(rfft) / window_weight
    min = max
    for i in range(0, num_of_windows):
        rfft = np.fft.rfft(training_signal[i:i + window_length] * self.window)
        temp = np.abs(rfft) / window_weight
        max = np.maximum(temp, max)
        min = np.minimum(temp, min)
        mean = mean + temp
        pow = pow + np.power(temp, 2)
    mean = mean / num_of_windows
    pow = pow / num_of_windows
    std_dev = np.sqrt(pow - np.power(mean, 2))
    self.mask_top = mean + self.gain * std_dev
    self.mask_bottom = np.maximum(mean - self.gain * std_dev,
                                  np.zeros(int(window_length / 2) + 1))
Defines a spectral mask based on training data using the standard deviation values of each frequency component Args: training_signal: Training data
juraj-google-style
def GetBudget(self, client_customer_id, budget_id):
    self.client.SetClientCustomerId(client_customer_id)
    selector = {
        'fields': ['BudgetId', 'BudgetName', 'BudgetStatus', 'Amount',
                   'DeliveryMethod', 'BudgetReferenceCount',
                   'IsBudgetExplicitlyShared'],
        'predicates': [
            {
                'field': 'BudgetId',
                'operator': 'EQUALS',
                'values': [budget_id]
            }
        ]
    }
    budgets = self.client.GetService('BudgetService').get(selector)
    if int(budgets['totalNumEntries']) > 0:
        return budgets['entries'][0]
    else:
        return None
Return a Budget with the associated budgetId. Args: client_customer_id: str Client Customer Id to which the budget belongs. budget_id: str id of the budget we want to examine. Returns: Budget A Budget data object.
juraj-google-style
def update_connection_public_key(self, connection_id, public_key):
    if (connection_id in self._connections):
        connection_info = self._connections[connection_id]
        self._connections[connection_id] = ConnectionInfo(
            connection_info.connection_type,
            connection_info.connection,
            connection_info.uri,
            connection_info.status,
            public_key)
    else:
        LOGGER.debug('Could not update the public key %s for connection_id %s. '
                     'The connection does not exist.', public_key, connection_id)
Adds the public_key to the connection definition. Args: connection_id (str): The identifier for the connection. public_key (str): The public key used to enforce permissions on connections.
codesearchnet
def _total_variation_np(self, x_np):
    dim = len(x_np.shape)
    if dim == 3:
        dif1 = x_np[1:, :, :] - x_np[:-1, :, :]
        dif2 = x_np[:, 1:, :] - x_np[:, :-1, :]
        sum_axis = None
    elif dim == 4:
        dif1 = x_np[:, 1:, :, :] - x_np[:, :-1, :, :]
        dif2 = x_np[:, :, 1:, :] - x_np[:, :, :-1, :]
        sum_axis = (1, 2, 3)
    else:
        pass
    tot_var = np.sum(np.abs(dif1), axis=sum_axis) + np.sum(np.abs(dif2), axis=sum_axis)
    return tot_var
Calculate the total variation of x_np using numpy. This implements the same function as TensorFlow but using numpy instead. Args: x_np: Numpy array with 3 or 4 dimensions.
github-repos
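A quick numeric check of the 3-D branch (pure NumPy; the helper above is assumed to live on a test class, so its core is reproduced here inline):

    import numpy as np

    x = np.arange(8, dtype=np.float32).reshape(2, 2, 2)
    # Differences along axis 0 are all 4 (4 entries) and along axis 1
    # are all 2 (4 entries), so total variation = 16 + 8 = 24.
    dif1 = x[1:, :, :] - x[:-1, :, :]
    dif2 = x[:, 1:, :] - x[:, :-1, :]
    print(np.sum(np.abs(dif1)) + np.sum(np.abs(dif2)))  # 24.0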
def label_TM_tmhmm_residue_numbers_and_leaflets(tmhmm_seq):
    TM_number_dict = {}
    T_index = []
    T_residue = []
    residue_count = 1
    for residue_label in tmhmm_seq:
        if residue_label == 'T':
            T_residue.append(residue_count)
        residue_count = residue_count + 1
    TM_number_dict.update({'T_residue': T_residue})
    T_residue_list = TM_number_dict['T_residue']
    count = 0
    max_count = len(T_residue_list) - 1
    TM_helix_count = 0
    TM_boundary_dict = {}
    while count <= max_count:
        if count == 0:
            TM_start = T_residue_list[count]
            count = count + 1
            continue
        elif count == max_count:
            TM_end = T_residue_list[count]
            TM_helix_count = TM_helix_count + 1
            TM_boundary_dict.update({'TM_helix_' + str(TM_helix_count): [TM_start, TM_end]})
            break
        elif T_residue_list[count] != T_residue_list[count + 1] - 1:
            TM_end = T_residue_list[count]
            TM_helix_count = TM_helix_count + 1
            TM_boundary_dict.update({'TM_helix_' + str(TM_helix_count): [TM_start, TM_end]})
            TM_start = T_residue_list[count + 1]
        count = count + 1
    leaflet_dict = {}
    for leaflet in ['O', 'I']:
        leaflet_list = []
        for TM_helix, TM_residues in TM_boundary_dict.items():
            for residue_num in TM_residues:
                tmhmm_seq_index = residue_num - 1
                previous_residue = tmhmm_seq_index - 1
                next_residue = tmhmm_seq_index + 1
                if tmhmm_seq[previous_residue] == leaflet or tmhmm_seq[next_residue] == leaflet:
                    leaflet_list.append(residue_num)
        leaflet_dict.update({'tmhmm_leaflet_' + leaflet: leaflet_list})
    return TM_boundary_dict, leaflet_dict
Determine the residue numbers of the TM-helix residues that cross the membrane and label them by leaflet. Args: tmhmm_seq: g.protein.representative_sequence.seq_record.letter_annotations['TM-tmhmm'] Returns: leaflet_dict: a dictionary with leaflet_variable : [residue list] where the variable is inside or outside TM_boundary_dict: a dictionary with TM helix number : [TM helix residue start, TM helix residue end] TODO: untested method!
juraj-google-style
def fit_circular_gaussian(samples, high=np.pi, low=0):
    cl_func = SimpleCLFunction.from_string()

    def run_cl(samples):
        data = {'samples': Array(samples, 'mot_float_type'),
                'means': Zeros(samples.shape[0], 'mot_float_type'),
                'stds': Zeros(samples.shape[0], 'mot_float_type'),
                'nmr_samples': Scalar(samples.shape[1]),
                'low': Scalar(low),
                'high': Scalar(high)}
        cl_func.evaluate(data, samples.shape[0])
        return data['means'].get_data(), data['stds'].get_data()

    if len(samples.shape) == 1:
        mean, std = run_cl(samples[None, :])
        return mean[0], std[0]
    return run_cl(samples)
Compute the circular mean for samples in a range Args: samples (ndarray): a one or two dimensional array. If one dimensional we calculate the fit using all values. If two dimensional, we fit the Gaussian for every set of samples over the first dimension. high (float): The maximum wrap point low (float): The minimum wrap point
juraj-google-style
def modutf7_encode(data: str) -> bytes:
    ret = bytearray()
    is_usascii = True
    encode_start = None
    for (i, symbol) in enumerate(data):
        charpoint = ord(symbol)
        if is_usascii:
            if (charpoint == 38):
                ret.extend(b'&-')
            elif (32 <= charpoint <= 126):
                ret.append(charpoint)
            else:
                encode_start = i
                is_usascii = False
        elif (32 <= charpoint <= 126):
            to_encode = data[encode_start:i]
            encoded = _modified_b64encode(to_encode)
            ret.append(38)
            ret.extend(encoded)
            ret.extend((45, charpoint))
            is_usascii = True
    if (not is_usascii):
        to_encode = data[encode_start:]
        encoded = _modified_b64encode(to_encode)
        ret.append(38)
        ret.extend(encoded)
        ret.append(45)
    return bytes(ret)
Encode the string using modified UTF-7. Args: data: The input string to encode.
codesearchnet
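For example, following RFC 3501 modified UTF-7: the '&' shift character is escaped as '&-', and non-ASCII runs are base64-shifted (this assumes the module's _modified_b64encode helper, not shown, implements the modified base64 of RFC 3501):

    >>> modutf7_encode('a&b')
    b'a&-b'
    >>> modutf7_encode('~peter/mail/台北')
    b'~peter/mail/&U,BTFw-'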
def get_merged_env(self, include_os=False):
    env = {}
    if include_os:
        env.update(os.environ.copy())
    for level in range(3):
        env.update(self.pipeline.data.env_list[level].copy())
    return env
Copying and merging environment variables. Args: include_os (bool): when true then include the environment variables (default: False) Returns: dict: environment variables as defined in the pipeline (optional including system environment variables).
codesearchnet
def sync_model(self, comment='', compact_central=False, release_borrowed=True,
               release_workset=True, save_local=False):
    self._add_entry(templates.FILE_SYNC_START)
    if compact_central:
        self._add_entry(templates.FILE_SYNC_COMPACT)
    if release_borrowed:
        self._add_entry(templates.FILE_SYNC_RELEASE_BORROWED)
    if release_workset:
        self._add_entry(templates.FILE_SYNC_RELEASE_USERWORKSETS)
    if save_local:
        self._add_entry(templates.FILE_SYNC_RELEASE_SAVELOCAL)
    self._add_entry(templates.FILE_SYNC_COMMENT_OK.format(sync_comment=comment))
Append a sync model entry to the journal. This instructs Revit to sync the currently open workshared model. Args: comment (str): comment to be provided for the sync step compact_central (bool): if True compacts the central file release_borrowed (bool): if True releases the borrowed elements release_workset (bool): if True releases the borrowed worksets save_local (bool): if True saves the local file as well
codesearchnet
def paint(self):
    snippet = {'line-opacity': VectorStyle.get_style_value(self.opacity),
               'line-color': VectorStyle.get_style_value(self.color),
               'line-width': VectorStyle.get_style_value(self.width)}
    if self.translate:
        snippet['line-translate'] = self.translate
    if self.dasharray:
        snippet['line-dasharray'] = VectorStyle.get_style_value(self.dasharray)
    return snippet
Renders a javascript snippet suitable for use as a mapbox-gl line paint entry Returns: A dict that can be converted to a mapbox-gl javascript paint snippet
codesearchnet
def get_properties_of_kind(kind, start=None, end=None):
    q = Property.query(ancestor=Property.key_for_kind(kind))
    if start is not None and start != '':
        q = q.filter(Property.key >= Property.key_for_property(kind, start))
    if end is not None:
        if end == '':
            return []
        q = q.filter(Property.key < Property.key_for_property(kind, end))
    return [Property.key_to_property(k) for k in q.iter(keys_only=True)]
Return all properties of kind in the specified range. NOTE: This function does not return unindexed properties. Args: kind: name of kind whose properties you want. start: only return properties >= start if start is not None. end: only return properties < end if end is not None. Returns: A list of property names of kind between the (optional) start and end values.
juraj-google-style
def merge(self, obj):
    if (obj.id in self.cache):
        self.cache[obj.id].merge(obj)
    else:
        self.cache[obj.id] = obj
    return self.cache[obj.id]
Add a given object to the cache, or update an existing entry to include more fields. Args: obj (SkypeObj): object to add to the cache
codesearchnet
def ContainsAll(self, *values):
    self._awql = self._CreateMultipleValuesCondition(values, 'CONTAINS_ALL')
    return self._query_builder
Sets the type of the WHERE clause as "contains all". Args: *values: The values to be used in the WHERE condition. Returns: The query builder that this WHERE builder links to.
juraj-google-style
def _detect(self):
    results = []
    for contract in self.contracts:
        shadows = self.detect_shadowing_definitions(contract)
        if shadows:
            for shadow in shadows:
                local_parent_name = shadow[1]
                local_variable = shadow[2]
                overshadowed = shadow[3]
                info = '{}.{}.{} (local variable @ {}) shadows:\n'.format(
                    contract.name, local_parent_name, local_variable.name,
                    local_variable.source_mapping_str)
                for overshadowed_entry in overshadowed:
                    info += '\t- {}.{} ({} @ {})\n'.format(
                        overshadowed_entry[1], overshadowed_entry[2],
                        overshadowed_entry[0], overshadowed_entry[2].source_mapping_str)
                json = self.generate_json_result(info)
                self.add_variable_to_json(local_variable, json)
                for overshadowed_entry in overshadowed:
                    if (overshadowed_entry[0] in [self.OVERSHADOWED_FUNCTION,
                                                  self.OVERSHADOWED_MODIFIER,
                                                  self.OVERSHADOWED_EVENT]):
                        self.add_function_to_json(overshadowed_entry[2], json)
                    elif (overshadowed_entry[0] == self.OVERSHADOWED_STATE_VARIABLE):
                        self.add_variable_to_json(overshadowed_entry[2], json)
                results.append(json)
    return results
Detect shadowing local variables Recursively visit the calls Returns: list: {'vuln', 'filename,'contract','func', 'shadow'}
codesearchnet
def CheckCompletedBlocks(self, filename, error):
    for obj in self.stack:
        if isinstance(obj, _ClassInfo):
            error(filename, obj.starting_linenum, 'build/class', 5,
                  'Failed to find complete declaration of class %s' % obj.name)
        elif isinstance(obj, _NamespaceInfo):
            error(filename, obj.starting_linenum, 'build/namespaces', 5,
                  'Failed to find complete declaration of namespace %s' % obj.name)
Checks that all classes and namespaces have been completely parsed. Call this when all lines in a file have been processed. Args: filename: The name of the current file. error: The function to call with any errors found.
juraj-google-style
def update_asset(self, asset, asset_id, asset_name, asset_type):
    if not self.can_update():
        self._tcex.handle_error(910, [self.type])
    if asset == 'PHONE':
        return self.tc_requests.update_victim_phone_asset(self.unique_id, asset_id, asset_name)
    if asset == 'EMAIL':
        return self.tc_requests.update_victim_email_asset(
            self.unique_id, asset_id, asset_name, asset_type
        )
    if asset == 'NETWORK':
        return self.tc_requests.update_victim_network_asset(
            self.unique_id, asset_id, asset_name, asset_type
        )
    if asset == 'SOCIAL':
        return self.tc_requests.update_victim_social_asset(
            self.unique_id, asset_id, asset_name, asset_type
        )
    if asset == 'WEB':
        return self.tc_requests.update_victim_web_asset(self.unique_id, asset_id, asset_name)
    self._tcex.handle_error(
        925, ['asset_type', 'update_asset', 'asset_type', 'asset_type', asset_type]
    )
    return None
Update an asset of a Victim Valid asset_type: + PHONE + EMAIL + NETWORK + SOCIAL + WEB Args: asset: asset_name: asset_id: asset_type: PHONE, EMAIL, NETWORK, SOCIAL, or WEB Returns:
juraj-google-style
def set_cellpydata(self, cellpydata, cycle):
    self.data = cellpydata
    self.step_table = self.data.dataset
    time_voltage = self.data.get_ocv(direction='up', cycles=cycle)
    time = time_voltage.Step_Time
    voltage = time_voltage.Voltage
    self.time = np.array(time)
    self.voltage = np.array(voltage)
Perform a fit of the OCV steps in the cycles set by set_cycles() from the data set by set_data(). r is found by calculating v0 / i_start --> err(r) = err(v0) + err(i_start). c is found from using tau / r --> err(c) = err(r) + err(tau) Args: cellpydata (CellpyData): data object from cellreader cycle (int): cycle number to get from CellpyData object Returns: None
juraj-google-style
def call(self, name, *args, **kwargs):
    payload = name, args, kwargs
    self._conn.send((self._CALL, payload))
    return self._receive
Asynchronously call a method of the external environment. Args: name: Name of the method to call. *args: Positional arguments to forward to the method. **kwargs: Keyword arguments to forward to the method. Returns: Promise object that blocks and provides the return value when called.
juraj-google-style
def record(self, ekey, entry, diff=False):
    if ekey not in self.entities:
        self.entities[ekey] = []
    if diff and len(self.entities[ekey]) > 0:
        from acorn.logging.diff import cascade, compress
        sequence = [e["c"] for e in self.entities[ekey] if e["m"] == entry["m"]]
        original = cascade(sequence)
        difference = compress(original, entry["c"])
        entry["c"] = difference
    self.entities[ekey].append(entry)
    from uuid import UUID
    uid = None
    if entry["r"] is not None:
        uid = entry["r"]
    elif isinstance(ekey, str):
        try:
            uid = str(UUID(ekey))
        except ValueError:
            pass
    if uid is not None and isinstance(uid, str):
        self.log_uuid(uid)
    if entry["a"] is None:
        return
    for larg in entry["a"]["_"]:
        if not isinstance(larg, str):
            continue
        try:
            uid = str(UUID(larg))
            self.log_uuid(uid)
        except ValueError:
            pass
    for key, karg in entry["a"].items():
        if key == "_" or not isinstance(karg, str):
            continue
        try:
            uid = str(UUID(karg))
            self.log_uuid(uid)
        except ValueError:
            pass
Records the specified entry to the key-value store under the specified entity key. Args: ekey (str): fqdn/uuid of the method/object to store the entry for. entry (dict): attributes and values gleaned from the execution. diff (bool): when True, the "c" element of `entry` will be diffed against previous entries under the same `ekey` if their method (attribute "m") matches.
juraj-google-style
def __init__(self, map_values, validate_args=False, name='categorical_to_discrete'):
    with tf.name_scope(name):
        map_values = tf.convert_to_tensor(value=map_values, name='map_values')
        assertions = _maybe_check_valid_map_values(map_values, validate_args)
        if assertions:
            with tf.control_dependencies(assertions):
                map_values = tf.identity(map_values)
        self._map_values = map_values
        super(CategoricalToDiscrete, self).__init__(
            graph_parents=[map_values],
            forward_min_event_ndims=0,
            is_constant_jacobian=True,
            validate_args=validate_args,
            name=name)
Instantiates `CategoricalToDiscrete` bijector. Args: map_values: 1D numerical tensor of discrete values to map to, sorted in strictly increasing order. validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object.
juraj-google-style
def load(self, filename, bs=512):
    with open(filename, 'rb') as f:
        f.seek((GPT_HEADER_OFFSET + 12))
        header_size = struct.unpack('<I', f.read(4))[0]
        f.seek(GPT_HEADER_OFFSET)
        header_data = f.read(header_size)
        self.header = GPT_HEADER(header_data)
        if (self.header.signature != GPT_SIGNATURE):
            raise Exception('Invalid GPT signature')
        self.__load_partition_entries(f, bs)
Loads GPT partition table. Args: filename (str): path to file or device to open for reading bs (uint): Block size of the volume, default: 512 Raises: IOError: If file does not exist or not readable
codesearchnet
def create_stack(self, fqn, template, parameters, tags,
                 force_change_set=False, stack_policy=None, **kwargs):
    logger.debug('Attempting to create stack %s:.', fqn)
    logger.debug('  parameters: %s', parameters)
    logger.debug('  tags: %s', tags)
    if template.url:
        logger.debug('  template_url: %s', template.url)
    else:
        logger.debug('  no template url, uploading template directly.')
    if force_change_set:
        logger.debug('force_change_set set to True, creating stack with changeset.')
        (_changes, change_set_id) = create_change_set(
            self.cloudformation, fqn, template, parameters, tags, 'CREATE',
            service_role=self.service_role, **kwargs)
        self.cloudformation.execute_change_set(ChangeSetName=change_set_id)
    else:
        args = generate_cloudformation_args(
            fqn, parameters, tags, template,
            service_role=self.service_role, stack_policy=stack_policy)
        try:
            self.cloudformation.create_stack(**args)
        except botocore.exceptions.ClientError as e:
            if (e.response['Error']['Message'] ==
                    'TemplateURL must reference a valid S3 object to which you have access.'):
                s3_fallback(fqn, template, parameters, tags,
                            self.cloudformation.create_stack, self.service_role)
            else:
                raise
Create a new Cloudformation stack. Args: fqn (str): The fully qualified name of the Cloudformation stack. template (:class:`stacker.providers.base.Template`): A Template object to use when creating the stack. parameters (list): A list of dictionaries that defines the parameter list to be applied to the Cloudformation stack. tags (list): A list of dictionaries that defines the tags that should be applied to the Cloudformation stack. force_change_set (bool): Whether or not to force change set use. stack_policy (:class:`stacker.providers.base.Template`): A template object representing a stack policy.
codesearchnet
def parse_conf(self, keys=[]):
    confs = self.app.config.get('WAFFLE_CONFS', {})
    if (not keys):
        keys = confs.keys()
    result = {}
    for key in keys:
        if key.startswith('WAFFLE_'):
            continue
        if (key not in confs.keys()):
            continue
        stored_conf = self.configstore.get(key)
        if (not stored_conf):
            value = confs[key].get('default', '')
            stored_conf = self.configstore.put(key, util.serialize(value))
            self.configstore.commit()
        else:
            value = util.deserialize(stored_conf.get_value())
        result[stored_conf.get_key()] = value
    return result
Parse configuration values from the database. The extension must have been previously initialized. If a key is not found in the database, it will be created with the default value specified. Arguments: keys (list[str]): list of keys to parse. If the list is empty, then all the keys known to the application will be used. Returns: dict of the parsed config values.
codesearchnet
class EarlyStoppingCallback(TrainerCallback, ExportableState):

    def __init__(self, early_stopping_patience: int = 1,
                 early_stopping_threshold: Optional[float] = 0.0):
        self.early_stopping_patience = early_stopping_patience
        self.early_stopping_threshold = early_stopping_threshold
        self.early_stopping_patience_counter = 0

    def check_metric_value(self, args, state, control, metric_value):
        operator = np.greater if args.greater_is_better else np.less
        if state.best_metric is None or (
                operator(metric_value, state.best_metric) and
                abs(metric_value - state.best_metric) > self.early_stopping_threshold):
            self.early_stopping_patience_counter = 0
        else:
            self.early_stopping_patience_counter += 1

    def on_train_begin(self, args, state, control, **kwargs):
        if not args.load_best_model_at_end:
            logger.warning('Using EarlyStoppingCallback without load_best_model_at_end=True. '
                           'Once training is finished, the best model will not be loaded automatically.')
        assert args.metric_for_best_model is not None, \
            'EarlyStoppingCallback requires metric_for_best_model to be defined'
        assert args.eval_strategy != IntervalStrategy.NO, \
            'EarlyStoppingCallback requires IntervalStrategy of steps or epoch'

    def on_evaluate(self, args, state, control, metrics, **kwargs):
        metric_to_check = args.metric_for_best_model
        if not metric_to_check.startswith('eval_'):
            metric_to_check = f'eval_{metric_to_check}'
        metric_value = metrics.get(metric_to_check)
        if metric_value is None:
            logger.warning(f'early stopping required metric_for_best_model, but did not find '
                           f'{metric_to_check} so early stopping is disabled')
            return
        self.check_metric_value(args, state, control, metric_value)
        if self.early_stopping_patience_counter >= self.early_stopping_patience:
            control.should_training_stop = True

    def state(self) -> dict:
        return {
            'args': {
                'early_stopping_patience': self.early_stopping_patience,
                'early_stopping_threshold': self.early_stopping_threshold,
            },
            'attributes': {
                'early_stopping_patience_counter': self.early_stopping_patience_counter,
            },
        }
A [`TrainerCallback`] that handles early stopping. Args: early_stopping_patience (`int`): Use with `metric_for_best_model` to stop training when the specified metric worsens for `early_stopping_patience` evaluation calls. early_stopping_threshold (`float`, *optional*): Use with TrainingArguments `metric_for_best_model` and `early_stopping_patience` to denote how much the specified metric must improve to satisfy early stopping conditions. This callback depends on [`TrainingArguments`] argument *load_best_model_at_end* functionality to set best_metric in [`TrainerState`]. Note that if the [`TrainingArguments`] argument *save_steps* differs from *eval_steps*, the early stopping will not occur until the next save step.
github-repos
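Typical wiring with a Hugging Face Trainer (argument values are illustrative; model and datasets are assumed to be defined elsewhere):

    from transformers import Trainer, TrainingArguments, EarlyStoppingCallback

    args = TrainingArguments(
        output_dir='out',
        eval_strategy='epoch',
        save_strategy='epoch',
        load_best_model_at_end=True,       # required by the callback
        metric_for_best_model='eval_loss',
        greater_is_better=False,
    )
    trainer = Trainer(
        model=model,                       # assumed
        args=args,
        train_dataset=train_ds,            # assumed
        eval_dataset=eval_ds,              # assumed
        callbacks=[EarlyStoppingCallback(early_stopping_patience=3)],
    )
    trainer.train()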
def connect(self, address, port):
    self.peeraddress = socket.gethostbyname(address)
    self.peerport = port
    self.buffer = buffer.LineBuffer()
    self.handlers = {}
    self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        self.socket.connect((self.peeraddress, self.peerport))
    except socket.error as x:
        raise DCCConnectionError("Couldn't connect to socket: %s" % x)
    self.connected = True
    self.reactor._on_connect(self.socket)
    return self
Connect/reconnect to a DCC peer. Arguments: address -- Host/IP address of the peer. port -- The port number to connect to. Returns the DCCConnection object.
juraj-google-style
def SetRowsCustomProperties(self, rows, custom_properties):
    if not hasattr(rows, "__iter__"):
        rows = [rows]
    for row in rows:
        self.__data[row] = (self.__data[row][0], custom_properties)
Sets the custom properties for given row(s). Can accept a single row or an iterable of rows. Sets the given custom properties for all specified rows. Args: rows: The row, or rows, to set the custom properties for. custom_properties: A string to string dictionary of custom properties to set for all rows.
juraj-google-style
def _tidy_names(names, nnames, extra_names=None):
    if ((len(names) < nnames) and (extra_names is not None)):
        names.extend(extra_names)
    names.extend(range((nnames - len(names))))
    del names[nnames:]
Truncate or extend names so that its length is nnames. The list is modified in place; this function returns nothing. Args: names (list): list of names. nnames (int): desired number of names. extra_names (list of str): list of names to be used to extend the list if needed. If this list isn't provided, a range is used instead.
codesearchnet
def append(self, instruction, qargs=None, cargs=None):
    qargs = (qargs or [])
    cargs = (cargs or [])
    if ((not isinstance(instruction, Instruction)) and hasattr(instruction, 'to_instruction')):
        instruction = instruction.to_instruction()
    if (not isinstance(instruction, Instruction)):
        raise QiskitError('object is not an Instruction.')
    self._check_dups(qargs)
    self._check_qargs(qargs)
    self._check_cargs(cargs)
    if ((instruction.num_qubits != len(qargs)) or (instruction.num_clbits != len(cargs))):
        raise QiskitError(
            'instruction %s with %d qubits and %d clbits cannot be appended onto '
            '%d qubits and %d clbits.' % (instruction.name, instruction.num_qubits,
                                          instruction.num_clbits, len(qargs), len(cargs)))
    instruction_context = (instruction, qargs, cargs)
    self.data.append(instruction_context)
    for (param_index, param) in enumerate(instruction.params):
        if isinstance(param, Parameter):
            current_symbols = self.parameters
            if (param in current_symbols):
                self._parameter_table[param].append((instruction, param_index))
            else:
                self._parameter_table[param] = [(instruction, param_index)]
    return instruction
Append an instruction to the end of the circuit, modifying the circuit in place. Args: instruction (Instruction or Operator): Instruction instance to append qargs (list(tuple)): qubits to attach instruction to cargs (list(tuple)): clbits to attach instruction to Returns: Instruction: a handle to the instruction that was just added Raises: QiskitError: if the gate is of a different shape than the wires it is being attached to.
codesearchnet
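In the public circuit API, the gate shorthands delegate to append; for example (gate import paths vary across Qiskit versions, qiskit.circuit.library being the modern one):

    from qiskit import QuantumCircuit
    from qiskit.circuit.library import HGate, CXGate

    qc = QuantumCircuit(2, 2)
    qc.append(HGate(), [0])      # equivalent to qc.h(0)
    qc.append(CXGate(), [0, 1])  # equivalent to qc.cx(0, 1)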
def from_petrel(cls, filename, stop=None, points=False, null=None,
                function=None, include=None, exclude=None, remap=None, ignore=None):
    result = utils.read_petrel(filename, function=function, remap=remap)
    data = cls._clean_longitudinal_data(result, null=null)
    list_of_Intervals = cls._build_list_of_Intervals(
        data, stop=stop, points=points, include=include,
        exclude=exclude, ignore=ignore)
    if list_of_Intervals:
        return cls(list_of_Intervals)
    return None
Makes a striplog from a Petrel text file. Returns: striplog.
codesearchnet
def disconnect_sync(self, conn_id):
    done = threading.Event()
    result = {}

    def disconnect_done(conn_id, adapter_id, status, reason):
        result['success'] = status
        result['failure_reason'] = reason
        done.set()

    self.disconnect_async(conn_id, disconnect_done)
    done.wait()
    return result
Synchronously disconnect from a connected device Args: conn_id (int): A unique identifier that will refer to this connection Returns: dict: A dictionary with two elements 'success': a bool with the result of the disconnection attempt 'failure_reason': a string with the reason for the failure if we failed
juraj-google-style
def distribute_equally(daily_data, divide=False):
    index = hourly_index(daily_data.index)
    hourly_data = daily_data.reindex(index)
    hourly_data = hourly_data.groupby(hourly_data.index.day).transform(
        lambda x: x.fillna(method='ffill', limit=23))
    if divide:
        hourly_data /= 24
    return hourly_data
Obtains hourly values by equally distributing the daily values. Args: daily_data: daily values divide: if True, divide resulting values by the number of hours in order to preserve the daily sum (required e.g. for precipitation). Returns: Equally distributed hourly values.
juraj-google-style
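A sketch of the intended effect, assuming the module's hourly_index helper expands a daily DatetimeIndex to hourly resolution:

    import pandas as pd

    daily = pd.Series([24.0, 48.0],
                      index=pd.date_range('2000-01-01', periods=2, freq='D'))
    hourly = distribute_equally(daily, divide=True)
    # Each day's total is spread over its 24 hours:
    print(hourly.iloc[0], hourly.iloc[25])  # 1.0 2.0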
def Convert(self, metadata, stat_entry, token=None):
    return self.BatchConvert([(metadata, stat_entry)], token=token)
Converts StatEntry to ExportedFile. Does nothing if StatEntry corresponds to a registry entry and not to a file. Args: metadata: ExportedMetadata to be used for conversion. stat_entry: StatEntry to be converted. token: Security token. Returns: List or generator with resulting RDFValues. Empty list if StatEntry corresponds to a registry entry and not to a file.
codesearchnet
def ascii_tree(node, get_children, get_description=None):
    out = io.StringIO()
    _ascii_tree(out, node, '', '', set(), get_children, get_description)
    return out.getvalue()
Draw a graph, starting at a given position. Args: node: The node from where to draw. get_children: The function to call to retrieve children. get_description: Optional. A function to call to describe a node. Returns: A string.
github-repos
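A usage sketch with a simple nested-dict tree (the actual rendering is done by the private _ascii_tree helper, which is not shown here):

    tree = {'root': {'a': {}, 'b': {'c': {}}}}

    def get_children(node):
        # A node is a (name, subtree) pair; its children are the subtree items.
        name, subtree = node
        return list(subtree.items())

    print(ascii_tree(('root', tree['root']), get_children,
                     get_description=lambda node: node[0]))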
def enable_auto_login(name, password):
    cmd = ['defaults', 'write',
           '/Library/Preferences/com.apple.loginwindow.plist',
           'autoLoginUser', name]
    __salt__['cmd.run'](cmd)
    current = get_auto_login()
    o_password = _kcpassword(password=password)
    with salt.utils.files.set_umask(0o077):
        with salt.utils.files.fopen('/etc/kcpassword', 'w' if six.PY2 else 'wb') as fd:
            fd.write(o_password)
    return current if isinstance(current, bool) else current.lower() == name.lower()
.. versionadded:: 2016.3.0 Configures the machine to auto login with the specified user Args: name (str): The user account use for auto login password (str): The password to user for auto login .. versionadded:: 2017.7.3 Returns: bool: ``True`` if successful, otherwise ``False`` CLI Example: .. code-block:: bash salt '*' user.enable_auto_login stevej
juraj-google-style
def gather_nd(self, indices, name=None):
    raise AttributeError
Gather slices from `params` into a Tensor with shape specified by `indices`. See tf.gather_nd for details. Args: indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. Index tensor. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `params`.
github-repos
def _retrieve_variables_impl(config: Text,
                             hosts: List[Tuple[int, Text]],
                             variables: Dict[Text, Dict[Text, tf_variables.Variable]],
                             table_config: tpu_embedding_v2_utils.TableConfig):
    for host_id, host in enumerate(hosts):
        with ops.device(host):
            for table in table_config:
                retrieved = table.optimizer._retrieve()(
                    table_name=table.name, num_shards=len(hosts),
                    shard_id=host_id, config=config)
                if not isinstance(retrieved, tuple):
                    retrieved = (retrieved,)
                for i, slot in enumerate(['parameters'] + table.optimizer._slot_names()):
                    sharded_var = variables[table.name][slot]
                    if host_id < len(sharded_var.variables):
                        sharded_var.variables[host_id].assign(retrieved[i])
        config = None
Retrieve embedding tables from TPU to host memory. Args: config: A serialized TPUEmbeddingConfiguration proto. hosts: A list of all the host CPU devices. variables: A dictionary of dictionaries of TPUEmbeddingVariables. First key is the table name, second key is 'parameters' or the optimizer slot name. table_config: A list of tf.tpu.experimental.embedding.TableConfig objects.
github-repos
def __init__(self, experimenter=None, data=None):
    super().__init__()
    self.experimenter = experimenter
    self.data = data
Create a QueuePropExperimenter with the optional parameters below. Args: experimenter (int): Experimenter ID which takes the same form as in struct ofp_experimenter_header. data (bytes): Experimenter defined data.
juraj-google-style
def subset(self, ns_uris):
    sub_ns = NamespaceSet()
    for ns_uri in ns_uris:
        ni = self.__lookup_uri(ns_uri)
        new_ni = copy.deepcopy(ni)
        sub_ns._NamespaceSet__add_namespaceinfo(new_ni)
    return sub_ns
Return a subset of this NamespaceSet containing only data for the given namespaces. Args: ns_uris (iterable): An iterable of namespace URIs which select the namespaces for the subset. Returns: The subset Raises: NamespaceNotFoundError: If any namespace URIs in `ns_uris` don't match any namespaces in this set.
juraj-google-style
def Head(num_classes=1000, classifier_activation=None, name=None):
    if name is None:
        name = str(backend.get_uid('head'))

    def apply(x):
        x = layers.GlobalAveragePooling2D(name=name + '_head_gap')(x)
        x = layers.LayerNormalization(epsilon=1e-06, name=name + '_head_layernorm')(x)
        x = layers.Dense(num_classes, activation=classifier_activation,
                         name=name + '_head_dense')(x)
        return x

    return apply
Implementation of classification head of ConvNeXt. Args: num_classes: number of classes for Dense layer classifier_activation: activation function for the Dense layer name: name prefix Returns: Classification head function.
github-repos
def main(argv=None):
    if argv is None:
        argv = sys.argv
    args = parse_args(argv)
    logging.basicConfig(level=50 - args.verbosity * 10)
    backup = args.backup or None
    verbose = args.verbosity > 0
    changed, errors = merge_pyi.merge_tree(
        py_path=args.py, pyi_path=args.pyi, backup=backup, verbose=verbose)
    if changed:
        print()
        print('Changed files:')
        for f in changed:
            print('  ', f)
    if errors:
        print()
        print('Errors:')
        for f, err in errors:
            print()
            print('File: ', f, err)
Merge source files and a pyi files in a project tree. Args: argv: Flags and files to process.
github-repos
def write(self, face, data, viewport=None, *, alignment=1) -> None:
    if type(data) is Buffer:
        data = data.mglo
    self.mglo.write(face, data, viewport, alignment)
Update the content of the texture. Args: face (int): The face to update. data (bytes): The pixel data. viewport (tuple): The viewport. Keyword Args: alignment (int): The byte alignment of the pixels.
juraj-google-style
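This matches the ModernGL TextureCube.write signature; a hedged sketch assuming ModernGL:

    import moderngl

    ctx = moderngl.create_standalone_context()
    cube = ctx.texture_cube((4, 4), 3)   # 4x4 RGB cube map
    face_pixels = bytes(4 * 4 * 3)       # one face worth of zeroed pixels
    cube.write(0, face_pixels)           # update face +X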
def get_items(self, page=1, order_by=None, filters=None):
    start = ((page - 1) * self.per_page)
    query = self.get_query()
    if (order_by is not None):
        query = query.order_by(self._get_field(order_by))
    if (filters is not None):
        query = self._filter(query, filters)
    return (query.offset(start).limit(self.per_page), self.count(query))
Fetch database for items matching. Args: page (int): which page will be sliced slice size is ``self.per_page``. order_by (str): a field name to order query by. filters (dict): a ``filter name``: ``value`` dict. Returns: tuple with: items, sliced by page*self.per_page total items without slice
codesearchnet
def find_contacts(self, geoms_1, geoms_2):
    for contact in self.sim.data.contact[0:self.sim.data.ncon]:
        c1_in_g1 = self.sim.model.geom_id2name(contact.geom1) in geoms_1
        c2_in_g2 = self.sim.model.geom_id2name(contact.geom2) in geoms_2
        c2_in_g1 = self.sim.model.geom_id2name(contact.geom2) in geoms_1
        c1_in_g2 = self.sim.model.geom_id2name(contact.geom1) in geoms_2
        if (c1_in_g1 and c2_in_g2) or (c1_in_g2 and c2_in_g1):
            yield contact
Finds contact between two geom groups. Args: geoms_1: a list of geom names (string) geoms_2: another list of geom names (string) Returns: iterator of all contacts between @geoms_1 and @geoms_2
juraj-google-style
def from_json_keyfile_name(cls, filename, scopes='', token_uri=None, revoke_uri=None):
    with open(filename, 'r') as file_obj:
        client_credentials = json.load(file_obj)
    return cls._from_parsed_json_keyfile(client_credentials, scopes,
                                         token_uri=token_uri, revoke_uri=revoke_uri)
Factory constructor from JSON keyfile by name. Args: filename: string, The location of the keyfile. scopes: List or string, (Optional) Scopes to use when acquiring an access token. token_uri: string, URI for OAuth 2.0 provider token endpoint. If unset and not present in the key file, defaults to Google's endpoints. revoke_uri: string, URI for OAuth 2.0 provider revoke endpoint. If unset and not present in the key file, defaults to Google's endpoints. Returns: ServiceAccountCredentials, a credentials object created from the keyfile. Raises: ValueError, if the credential type is not :data:`SERVICE_ACCOUNT`. KeyError, if one of the expected keys is not present in the keyfile.
codesearchnet
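Typical oauth2client usage (the keyfile path and scope are illustrative):

    from oauth2client.service_account import ServiceAccountCredentials

    scopes = ['https://www.googleapis.com/auth/devstorage.read_only']
    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        'service-account.json', scopes=scopes)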
def is_valid_geometry(geometry):
    if isinstance(geometry, Polygon) or isinstance(geometry, MultiPolygon):
        return True
    else:
        return False
Confirm that the geometry type is of type Polygon or MultiPolygon. Args: geometry (BaseGeometry): BaseGeometry instance (e.g. Polygon) Returns: bool
juraj-google-style
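For example, with Shapely geometries:

    from shapely.geometry import Point, Polygon

    square = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    print(is_valid_geometry(square))       # True
    print(is_valid_geometry(Point(0, 0)))  # False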
def _any_log_contains(self, substring: str,
                      log_record_list: List['logging.LogRecord']) -> bool:
    return any(map(lambda log_record: substring in str(log_record.message),
                   log_record_list))
Returns True if any of the log contains a given substring. Args: substring: A piece of string to check whether it exists in the log message. log_record_list: A list of `absl.logging.LogRecord`s. Returns: True if and only if the substring exists in any of the log in `log_record_list`.
github-repos
def load_assistant_model(
    model: 'PreTrainedModel',
    assistant_model: Optional[Union[str, 'PreTrainedModel']],
    assistant_tokenizer: Optional[PreTrainedTokenizer],
) -> Tuple[Optional['PreTrainedModel'], Optional[PreTrainedTokenizer]]:
    if not model.can_generate() or assistant_model is None:
        return (None, None)
    if getattr(model, 'framework') != 'pt' or not isinstance(model, PreTrainedModel):
        raise ValueError(
            'Assisted generation, triggered by the `assistant_model` argument, is only '
            'available for `PreTrainedModel` model instances. For instance, TF or JAX '
            'models are not supported.')
    if isinstance(assistant_model, str):
        assistant_config = AutoConfig.from_pretrained(assistant_model)
        _, loaded_assistant_model = infer_framework_load_model(
            assistant_model, config=assistant_config)
        loaded_assistant_model = loaded_assistant_model.to(
            device=model.device, dtype=model.dtype)
        loaded_assistant_tokenizer = AutoTokenizer.from_pretrained(assistant_model)
    else:
        loaded_assistant_model = assistant_model
        loaded_assistant_tokenizer = assistant_tokenizer
    same_vocab_size = model.config.vocab_size == loaded_assistant_model.config.vocab_size
    same_special_tokens = all(
        getattr(model.config, token) == getattr(loaded_assistant_model.config, token)
        for token in ('eos_token_id', 'pad_token_id', 'bos_token_id'))
    if same_vocab_size and same_special_tokens:
        loaded_assistant_tokenizer = None
    elif loaded_assistant_tokenizer is None:
        raise ValueError('The assistant model has a different tokenizer than the main '
                         'model. You should pass the assistant tokenizer.')
    return (loaded_assistant_model, loaded_assistant_tokenizer)
Prepares the assistant model and the assistant tokenizer for a pipeline whose model that can call `generate`. Args: model ([`PreTrainedModel`]): The main model that will be used by the pipeline to make predictions. assistant_model (`str` or [`PreTrainedModel`], *optional*): The assistant model that will be used by the pipeline to make predictions. assistant_tokenizer ([`PreTrainedTokenizer`], *optional*): The assistant tokenizer that will be used by the pipeline to encode data for the model. Returns: Tuple: The loaded assistant model and (optionally) the loaded tokenizer.
github-repos
def marcxml2mods(marc_xml, uuid, url):
    marc_xml = _read_content_or_path(marc_xml)
    return type_decisioner(
        marc_xml,
        lambda: transform_to_mods_mono(marc_xml, uuid, url),
        lambda: transform_to_mods_multimono(marc_xml, uuid, url),
        lambda: transform_to_mods_periodical(marc_xml, uuid, url),
    )
Convert `marc_xml` to MODS. Decide type of the record and what template to use (monograph, multi-monograph, periodical). Args: marc_xml (str): Filename or XML string. Don't use ``\\n`` in case of filename. uuid (str): UUID string giving the package ID. url (str): URL of the publication (public or not). Returns: list: Collection of transformed xml strings.
juraj-google-style
def getDocPath(fn, root=None):
    cwd = pathlib.Path(os.getcwd())
    if root:
        cwd = pathlib.Path(root)
    while True:
        dpath = cwd.joinpath('docdata')
        if dpath.is_dir():
            break
        parent = cwd.parent
        if (parent == cwd):
            raise ValueError(f'Unable to find data directory from {os.getcwd()}.')
        cwd = parent
    fpath = os.path.abspath(os.path.join(dpath.as_posix(), fn))
    if (not fpath.startswith(dpath.as_posix())):
        raise ValueError(f'Path escaping detected: {fn}')
    if (not os.path.isfile(fpath)):
        raise ValueError(f'File does not exist: {fn}')
    return fpath
Helper for getting documentation data file paths. Args: fn (str): Name of the file to retrieve the full path for. root (str): Optional root path to look for a docdata directory in. Notes: Defaults to looking for the ``docdata`` directory in the current working directory. This behavior works fine for notebooks nested in the docs directory of synapse; but the root directory that is looked for may be overridden by providing an alternative root. Returns: str: A file path. Raises: ValueError if the file does not exist or directory traversal is attempted.
codesearchnet
def send_offer_update_email(self, user_email, subject, email_body, site_code=None):
    config = get_sailthru_configuration(site_code)
    _send_offer_assignment_notification_email(config, user_email, subject,
                                              email_body, site_code, self)
Sends the offer emails after assignment, either for revoking or reminding. Args: self: Ignore. user_email (str): Recipient's email address. subject (str): Email subject. email_body (str): The body of the email. site_code (str): Identifier of the site sending the email.
juraj-google-style
def init_cache(self, batch_size, max_length, encoder_outputs):
    decoder_input_ids = jnp.ones((batch_size, max_length), dtype='i4')
    decoder_attention_mask = jnp.ones_like(decoder_input_ids)

    def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, **kwargs):
        decoder_module = module._get_decoder_module()
        return decoder_module(decoder_input_ids, decoder_attention_mask, **kwargs)

    init_variables = self.module.init(
        jax.random.PRNGKey(0),
        decoder_input_ids=decoder_input_ids,
        decoder_attention_mask=decoder_attention_mask,
        encoder_hidden_states=encoder_outputs[0],
        init_cache=True,
        method=_decoder_forward)
    return unfreeze(init_variables['cache'])
Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache. encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`): `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
github-repos
def timeit(hosts=None, stmt=None, warmup=30, repeat=None, duration=None,
           concurrency=1, output_fmt=None, fail_if=None, sample_mode='reservoir'):
    num_lines = 0
    log = Logger(output_fmt)
    with Runner(hosts, concurrency, sample_mode) as runner:
        version_info = aio.run(runner.client.get_server_version)
        for line in as_statements(lines_from_stdin(stmt)):
            runner.warmup(line, warmup)
            timed_stats = runner.run(line, iterations=repeat, duration=duration)
            r = Result(version_info=version_info, statement=line,
                       timed_stats=timed_stats, concurrency=concurrency)
            log.result(r)
            if fail_if:
                eval_fail_if(fail_if, r)
            num_lines += 1
    if (num_lines == 0):
        raise SystemExit('No SQL statements provided. Use --stmt or provide statements via stdin')
Run the given statement a number of times and return the runtime stats Args: fail-if: An expression that causes cr8 to exit with a failure if it evaluates to true. The expression can contain formatting expressions for: - runtime_stats - statement - meta - concurrency - bulk_size For example: --fail-if "{runtime_stats.mean} > 1.34"
codesearchnet
def get_yield_stress(self, n):
    comp = root(self.get_stability_criteria, -1, args=n)
    tens = root(self.get_stability_criteria, 1, args=n)
    return (comp.x, tens.x)
Gets the yield stress for a given direction Args: n (3x1 array-like): direction for which to find the yield stress
juraj-google-style
def get_osx_config(browser: str) -> dict:
    if browser.lower() == 'chrome':
        cookie_file = ('~/Library/Application Support/Google/Chrome/Default/'
                       'Cookies')
    elif browser.lower() == "chromium":
        cookie_file = '~/Library/Application Support/Chromium/Default/Cookies'
    else:
        raise ValueError("Browser must be either Chrome or Chromium.")
    config = {
        'my_pass': keyring.get_password(
            '{} Safe Storage'.format(browser), browser),
        'iterations': 1003,
        'cookie_file': cookie_file,
    }
    return config
Get settings for getting Chrome/Chromium cookies on OSX. Args: browser: Either "Chrome" or "Chromium" Returns: Config dictionary for Chrome/Chromium cookie decryption
juraj-google-style
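A short usage sketch, assuming this runs on macOS with the browser's Keychain entry available to `keyring`:

config = get_osx_config('Chrome')
# config['my_pass'] is the Keychain secret used to derive the cookie
# decryption key; note config['cookie_file'] still contains '~' and needs
# os.path.expanduser() before opening.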
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_2_0):
    if kmip_version < enums.KMIPVersion.KMIP_2_0:
        raise exceptions.VersionNotSupported('KMIP {} does not support the DefaultsInformation object.'.format(kmip_version.value))
    super(DefaultsInformation, self).read(input_buffer, kmip_version=kmip_version)
    local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
    object_defaults = []
    while self.is_tag_next(enums.Tags.OBJECT_DEFAULTS, local_buffer):
        object_default = ObjectDefaults()
        object_default.read(local_buffer, kmip_version=kmip_version)
        object_defaults.append(object_default)
    if len(object_defaults) == 0:
        raise exceptions.InvalidKmipEncoding('The DefaultsInformation encoding is missing the object defaults structure.')
    else:
        self._object_defaults = object_defaults
    self.is_oversized(local_buffer)
Read the data encoding the DefaultsInformation structure and decode it into its constituent parts. Args: input_buffer (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 2.0. Raises: InvalidKmipEncoding: Raised if the object defaults are missing from the encoding. VersionNotSupported: Raised when a KMIP version is provided that does not support the DefaultsInformation structure.
codesearchnet
def eval(self, expr, **kwargs):
    columns = self.index if self._is_transposed else self.columns
    index = self.columns if self._is_transposed else self.index
    columns_copy = pandas.DataFrame(columns=self.columns)
    columns_copy = columns_copy.eval(expr, inplace=False, **kwargs)
    expect_series = isinstance(columns_copy, pandas.Series)

    def eval_builder(df, **kwargs):
        kwargs.pop('axis', None)
        df.columns = columns
        result = df.eval(expr, inplace=False, **kwargs)
        return result

    func = self._build_mapreduce_func(eval_builder, axis=1, **kwargs)
    new_data = self._map_across_full_axis(1, func)
    if expect_series:
        new_columns = [columns_copy.name]
        new_index = index
    else:
        new_columns = columns_copy.columns
        new_index = self.index
    return self.__constructor__(new_data, new_index, new_columns)
Returns a new QueryCompiler with expr evaluated on columns. Args: expr: The string expression to evaluate. Returns: A new QueryCompiler with new columns after applying expr.
codesearchnet
def _parseDOM(istack):
    ostack = []
    end_tag_index = 0

    def neither_nonpair_or_end_or_comment(el):
        return not (el.isNonPairTag() or el.isEndTag() or el.isComment())

    index = 0
    while index < len(istack):
        el = istack[index]
        end_tag_index = _indexOfEndTag(istack[index:])
        if end_tag_index == 0 and neither_nonpair_or_end_or_comment(el):
            el.isNonPairTag(True)
        if end_tag_index == 0:
            if not el.isEndTag():
                ostack.append(el)
        else:
            el.childs = _parseDOM(istack[(index + 1):(end_tag_index + index)])
            el.endtag = istack[(end_tag_index + index)]
            el.endtag.openertag = el
            ostack.append(el)
            ostack.append(el.endtag)
            index = end_tag_index + index
        index += 1
    return ostack
Recursively go through element array and create DOM. Args: istack (list): List of :class:`.HTMLElement` objects. Returns: list: DOM tree as list.
codesearchnet
def test_gradient(self, shape, rt_value, rt_grad, default_value, default_grad, output_value, output_grad, ragged_rank=None):
    rt_value = ragged_factory_ops.constant(rt_value, dtype=dtypes.float32, ragged_rank=ragged_rank)
    rt_grad = ragged_factory_ops.constant(rt_grad, dtype=dtypes.float32, ragged_rank=ragged_rank)
    default_value = constant_op.constant(default_value, dtype=dtypes.float32)
    default_grad = constant_op.constant(default_grad, dtype=dtypes.float32)
    output_value = constant_op.constant(output_value, dtype=dtypes.float32, shape=shape)
    output_grad = constant_op.constant(output_grad, dtype=dtypes.float32, shape=shape)
    shape = tensor_shape.as_shape(shape)
    for partition_type in ['row_splits', 'value_rowids']:
        rt_val = self.rt_with_partition_type(rt_value, partition_type)
        if context.executing_eagerly():
            self._test_gradient_helper(rt_val, default_value, shape, output_grad, output_value, rt_grad, default_grad)
        else:
            for shape_info in ['known', 'unknown_dims', 'unknown_rank']:
                rt_val = self.wrap_in_placeholder(rt_val, shape_info)
                default_val = self.wrap_in_placeholder(default_value, shape_info)
                shape_val = self.wrap_in_placeholder(shape, shape_info)
                self._test_gradient_helper(rt_val, default_val, shape_val, output_grad, output_value, rt_grad, default_grad)
Tests that ragged_to_dense generates the right gradient. Args: shape: The `shape` arg for `ragged_to_dense`. rt_value: The `rt_input` arg for `ragged_to_dense`. rt_grad: The expected gradient for `rt_value`. Corresponds 1:1 with `rt_value`. default_value: The `default_value` arg for `ragged_to_dense`. default_grad: The expected gradient for `default_value`. Corresponds 1:1 with `default_value`. output_value: The expected output of `ragged_to_dense`. output_grad: The gradient for the output (used to generate the gradients `rt_grad` and `default_grad`). Corresponds 1:1 with `output_value`. ragged_rank: Ragged rank for `rt_value`.
github-repos
def remove_waiter(self, waiter_handle):
    (spec, waiter) = waiter_handle
    self._remove_waiter(spec, waiter)
Remove a message callback. This call will remove a callback previously registered using every_match. Args: waiter_handle (object): The opaque handle returned by the previous call to every_match().
codesearchnet
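A hedged pairing with the registration call mentioned in the docstring; the `every_match` signature and the `conn` object are assumptions for illustration.

# Hypothetical: register a callback, keep the opaque handle, then remove it.
handle = conn.every_match(on_message, spec)  # assumed registration API
conn.remove_waiter(handle)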
def get_object(cls, api_token, ip):
    floating_ip = cls(token=api_token, ip=ip)
    floating_ip.load()
    return floating_ip
Class method that will return a FloatingIP object by its IP. Args: api_token: str - token ip: str - floating ip address
juraj-google-style
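A minimal sketch; the API token and the floating IP address are made-up values, and a matching resource is assumed to exist:

fip = FloatingIP.get_object(api_token='do-token', ip='203.0.113.10')
print(fip.ip)  # the loaded FloatingIP instance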
def scale_stoichiometry(self, scaling): return {k: (v * scaling) for (k, v) in self.stoichiometry.items()}
Scale the Calculation stoichiometry Returns the stoichiometry, scaled by the argument scaling. Args: scaling (float): The scaling factor. Returns: (Counter(Str:Int)): The scaled stoichiometry as a Counter of label: stoichiometry pairs
codesearchnet
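A worked example, assuming a Calculation-like object whose stoichiometry is {'Ti': 1, 'O': 2}:

calc.scale_stoichiometry(2)    # -> {'Ti': 2, 'O': 4}
calc.scale_stoichiometry(0.5)  # -> {'Ti': 0.5, 'O': 1.0}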
def delete_entity(self, etype, entity_id):
    r = fapi.delete_entity(self.namespace, self.name, etype, entity_id, self.api_url)
    fapi._check_response_code(r, 202)
Delete an entity in this workspace. Args: etype (str): Entity type entity_id (str): Entity name/unique id
juraj-google-style
def test_sample_paths_2d(self, random_type, seed):
    mu = np.array([0.2, 0.7])
    a = np.array([[0.4, 0.1], [0.3, 0.2]])
    b = np.array([[0.33, -0.03], [0.21, 0.5]])

    def drift_fn(t, x):
        return mu * tf.sqrt(t) * tf.ones_like(x, dtype=t.dtype)

    def vol_fn(t, x):
        del x
        return (a * t + b) * tf.ones([2, 2], dtype=t.dtype)

    num_samples = 10000
    times = np.array([0.1, 0.21, 0.32, 0.43, 0.55])
    x0 = np.array([0.1, -1.1])
    paths = self.evaluate(euler_sampling.sample(dim=2, drift_fn=drift_fn, volatility_fn=vol_fn, times=times, num_samples=num_samples, initial_state=x0, time_step=0.01, random_type=random_type, seed=seed))
    self.assertAllClose(paths.shape, (num_samples, 5, 2), atol=0)
    means = np.mean(paths, axis=0)
    times = np.reshape(times, [-1, 1])
    expected_means = x0 + 2.0 / 3.0 * mu * np.power(times, 1.5)
    self.assertAllClose(means, expected_means, rtol=0.01, atol=0.01)
Tests path properties for 2-dimensional Ito process.

We construct the following Ito processes.

dX_1 = mu_1 sqrt(t) dt + s11 dW_1 + s12 dW_2
dX_2 = mu_2 sqrt(t) dt + s21 dW_1 + s22 dW_2

mu_1, mu_2 are constants.
s_ij = a_ij t + b_ij

For this process the expected value at time t is
(x_0)_i + 2/3 * mu_i * t^1.5.

Args:
    random_type: Random number type defined by tff.math.random.RandomType enum.
    seed: Random seed.
github-repos
def verify_loop_init_vars(init_vars, symbol_names, first_iter_vars=None, extra_message=None):
    if not symbol_names:
        return
    if first_iter_vars is None:
        first_iter_vars = (None,) * len(symbol_names)
    assert len(symbol_names) == len(init_vars)
    assert len(symbol_names) == len(first_iter_vars)
    for name, val, fi_val in zip(symbol_names, init_vars, first_iter_vars):
        if isinstance(val, variables.UndefinedReturnValue):
            if fi_val:
                raise ValueError('the return value from a TensorFlow loop may only be a {}; got {}'.format(LEGAL_LOOP_TYPES, type(fi_val)))
            else:
                raise NotImplementedError('a return statement cannot be placed inside this TensorFlow loop; this may happen if a return statement depends on a static Python condition such as a hyperparameter')
        error_msg = None
        if val is None:
            error_msg = "'{}' is not allowed to be None before the loop".format(name)
        elif isinstance(val, variables.Undefined):
            error_msg = "'{}' must be defined before the loop".format(name)
        if error_msg is not None:
            # Only extend the message when there is an error to report;
            # appending to None would raise a TypeError.
            if extra_message:
                error_msg += '\n' + extra_message
            raise ValueError(error_msg)
Ensures that all values in the state are valid to use in a TF loop. The init_vars may contain placeholder values derived from first_iter_vars. Args: init_vars: initial loop variables (as taken before entering the loop) symbol_names: corresponding names of the initial loop variables first_iter_vars: loop variables after one iteration of the loop extra_message: an extra string to append to the error message, in case of "undefined variable" errors (see variables.Undefined)
github-repos
def _build_map(outputs):
    finished_nodes = set()
    nodes_in_progress = set()
    nodes_in_decreasing_depth = []
    layer_indices = {}
    for output in nest.flatten(outputs):
        _build_map_helper(output, finished_nodes, nodes_in_progress, nodes_in_decreasing_depth, layer_indices)
    return (nodes_in_decreasing_depth, layer_indices)
This method topologically sorts nodes in order from inputs to outputs.

It uses a depth-first search to topologically sort nodes that appear in the
_keras_history connectivity metadata of `outputs`.

Args:
    outputs: the output tensors whose _keras_history metadata should be walked.
        This may be an arbitrary nested structure.

Returns:
    A tuple like (ordered_nodes, layer_to_first_traversal_index)
    ordered_nodes: list of nodes appearing in the keras history, topologically
        sorted from original inputs to the `outputs`.
        (If outputs have different sets of ancestors, the inputs to one output
        may appear after a different output).
    layer_to_first_traversal_index:
        A dict mapping layer to the traversal index in the DFS where it is
        seen. Note: if a layer is shared by several nodes, the dict will only
        store the index corresponding to the *first* time the layer is seen.
github-repos
def _load_and_verify_metadata(self, submission_type):
    metadata_filename = os.path.join(self._extracted_submission_dir, 'metadata.json')
    if not os.path.isfile(metadata_filename):
        logging.error('metadata.json not found')
        return None
    try:
        with open(metadata_filename, 'r') as f:
            metadata = json.load(f)
    except IOError as e:
        logging.error('Failed to load metadata: %s', e)
        return None
    for field_name in REQUIRED_METADATA_JSON_FIELDS:
        if field_name not in metadata:
            logging.error('Field %s not found in metadata', field_name)
            return None
    if submission_type != metadata['type']:
        logging.error('Invalid submission type in metadata, expected "%s", actual "%s"', submission_type, metadata['type'])
        return None
    entry_point = metadata['entry_point']
    if not os.path.isfile(os.path.join(self._extracted_submission_dir, entry_point)):
        logging.error('Entry point not found: %s', entry_point)
        return None
    if not entry_point.endswith('.sh'):
        logging.warning("Entry point is not an .sh script. This is not necessarily a problem, but if the submission won't run, double-check the entry point first: %s", entry_point)
    return metadata
Loads and verifies metadata.

Args:
    submission_type: type of the submission

Returns:
    dictionary with metadata or None if metadata is not found or invalid
codesearchnet
async def _async_wait_for_process( future_process: Any, out: Optional[Union[TeeCapture, IO[str]]] = sys.stdout, err: Optional[Union[TeeCapture, IO[str]]] = sys.stderr ) -> CommandOutput: process = await future_process future_output = _async_forward(process.stdout, out) future_err_output = _async_forward(process.stderr, err) output, err_output = await asyncio.gather(future_output, future_err_output) await process.wait() return CommandOutput(output, err_output, process.returncode)
Awaits the creation and completion of an asynchronous process. Args: future_process: The eventually created process. out: Where to write stuff emitted by the process' stdout. err: Where to write stuff emitted by the process' stderr. Returns: A (captured output, captured error output, return code) triplet.
juraj-google-style
def get_database_info(db_uri):
    if not db_uri:
        return (None, None)
    scheme = urlparse.urlparse(db_uri).scheme
    if scheme == 'sqlite':
        return (sqlite3, create_sqlite_connection_provider(db_uri))
    else:
        raise ValueError('Only sqlite DB URIs are supported now: ' + db_uri)
Returns TBContext fields relating to SQL database. Args: db_uri: A string URI expressing the DB file, e.g. "sqlite:~/tb.db". Returns: A tuple with the db_module and db_connection_provider TBContext fields. If db_uri was empty, then (None, None) is returned. Raises: ValueError: If db_uri scheme is not supported.
codesearchnet
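A usage sketch; that the returned provider is a zero-argument callable is an assumption based on its name, not confirmed by the snippet.

db_module, provider = get_database_info('sqlite:~/tb.db')
# db_module is the sqlite3 module; provider is assumed to yield a connection.
conn = provider()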
def __init__(self, s):
    self.txt = s
    self._family = self._extract_family(s)
    self.regex = re.compile(s)
Create a regex rule. Args: s (str): Regex pattern. Eg '.*\\.beta$'.
juraj-google-style
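An illustration; `RegexRule` is a stand-in name for whatever class owns this `__init__`.

rule = RegexRule('.*\\.beta$')
bool(rule.regex.match('feature.beta'))    # True
bool(rule.regex.match('feature.stable'))  # False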
def post_process_depth_estimation(self, outputs: 'DepthEstimatorOutput', target_sizes: Optional[Union[TensorType, List[Tuple[int, int]], None]] = None) -> List[Dict[str, TensorType]]:
    requires_backends(self, 'torch')
    predicted_depth = outputs.predicted_depth
    if target_sizes is not None and len(predicted_depth) != len(target_sizes):
        raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the predicted depth')
    results = []
    target_sizes = [None] * len(predicted_depth) if target_sizes is None else target_sizes
    for depth, target_size in zip(predicted_depth, target_sizes):
        if target_size is not None:
            depth = torch.nn.functional.interpolate(depth.unsqueeze(0).unsqueeze(1), size=target_size, mode='bicubic', align_corners=False).squeeze()
        results.append({'predicted_depth': depth})
    return results
Converts the raw output of [`DepthEstimatorOutput`] into final depth predictions and depth PIL images. Only supports PyTorch. Args: outputs ([`DepthEstimatorOutput`]): Raw outputs of the model. target_sizes (`TensorType` or `List[Tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. If left to None, predictions will not be resized. Returns: `List[Dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth predictions.
github-repos
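A hedged example; `processor`, `model`, and `inputs` are assumed names for a depth-estimation pipeline, and the target size is (height, width).

outputs = model(**inputs)
results = processor.post_process_depth_estimation(outputs, target_sizes=[(480, 640)])
depth = results[0]['predicted_depth']  # tensor resized to 480x640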
def get_list_store(data_frame):
    df_py_dtypes = get_py_dtypes(data_frame)
    list_store = gtk.ListStore(*df_py_dtypes.dtype)
    for i, row_i in data_frame.iterrows():
        list_store.append(row_i.tolist())
    return df_py_dtypes, list_store
Return a `pandas.DataFrame` containing Python type information for the columns in `data_frame` and a `gtk.ListStore` matching the contents of the data frame. Args: data_frame (pandas.DataFrame) : Data frame containing data columns. Returns: (tuple) : The first element is a data frame as returned by `get_py_dtypes` and the second element is a `gtk.ListStore` matching the contents of the data frame.
juraj-google-style
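A minimal sketch, assuming PyGTK and the `get_py_dtypes` helper are importable in the same environment:

import pandas as pd

df = pd.DataFrame({'name': ['a', 'b'], 'value': [1, 2]})
dtypes, store = get_list_store(df)
# store is a gtk.ListStore with one row per DataFrame row.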
def make_mixture_prior(latent_size, mixture_components): if mixture_components == 1: return tfd.MultivariateNormalDiag( loc=tf.zeros([latent_size]), scale_identity_multiplier=1.0) loc = tf.compat.v1.get_variable( name="loc", shape=[mixture_components, latent_size]) raw_scale_diag = tf.compat.v1.get_variable( name="raw_scale_diag", shape=[mixture_components, latent_size]) mixture_logits = tf.compat.v1.get_variable( name="mixture_logits", shape=[mixture_components]) return tfd.MixtureSameFamily( components_distribution=tfd.MultivariateNormalDiag( loc=loc, scale_diag=tf.nn.softplus(raw_scale_diag)), mixture_distribution=tfd.Categorical(logits=mixture_logits), name="prior")
Creates the mixture of Gaussians prior distribution. Args: latent_size: The dimensionality of the latent representation. mixture_components: Number of elements of the mixture. Returns: random_prior: A `tfd.Distribution` instance representing the distribution over encodings in the absence of any evidence.
juraj-google-style
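A usage sketch; for mixture_components > 1 the created variables would need initializing first, so this assumes the single-component path in a TF1-compatible TensorFlow Probability setup.

prior = make_mixture_prior(latent_size=16, mixture_components=1)
z = prior.sample(4)  # a draw of shape [4, 16]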
def obtain_token(self, config=None):
    client_application = CLIENT_APPLICATION
    if self.config and ('client_application' in self.config):
        client_application += ':' + self.config['client_application']
    headers = {'x-qx-client-application': client_application}
    if self.token_unique:
        try:
            response = requests.post(str(self.config.get('url') + '/users/loginWithToken'), data={'apiToken': self.token_unique}, verify=self.verify, headers=headers, **self.extra_args)
        except requests.RequestException as e:
            raise ApiError('error during login: %s' % str(e))
    elif config and ('email' in config) and ('password' in config):
        email = config.get('email', None)
        password = config.get('password', None)
        credentials = {'email': email, 'password': password}
        try:
            response = requests.post(str(self.config.get('url') + '/users/login'), data=credentials, verify=self.verify, headers=headers, **self.extra_args)
        except requests.RequestException as e:
            raise ApiError('error during login: %s' % str(e))
    else:
        raise CredentialsError('invalid token')
    if response.status_code == 401:
        error_message = None
        try:
            error_message = response.json()['error']['message']
        except (ValueError, KeyError):
            # Narrowed from a bare except: the body may not be JSON, or the
            # error structure may be missing.
            pass
        if error_message:
            raise CredentialsError('error during login: %s' % error_message)
        else:
            raise CredentialsError('invalid token')
    try:
        response.raise_for_status()
        self.data_credentials = response.json()
    except (requests.HTTPError, ValueError) as e:
        raise ApiError('error during login: %s' % str(e))
    if self.get_token() is None:
        raise CredentialsError('invalid token')
Obtain the token to access the QX Platform.

Raises:
    CredentialsError: when token is invalid or the user has not accepted the license.
    ApiError: when the response from the server couldn't be parsed.
codesearchnet
def expects_none(options):
    if any(options.get(key) is not None for key in ["count", "maximum", "minimum", "between"]):
        return matches_count(0, options)
    else:
        return False
Returns whether the given query options expect a possible count of zero. Args: options (Dict[str, int | Iterable[int]]): A dictionary of query options. Returns: bool: Whether a possible count of zero is expected.
juraj-google-style
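Worked examples of the zero-count logic, assuming `matches_count` compares a count against the same option keys, as its name suggests:

expects_none({'count': 0})    # True: exactly zero matches is acceptable
expects_none({'minimum': 1})  # False: at least one match is required
expects_none({})              # False: no count options were given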
def reverse_axis(self, axis_to_reverse):
    # The original chained `or` comparison was always true, so every call
    # raised ValueError; an if/elif/else chain expresses the intent.
    if axis_to_reverse.lower() == 'x':
        self.general.reverse_x_axis = True
    elif axis_to_reverse.lower() == 'y':
        self.general.reverse_y_axis = True
    else:
        raise ValueError('Axis for reversing needs to be either x or y.')
    return
Reverse an axis in all figure plots. This will reverse the tick marks on an axis for each plot in the figure. It can be overridden in SinglePlot class. Args: axis_to_reverse (str): Axis to reverse. Supports `x` and `y`. Raises: ValueError: The string representing the axis to reverse is not `x` or `y`.
juraj-google-style
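A short sketch of the fixed behavior, assuming `fig` is an instance of the figure class this method belongs to:

fig.reverse_axis('x')  # reverses the x axis in every plot
fig.reverse_axis('z')  # raises ValueError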
def GetMessages(self, files):
    result = {}
    for f in files:
        result.update(self._symbols_by_file[f])
    return result
Gets all the messages from a specified file. This will find and resolve dependencies, failing if they are not registered in the symbol database. Args: files: The file names to extract messages from. Returns: A dictionary mapping proto names to the message classes. This will include any dependent messages as well as any messages defined in the same file as a specified message. Raises: KeyError: if a file could not be found.
juraj-google-style
def delete_user(self, email):
    LOG.info("Deleting user %s", email)
    user_obj = self.user_collection.delete_one({'_id': email})
    return user_obj
Delete a user from the database

Args:
    email(str)

Returns:
    user_obj(pymongo.results.DeleteResult)
juraj-google-style
def default(self, value): if isinstance(value, messages.Enum): return str(value) if six.PY3 and isinstance(value, bytes): return value.decode('utf8') if isinstance(value, messages.Message): result = {} for field in value.all_fields(): item = value.get_assigned_value(field.name) if item not in (None, [], ()): result[field.name] = ( self.__protojson_protocol.encode_field(field, item)) for unknown_key in value.all_unrecognized_fields(): unrecognized_field, _ = value.get_unrecognized_field_info( unknown_key) result[unknown_key] = unrecognized_field return result return super(MessageJSONEncoder, self).default(value)
Return dictionary instance from a message object.

Args:
    value: Value to get dictionary for. If not encodable, will call the
        superclass's default method.
juraj-google-style
def ConfigureLogging(debug_output=False, filename=None, mode='w', quiet_mode=False):
    # Iterate over a copy: removing handlers while iterating the live list
    # would skip entries.
    for handler in list(logging.root.handlers):
        logging.root.removeHandler(handler)
    logger = logging.getLogger()
    if filename and filename.endswith('.gz'):
        handler = CompressedFileHandler(filename, mode=mode)
    elif filename:
        handler = logging.FileHandler(filename, mode=mode)
    else:
        handler = logging.StreamHandler()
    format_string = '%(asctime)s [%(levelname)s] (%(processName)-10s) PID:%(process)d <%(module)s> %(message)s'
    formatter = logging.Formatter(format_string)
    handler.setFormatter(formatter)
    if debug_output:
        level = logging.DEBUG
    elif quiet_mode:
        level = logging.WARNING
    else:
        level = logging.INFO
    logger.setLevel(level)
    handler.setLevel(level)
    logger.addHandler(handler)
Configures the logging root logger. Args: debug_output (Optional[bool]): True if the logging should include debug output. filename (Optional[str]): log filename. mode (Optional[str]): log file access mode. quiet_mode (Optional[bool]): True if the logging should not include information output. Note that debug_output takes precedence over quiet_mode.
codesearchnet
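A hedged usage sketch; the filename is made up, and CompressedFileHandler is assumed to be importable alongside ConfigureLogging.

import logging

ConfigureLogging(debug_output=True, filename='tool.log.gz')
logging.getLogger().debug('debug output now lands in the compressed log')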