code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def save(self, new_path=None):
    """Move or create the file with the stream contents at a new location.

    Args:
        new_path: Path to move to; if None, a temporary file is created.
    """
    # Record whether we own a temp file so callers can clean it up later.
    self.saved_in_temp = new_path is None
    if new_path is None:
        fd, new_path = tempfile.mkstemp()
        os.close(fd)  # only the path is needed; the file is reopened below
    if self.current_path:
        # Already materialized on disk: a rename/move is enough.
        shutil.move(self.current_path, new_path)
    else:
        # Not yet on disk: write the stream contents out.
        with open(new_path, 'wb') as dest:
            _copy_stream(self._data, dest, self._size)
    self.current_path = new_path
Moves or creates the file with stream contents to a new location. Args: new_path: path to move to, if None a temporary file is created.
juraj-google-style
def analyze_step_stats(self, show_dataflow: bool=True, show_memory: bool=True, op_time: str='schedule') -> StepStatsAnalysis:
    """Analyze the step stats and format them into Chrome Trace Format.

    Args:
        show_dataflow: If True, add flow events to the trace connecting
            producers and consumers of tensors.
        show_memory: If True, add object snapshot events showing the sizes
            and lifetimes of tensors.
        op_time: How op execution time is shown in the timeline; one of
            "schedule" (scheduling interval, the step_stats default),
            "gpu" (GPU kernel execution time) or "all" (scheduling start
            through last kernel end).

    Returns:
        A StepStatsAnalysis object.
    """
    self._preprocess_op_time(op_time)
    self._allocate_pids()
    self._assign_lanes()
    self._analyze_tensors(show_memory)
    self._show_compute(show_dataflow)
    if show_memory:
        self._show_memory_counters()
    return StepStatsAnalysis(chrome_trace=self._chrome_trace, allocator_maximums=self._allocator_maximums)
Analyze the step stats and format it into Chrome Trace Format. Args: show_dataflow: (Optional.) If True, add flow events to the trace connecting producers and consumers of tensors. show_memory: (Optional.) If True, add object snapshot events to the trace showing the sizes and lifetimes of tensors. op_time: (Optional.) How the execution time of op is shown in timeline. Possible values are "schedule", "gpu" and "all". "schedule" will show op from the time it is scheduled to the end of the scheduling. Notice by the end of its scheduling its async kernels may not start yet. It is shown using the default value from step_stats. "gpu" will show op with the execution time of its kernels on GPU. "all" will show op from the start of its scheduling to the end of its last kernel. Returns: A 'StepStatsAnalysis' object.
github-repos
def notify_program_learners(cls, enterprise_customer, program_details, users):
    """Notify learners about a program in which they've been enrolled.

    Args:
        enterprise_customer: The EnterpriseCustomer being linked to.
        program_details: Details about the specific program the learners
            were enrolled in.
        users: An iterable of the users or pending users who were enrolled.
    """
    program_name = program_details.get('title')
    program_branding = program_details.get('type')
    program_uuid = program_details.get('uuid')
    lms_root_url = get_configuration_value_for_site(
        enterprise_customer.site,
        'LMS_ROOT_URL',
        settings.LMS_ROOT_URL
    )
    program_path = urlquote(
        '/dashboard/programs/{program_uuid}/?tpa_hint={tpa_hint}'.format(
            program_uuid=program_uuid,
            tpa_hint=enterprise_customer.identity_provider,
        )
    )
    # Keep '{login_or_register}' as a literal placeholder here; it is
    # filled in per user inside the loop below.
    destination_url_template = '{site}/{login_or_register}?next={program_path}'.format(
        site=lms_root_url,
        login_or_register='{login_or_register}',
        program_path=program_path
    )
    program_type = 'program'
    program_start = get_earliest_start_date_from_program(program_details)
    with mail.get_connection() as email_conn:
        for user in users:
            login_or_register = 'register' if isinstance(user, PendingEnterpriseCustomerUser) else 'login'
            # Bug fix: format into a per-user variable instead of overwriting
            # the template. The original rebound `destination_url` on the
            # first iteration, consuming the placeholder, so every subsequent
            # user silently reused the first user's login/register URL.
            destination_url = destination_url_template.format(login_or_register=login_or_register)
            send_email_notification_message(
                user=user,
                enrolled_in={
                    'name': program_name,
                    'url': destination_url,
                    'type': program_type,
                    'start': program_start,
                    'branding': program_branding,
                },
                enterprise_customer=enterprise_customer,
                email_connection=email_conn
            )
Notify learners about a program in which they've been enrolled. Args: enterprise_customer: The EnterpriseCustomer being linked to program_details: Details about the specific program the learners were enrolled in users: An iterable of the users or pending users who were enrolled
juraj-google-style
def encipher_shift(plaintext, plain_vocab, shift):
    """Encrypt plain text with a single shift layer.

    Args:
        plaintext (list of list of Strings): a list of plain text
            (sentences of characters) to encrypt.
        plain_vocab (list of Integer): unique vocabularies being used.
        shift (Integer): number of shift; shifts to the right when
            positive.

    Returns:
        ciphertext (list): encrypted plain text, one list per sentence.
    """
    cipher = ShiftEncryptionLayer(plain_vocab, shift)
    # Idiom fix: the original enumerate() calls discarded their indices;
    # iterate the values directly with a nested comprehension.
    return [[cipher.encrypt_character(character) for character in sentence]
            for sentence in plaintext]
Encrypt plain text with a single shift layer. Args: plaintext (list of list of Strings): a list of plain text to encrypt. plain_vocab (list of Integer): unique vocabularies being used. shift (Integer): number of shift, shift to the right if shift is positive. Returns: ciphertext (list of Strings): encrypted plain text.
codesearchnet
def format_filter_value(self, element, value):
    """Format ``value`` using the formatter registered for ``element``.

    Args:
        element (string): The element of the VT to be formatted.
        value (dictionary): The element value.

    Returns:
        The formatted value.
    """
    formatter = self.allowed_filter.get(element)
    return formatter(value)
Calls the specific function to format value, depending on the given element. Arguments: element (string): The element of the VT to be formatted. value (dictionary): The element value. Returns: Returns a formatted value.
codesearchnet
def frombytes(data, size, bandtype=gdal.GDT_Byte):
    """Return an in-memory raster initialized from a pixel buffer.

    Arguments:
        data -- byte buffer of raw pixel data
        size -- two or three-tuple of (xsize, ysize, bandcount)
        bandtype -- band data type
    """
    raster = ImageDriver('MEM').raster('', size, bandtype)
    raster.frombytes(data)
    return raster
Returns an in-memory raster initialized from a pixel buffer. Arguments: data -- byte buffer of raw pixel data size -- two or three-tuple of (xsize, ysize, bandcount) bandtype -- band data type
juraj-google-style
def get_by_ip_hostname(self, ip_hostname):
    """Retrieve a storage system by its IP or hostname.

    Works only with API version <= 300.

    Args:
        ip_hostname: Storage system IP or hostname.

    Returns:
        dict: the first matching resource, or None when nothing matches.
    """
    matches = [resource for resource in self._client.get_all()
               if resource['credentials']['ip_hostname'] == ip_hostname]
    return matches[0] if matches else None
Retrieve a storage system by its IP. Works only with API version <= 300. Args: ip_hostname: Storage system IP or hostname. Returns: dict
juraj-google-style
def run_tac(model_path, targets, output_path):
    """Run target aware conversion for the given tflite model file.

    Args:
        model_path: Path to the tflite model file.
        targets: A list of string of the desired targets. E.g., ['GPU', 'CPU'].
        output_path: The output path.

    Returns:
        Whether the optimization succeeded.

    Raises:
        ValueError: Invalid model_path, unspecified targets, or invalid
            output_path.
    """
    validations = (
        (model_path, 'Invalid model_path.'),
        (targets, 'Targets are not specified.'),
        (output_path, 'Invalid output_path.'),
    )
    for value, error_message in validations:
        if not value:
            raise ValueError(error_message)
    return _pywrap_tac_wrapper.run_tac(model_path, targets, output_path)
Run target aware conversion for the given tflite model file. Args: model_path: Path to the tflite model file. targets: A list of string of the desired targets. E.g., ['GPU', 'CPU']. output_path: The output path. Returns: Whether the optimization succeeded. Raises: ValueError: Invalid model_path. Targets are not specified. Invalid output_path.
github-repos
def make_m_psd(self, original_nu, feed_dictionary):
    """Run binary search to find a value for nu that makes M PSD.

    Args:
        original_nu: starting value of nu to do binary search on.
        feed_dictionary: dictionary of updated lambda variables to feed
            into M.

    Returns:
        final_nu: new value of nu for which M's minimum eigenvalue is
        non-negative (within TOL).
    """
    feed_dict = feed_dictionary.copy()
    _, min_eig_val_m = self.get_lanczos_eig(compute_m=True, feed_dict=feed_dict)
    lower_nu = original_nu
    upper_nu = original_nu
    num_iter = 0
    # Phase 1: grow upper_nu geometrically until M becomes PSD, spending
    # at most half the iteration budget on the expansion.
    while min_eig_val_m - TOL < 0 and num_iter < (MAX_BINARY_SEARCH_ITER / 2):
        num_iter += 1
        upper_nu *= NU_UPDATE_CONSTANT
        feed_dict.update({self.nu: upper_nu})
        _, min_eig_val_m = self.get_lanczos_eig(compute_m=True, feed_dict=feed_dict)
    final_nu = upper_nu
    # Phase 2: bisect between lower_nu and upper_nu, keeping upper_nu the
    # smallest value seen so far for which M is PSD.
    while lower_nu <= upper_nu and num_iter < MAX_BINARY_SEARCH_ITER:
        num_iter += 1
        mid_nu = (lower_nu + upper_nu) / 2
        feed_dict.update({self.nu: mid_nu})
        _, min_eig_val_m = self.get_lanczos_eig(compute_m=True, feed_dict=feed_dict)
        if min_eig_val_m - TOL < 0:
            lower_nu = mid_nu
        else:
            upper_nu = mid_nu
    final_nu = upper_nu
    return final_nu
Run binary search to find a value for nu that makes M PSD Args: original_nu: starting value of nu to do binary search on feed_dictionary: dictionary of updated lambda variables to feed into M Returns: new_nu: new value of nu
juraj-google-style
def delta_hv(scatterer):
    """Return delta_hv for the current setup.

    Args:
        scatterer: a Scatterer instance.

    Returns:
        Delta_hv [rad].
    """
    phase_matrix = scatterer.get_Z()
    numerator = phase_matrix[2, 3] - phase_matrix[3, 2]
    denominator = -phase_matrix[2, 2] - phase_matrix[3, 3]
    return np.arctan2(numerator, denominator)
Delta_hv for the current setup. Args: scatterer: a Scatterer instance. Returns: Delta_hv [rad].
juraj-google-style
def nic_v2(msg, NICa, NICbc):
    """Calculate NIC, navigation integrity category, for ADS-B version 2.

    Args:
        msg (string): 28 bytes hexadecimal message string.
        NICa (int or string): NIC supplement - A.
        NICbc (int or string): NIC supplement - B or C.

    Returns:
        int or string: Horizontal Radius of Containment (Rc).
    """
    # NOTE(review): the guard accepts typecodes 5..22 inclusive, while the
    # error message describes the ranges as strict (5<TC<8 etc.) — confirm
    # which boundary behavior is intended.
    if ((typecode(msg) < 5) or (typecode(msg) > 22)):
        raise RuntimeError(('%s: Not a surface position message (5<TC<8), airborne position message (8<TC<19), or airborne position with GNSS height (20<TC<22)' % msg))
    tc = typecode(msg)
    NIC = uncertainty.TC_NICv2_lookup[tc]
    if (20 <= tc <= 22):
        # Airborne position with GNSS height: supplements are not used.
        NICs = 0
    else:
        # Combine supplement bits into a single selector.
        NICs = ((NICa * 2) + NICbc)
    try:
        if isinstance(NIC, dict):
            # Some typecodes map to several NIC values keyed by supplements.
            NIC = NIC[NICs]
        Rc = uncertainty.NICv2[NIC][NICs]['Rc']
    except KeyError:
        # Unknown combination: report "not available".
        Rc = uncertainty.NA
    return Rc
Calculate NIC, navigation integrity category, for ADS-B version 2 Args: msg (string): 28 bytes hexadecimal message string NICa (int or string): NIC supplement - A NICbc (int or string): NIC supplement - B or C Returns: int or string: Horizontal Radius of Containment
codesearchnet
def orient_undirected_graph(self, data, graph):
    """Run the algorithm on an undirected graph.

    Args:
        data (pandas.DataFrame): DataFrame containing the data.
        graph (networkx.Graph): Skeleton of the graph to orient.

    Returns:
        networkx.DiGraph: Solution on the given skeleton.
    """
    # Propagate instance settings into the script-template arguments.
    self.arguments['{VERBOSE}'] = str(self.verbose).upper()
    self.arguments['{SCORE}'] = self.score
    self.arguments['{BETA}'] = str(self.beta)
    self.arguments['{OPTIM}'] = str(self.optim).upper()
    self.arguments['{ALPHA}'] = str(self.alpha)
    # Whitelist: edges present in the skeleton.
    whitelist = DataFrame(list(nx.edges(graph)), columns=['from', 'to'])
    # Blacklist: edges of the complement graph (1 - adjacency), i.e. all
    # ordered pairs NOT connected in the skeleton.
    blacklist = DataFrame(list(nx.edges(nx.DiGraph(DataFrame(((- nx.adj_matrix(graph, weight=None).to_dense()) + 1), columns=list(graph.nodes()), index=list(graph.nodes()))))), columns=['from', 'to'])
    results = self._run_bnlearn(data, whitelist=whitelist, blacklist=blacklist, verbose=self.verbose)
    # Map integer node indices from the bnlearn output back to the
    # original column names.
    return nx.relabel_nodes(nx.DiGraph(results), {idx: i for (idx, i) in enumerate(data.columns)})
Run the algorithm on an undirected graph. Args: data (pandas.DataFrame): DataFrame containing the data graph (networkx.Graph): Skeleton of the graph to orient Returns: networkx.DiGraph: Solution on the given skeleton.
codesearchnet
def calculate_weighted_avg(bonds):
    """Return the weighted average bond length given by Hoppe's effective
    coordination number formula.

    Args:
        bonds (list): list of floats that are the bond distances between a
            cation and its peripheral ions.
    """
    shortest = min(bonds)
    weights = [exp(1 - (length / shortest) ** 6) for length in bonds]
    weighted_total = sum(length * weight for length, weight in zip(bonds, weights))
    return weighted_total / sum(weights)
Returns the weighted average bond length given by Hoppe's effective coordination number formula. Args: bonds (list): list of floats that are the bond distances between a cation and its peripheral ions
codesearchnet
def merge_entries(self, source_entry):
    """Merge two entries.

    Allows the merging of two MFTEntries, copying the attributes to the
    correct place and merging the datastreams.

    Args:
        source_entry (:obj:`MFTEntry`): Source entry where the data will
            be copied from.
    """
    # Copy every attribute into this entry, bucketed by its type id.
    for list_attr in source_entry.attrs.values():
        for attr in list_attr:
            self.attrs[attr.header.attr_type_id].append(attr)
    for stream in source_entry.data_streams:
        dest_stream = self._find_datastream(stream.name)
        if (dest_stream is not None):
            # A same-named stream already exists: merge the contents.
            dest_stream.add_from_datastream(stream)
        else:
            # New stream name: adopt the source stream wholesale.
            self.data_streams.append(stream)
Merge two entries. Allow the merging of two MFTEntries copying the attributes to the correct place and the datastreams. Args: source_entry (:obj:`MFTEntry`) - Source entry where the data will be copied from
codesearchnet
def probabilities(input_energy: energy.BitstringEnergy):
    """Return the probabilities of the EBM.

    Enumerates every bitstring, evaluates its energy, and normalizes the
    Boltzmann weights by the partition function.

    Args:
        input_energy: The energy function defining the EBM.
    """
    all_bitstrings = tf.constant(
        list(itertools.product([0, 1], repeat=input_energy.num_bits)),
        dtype=tf.int8)
    boltzmann_weights = tf.math.exp(-input_energy(all_bitstrings))
    partition_function = tf.math.reduce_sum(boltzmann_weights)
    return boltzmann_weights / partition_function
Returns the probabilities of the EBM. Args: input_energy: The energy function defining the EBM.
github-repos
def get_cohp_by_label(self, label):
    """Get specific COHP object.

    Args:
        label: string (for newer Lobster versions: a number).

    Returns:
        The Cohp object for the given label (the averaged COHP for label
        "average"), or None when the label does not exist.
    """
    if (label.lower() == 'average'):
        # The averaged COHP is stored directly on this object.
        return Cohp(efermi=self.efermi, energies=self.energies, cohp=self.cohp, are_coops=self.are_coops, icohp=self.icohp)
    else:
        try:
            return Cohp(efermi=self.efermi, energies=self.energies, cohp=self.all_cohps[label].get_cohp(spin=None, integrated=False), are_coops=self.are_coops, icohp=self.all_cohps[label].get_icohp(spin=None))
        except KeyError:
            # NOTE(review): printing and implicitly returning None hides
            # the failure from callers — consider raising instead.
            print('The label does not exist')
Get specific COHP object. Args: label: string (for newer Lobster versions: a number) Returns: Returns the COHP object to simplify plotting
codesearchnet
def getContextsForTerm(self, retina_name, term, get_fingerprint=None, start_index=0, max_results=5):
    """Get the contexts for a given term.

    Args:
        retina_name (str): The retina name (required).
        term (str): A term in the retina (required).
        get_fingerprint (bool): Whether the fingerprint should be returned
            as part of the results (optional).
        start_index (int): The start-index for pagination (optional).
        max_results (int): Max results per page (optional).

    Returns:
        list of Context: the contexts for the term.
    """
    resourcePath = '/terms/contexts'
    method = 'GET'
    queryParams = {}
    headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
    postData = None
    queryParams['retina_name'] = retina_name
    queryParams['term'] = term
    queryParams['start_index'] = start_index
    queryParams['max_results'] = max_results
    queryParams['get_fingerprint'] = get_fingerprint
    response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
    # Wrap each raw JSON record in a Context model object.
    return [context.Context(**r) for r in response.json()]
Get the contexts for a given term Args: retina_name, str: The retina name (required) term, str: A term in the retina (required) get_fingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional) start_index, int: The start-index for pagination (optional) max_results, int: Max results per page (optional) Returns: Array[Context]
juraj-google-style
def add_tensor_filter(self, filter_name, filter_callable):
    """Add a tensor filter.

    A tensor filter is a named callable of the signature
    filter_callable(dump_datum, tensor), where dump_datum carries metadata
    about the dumped tensor and tensor is its value as a numpy.ndarray.
    The callable returns a bool.

    Args:
        filter_name: (str) name of the filter. Cannot be empty.
        filter_callable: (callable) a filter function of the signature
            described above.

    Raises:
        ValueError: If filter_name is an empty str.
        TypeError: If filter_name is not a str, or if filter_callable is
            not callable.
    """
    # Validate name type first, then emptiness, then the callable —
    # preserving the original check order and error messages.
    if not isinstance(filter_name, str):
        raise TypeError('Input argument filter_name is expected to be str, but is not.')
    if not filter_name:
        raise ValueError('Input argument filter_name cannot be empty.')
    if not callable(filter_callable):
        raise TypeError('Input argument filter_callable is expected to be callable, but is not.')
    self._tensor_filters[filter_name] = filter_callable
Add a tensor filter. A tensor filter is a named callable of the signature: filter_callable(dump_datum, tensor), wherein dump_datum is an instance of debug_data.DebugTensorDatum carrying metadata about the dumped tensor, including tensor name, timestamps, etc. tensor is the value of the dumped tensor as an numpy.ndarray object. The return value of the function is a bool. This is the same signature as the input argument to debug_data.DebugDumpDir.find(). Args: filter_name: (str) name of the filter. Cannot be empty. filter_callable: (callable) a filter function of the signature described as above. Raises: ValueError: If filter_name is an empty str. TypeError: If filter_name is not a str. Or if filter_callable is not callable.
github-repos
def GetTypeChecker(field):
    """Returns a type checker for a message field of the specified types.

    Args:
        field: FieldDescriptor object for this field.

    Returns:
        An instance of TypeChecker which can be used to verify the types
        of values assigned to a field of the specified type.
    """
    if ((field.cpp_type == _FieldDescriptor.CPPTYPE_STRING) and (field.type == _FieldDescriptor.TYPE_STRING)):
        # True string fields (as opposed to bytes) get unicode validation.
        return UnicodeValueChecker()
    if (field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM):
        if SupportsOpenEnums(field):
            # Open enums accept any int32 value, not just declared members.
            return _VALUE_CHECKERS[_FieldDescriptor.CPPTYPE_INT32]
        else:
            return EnumValueChecker(field.enum_type)
    return _VALUE_CHECKERS[field.cpp_type]
Returns a type checker for a message field of the specified types. Args: field: FieldDescriptor object for this field. Returns: An instance of TypeChecker which can be used to verify the types of values assigned to a field of the specified type.
codesearchnet
def value_loss_given_predictions(value_prediction, rewards, reward_mask, gamma=0.99):
    """Compute the value loss given the prediction of the value function.

    Args:
        value_prediction: np.ndarray of shape (B, T+1, 1).
        rewards: np.ndarray of shape (B, T) of rewards.
        reward_mask: np.ndarray of shape (B, T), the mask over rewards.
        gamma: float, discount factor.

    Returns:
        The average L2 value loss, averaged over instances where
        reward_mask is 1.
    """
    batch, timesteps = rewards.shape
    assert (batch, timesteps) == reward_mask.shape
    assert (batch, timesteps + 1, 1) == value_prediction.shape
    # Drop the trailing singleton dim and the extra final timestep, then
    # zero out masked positions.
    predictions = np.squeeze(value_prediction, axis=2)[:, :-1] * reward_mask
    returns = rewards_to_go(rewards, reward_mask, gamma=gamma)
    squared_error = (predictions - returns) ** 2
    return np.sum(squared_error) / np.sum(reward_mask)
Computes the value loss given the prediction of the value function. Args: value_prediction: np.ndarray of shape (B, T+1, 1) rewards: np.ndarray of shape (B, T) of rewards. reward_mask: np.ndarray of shape (B, T), the mask over rewards. gamma: float, discount factor. Returns: The average L2 value loss, averaged over instances where reward_mask is 1.
juraj-google-style
def dbExec(self, query_str):
    """Required override of dbExec() from MeterDB(); run a query.

    Args:
        query_str (str): query to run.

    Returns:
        bool: True if the query committed successfully, False otherwise.
    """
    try:
        connection = sqlite3.connect(self.m_connection_string)
        try:
            cursor = connection.cursor()
            cursor.execute(query_str)
            connection.commit()
            cursor.close()
        finally:
            # Bug fix: the original leaked the connection when execute()
            # or commit() raised; always close it.
            connection.close()
        return True
    except Exception:
        # Bug fix: traceback.format_exc() takes an optional int limit, not
        # an exc_info tuple — the original call raised its own TypeError.
        # Also avoid a bare except, which swallowed KeyboardInterrupt.
        ekm_log(traceback.format_exc())
        return False
Required override of dbExec() from MeterDB(), run query. Args: query_str (str): query to run
juraj-google-style
def fprime(self, w, *args):
    """Return the derivatives of the cost function for predictions.

    Args:
        w (array of float): weight vectors such that:
            w[:-h1] -- weights between the input and h layers
            w[-h1:] -- weights between the h and output layers
        args: features (args[0]) and target (args[1])

    Returns:
        Gradients of the cost function for predictions.
    """
    x0 = args[0]
    x1 = args[1]
    n0 = x0.shape[0]
    n1 = x1.shape[0]
    # Sample (with replacement) 10x the larger group size from each group.
    n = (max(n0, n1) * 10)
    idx0 = np.random.choice(range(n0), size=n)
    idx1 = np.random.choice(range(n1), size=n)
    b = np.ones((n, 1))  # bias column appended to inputs and hidden layer
    i1 = (self.i + 1)
    h = self.h
    h1 = (h + 1)
    # Unpack the flat weight vector into the two layer matrices.
    w2 = w[(- h1):].reshape(h1, 1)
    w1 = w[:(- h1)].reshape(i1, h)
    if sparse.issparse(x0):
        x0 = x0.tocsr()[idx0]
        x1 = x1.tocsr()[idx1]
        xb0 = sparse.hstack((x0, b))
        xb1 = sparse.hstack((x1, b))
    else:
        x0 = x0[idx0]
        x1 = x1[idx1]
        xb0 = np.hstack((x0, b))
        xb1 = np.hstack((x1, b))
    # Forward pass through the single hidden layer (sigmoid + bias unit).
    z0 = np.hstack((sigm(xb0.dot(w1)), b))
    z1 = np.hstack((sigm(xb1.dot(w1)), b))
    y0 = z0.dot(w2)
    y1 = z1.dot(w2)
    e = (1 - (y1 - y0))
    dy = (e / n)
    # Backpropagate into both weight matrices; each gradient includes its
    # regularization term (l1 for the first layer, l2 for the second).
    dw1 = ((- (xb1.T.dot((dy.dot(w2[:(- 1)].reshape(1, h)) * dsigm(xb1.dot(w1)))) - xb0.T.dot((dy.dot(w2[:(- 1)].reshape(1, h)) * dsigm(xb0.dot(w1))))).reshape((i1 * h))) + ((self.l1 * w[:(- h1)]) / (i1 * h)))
    dw2 = ((- (z1 - z0).T.dot(dy).reshape(h1)) + ((self.l2 * w[(- h1):]) / h1))
    return np.append(dw1, dw2)
Return the derivatives of the cost function for predictions. Args: w (array of float): weight vectors such that: w[:-h1] -- weights between the input and h layers w[-h1:] -- weights between the h and output layers args: features (args[0]) and target (args[1]) Returns: gradients of the cost function for predictions
codesearchnet
def _check_debug_tensor_value(self, tensor_debug_mode, debug_tensor_value, wall_time, op_type, output_slot, execution_index=None, graph_execution_trace_index=None):
    """Check for bad numerical values based on a tensor's debug summary.

    If tensor_debug_mode is one in which debug_tensor_value does not carry
    information about the presence or count of inf / nan values (e.g.,
    SHAPE), this method is a no-op. When infs and/or nans are found,
    `InfNanAlert` objects are created and appended to `self._alerts`.

    Args:
        tensor_debug_mode: TensorDebugMode proto enum.
        debug_tensor_value: Debug tensor value as a list of numbers.
        wall_time: Wall timestamp for the tensor event.
        op_type: Type of the op that generated the tensor (e.g., "Conv2D").
        output_slot: Output slot index of the tensor for the op.
        execution_index: Top-level execution index.
        graph_execution_trace_index: Intra-graph execution index.
    """
    assert tensor_debug_mode != debug_event_pb2.TensorDebugMode.FULL_TENSOR
    if not debug_tensor_value:
        return
    if tensor_debug_mode == debug_event_pb2.TensorDebugMode.CURT_HEALTH:
        # Two-element summary; only the any_nan_inf flag is used here.
        _, any_nan_inf = debug_tensor_value
        if any_nan_inf:
            self._alerts.append(InfNanAlert(wall_time, op_type, output_slot, execution_index=execution_index, graph_execution_trace_index=graph_execution_trace_index))
    elif tensor_debug_mode == debug_event_pb2.TensorDebugMode.CONCISE_HEALTH:
        # Summary carries the element count and per-kind inf/nan counts.
        _, size, num_neg_inf, num_pos_inf, num_nan = debug_tensor_value
        if num_neg_inf or num_pos_inf or num_nan:
            self._alerts.append(InfNanAlert(wall_time, op_type, output_slot, size=size, num_neg_inf=num_neg_inf, num_pos_inf=num_pos_inf, num_nan=num_nan, execution_index=execution_index, graph_execution_trace_index=graph_execution_trace_index))
    elif tensor_debug_mode == debug_event_pb2.TensorDebugMode.FULL_HEALTH:
        # Eleven-element summary; only size and the inf/nan counts are used.
        _, _, _, _, size, num_neg_inf, num_pos_inf, num_nan, _, _, _ = debug_tensor_value
        if num_neg_inf or num_pos_inf or num_nan:
            self._alerts.append(InfNanAlert(wall_time, op_type, output_slot, size=size, num_neg_inf=num_neg_inf, num_pos_inf=num_pos_inf, num_nan=num_nan, execution_index=execution_index, graph_execution_trace_index=graph_execution_trace_index))
Check for bad numerical values based on debug summary of tensor value. If tensor_debug_mode is one in which debug_tensor_value does not carry information about the presence or count of inf / nan values (e.g., SHAPE), this method is a no-op. When infs and/or nans are found, `InfNanAlert` objects are created and appended to `self._alerts`. Args: tensor_debug_mode: TensorDebugMode proto enum. debug_tensor_value: Debug tensor value as a list of numbers. wall_time: Wall timestamp for the tensor event. op_type: Type of the op that generated the tensor (e.g., "Conv2D"). output_slot: Output slot index of the tensor for the op. execution_index: Top-level execution index. graph_execution_trace_index: Intra-graph execution index.
github-repos
def DeregisterHelper(cls, resolver_helper):
    """Deregisters a path specification resolver helper.

    Args:
        resolver_helper (ResolverHelper): resolver helper.

    Raises:
        KeyError: if resolver helper object is not set for the
            corresponding type indicator.
    """
    type_indicator = resolver_helper.type_indicator
    if type_indicator not in cls._resolver_helpers:
        raise KeyError(
            'Resolver helper object not set for type indicator: {0:s}.'.format(
                type_indicator))
    del cls._resolver_helpers[type_indicator]
Deregisters a path specification resolver helper. Args: resolver_helper (ResolverHelper): resolver helper. Raises: KeyError: if resolver helper object is not set for the corresponding type indicator.
codesearchnet
def create_function(self, vpc_config):
    """Create lambda function, configures lambda parameters.

    We need to upload a non-zero zip when creating the function, so a
    placeholder hello_world python lambda is uploaded — AWS doesn't care
    which executable is in the ZIP.

    Args:
        vpc_config (dict): Dictionary of SubnetIds and SecurityGroupsIds
            for using a VPC in lambda.
    """
    zip_file = 'lambda-holder.zip'
    with zipfile.ZipFile(zip_file, mode='w') as zipped:
        zipped.writestr('index.py', 'print "Hello world"')
    contents = ''
    with open('lambda-holder.zip', 'rb') as openfile:
        contents = openfile.read()
    LOG.info('Creating lambda function: %s', self.app_name)
    try:
        self.lambda_client.create_function(Environment=self.lambda_environment, FunctionName=self.app_name, Runtime=self.runtime, Role=self.role_arn, Handler=self.handler, Code={'ZipFile': contents}, Description=self.description, Timeout=int(self.timeout), MemorySize=int(self.memory), Publish=False, VpcConfig=vpc_config, Tags={'app_group': self.group, 'app_name': self.app_name})
    except boto3.exceptions.botocore.exceptions.ClientError as error:
        # A CreateNetworkInterface failure means the role lacks the EC2
        # permission required for VPC-attached lambdas; abort with a clear
        # message. Anything else is re-raised unchanged.
        if ('CreateNetworkInterface' in error.response['Error']['Message']):
            message = '{0} is missing "ec2:CreateNetworkInterface"'.format(self.role_arn)
            LOG.critical(message)
            raise SystemExit(message)
        raise
    LOG.info('Successfully created Lambda function and alias')
Create lambda function, configures lambda parameters. We need to upload non-zero zip when creating function. Uploading hello_world python lambda function since AWS doesn't care which executable is in ZIP. Args: vpc_config (dict): Dictionary of SubnetIds and SecurityGroupsIds for using a VPC in lambda
codesearchnet
def get_folders(cls, session, mailbox_or_id):
    """List the folders for the mailbox.

    Args:
        mailbox_or_id (helpscout.models.Mailbox or int): Mailbox or the ID
            of the mailbox to get the folders for.

    Returns:
        RequestPaginator(output_type=helpscout.models.Folder): Folders
        iterator.
    """
    mailbox_id = mailbox_or_id.id if isinstance(mailbox_or_id, Mailbox) else mailbox_or_id
    endpoint = '/mailboxes/%d/folders.json' % mailbox_id
    return cls(endpoint, session=session, out_type=Folder)
List the folders for the mailbox. Args: mailbox_or_id (helpscout.models.Mailbox or int): Mailbox or the ID of the mailbox to get the folders for. Returns: RequestPaginator(output_type=helpscout.models.Folder): Folders iterator.
codesearchnet
def global_step(self):
    """Return the global_step Tensor used by the supervisor.

    Returns:
        An integer Tensor for the global_step.
    """
    return self._global_step
Return the global_step Tensor used by the supervisor. Returns: An integer Tensor for the global_step.
github-repos
def service_info(self, short_name):
    """Get static information about a service.

    Args:
        short_name (string): The short name of the service to query.

    Returns:
        dict: A dictionary with the long_name and preregistered info on
        this service.

    Raises:
        ArgumentError: If the service name is unknown.
    """
    if short_name not in self.services:
        raise ArgumentError("Unknown service name", short_name=short_name)
    state = self.services[short_name]['state']
    return {
        'short_name': short_name,
        'long_name': state.long_name,
        'preregistered': state.preregistered,
    }
Get static information about a service. Args: short_name (string): The short name of the service to query Returns: dict: A dictionary with the long_name and preregistered info on this service.
juraj-google-style
def neighbours_pattern(element):
    """Look for neighbours of the `element`, return proper :class:`PathCall`.

    Args:
        element (obj): HTMLElement instance of the object you are looking
            for.

    Returns:
        list: List of :class:`PathCall` instances (left/right neighbour
        calls), or an empty list when there is no parent or no usable
        neighbour.
    """
    if not element.parent:
        return []
    parent = element.parent
    # Bug fix: on Python 3, filter() returns an iterator, so the original
    # crashed on len() / .index(); build a list instead (same predicate).
    neighbours = [
        x for x in parent.childs
        if (x.isTag() and not x.isEndTag()) or x.getContent().strip() or x is element
    ]
    if len(neighbours) <= 1:
        return []
    output = []
    element_index = neighbours.index(element)
    if element_index >= 1:
        output.append(
            _neighbour_to_path_call(
                "left",
                neighbours[element_index - 1],
                element
            )
        )
    if element_index + 1 < len(neighbours):
        output.append(
            _neighbour_to_path_call(
                "right",
                neighbours[element_index + 1],
                element
            )
        )
    return output
Look for neighbours of the `element`, return proper :class:`PathCall`. Args: element (obj): HTMLElement instance of the object you are looking for. Returns: list: List of :class:`PathCall` instances.
juraj-google-style
def _get_default_help_message(func, args, description=None, args_help=None): if (description is None): description = ('Argument parsing for %s' % func.__name__) args_help = (args_help or {}) for argument in [arg_name for arg_name in args if (arg_name not in args_help)]: args_help[argument] = ('Help message for %s' % argument) return (description, args_help)
Create a default description for the parser and help messages for the arguments if they are missing. Args: func: the method we are creating a parser for args: the argument names of the method description: a potentially existing description created from the function docstring args_help: a dict {arg_name: help} with potentially missing arguments Returns: a tuple (arg_parse_description, complete_args_help)
codesearchnet
def label(self, input_grid):
    """Label input grid with hysteresis method.

    Regions are seeded where values exceed ``self.high_thresh`` and grown
    into 8-connected neighbours above ``self.low_thresh``.

    Args:
        input_grid: 2D array of values.

    Returns:
        Labeled output grid (int array; 0 means unlabeled).
    """
    unset = 0
    # `label` here resolves to the module-level connected-component
    # labeler (presumably scipy.ndimage.label — confirm import), not this
    # method.
    high_labels, num_labels = label(input_grid > self.high_thresh)
    # Rank seed regions by their maximum value so stronger regions claim
    # contested cells first.
    region_ranking = np.argsort(maximum(input_grid, high_labels, index=np.arange(1, num_labels + 1)))[::-1]
    output_grid = np.zeros(input_grid.shape, dtype=int)
    stack = []
    for rank in region_ranking:
        label_num = rank + 1
        label_i, label_j = np.where(high_labels == label_num)
        # Seed the stack with this region's still-unlabeled cells.
        for i in range(label_i.size):
            if output_grid[label_i[i], label_j[i]] == unset:
                stack.append((label_i[i], label_j[i]))
        # Depth-first flood fill through 8-connected neighbours above the
        # low threshold.
        while len(stack) > 0:
            index = stack.pop()
            output_grid[index] = label_num
            for i in range(index[0] - 1, index[0] + 2):
                for j in range(index[1] - 1, index[1] + 2):
                    if 0 <= i < output_grid.shape[0] and 0 <= j < output_grid.shape[1]:
                        if (input_grid[i, j] > self.low_thresh) and (output_grid[i, j] == unset):
                            stack.append((i, j))
    return output_grid
Label input grid with hysteresis method. Args: input_grid: 2D array of values. Returns: Labeled output grid.
juraj-google-style
def thermal_expansion_coeff(self, structure, temperature, mode='debye'):
    """Get thermal expansion coefficient from third-order constants.

    Args:
        structure (Structure): Structure to be used in directional heat
            capacity determination.
        temperature (float): Temperature in kelvin.
        mode (string): mode for finding average heat-capacity; currently
            supported modes are 'debye' and 'dulong-petit'.

    Returns:
        SquareTensor: the thermal expansion coefficient tensor.
    """
    soec = ElasticTensor(self[0])
    # Volume per atom in m^3 (structure.volume is in cubic angstroms).
    v0 = ((structure.volume * 1e-30) / structure.num_sites)
    if (mode == 'debye'):
        td = soec.debye_temperature(structure)
        t_ratio = (temperature / td)
        # Debye model heat-capacity integral.
        integrand = (lambda x: (((x ** 4) * np.exp(x)) / ((np.exp(x) - 1) ** 2)))
        cv = (((9 * 8.314) * (t_ratio ** 3)) * quad(integrand, 0, (t_ratio ** (- 1)))[0])
    elif (mode == 'dulong-petit'):
        # Classical high-temperature limit: 3R per mole of atoms.
        cv = (3 * 8.314)
    else:
        raise ValueError('Mode must be debye or dulong-petit')
    tgt = self.get_tgt(temperature, structure)
    alpha = np.einsum('ijkl,ij', soec.compliance_tensor, tgt)
    # Normalize by heat capacity, atomic volume and Avogadro's number.
    alpha *= (cv / ((1000000000.0 * v0) * 6.022e+23))
    return SquareTensor(alpha)
Gets thermal expansion coefficient from third-order constants. Args: temperature (float): Temperature in kelvin, if not specified will return non-cv-normalized value structure (Structure): Structure to be used in directional heat capacity determination, only necessary if temperature is specified mode (string): mode for finding average heat-capacity, current supported modes are 'debye' and 'dulong-petit'
codesearchnet
def _add_rank_score(self, variant_obj, info_dict): rank_score_entry = info_dict.get('RankScore') if rank_score_entry: for family_annotation in rank_score_entry.split(','): rank_score = family_annotation.split(':')[-1] logger.debug("Updating rank_score to: {0}".format( rank_score)) variant_obj.rank_score = float(rank_score)
Add the rank score if found Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary
juraj-google-style
def get_args(path):
    """Parse command line args & override defaults.

    Arguments:
        path (str): Absolute filepath to the defaults file.

    Returns:
        tuple: (name, email, license, project, ext, year).
    """
    defaults = get_defaults(path)
    licenses = ', '.join(os.listdir((cwd + licenses_loc)))
    p = parser(description=('tool for adding open source licenses to your projects. available licenses: %s' % licenses))
    # A flag is only required when no default exists for that field.
    _name = (False if defaults.get('name') else True)
    _email = (False if defaults.get('email') else True)
    _license = (False if defaults.get('license') else True)
    p.add_argument('-n', dest='name', required=_name, help='name')
    p.add_argument('-e', dest='email', required=_email, help='email')
    p.add_argument('-l', dest='license', required=_license, help='license')
    p.add_argument('-p', dest='project', required=False, help='project')
    p.add_argument('-v', '--version', action='version', version='%(prog)s {version}'.format(version=version))
    p.add_argument('--txt', action='store_true', required=False, help='add .txt to filename')
    args = p.parse_args()
    # Command-line values win over defaults; project falls back to the
    # name of the current working directory.
    name = (args.name if args.name else defaults.get('name'))
    email = (args.email if args.email else defaults.get('email'))
    license = (get_license(args.license) if args.license else defaults.get('license'))
    project = (args.project if args.project else os.getcwd().split('/')[(- 1)])
    ext = ('.txt' if args.txt else '')
    year = str(date.today().year)
    return (name, email, license, project, ext, year)
Parse command line args & override defaults. Arguments: - path (str) Absolute filepath Returns: - (tuple) Name, email, license, project, ext, year
codesearchnet
def get_config(self):
    """Returns the config of the optimizer.

    An optimizer config is a Python dictionary (serializable) containing
    the configuration of an optimizer. The same optimizer can be
    reinstantiated later (without any saved state) from this
    configuration.

    Returns:
        Python dictionary.
    """
    config = {'name': self._name}
    # Only include clipping options that are actually set.
    for option in ('clipnorm', 'clipvalue', 'global_clipnorm'):
        option_value = getattr(self, option)
        if option_value is not None:
            config[option] = option_value
    return config
Returns the config of the optimizer. An optimizer config is a Python dictionary (serializable) containing the configuration of an optimizer. The same optimizer can be reinstantiated later (without any saved state) from this configuration. Returns: Python dictionary.
github-repos
def _insert_stack(stack, sample_count, call_tree): curr_level = call_tree for func in stack: next_level_index = {node['stack']: node for node in curr_level['children']} if (func not in next_level_index): new_node = {'stack': func, 'children': [], 'sampleCount': 0} curr_level['children'].append(new_node) curr_level = new_node else: curr_level = next_level_index[func] curr_level['sampleCount'] = sample_count
Inserts stack into the call tree. Args: stack: Call stack. sample_count: Sample count of call stack. call_tree: Call tree.
codesearchnet
def register(self, name):
    """Register configuration for an editor instance.

    Arguments:
        name (string): Config name from available ones in
            ``settings.CODEMIRROR_SETTINGS``.

    Raises:
        UnknowConfigError: If given config name does not exist in
            ``settings.CODEMIRROR_SETTINGS``.

    Returns:
        dict: Registered config dict.
    """
    if (name not in settings.CODEMIRROR_SETTINGS):
        msg = "Given config name '{}' does not exists in 'settings.CODEMIRROR_SETTINGS'."
        raise UnknowConfigError(msg.format(name))
    # Deep-copy so registered configs never share mutable state with each
    # other or with the settings module.
    parameters = copy.deepcopy(self.default_internal_config)
    parameters.update(copy.deepcopy(settings.CODEMIRROR_SETTINGS[name]))
    # Fill in bundle names from the templates when not set explicitly.
    if ('css_bundle_name' not in parameters):
        css_template_name = settings.CODEMIRROR_BUNDLE_CSS_NAME
        parameters['css_bundle_name'] = css_template_name.format(settings_name=name)
    if ('js_bundle_name' not in parameters):
        js_template_name = settings.CODEMIRROR_BUNDLE_JS_NAME
        parameters['js_bundle_name'] = js_template_name.format(settings_name=name)
    self.registry[name] = parameters
    return parameters
Register configuration for an editor instance. Arguments: name (string): Config name from available ones in ``settings.CODEMIRROR_SETTINGS``. Raises: UnknowConfigError: If given config name does not exist in ``settings.CODEMIRROR_SETTINGS``. Returns: dict: Registred config dict.
codesearchnet
def LogHttpFrontendAccess(self, request, source=None, message_count=None):
    """Write a log entry for a Frontend or UI Request.

    Args:
        request: A HttpRequest protobuf.
        source: Client id of the client initiating the request. Optional.
        message_count: Number of messages received from the client.
            Optional.
    """
    event_id = self.GetNewEventId()
    # Lazy %-formatting; the rendered message is identical to building
    # the string up front.
    logging.info(
        "%s-%s [%s]: %s %s %s %s (%d)",
        event_id,
        request.source_ip,
        source or "<unknown>",
        request.method,
        request.url,
        request.user_agent,
        request.user,
        message_count or 0)
Write a log entry for a Frontend or UI Request. Args: request: A HttpRequest protobuf. source: Client id of the client initiating the request. Optional. message_count: Number of messages received from the client. Optional.
juraj-google-style
def write_config(config, filename=None):
    """Write the provided configuration to a specific location.

    Args:
        config (dict): a dictionary with the configuration to load.
        filename (str): the name of the file that will store the new
            configuration. Defaults to ``None``; in that case the default
            config path (the current user's HOME plus ``.bigchaindb``) is
            used.
    """
    if not filename:
        filename = CONFIG_DEFAULT_PATH
    with open(filename, 'w') as config_file:
        json.dump(config, config_file, indent=4)
Write the provided configuration to a specific location. Args: config (dict): a dictionary with the configuration to load. filename (str): the name of the file that will store the new configuration. Defaults to ``None``. If ``None``, the HOME of the current user and the string ``.bigchaindb`` will be used.
codesearchnet
def _process_pricing_schedule(self, item, feed_item):
    """Updates / creates pricing schedule settings.

    This method updates the CM item with a pricing schedule based on
    configurations from the Bulkdozer feed.

    Args:
        item: the CM placement object to update.
        feed_item: The Bulkdozer feed item representing the settings to
            define.
    """
    if 'pricing_schedule' in feed_item and feed_item['pricing_schedule']:
        if not 'pricingSchedule' in item:
            item['pricingSchedule'] = {}
        # Replace any existing periods wholesale with those from the feed.
        item['pricingSchedule']['pricingPeriods'] = []
        for pricing_schedule in feed_item['pricing_schedule']:
            # rateOrCostNanos is expressed in billionths of a currency unit.
            item['pricingSchedule']['pricingPeriods'].append({'endDate': pricing_schedule.get(FieldMap.PLACEMENT_PERIOD_END, None), 'startDate': pricing_schedule.get(FieldMap.PLACEMENT_PERIOD_START, None), 'rateOrCostNanos': int(float(pricing_schedule.get(FieldMap.PLACEMENT_PERIOD_RATE)) * 1000000000), 'units': pricing_schedule.get(FieldMap.PLACEMENT_PERIOD_UNITS)})
Updates / creates pricing schedule settings. This method updates the CM item with pricing schedule based on configurations from the Bulkdozer feed. Args: item: the CM placement object to update. feed_item: The Bulkdozer feed item representing the settings to define.
github-repos
def coerce(self, value):
    """Convert a value into a pattern matched string value.

    All string values are matched against a regex before they are
    considered acceptable values.

    Args:
        value (any): The value to coerce.

    Raises:
        ValueError: If the value is not an acceptable value.

    Returns:
        str: The pattern matched value represented.
    """
    # Stringify anything that is not already a (py2-compatible) string.
    if not isinstance(value, compat.basestring):
        value = str(value)
    if self._re.match(value):
        return value
    raise ValueError(
        "The value {0} does not match the pattern {1}".format(
            value,
            self.pattern,
        )
    )
Convert a value into a pattern matched string value. All string values are matched against a regex before they are considered acceptable values. Args: value (any): The value to coerce. Raises: ValueError: If the value is not an acceptable value. Returns: str: The pattern matched value represented.
juraj-google-style
def _build_colocation_attr_map(input_map, absolute_import_scope):
    """Returns a dict mapping from pre-import to post-import colocation attrs.

    Args:
        input_map: dict mapping tensor names (relative to the import scope)
            in the imported graph to replacement tensors in the outer graph.
        absolute_import_scope: name scope the graph was imported under.

    Returns:
        A dict mapping bytes ``b"loc:@" + <absolute imported op name>`` to
        _ConsistentValue instances set to the rewritten colocation group
        lists; inconsistent rewrites are recorded in the value's error state.
    """
    colocation_attr_map = collections.defaultdict(_ConsistentValue)
    # Tracks which output indices of each imported op are substituted, so
    # partially-remapped ops can be detected below.
    used_outputs_of_imported_ops = collections.defaultdict(set)
    for (imported_tensor_name, mapped_tensor) in input_map.items():
        imported_tensor_name = ((absolute_import_scope + '/') + imported_tensor_name)
        (imported_op_name, imported_index) = _split_tensor_name(imported_tensor_name)
        key = tf.compat.as_bytes(('loc:@' + imported_op_name))
        # A substituted tensor inherits the colocation groups of the op that
        # produces its replacement.
        colocation_attr_map[key].Set(mapped_tensor.op.colocation_groups(), {'reason': ("input '%s' is substituted by '%s'" % (imported_tensor_name, mapped_tensor.name))})
        used_outputs_of_imported_ops[imported_op_name].add(imported_index)
    for (imported_op_name, used_outputs) in used_outputs_of_imported_ops.items():
        imported_op = tf_v1.get_default_graph().get_operation_by_name(imported_op_name)
        unused_outputs = (set(range(len(imported_op.outputs))) - used_outputs)
        if (not unused_outputs):
            continue
        key = tf.compat.as_bytes(('loc:@' + imported_op_name))
        # An op whose outputs are only partially substituted cannot be
        # expressed as one colocation rewrite rule.
        if (imported_op.colocation_groups() != [key]):
            raise ValueError(("Internal error: tensors from op '%s' are partially remapped in import but op.colocation_groups=%s cannot be captured in a simple rewrite rule." % (imported_op_name, imported_op.colocation_groups())))
        # Unsubstituted outputs keep the op's own colocation key.
        colocation_attr_map[key].Set([key], {'reason': ("tensor '%s:%s' is not substituted by inputs" % (imported_op_name, ','.join((str(i) for i in sorted(unused_outputs)))))})
    return colocation_attr_map
Returns a dict mapping from pre-import to post-import colocation attrs. Args: input_map: as for fix_colocation_after_import. absolute_import_scope: as for fix_colocation_after_import. Returns: A dict that maps bytes `"loc:@" + absolute_import_scope + "/foo"` to _ConsistentValues set to the lists of bytes `["loc:@...", ...]` according to the rewriting scheme of fix_colocation_after_import. In case of an inconsistent rewriting, _ConsistentValue.has_error is true.
codesearchnet
def split_list_by_n(l, n):
    """Split *l* into consecutive chunks of at most *n* items.

    Args:
        l: Sequence to partition.
        n: Desired chunk size; values below 1 are clamped to 1.

    Returns:
        list: Slices of *l*, each of length *n* except possibly the last.
    """
    size = max(1, n)
    return [l[start:start + size] for start in range(0, len(l), size)]
Split a list into lists of size n. Args: l: List of stuff. n: Size of new lists. Returns: list: List of lists each of size n derived from l.
juraj-google-style
def sasl_plain(self, name, password, identity=None):
    """Authenticate to a server using SASL plain, or do so on connection.

    Args:
        name (str): Name to auth with.
        password (str): Password to auth with.
        identity (str): Identity to auth with; defaults to *name*.
    """
    self.sasl('plain', name, password,
              name if identity is None else identity)
Authenticate to a server using SASL plain, or does so on connection. Args: name (str): Name to auth with. password (str): Password to auth with. identity (str): Identity to auth with (defaults to name).
codesearchnet
def __init__(self, url=None, token=None, cert=None):
    """Initialize the Class properties.

    Args:
        url (string): The URL of the vault server.
        token (string): The vault token; falls back to ``$VAULT_TOKEN``.
        cert (string): The vault cert.
    """
    token = token or os.environ.get('VAULT_TOKEN')
    # NOTE(review): the original default URL literal was truncated to
    # 'http:' (unterminated string / syntax error). Restored with the
    # conventional local vault address — confirm against upstream source.
    url = url or 'http://localhost:8200'
    self._client = hvac.Client(url=url, token=token, cert=cert)
Initialize the Class properties. Args: url (string): The URL to the value server. token (string): The value token. cert (string): The value cert.
juraj-google-style
def validate(self, bigchain, current_transactions=None):
    """Validate transaction spend.

    Args:
        bigchain (BigchainDB): an instantiated bigchaindb.BigchainDB object.
        current_transactions (list): other transactions being considered in
            the same batch, checked for duplicates. Defaults to empty.

    Returns:
        The transaction (Transaction) if the transaction is valid else it
        raises an exception describing the reason why the transaction is
        invalid.

    Raises:
        ValidationError: If the transaction is invalid
    """
    # Fixed mutable-default-argument pitfall (`current_transactions=[]`).
    if current_transactions is None:
        current_transactions = []
    input_conditions = []
    if self.operation == Transaction.CREATE:
        duplicates = any(txn for txn in current_transactions if txn.id == self.id)
        if bigchain.is_committed(self.id) or duplicates:
            raise DuplicateTransaction('transaction `{}` already exists'
                                       .format(self.id))
        if not self.inputs_valid(input_conditions):
            raise InvalidSignature('Transaction signature is invalid.')
    elif self.operation == Transaction.TRANSFER:
        self.validate_transfer_inputs(bigchain, current_transactions)
    return self
Validate transaction spend Args: bigchain (BigchainDB): an instantiated bigchaindb.BigchainDB object. Returns: The transaction (Transaction) if the transaction is valid else it raises an exception describing the reason why the transaction is invalid. Raises: ValidationError: If the transaction is invalid
juraj-google-style
def find_files(directory, pattern, recursively=True):
    """Yield (base_directory, filename) pairs for files matching *pattern*.

    Args:
        directory: base directory to start the search.
        pattern: fnmatch pattern for filenames.
        recursively: descend into subdirectories when True.
    """
    for root, _dirs, filenames in os.walk(directory):
        for name in filenames:
            if fnmatch.fnmatch(name, pattern):
                yield root, name
        if not recursively:
            # os.walk yields the top directory first; stopping here skips
            # every subdirectory.
            break
Yield (base_directory, filename) pairs for files matching a pattern,
    optionally recursing into subdirectories.

    Returns:
        A generator of (base_directory, filename) tuples.

    Args:
        directory: base directory to start the search.
        pattern: fnmatch pattern for filenames.
        recursively: whether to descend into subdirectories.
codesearchnet
def count(self, entity, files=False):
    """Return the count of unique values or files for the named entity.

    Args:
        entity (str): The name of the entity.
        files (bool): If True, count filenames containing at least one value
            of the entity rather than the number of unique values.
    """
    matched = self._find_entity(entity)
    return matched.count(files)
Return the count of unique values or files for the named entity. Args: entity (str): The name of the entity. files (bool): If True, counts the number of filenames that contain at least one value of the entity, rather than the number of unique values of the entity.
juraj-google-style
def create(cls, resource_id, *, account_id, properties=None, tags=None, location=None, auto_add=True, auto_commit=False):
    """Creates a new Resource object with the properties and tags provided.

    Args:
        resource_id (str): Unique identifier for the resource object
        account_id (int): Account ID which owns the resource
        properties (dict): Dictionary of properties for the resource object.
        tags (dict): Key / value dictionary of tags. Values must be `str` types
        location (str): Location of the resource, if applicable
        auto_add (bool): Automatically add the new resource to the DB session. Default: True
        auto_commit (bool): Automatically commit the change to the database. Default: False
    """
    if cls.get(resource_id):
        raise ResourceException('Resource {} already exists'.format(resource_id))
    res = Resource()
    res.resource_id = resource_id
    res.account_id = account_id
    res.location = location
    res.resource_type_id = ResourceType.get(cls.resource_type).resource_type_id
    if properties:
        for (name, value) in properties.items():
            prop = ResourceProperty()
            prop.resource_id = res.resource_id
            prop.name = name
            # Fixed `type(value) == datetime` -> isinstance for proper
            # type checking (handles subclasses as well).
            prop.value = (value.isoformat() if isinstance(value, datetime) else value)
            res.properties.append(prop)
            db.session.add(prop)
    if tags:
        for (key, value) in tags.items():
            # Fixed `type(value) != str` -> isinstance.
            if not isinstance(value, str):
                raise ValueError('Invalid object type for tag value: {}'.format(key))
            tag = Tag()
            tag.resource_id = resource_id
            tag.key = key
            tag.value = value
            res.tags.append(tag)
            db.session.add(tag)
    if auto_add:
        db.session.add(res)
        if auto_commit:
            db.session.commit()
        return cls.get(res.resource_id)
    else:
        return cls(res)
Creates a new Resource object with the properties and tags provided Args: resource_id (str): Unique identifier for the resource object account_id (int): Account ID which owns the resource properties (dict): Dictionary of properties for the resource object. tags (dict): Key / value dictionary of tags. Values must be `str` types location (str): Location of the resource, if applicable auto_add (bool): Automatically add the new resource to the DB session. Default: True auto_commit (bool): Automatically commit the change to the database. Default: False
codesearchnet
def MergeMessage(
    self, source, destination,
    replace_message_field=False, replace_repeated_field=False):
  """Merges fields specified in FieldMask from source to destination.

  Args:
    source: Source message.
    destination: The destination message to be merged into.
    replace_message_field: Replace message field if True. Merge message
        field if False.
    replace_repeated_field: Replace repeated field if True. Append
        elements of repeated field if False.
  """
  mask_tree = _FieldMaskTree(self)
  mask_tree.MergeMessage(
      source, destination, replace_message_field, replace_repeated_field)
Merges fields specified in FieldMask from source to destination. Args: source: Source message. destination: The destination message to be merged into. replace_message_field: Replace message field if True. Merge message field if False. replace_repeated_field: Replace repeated field if True. Append elements of repeated field if False.
juraj-google-style
def reboot(self, **params):
    """Reboot an outlet by driving the PDU's telnet menu interface.

    Args:
        params (dict): must contain key ``outlet`` (int), the outlet number.
            Example: ``{'outlet': 1}``

    # NOTE(review): the numeric menu choices below presumably navigate the
    # vendor's control menu to select the outlet, switch it off, wait, and
    # switch it back on — confirm against the device manual.
    """
    outlet = params['outlet']
    # ESC to (re)enter the top-level menu.
    self.tn.write('\x1b\r\n')
    self.until_done()
    self.tn.write('1\r\n')
    self.until_done()
    self.tn.write('2\r\n')
    self.until_done()
    self.tn.write('1\r\n')
    self.until_done()
    # Select the requested outlet.
    self.tn.write('%d\r\n' % outlet)
    self.until_done()
    self.tn.write('1\r\n')
    self.until_done()
    self.tn.write('2\r\n')
    self.until('to cancel')
    # Confirm the power-off prompt.
    self.tn.write('YES\r\n')
    self.until('to continue')
    self.tn.write('\r\n')
    self.until_done()
    # Give the outlet time to fully power down before switching back on.
    time.sleep(5)
    self.tn.write('1\r\n')
    self.until('to cancel')
    self.tn.write('YES\r\n')
    self.until('to continue')
    self.tn.write('\r\n')
    self.until_done()
Reboot outlet Args: params (dict), must contain parameter "outlet" - outlet number Example: params = {'outlet': 1}
juraj-google-style
def assert_text_equal(self, selector, value, testid=None, **kwargs):
    """Assert that the element's text is equal to the provided value.

    Args:
        selector (str): the selector used to find the element
        value (str): the value compared with the element's text
        testid (str): optional test id recorded with the result

    Kwargs:
        wait_until_visible (bool): wait for visibility before asserting
        highlight (bool): highlight the element on success/failure

    Returns:
        bool: True if the assertion succeeded; False otherwise.
    """
    self.info_log(('Assert text equal selector(%s) testid(%s)' % (selector, testid)))
    highlight = kwargs.get('highlight', BROME_CONFIG['highlight']['highlight_on_assertion_success'])
    self.debug_log(('effective highlight: %s' % highlight))
    wait_until_visible = kwargs.get('wait_until_visible', BROME_CONFIG['proxy_driver']['wait_until_visible_before_assert_visible'])
    self.debug_log(('effective wait_until_visible: %s' % wait_until_visible))
    # raise_exception=False: a missing element yields None and a failed
    # assertion instead of an exception.
    element = self.find(selector, raise_exception=False, wait_until_visible=wait_until_visible)
    if element:
        if (element.text == value):
            if highlight:
                # NOTE(review): the success branch passes `highlight=` while
                # the failure branch passes `style=` to element.highlight();
                # one of the two kwarg names may be wrong — confirm against
                # the element API.
                element.highlight(highlight=BROME_CONFIG['highlight']['style_on_assertion_success'])
            if (testid is not None):
                self.create_test_result(testid, True)
            return True
        else:
            if highlight:
                element.highlight(style=BROME_CONFIG['highlight']['style_on_assertion_failure'])
            if (testid is not None):
                self.create_test_result(testid, False)
            return False
    else:
        # Element not found at all: record failure.
        if (testid is not None):
            self.create_test_result(testid, False)
        return False
Assert that the element's text is equal to the provided value Args: selector (str): the selector used to find the element value (str): the value that will be compare with the element.text value test_id (str): the test_id or a str Kwargs: wait_until_visible (bool) highlight (bool) Returns: bool: True is the assertion succeed; False otherwise.
codesearchnet
def execute_scheduler(self):
    """Main entry point for the scheduler.

    Starts two recurring jobs — ``schedule_jobs`` (SQS message scheduling,
    every 15 minutes) and ``process_status_queue`` (job status tracking,
    every 30 seconds, single instance) — then blocks in the scheduler loop
    until interrupted.

    Returns:
        `None`
    """
    try:
        # Stagger the start dates slightly so both jobs don't fire at once.
        self.scheduler.add_job(self.schedule_jobs, trigger='interval', name='schedule_jobs', minutes=15, start_date=(datetime.now() + timedelta(seconds=1)))
        # max_instances=1 prevents overlapping status-queue processing runs.
        self.scheduler.add_job(self.process_status_queue, trigger='interval', name='process_status_queue', seconds=30, start_date=(datetime.now() + timedelta(seconds=5)), max_instances=1)
        self.scheduler.start()
    except KeyboardInterrupt:
        # Graceful shutdown on Ctrl-C.
        self.scheduler.shutdown()
Main entry point for the scheduler. This method will start two scheduled jobs, `schedule_jobs` which takes care of scheduling the actual SQS messaging and `process_status_queue` which will track the current status of the jobs as workers are executing them Returns: `None`
codesearchnet
def bestfit_func(self, bestfit_x):
    """Evaluate the fitted power law at *bestfit_x*.

    Args:
        bestfit_x: scalar or array_like x value.

    Returns:
        scalar or array_like best-fit y value.

    Raises:
        KeyError: if do_bestfit has not been run yet.
    """
    if not self.done_bestfit:
        raise KeyError("Do do_bestfit first")
    # fit_args = [exponent, coefficient]: y = c * x**e
    return self.fit_args[1] * (bestfit_x ** self.fit_args[0])
Evaluate the best-fit function at the given x value(s).

    Args:
        bestfit_x: scalar or array_like x value.

    Returns:
        scalar or array_like best-fit y value.
juraj-google-style
def fmtVersion(*vsnparts):
    """Join version parts with a ``.`` separator, lowercasing each part.

    Args:
        *vsnparts: one or more version components.

    Returns:
        str: the dotted, lowercased version string.

    Raises:
        s_exc.BadTypeValu: if no parts are provided.
    """
    if len(vsnparts) < 1:
        raise s_exc.BadTypeValu(valu=repr(vsnparts), name='fmtVersion', mesg='Not enough version parts to form a version string with.',)
    return '.'.join(str(part).lower() for part in vsnparts)
Join version parts into a dotted, lowercased version string.

    Args:
        *vsnparts: one or more version components; each is converted to
            str and lowercased.

    Returns:
        str: the parts joined with a ``.`` separator.

    Raises:
        s_exc.BadTypeValu: if no parts are given.
juraj-google-style
def _get_variable_nodes_from_graph_def(graph_def):
    """Get the list of Variable nodes from `graph_def`.

    Args:
        graph_def: An instance of `GraphDef` that has already been optimized
            by Grappler (function inlining must have happened).

    Returns:
        A dict mapping variable name to `(node_def, modified)` where
        `modified` is True when some consumer may write the variable.
    """
    variables = [n for n in graph_def.node if n.op == 'VarHandleOp']
    variable_name_map = dict(((n.name, n) for n in variables))
    # Map from node name to the nodes consuming its outputs (control-edge
    # inputs, prefixed with '^', are ignored).
    child_map = collections.defaultdict(lambda: [])
    for n in graph_def.node:
        for inp in n.input:
            if not inp.startswith('^'):
                child_map[inp].append(n)
    variables = {}
    for v_name, v_node in variable_name_map.items():
        # BFS through pass-through ops; a consumer outside the read-only set
        # marks the variable as modified.
        queue = list(child_map[v_name])
        processed = set([])
        while queue:
            n_current = queue.pop()
            if n_current.name in processed:
                continue
            processed.add(n_current.name)
            if n_current.op in _PASS_THROUGH_VARIABLE_OPS:
                children = child_map.get(n_current.name, [])
                queue.extend(children)
            elif n_current.op not in _READ_ONLY_VARIABLE_OPS:
                variables[v_name] = (v_node, True)
                # Stop traversal: one writing consumer is enough.
                queue = []
        if v_name not in variables:
            variables[v_name] = (v_node, False)
    return variables
Get the list of Variable nodes from `graph_def`. Args: graph_def: An instance of `GraphDef`. This GraphDef *must* have already been optimized by Grappler. In particular, function inlining must have already happened. Returns: A dict mapping string names of variables to tuples `(node_def, modified)`, where `node_def` is the `NodeDef` corresponding to variable, and `modified` is a python bool describing whether the variable is modified during runtime.
github-repos
def __init__(self, c_q: int, c_k: int, c_v: int, c_hidden: int, no_heads: int, gating: bool=True):
    """Multi-head attention module with optional output gating.

    Args:
        c_q: Input dimension of query data
        c_k: Input dimension of key data
        c_v: Input dimension of value data
        c_hidden: Per-head hidden dimension
        no_heads: Number of attention heads
        gating: Whether the output should be gated using query data
    """
    super().__init__()
    self.c_q = c_q
    self.c_k = c_k
    self.c_v = c_v
    self.c_hidden = c_hidden
    self.no_heads = no_heads
    self.gating = gating
    # Q/K/V projections are bias-free with glorot init; all heads share one
    # linear layer whose output is later split per head.
    self.linear_q = EsmFoldLinear(self.c_q, self.c_hidden * self.no_heads, bias=False, init='glorot')
    self.linear_k = EsmFoldLinear(self.c_k, self.c_hidden * self.no_heads, bias=False, init='glorot')
    self.linear_v = EsmFoldLinear(self.c_v, self.c_hidden * self.no_heads, bias=False, init='glorot')
    # Output projection uses 'final' init (typically zeros at start).
    self.linear_o = EsmFoldLinear(self.c_hidden * self.no_heads, self.c_q, init='final')
    self.linear_g = None
    if self.gating:
        # Gate computed from the query input, squashed by sigmoid.
        self.linear_g = EsmFoldLinear(self.c_q, self.c_hidden * self.no_heads, init='gating')
    self.sigmoid = nn.Sigmoid()
Args: c_q: Input dimension of query data c_k: Input dimension of key data c_v: Input dimension of value data c_hidden: Per-head hidden dimension no_heads: Number of attention heads gating: Whether the output should be gated using query data
github-repos
def _scrub_method_name(self, method_name): if method_name not in self._scrubbed_method_names: self._scrubbed_method_names[method_name] = ( scrub_method_name(method_name)) return self._scrubbed_method_names[method_name]
Scrubs a method name, returning result from local cache if available. This method wraps fitparse.utils.scrub_method_name and memoizes results, as scrubbing a method name is expensive. Args: method_name: Method name to scrub. Returns: Scrubbed method name.
juraj-google-style
def create(self, msgtype, *args, **kwargs):
    """Create a new Message instance for the given type.

    Args:
        msgtype (str): name of a registered message type for this
            protocol version.

    Raises:
        ProtocolError: if *msgtype* is not known for this version.
    """
    handler = self._messages.get(msgtype)
    if handler is None:
        raise ProtocolError("Unknown message type %r for protocol version %s" % (msgtype, self._version))
    return handler.create(*args, **kwargs)
Create a new Message instance for the given type.

    Args:
        msgtype (str): name of a registered message type for this
            protocol version.

    Raises:
        ProtocolError: if the message type is unknown.
juraj-google-style
def compute_all_metrics_statistics(all_results):
    """Computes statistics of metrics across multiple decodings.

    Args:
        all_results: dict of 3-D numpy arrays, each with
            shape=(num_decodes, num_samples, num_frames).

    Returns:
        statistics: dict of 1-D numpy arrays, shape=(num_frames,) — the
            per-frame MEAN/STD/MIN/MAX across decodes, averaged over samples.
        decode_inds: dict of 1-D numpy arrays, shape=(num_samples,) — for
            each sample, the index of the decode achieving the min/max.
    """
    statistics = {}
    decode_inds = {}
    all_metrics = all_results.keys()
    for key in all_metrics:
        values = all_results[key]
        # Reduce over the decode axis first.
        statistics[(key + '_MEAN')] = np.mean(values, axis=0)
        statistics[(key + '_STD')] = np.std(values, axis=0)
        (min_stats, min_decode_ind) = reduce_to_best_decode(values, np.argmin)
        statistics[(key + '_MIN')] = min_stats
        decode_inds[(key + '_MIN_DECODE')] = min_decode_ind
        (max_stats, max_decode_ind) = reduce_to_best_decode(values, np.argmax)
        statistics[(key + '_MAX')] = max_stats
        decode_inds[(key + '_MAX_DECODE')] = max_decode_ind
    # Then average each per-decode statistic over the sample axis.
    for key in statistics:
        statistics[key] = np.mean(statistics[key], axis=0)
    return (statistics, decode_inds)
Computes statistics of metrics across multiple decodings. Args: all_results: dict of 3-D numpy arrays. Each array has shape=(num_decodes, num_samples, num_frames). Returns: statistics: dict of 1-D numpy arrays, shape=(num_frames). First the statistic (max/mean/std) is computed across the decodes, then the mean is taken across num_samples. decode_inds: dict of 1-D numpy arrays, shape=(num_samples,) Each element represents the index of the decode corresponding to the best statistic.
codesearchnet
def fit(self, X, *args, **kwargs):
    """Fit a scipy.stats model to an array of values.

    Args:
        X (`np.ndarray` or `pd.DataFrame`): 1-d datapoints to estimate from.

    Returns:
        None
    """
    # Degenerate case: all values identical — use constant surrogates
    # instead of a scipy distribution.
    self.constant_value = self._get_constant_value(X)
    if self.constant_value is None:
        if self.unfittable_model:
            # Some scipy models take parameters, not data, at construction.
            self.model = getattr(scipy.stats, self.model_class)(*args, **kwargs)
        else:
            self.model = getattr(scipy.stats, self.model_class)(X, *args, **kwargs)
        # Bind each declared method name either to the named attribute of
        # the fitted model (str) or to an identity fallback (None).
        for name in self.METHOD_NAMES:
            attribute = getattr(self.__class__, name)
            if isinstance(attribute, str):
                setattr(self, name, getattr(self.model, attribute))
            elif attribute is None:
                setattr(self, name, missing_method_scipy_wrapper(lambda x: x))
    else:
        self._replace_constant_methods()
    self.fitted = True
Fit scipy model to an array of values. Args: X(`np.ndarray` or `pd.DataFrame`): Datapoints to be estimated from. Must be 1-d Returns: None
juraj-google-style
def __getIp6Address(self, addressType):
    """Get a specific type of IPv6 address configured on the thread device.

    Args:
        addressType: one of 'link local', 'global', 'rloc', 'mesh EID'.
            link local: link local unicast IPv6 address (one-hop scope)
            global: global unicast IPv6 address(es)
            rloc: mesh local unicast IPv6 address used for routing
            mesh EID: mesh Endpoint Identifier

    Returns:
        An IPv6 address string, or a list of strings for 'global'.
    """
    addrType = ['link local', 'global', 'rloc', 'mesh EID']
    addrs = []
    globalAddr = []
    linkLocal64Addr = ''
    rlocAddr = ''
    meshEIDAddr = ''
    addrs = self.__sendCommand('ipaddr')
    for ip6Addr in addrs:
        # CLI output is terminated by a literal 'Done' line.
        if (ip6Addr == 'Done'):
            break
        ip6AddrPrefix = ip6Addr.split(':')[0]
        if (ip6AddrPrefix == 'fe80'):
            # Link-local; the 5th group being non-zero distinguishes the
            # EUI-64 based address from other fe80 addresses.
            if (ip6Addr.split(':')[4] != '0'):
                linkLocal64Addr = ip6Addr
        elif ip6Addr.startswith(self.meshLocalPrefix):
            # Mesh-local: a zero 5th group indicates the RLOC; otherwise ML-EID.
            if (ip6Addr.split(':')[4] == '0'):
                rlocAddr = ip6Addr
            else:
                meshEIDAddr = ip6Addr
        elif (ip6Addr != None):
            globalAddr.append(ip6Addr)
        else:
            pass
    if (addressType == addrType[0]):
        return linkLocal64Addr
    elif (addressType == addrType[1]):
        return globalAddr
    elif (addressType == addrType[2]):
        return rlocAddr
    elif (addressType == addrType[3]):
        return meshEIDAddr
    else:
        # Unknown type: implicitly returns None.
        pass
get specific type of IPv6 address configured on thread device Args: addressType: the specific type of IPv6 address link local: link local unicast IPv6 address that's within one-hop scope global: global unicast IPv6 address rloc: mesh local unicast IPv6 address for routing in thread network mesh EID: mesh Endpoint Identifier Returns: IPv6 address string
codesearchnet
def _duplicate_body_captures_in_cond(cond_graph, body_graph_captures):
    """Creates placeholders for body captures in cond_graph.

    This is needed to match signatures of cond and body graphs.

    Args:
        cond_graph: cond branch graph
        body_graph_captures: Tensors which were captured when building the
            `body_graph`.
    """
    types = [t.dtype.as_datatype_enum for t in body_graph_captures]
    # Create one placeholder per body capture directly via the C API so the
    # placeholders live in cond_graph.
    with cond_graph._c_graph.get() as c_graph:
        placeholders = c_api.TF_CreatePlaceholders(c_graph, types, compat.as_str(_build_cond_placeholders_name_prefix(cond_graph)))
    placeholder_ops = [ops.Operation._from_c_op(ph.oper, cond_graph) for ph in placeholders]
    tensors = []
    for op in placeholder_ops:
        tensors.append(op.outputs[0])
    # Register each (external capture, internal placeholder) pair keyed by
    # the external tensor's id so cond_graph resolves them like captures.
    tuples = zip(body_graph_captures, tensors)
    keys = [id(t) for t in body_graph_captures]
    for k, v in zip(keys, tuples):
        cond_graph._function_captures.add_or_replace(key=k, external=v[0], internal=v[1], is_by_ref=False)
    cond_graph.inputs.extend(tensors)
Creates placeholders for body captures in cond_graph. This is needed to match signatures of cond and body graphs. Args: cond_graph: cond branch graph body_graph_captures: Tensors which were captured when building the `body_graph`.
github-repos
def load_ini(self, ini_file):
    """Load settings from an ini file into upper-cased attributes on *self*.

    Args:
        ini_file (str): The file from which the settings should be loaded.
            When falsy, ``<cwd>/settings.ini`` is tried instead; a missing
            explicit file is fatal, a missing default file is ignored.
    """
    if ini_file and not os.path.exists(ini_file):
        self.log.critical(f"Settings file specified but not found. {ini_file}")
        sys.exit(1)
    if not ini_file:
        ini_file = f"{self.cwd}/settings.ini"
    if not os.path.exists(ini_file):
        return
    config = configparser.RawConfigParser(allow_no_value=True)
    config.read(ini_file)
    for key, value in self.spec.items():
        option = key.lower()
        kind = value['type']
        entry = None
        if kind == str:
            entry = config.get("settings", option=option, fallback=None)
        elif kind == bool:
            entry = config.getboolean("settings", option=option, fallback=None)
        elif kind == int:
            entry = config.getint("settings", option=option, fallback=None)
        elif kind == float:
            entry = config.getfloat("settings", option=option, fallback=None)
        elif kind in (list, dict):
            raw = config.get("settings", option=option, fallback=None)
            if raw:
                try:
                    entry = json.loads(raw)
                except json.decoder.JSONDecodeError as _err:
                    self.log.critical(f"Error parsing json from ini file. {raw}")
                    sys.exit(1)
        if entry is not None:
            setattr(self, key.upper(), entry)
Load the contents from the ini file Args: ini_file (str): The file from which the settings should be loaded
juraj-google-style
def flux_minimization(model, fixed, solver, weights={}):
    """Minimize flux of all reactions while keeping certain fluxes fixed.

    The fixed reactions are given in a dictionary as reaction id to value
    mapping. The weighted L1-norm of the fluxes is minimized.

    Args:
        model: MetabolicModel to solve.
        fixed: dict of additional lower bounds on reaction fluxes.
        solver: LP solver instance to use.
        weights: dict of weights on the L1-norm terms.
            NOTE(review): this parameter is never read in the body, and its
            default is a shared mutable dict — confirm whether it should be
            passed to minimize_l1().

    Returns:
        An iterator of reaction ID and reaction flux pairs.
    """
    fba = FluxBalanceProblem(model, solver)
    # The 'fixed' values are applied as extra lower bounds, not equalities.
    for (reaction_id, value) in iteritems(fixed):
        flux = fba.get_flux_var(reaction_id)
        fba.prob.add_linear_constraints((flux >= value))
    fba.minimize_l1()
    # Lazily yield fluxes for every reaction in the model.
    return ((reaction_id, fba.get_flux(reaction_id)) for reaction_id in model.reactions)
Minimize flux of all reactions while keeping certain fluxes fixed. The fixed reactions are given in a dictionary as reaction id to value mapping. The weighted L1-norm of the fluxes is minimized. Args: model: MetabolicModel to solve. fixed: dict of additional lower bounds on reaction fluxes. solver: LP solver instance to use. weights: dict of weights on the L1-norm terms. Returns: An iterator of reaction ID and reaction flux pairs.
codesearchnet
class PatchTSMixerEncoder(PatchTSMixerPreTrainedModel):
    """Encoder for PatchTSMixer: embeds patched time series and mixes them.

    Args:
        config (`PatchTSMixerConfig`): Configuration.
    """

    def __init__(self, config: PatchTSMixerConfig):
        super().__init__(config)
        self.use_return_dict = config.use_return_dict
        # Project each raw patch of length patch_length into d_model.
        self.patcher = nn.Linear(config.patch_length, config.d_model)
        if config.use_positional_encoding:
            self.positional_encoder = PatchTSMixerPositionalEncoding(config=config)
        else:
            self.positional_encoder = None
        self.mlp_mixer_encoder = PatchTSMixerBlock(config=config)
        if config.post_init:
            self.post_init()

    @auto_docstring
    def forward(self, past_values: torch.Tensor, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=None) -> Union[Tuple, PatchTSMixerEncoderOutput]:
        """Embed patches, optionally add positional encoding, run the mixer.

        Args:
            past_values: patched input time series.
            output_hidden_states: whether to also return intermediate states.
            return_dict: return a PatchTSMixerEncoderOutput instead of tuple.
        """
        return_dict = return_dict if return_dict is not None else self.use_return_dict
        patches = self.patcher(past_values)
        if self.positional_encoder is not None:
            patches = self.positional_encoder(patches)
        last_hidden_state, hidden_states = self.mlp_mixer_encoder(patches, output_hidden_states=output_hidden_states)
        if not return_dict:
            return tuple((v for v in [last_hidden_state, hidden_states]))
        return PatchTSMixerEncoderOutput(last_hidden_state=last_hidden_state, hidden_states=hidden_states)
Encoder for PatchTSMixer which inputs patched time-series and outputs patched embeddings. Args: config (`PatchTSMixerConfig`): Configuration.
github-repos
def write_eval_records(bt_table, game_data, last_game):
    """Write all eval_records to the eval bigtable.

    In addition to writing new rows, `table_state` is updated in row
    `table_state`, column `metadata:eval_game_counter`.

    Args:
        bt_table: bigtable table to add rows to.
        game_data: metadata pairs (column name, value) for each eval record.
        last_game: last_game in metadata:table_state
    """
    eval_num = last_game
    GAMES_PER_COMMIT = 2000
    for games in grouper(tqdm(game_data), GAMES_PER_COMMIT):
        # Sanity-check that we are appending exactly after the last row.
        assert bt_table.read_row(EVAL_PREFIX.format(eval_num)), "Prev row doesn't exists"
        assert bt_table.read_row(EVAL_PREFIX.format(eval_num+1)) is None, "Row already exists"
        rows = []
        for i, metadata in enumerate(games):
            eval_num += 1
            row_name = EVAL_PREFIX.format(eval_num)
            row = bt_table.row(row_name)
            for column, value in metadata:
                row.set_cell(METADATA, column, value)
            rows.append(row)
            # Show the first and last few rows of each batch for inspection.
            if i < 5 or i + 5 > len(games):
                print("\t", i, row_name, metadata[6][1])
        # Ask for confirmation on the first full batch only.
        if eval_num == last_game + len(games):
            test = input("Commit ('y'/'yes' required): ")
            if test.lower() not in ('y', 'yes'):
                break
        game_num_update = bt_table.row(TABLE_STATE)
        game_num_update.set_cell(METADATA, EVAL_GAME_COUNTER, eval_num)
        print(TABLE_STATE, eval_num)
        response = bt_table.mutate_rows(rows)
        any_bad = False
        for i, status in enumerate(response):
            # Fixed `status.code is not 0`: identity comparison with an int
            # literal is unreliable; use value inequality.
            if status.code != 0:
                print("Row number {} failed to write {}".format(i, status))
                any_bad = True
        if any_bad:
            break
        # Only advance the committed counter once all rows wrote cleanly.
        game_num_update.commit()
Write all eval_records to eval_table In addition to writing new rows table_state must be updated in row `table_state` columns `metadata:eval_game_counter` Args: bt_table: bigtable table to add rows to. game_data: metadata pairs (column name, value) for each eval record. last_game: last_game in metadata:table_state
juraj-google-style
def stringize(self, rnf_profile=RnfProfile()):
    """Create the RNF read-tuple name for this read.

    Args:
        rnf_profile (RnfProfile): width profile (read tuple id, genome id,
            chromosome id, coordinate widths) used when zero-padding fields.

    Returns:
        str: the assembled read tuple name.
    """
    # Sort segments by a single composite integer key encoding, in priority
    # order: genome id, chromosome id, left coordinate (falling back to
    # right-1 when left==0), right coordinate, and direction ('F' last bit).
    # NOTE(review): the 10**k factors presumably bound each field's digit
    # width — confirm against the RNF specification.
    sorted_segments = sorted(self.segments, key=(lambda x: (((((x.genome_id * (10 ** 23)) + (x.chr_id * (10 ** 21))) + ((x.left + ((int((x.left == 0)) * x.right) - 1)) * (10 ** 11))) + (x.right * (10 ** 1))) + int((x.direction == 'F')))))
    segments_strings = [x.stringize(rnf_profile) for x in sorted_segments]
    # Read tuple id is rendered in zero-padded hexadecimal.
    read_tuple_name = '__'.join([self.prefix, format(self.read_tuple_id, 'x').zfill(rnf_profile.read_tuple_id_width), ','.join(segments_strings), self.suffix])
    return read_tuple_name
Create RNF representation of this read. Args: read_tuple_id_width (int): Maximal expected string length of read tuple ID. genome_id_width (int): Maximal expected string length of genome ID. chr_id_width (int): Maximal expected string length of chromosome ID. coor_width (int): Maximal expected string length of a coordinate.
codesearchnet
def register_auth_system(self, auth_system):
    """Register a given authentication system with the framework.

    Returns `True` if the `auth_system` is registered as the active auth
    system, else `False`.

    Args:
        auth_system (:obj:`BaseAuthPlugin`): A subclass of the
            `BaseAuthPlugin` class to register

    Returns:
        `bool`
    """
    auth_system_settings = dbconfig.get('auth_system')
    # Make sure the plugin is listed as available in the stored config.
    if (auth_system.name not in auth_system_settings['available']):
        auth_system_settings['available'].append(auth_system.name)
        dbconfig.set('default', 'auth_system', DBCChoice(auth_system_settings))
    # Only the configured 'enabled' system is bootstrapped and activated.
    if (auth_system.name == auth_system_settings['enabled'][0]):
        self.active_auth_system = auth_system
        auth_system().bootstrap()
        logger.debug('Registered {} as the active auth system'.format(auth_system.name))
        return True
    else:
        logger.debug('Not trying to load the {} auth system as it is disabled by config'.format(auth_system.name))
        return False
Register a given authentication system with the framework. Returns `True` if the `auth_system` is registered as the active auth system, else `False` Args: auth_system (:obj:`BaseAuthPlugin`): A subclass of the `BaseAuthPlugin` class to register Returns: `bool`
codesearchnet
def get_atten(self, idx=0):
    """Return the current attenuation of one attenuator in the instrument.

    Args:
        idx: zero-based index identifying the attenuator path.

    Raises:
        attenuator.Error: the telnet connection to the instrument is closed.
        IndexError: *idx* is outside [0, path_count).

    Returns:
        float: the current attenuation value.
    """
    if not self.is_open:
        raise attenuator.Error(
            "Connection to attenuator at %s is not open!" % self._telnet_client.host)
    if idx + 1 > self.path_count or idx < 0:
        raise IndexError("Attenuator index out of range!", self.path_count, idx)
    # Channels on the instrument are 1-based.
    response = self._telnet_client.cmd("CHAN:%s:ATT?" % (idx + 1))
    return float(response)
This function returns the current attenuation from an attenuator at a given index in the instrument. Args: idx: This zero-based index is the identifier for a particular attenuator in an instrument. Raises: Error: The underlying telnet connection to the instrument is not open. Returns: A float that is the current attenuation value.
juraj-google-style
def get_lock_config(self, device_label):
    """Get lock configuration.

    Args:
        device_label (str): device label of lock

    Raises:
        RequestError: if the HTTP request fails.

    Returns:
        dict: the decoded lock configuration.
    """
    try:
        response = requests.get(
            urls.lockconfig(self._giid, device_label),
            headers={
                'Accept': 'application/json, text/javascript, */*; q=0.01',
                'Cookie': 'vid={}'.format(self._vid)})
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(response)
    return json.loads(response.text)
Get lock configuration Args: device_label (str): device label of lock
codesearchnet
def as_videos(content: ProcessorContentTypes, *, ignore_unsupported_types: bool=False) -> list[ProcessorPart]:
    """Returns the video parts from the content.

    Args:
        content: Input content.
        ignore_unsupported_types: By default a ValueError is raised when
            the content contains non-video parts; set True to skip them.

    Returns:
        A list of video parts.
    """
    def _is_video_mime(mime):
        return mime.startswith('video/')
    return _as_format_helper(content, _is_video_mime, ignore_unsupported_types)
Returns the video parts from the content. Args: content: Input content. ignore_unsupported_types: By default if content contains non-video parts a ValueError would be raised. This argument allows ingoring such parts. Returns: A list of video parts.
github-repos
def _DownloadUrl(self, url, dest_dir):
    """Download a script from a given URL.

    Args:
        url: string, the URL to download.
        dest_dir: string, directory for storing metadata scripts.

    Returns:
        string, the path to the downloaded file, or None on failure.
    """
    # Reserve a unique filename in dest_dir; the file survives the close.
    with tempfile.NamedTemporaryFile(dir=dest_dir, delete=False) as tmp:
        dest = tmp.name
    self.logger.info('Downloading url from %s to %s.', url, dest)
    try:
        urlretrieve.urlretrieve(url, dest)
    except (httpclient.HTTPException, socket.error, urlerror.URLError) as e:
        self.logger.warning('Could not download %s. %s.', url, str(e))
        return None
    except Exception as e:
        self.logger.warning('Exception downloading %s. %s.', url, str(e))
        return None
    return dest
Download a script from a given URL. Args: url: string, the URL to download. dest_dir: string, the path to a directory for storing metadata scripts. Returns: string, the path to the file storing the metadata script.
juraj-google-style
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
    """Create a token-type-id mask for a (pair of) sequence(s).

    LUKE does not make use of token type ids, so a list of zeros matching
    the full encoded length (including special tokens) is returned.

    Args:
        token_ids_0 (`List[int]`): List of IDs.
        token_ids_1 (`List[int]`, *optional*): Optional second list of IDs
            for sequence pairs.

    Returns:
        `List[int]`: List of zeros.
    """
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        sequence = cls + token_ids_0 + sep
    else:
        # Pair layout: [CLS] A [SEP][SEP] B [SEP]
        sequence = cls + token_ids_0 + sep + sep + token_ids_1 + sep
    return [0] * len(sequence)
Create a mask from the two sequences passed to be used in a sequence-pair classification task. LUKE does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros.
github-repos
def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
    """Extract flattened patches from an image.

    The image is resized so that at most *max_patches* whole patches fit,
    split into patches, and each patch is prefixed with its (row, col)
    position (1-based) before padding up to *max_patches* rows.

    Args:
        image (`np.ndarray`): Image to extract flattened patches from.
        max_patches (`int`): Maximum number of patches to extract.
        patch_size (`dict`): Dictionary with the patch height and width.
        input_data_format: channel dimension layout of the input image.

    Returns:
        result (`np.ndarray`): `max_patches` flattened patches, each row
        being [row_id, col_id, patch pixels...].
    """
    requires_backends(self.extract_flattened_patches, 'torch')
    image = to_channel_dimension_format(image, ChannelDimension.FIRST, input_data_format)
    image = torch.from_numpy(image)
    patch_height, patch_width = (patch_size['height'], patch_size['width'])
    image_height, image_width = get_image_size(image, ChannelDimension.FIRST)
    # Scale factor chosen so rows*cols of patches approaches max_patches
    # while roughly preserving the aspect ratio.
    scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
    num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
    num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
    resized_height = max(num_feasible_rows * patch_height, 1)
    resized_width = max(num_feasible_cols * patch_width, 1)
    image = torch.nn.functional.interpolate(image.unsqueeze(0), size=(resized_height, resized_width), mode='bilinear', align_corners=False, antialias=True).squeeze(0)
    patches = torch_extract_patches(image, patch_height, patch_width)
    patches_shape = patches.shape
    rows = patches_shape[1]
    columns = patches_shape[2]
    depth = patches_shape[3]
    patches = patches.reshape([rows * columns, depth])
    # Build per-patch (row, col) coordinates, 1-based, as float features.
    row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
    col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])
    row_ids += 1
    col_ids += 1
    row_ids = row_ids.to(torch.float32)
    col_ids = col_ids.to(torch.float32)
    result = torch.cat([row_ids, col_ids, patches], -1)
    # Zero-pad to a fixed max_patches rows so outputs batch uniformly.
    result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - rows * columns]).float()
    result = to_numpy_array(result)
    return result
Extract flattened patches from an image. Args: image (`np.ndarray`): Image to extract flattened patches from. max_patches (`int`): Maximum number of patches to extract. patch_size (`dict`): Dictionary containing the patch height and width. Returns: result (`np.ndarray`): A sequence of `max_patches` flattened patches.
github-repos
def _read_from_hdx(self, object_type, value, fieldname='id', action=None, **kwargs):
    """Makes a read call to HDX passing in given parameter.

    Args:
        object_type (str): Description of HDX object type (for messages)
        value (str): Value of HDX field
        fieldname (str): HDX field name. Defaults to id.
        action (Optional[str]): Replacement CKAN action url to use. Defaults to None.
        **kwargs: Other fields to pass to CKAN.

    Returns:
        Tuple[bool, Union[Dict, str]]: (True/False, HDX object metadata/Error)
    """
    if not fieldname:
        raise HDXError('Empty %s field name!' % object_type)
    if action is None:
        action = self.actions()['show']
    data = {fieldname: value}
    data.update(kwargs)
    try:
        result = self.configuration.call_remoteckan(action, data)
        return True, result
    except NotFound:
        # A missing object is reported as a (False, message) result, not
        # an exception.
        return False, '%s=%s: not found!' % (fieldname, value)
    except Exception as e:
        # Any other failure is re-raised as HDXError with cause chaining.
        raisefrom(HDXError, 'Failed when trying to read: %s=%s! (POST)' % (fieldname, value), e)
Makes a read call to HDX passing in given parameter. Args: object_type (str): Description of HDX object type (for messages) value (str): Value of HDX field fieldname (str): HDX field name. Defaults to id. action (Optional[str]): Replacement CKAN action url to use. Defaults to None. **kwargs: Other fields to pass to CKAN. Returns: Tuple[bool, Union[Dict, str]]: (True/False, HDX object metadata/Error)
juraj-google-style
def _update_from_file(self, filename):
    """Merge values from a YAML file into the existing configuration.

    Loads the file and replaces matching values in the current
    configuration dictionary.

    Args:
        filename (str): Path of the configuration file.

    Raises:
        ConfigLoadError: if the file does not exist or is a directory.
    """
    if not os.path.exists(filename):
        raise ConfigLoadError('The config file {} does not exist'.format(filename))
    try:
        with open(filename, 'r') as config_file:
            yaml_dict = yaml.safe_load(config_file.read())
    except IsADirectoryError:
        raise ConfigLoadError('The specified configuration file is a directory not a file')
    if yaml_dict is not None:
        self._update_dict(self._config, yaml_dict)
Helper method to update an existing configuration with the values from a file. Loads a configuration file and replaces all values in the existing configuration dictionary with the values from the file. Args: filename (str): The path and name to the configuration file.
codesearchnet
def MapTuple(fn, *args, **kwargs):
    """Like ``Map`` but expects tuple inputs and flattens them into multiple
    arguments of *fn*, e.g. ``beam.MapTuple(lambda k, v: (v, k))``.

    Args:
        fn (callable): a callable object.
        *args: positional side-input arguments passed to the callable.
        **kwargs: keyword side-input arguments passed to the callable.

    Returns:
        A ``ParDo`` transform applying *fn* to each unpacked input tuple.

    Raises:
        TypeError: If **fn** is not callable, or side inputs lack defaults.
    """
    if not callable(fn):
        raise TypeError('MapTuple can be used only with callable objects. Received %r instead.' % fn)
    label = 'MapTuple(%s)' % ptransform.label_from_callable(fn)
    arg_names, defaults = get_function_args_defaults(fn)
    num_defaults = len(defaults)
    # Side inputs are matched against fn's defaulted trailing parameters.
    if num_defaults < len(args) + len(kwargs):
        raise TypeError('Side inputs must have defaults for MapTuple.')
    # Wrap fn so each element tuple is unpacked into positional arguments and
    # the single result is wrapped in a list (FlatMap-style output).
    if defaults or args or kwargs:
        wrapper = lambda x, *args, **kwargs: [fn(*tuple(x) + args, **kwargs)]
    else:
        wrapper = lambda x: [fn(*x)]
    type_hints = get_type_hints(fn).with_defaults(typehints.decorators.IOTypeHints.from_callable(fn))
    if type_hints.input_types is not None:
        # NOTE(review): intentionally empty — input types are not propagated
        # onto the wrapper here; confirm against upstream before changing.
        pass
    output_hint = type_hints.simple_output_type(label)
    if output_hint:
        wrapper = with_output_types(typehints.Iterable[_strip_output_annotations(output_hint)])(wrapper)
    # Present the wrapper's argspec as (tuple_element, <side inputs...>) so
    # side-input binding by name still works.
    modified_arg_names = ['tuple_element'] + arg_names[-num_defaults:]
    modified_argspec = (modified_arg_names, defaults)
    pardo = ParDo(CallableWrapperDoFn(wrapper, fullargspec=modified_argspec), *args, **kwargs)
    pardo.label = label
    return pardo
:func:`MapTuple` is like :func:`Map` but expects tuple inputs and flattens them into multiple input arguments. In other words "SwapKV" >> beam.Map(lambda kv: (kv[1], kv[0])) is equivalent to "SwapKV" >> beam.MapTuple(lambda k, v: (v, k)) This can be useful when processing a PCollection of tuples (e.g. key-value pairs). Args: fn (callable): a callable object. *args: positional arguments passed to the transform callable. **kwargs: keyword arguments passed to the transform callable. Returns: ~apache_beam.pvalue.PCollection: A :class:`~apache_beam.pvalue.PCollection` containing the :func:`MapTuple` outputs. Raises: TypeError: If the **fn** passed as argument is not a callable. Typical error is to pass a :class:`DoFn` instance which is supported only for :class:`ParDo`.
github-repos
def subgraph(self, nodeids):
    """Return an Xmrs containing only the EPs for *nodeids*.

    Variables, handle constraints and individual constraints that connect
    the selected nodes are carried over so the subgraph stays connected
    the same way the nodes were connected in the original Xmrs.

    Args:
        nodeids: nodeids of the nodes/EPs to include in the subgraph.

    Returns:
        An :class:`Xmrs` object.
    """
    _eps, _vars = self._eps, self._vars
    _hcons, _icons = self._hcons, self._icons
    top = index = xarg = None
    eps = [_eps[nid] for nid in nodeids]
    # Labels of the selected EPs; used to decide which handles survive.
    lbls = set(ep[2] for ep in eps)
    hcons = []
    icons = []
    subvars = {}
    if self.top:
        top = self.top
        tophc = _hcons.get(top, None)
        # Keep top only if it (or its qeq target) still points into the subgraph.
        if tophc is not None and tophc[2] in lbls:
            subvars[top] = {}
        elif top not in lbls:
            top = None
    if self.xarg:
        xarg = self.xarg
        subvars[self.xarg] = _vars[self.xarg]['props']
    subvars.update((lbl, {}) for lbl in lbls)
    # Pull in the properties of every argument variable used by kept EPs.
    subvars.update(
        (var, _vars[var]['props'])
        for ep in eps
        for var in ep[3].values()
        if var in _vars
    )
    if self.index in subvars:
        index = self.index
    # Retain only constraints whose endpoints are inside the subgraph.
    for var in subvars:
        hc = _hcons.get(var, None)
        if hc is not None and hc[2] in lbls:
            hcons.append(hc)
        for ic in _icons.get(var, []):
            if ic[0] in subvars and ic[2] in subvars:
                icons.append(ic)
    return Xmrs(
        top=top, index=index, xarg=xarg,
        eps=eps, hcons=hcons, icons=icons, vars=subvars,
        lnk=self.lnk, surface=self.surface, identifier=self.identifier
    )
Return an Xmrs object with only the specified *nodeids*. Necessary variables and arguments are also included in order to connect any nodes that are connected in the original Xmrs. Args: nodeids: the nodeids of the nodes/EPs to include in the subgraph. Returns: An :class:`Xmrs` object.
juraj-google-style
def __init__(self, encoding=None):
    """Initializes a delimiter separated values (DSV) parser.

    Args:
        encoding (Optional[str]): encoding used in the DSV file, where None
            indicates the codepage of the parser mediator should be used.
    """
    super(DSVParser, self).__init__()
    self._encoding = encoding
    # The line terminator must match the string type the csv module expects
    # on this Python version: bytes on Python 2, str on Python 3.
    if py2to3.PY_2:
        self._end_of_line = b'\n'
    else:
        self._end_of_line = '\n'
    # Upper bound for a well-formed line: terminator plus every column at
    # its maximum field size including its trailing delimiter.
    self._maximum_line_length = (
        len(self._end_of_line) +
        len(self.COLUMNS) * (self.FIELD_SIZE_LIMIT + len(self.DELIMITER)))
Initializes a delimiter separated values (DSV) parser. Args: encoding (Optional[str]): encoding used in the DSV file, where None indicates the codepage of the parser mediator should be used.
juraj-google-style
def _replace_child_layer_functions(layer, serialization_cache):
    """Temporarily swap child layers' call functions for wrapped tf.functions.

    This lets functions traced on the parent layer reference the children's
    already-wrapped functions instead of retracing their ops. Use
    ``_restore_child_layer_functions`` with the returned mapping to undo.

    Args:
        layer: Keras Layer object.
        serialization_cache: Dictionary shared between all objects during
            serialization.

    Returns:
        Dictionary mapping each modified child layer to its original
        functions (``call`` / ``_activity_regularizer`` for layers;
        ``__call__`` / ``result`` / ``update_state`` for metrics).
    """
    original_fns = {}

    def replace_layer_functions(child_layer, serialized_fns):
        # Remember the originals so the caller can restore them later.
        original_fns[child_layer] = {'call': child_layer.call, '_activity_regularizer': child_layer._activity_regularizer}
        # Assignments must not be tracked as new checkpoint dependencies.
        with utils.no_automatic_dependency_tracking_scope(child_layer):
            try:
                child_layer._activity_regularizer = serialized_fns.get('activity_regularizer_fn')
            except AttributeError:
                # Some layers expose _activity_regularizer as read-only.
                pass
            child_layer.call = utils.use_wrapped_call(child_layer, serialized_fns['call_and_return_conditional_losses'], default_training_value=False)

    def replace_metric_functions(child_layer, serialized_fns):
        original_fns[child_layer] = {'__call__': child_layer.__call__, 'result': child_layer.result, 'update_state': child_layer.update_state}
        with utils.no_automatic_dependency_tracking_scope(child_layer):
            child_layer.__call__ = serialized_fns['__call__']
            child_layer.result = serialized_fns['result']
            child_layer.update_state = serialized_fns['update_state']

    for child_layer in utils.list_all_layers(layer):
        # InputLayers have no call function worth wrapping.
        if isinstance(child_layer, input_layer.InputLayer):
            continue
        # Serialize the child on demand if it is not in the cache yet.
        if child_layer not in serialization_cache[constants.KERAS_CACHE_KEY]:
            serialized_functions = child_layer._trackable_saved_model_saver._get_serialized_attributes(serialization_cache).functions
        else:
            serialized_functions = serialization_cache[constants.KERAS_CACHE_KEY][child_layer].functions
        if not serialized_functions:
            continue
        if isinstance(child_layer, metrics.Metric):
            replace_metric_functions(child_layer, serialized_functions)
        else:
            replace_layer_functions(child_layer, serialized_functions)
    return original_fns
Replaces functions in the children layers with wrapped tf.functions. This step allows functions from parent layers to reference the wrapped functions from their children layers instead of retracing the ops. This function also resets all losses stored in the layer. These are stored in the returned dictionary. Use `_restore_child_layer_functions` to restore the original attributes. Args: layer: Keras Layer object. serialization_cache: Dictionary shared between all objects during serialization. Returns: Dictionary mapping layer objects -> original functions and losses: { Child layer 1: { 'losses': Original losses, 'call': Original call function '_activity_regularizer': Original activity regularizer}, Child layer 2: ... }
github-repos
def __init__(self, loaders):
    """Load values into the class's ConfigProperty attributes.

    Args:
        loaders: iterable of AbstractLoader instances. ConfigProperty values
            are loaded from these sources; order indicates preference.

    Raises:
        AssertionError: if the class declares ConfigProperty attributes but
            no loaders were supplied to populate them.
    """
    if not loaders:
        # Without loaders any declared ConfigProperty would stay unset.
        if any(self._iter_config_props()):
            raise AssertionError('Class has ConfigProperty attributes: must provide loader(s)')
    self._update_property_keys()
    # varz collects the raw values observed during loading.
    self.varz = {}
    self._loaders = loaders
    self._load()
Load values into the class's ConfigProperty attributes (validating types if possible) Args: loaders: iterable of AbstractLoader instances ConfigProperty values are loaded from these sources; and the order indicates preference.
juraj-google-style
def groups_setPurpose(self, *, channel: str, purpose: str, **kwargs) -> SlackResponse:
    """Sets the purpose for a private channel.

    Args:
        channel (str): The channel id. e.g. 'G1234567890'
        purpose (str): The new purpose for the channel. e.g. 'My Purpose'
    """
    # Fold the required fields into the extra arguments and send as JSON.
    payload = dict(kwargs, channel=channel, purpose=purpose)
    return self.api_call('groups.setPurpose', json=payload)
Sets the purpose for a private channel. Args: channel (str): The channel id. e.g. 'G1234567890' purpose (str): The new purpose for the channel. e.g. 'My Purpose'
codesearchnet
def get_enterprise_sso_uid(self, obj):
    """Get enterprise SSO UID.

    Arguments:
        obj (User): Django User object

    Returns:
        (str): UUID from the enterprise customer's Identity Provider, or a
        falsy value when the user is not linked to an enterprise.
    """
    learner = EnterpriseCustomerUser.objects.filter(user_id=obj.id).first()
    if not learner:
        # Preserve the short-circuit value (None) for unlinked users.
        return learner
    return learner.get_remote_id()
Get enterprise SSO UID. Arguments: obj (User): Django User object Returns: (str): string containing UUID for enterprise customer's Identity Provider.
juraj-google-style
def local_services(self):
    """Get a list of (id, name) pairs for all known synced services.

    Safe to call outside the background event loop: a mutex protects the
    local supervisor data so it cannot change during iteration.

    Returns:
        list: (id, name) tuples sorted by id from low to high.
    """
    # Only take the lock when called from outside the event loop thread.
    if not self._loop.inside_loop():
        self._state_lock.acquire()
    try:
        pairs = list(self._name_map.items())
        pairs.sort(key=lambda pair: pair[0])
        return pairs
    finally:
        if not self._loop.inside_loop():
            self._state_lock.release()
Get a list of id, name pairs for all of the known synced services. This method is safe to call outside of the background event loop without any race condition. Internally it uses a thread-safe mutex to protect the local copies of supervisor data and ensure that it cannot change while this method is iterating over it. Returns: list (id, name): A list of tuples with id and service name sorted by id from low to high
codesearchnet
def format_returnvalue(self, value):
    """Format the return value of this function as a string.

    Args:
        value (object): The return value to format.

    Returns:
        str: The formatted return value, or None if this function declares
        that it does not return data.
    """
    self._ensure_loaded()
    info = self.return_info
    if not info.is_data:
        return None
    if info.type_name is None:
        # No declared type: fall back to the raw formatter.
        return info.formatter(value)
    return typeinfo.type_system.format_value(value, info.type_name, info.formatter)
Format the return value of this function as a string. Args: value (object): The return value that we are supposed to format. Returns: str: The formatted return value, or None if this function indicates that it does not return data
juraj-google-style
def copy(src, dst):
    """Copies a source file to a destination file or directory.

    Equivalent to "shutil.copy". Source and destination can also be binary
    opened file-like objects.

    Args:
        src (path-like object or file-like object): Source file.
        dst (path-like object or file-like object): Destination file or
            directory.

    Raises:
        IOError: Destination directory not found.
    """
    src, src_is_storage = format_and_is_storage(src)
    dst, dst_is_storage = format_and_is_storage(dst)
    # Pure local-to-local copies are delegated to shutil.
    if not src_is_storage and not dst_is_storage:
        return shutil_copy(src, dst)
    with handle_os_exceptions():
        # For path destinations, resolve a directory target to a file path
        # and validate that the parent directory exists.
        if not hasattr(dst, 'read'):
            try:
                if isdir(dst):
                    dst = join(dst, basename(src))
                elif not isdir(dirname(dst)):
                    raise IOError("No such file or directory: '%s'" % dst)
            except ObjectPermissionError:
                # Unable to check the parent: assume it exists and proceed.
                pass
        _copy(src, dst, src_is_storage, dst_is_storage)
Copies a source file to a destination file or directory. Equivalent to "shutil.copy". Source and destination can also be binary opened file-like objects. Args: src (path-like object or file-like object): Source file. dst (path-like object or file-like object): Destination file or directory. Raises: IOError: Destination directory not found.
juraj-google-style
def _use_datastore(self, key, options=None):
    """Return whether to use the datastore for this key.

    Resolution order: explicit call options -> per-key datastore policy ->
    connection-level configuration -> True.

    Args:
        key: Key instance.
        options: ContextOptions instance, or None.

    Returns:
        True if the datastore should be used, False otherwise.
    """
    flag = ContextOptions.use_datastore(options)
    if flag is None:
        flag = self._datastore_policy(key)
    if flag is None:
        flag = ContextOptions.use_datastore(self._conn.config)
    return True if flag is None else flag
Return whether to use the datastore for this key. Args: key: Key instance. options: ContextOptions instance, or None. Returns: True if the datastore should be used, False otherwise.
juraj-google-style
def _find_codopant(target, oxidation_state, allowed_elements=None):
    """Find the species closest in ionic radius to a target specie.

    Returns the element from *allowed_elements* that (i) possesses the
    desired *oxidation_state* and (ii) is closest in ionic radius to the
    target specie.

    Args:
        target: (Specie) provides the target ionic radius.
        oxidation_state: (float) codopant oxidation state.
        allowed_elements: ([str]) List of allowed element symbols. If None,
            all elements are tried.

    Returns:
        (Specie) with *oxidation_state* whose ionic radius is closest to the
        target's.

    Raises:
        ValueError: if no candidate species has a tabulated ionic radius.
    """
    ref_radius = target.ionic_radius
    candidates = []
    symbols = allowed_elements or [el.symbol for el in Element]
    for sym in symbols:
        # Many (symbol, oxidation state) combinations are invalid or lack a
        # tabulated radius; skip those instead of aborting the search.
        try:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                sp = Specie(sym, oxidation_state)
                r = sp.ionic_radius
                if r is not None:
                    candidates.append((r, sp))
        except Exception:
            # Was a bare "except:", which also swallowed KeyboardInterrupt
            # and SystemExit; only data errors should be ignored here.
            continue
    # Pick the candidate whose radius ratio to the target is closest to 1.
    return min(candidates, key=lambda l: abs(l[0] / ref_radius - 1))[1]
Finds the element from "allowed elements" that (i) possesses the desired "oxidation state" and (ii) is closest in ionic radius to the target specie Args: target: (Specie) provides target ionic radius. oxidation_state: (float) codopant oxidation state. allowed_elements: ([str]) List of allowed elements. If None, all elements are tried. Returns: (Specie) with oxidation_state that has ionic radius closest to target.
juraj-google-style
def restore(self, restored_tensors, restored_shapes):
    """Restores this object from 'restored_tensors'.

    Abstract: subclasses must override this method.

    Args:
        restored_tensors: the tensors that were loaded from a checkpoint.
        restored_shapes: the shapes this object should conform to after
            restore, or None.

    Returns:
        An operation that restores the state of the object.

    Raises:
        ValueError: always, since this base implementation is abstract.
    """
    raise ValueError('Calling an abstract method.')
Restores this object from 'restored_tensors'. Args: restored_tensors: the tensors that were loaded from a checkpoint restored_shapes: the shapes this object should conform to after restore, or None. Returns: An operation that restores the state of the object. Raises: ValueError: If the object cannot be restored using the provided parameters.
github-repos
from contextlib import contextmanager


@contextmanager
def all_logging_disabled(highest_level=logging.CRITICAL):
    """Disable all logging temporarily.

    A context manager that prevents any logging messages triggered during
    the body from being processed. The ``@contextmanager`` decorator is
    required for the documented ``with all_logging_disabled():`` usage —
    without it the function only returns a bare generator.

    Args:
        highest_level: the maximum logging level that is being blocked.
    """
    previous_level = logging.root.manager.disable
    logging.disable(highest_level)
    try:
        yield
    finally:
        # Restore whatever global disable level was active before entry.
        logging.disable(previous_level)
Disable all logging temporarily. A context manager that will prevent any logging messages triggered during the body from being processed. Args: highest_level: the maximum logging level that is being blocked
juraj-google-style
def core_assignment(self) -> np.ndarray:
    """The logical to physical core mapping.

    Returns:
        An integer numpy array of rank 3, with shape
        `[num_replicas, num_cores_per_replica, topology_rank]`. Maps
        (replica, logical core) pairs to physical topology coordinates.
    """
    return self._core_assignment
The logical to physical core mapping. Returns: An integer numpy array of rank 3, with shape `[num_replicas, num_cores_per_replica, topology_rank]`. Maps (replica, logical core) pairs to physical topology coordinates.
github-repos
def eval_autoregressive(self, features=None, decode_length=50):
    """Autoregressive eval. Quadratic time in decode_length.

    Args:
        features: a map of string to `Tensor`.
        decode_length: an integer. How many additional timesteps to decode.

    Returns:
        logits: `Tensor`
        losses: a dictionary {loss-name (string): floating point `Scalar`}
            containing the single key "training".
    """
    decoded = self._slow_greedy_infer(features, decode_length=decode_length)
    return decoded["logits"], decoded["losses"]
Autoregressive eval. Quadratic time in decode_length. Args: features: a map of string to `Tensor` decode_length: an integer. How many additional timesteps to decode. Returns: logits: `Tensor` losses: a dictionary: {loss-name (string): floating point `Scalar`}. Contains a single key "training".
juraj-google-style
def _is_empty_observation_data(feature_ndims, observation_index_points, observations):
    """Returns `True` if the given GP observation data is empty.

    Emptiness means either both `observation_index_points` and
    `observations` are `None`, or the "number of observations" dimension of
    `observation_index_points` (the axis just left of the `feature_ndims`
    feature dims) is statically 0.

    Args:
        feature_ndims: the number of feature dims, as reported by the GP
            kernel.
        observation_index_points: the observation data locations in the
            index set.
        observations: the observation data.

    Returns:
        is_empty: True if the data were deemed to be empty.
    """
    if observation_index_points is None and observations is None:
        return True
    # Shape is [..., N, f1, ..., fF]; N sits at axis -(feature_ndims + 1).
    obs_axis = -(feature_ndims + 1)
    num_obs = tf.compat.dimension_value(observation_index_points.shape[obs_axis])
    return num_obs is not None and num_obs == 0
Returns `True` if given observation data is empty. Emptiness means either 1. Both `observation_index_points` and `observations` are `None`, or 2. the "number of observations" shape is 0. The shape of `observation_index_points` is `[..., N, f1, ..., fF]`, where `N` is the number of observations and the `f`s are feature dims. Thus, we look at the shape element just to the left of the leftmost feature dim. If that shape is zero, we consider the data empty. We don't check the shape of observations; validations are checked elsewhere in the calling code, to ensure these shapes are consistent. Args: feature_ndims: the number of feature dims, as reported by the GP kernel. observation_index_points: the observation data locations in the index set. observations: the observation data. Returns: is_empty: True if the data were deemed to be empty.
codesearchnet
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, padding_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> torch.Tensor:
    """Encoder layer: self-attention then a two-convolution feed-forward
    block, each with a residual connection and post-layer-norm.

    Args:
        hidden_states (`torch.FloatTensor`): input of shape
            `(batch, seq_len, embed_dim)`.
        attention_mask (`torch.FloatTensor`): mask of size
            `(batch, 1, tgt_len, src_len)` where padding is indicated by very
            large negative values.
        padding_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`,
            *optional*): 1 for tokens that are *not masked*, 0 for *masked*.
        output_attentions (`bool`, *optional*): whether to also return the
            attention weights.

    Returns:
        Tuple of `(hidden_states, present_key_value)` plus the attention
        weights when `output_attentions` is True.
    """
    residual = hidden_states
    hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)
    hidden_states = residual + hidden_states
    hidden_states = self.self_attn_layer_norm(hidden_states)
    residual = hidden_states
    # Zero padded positions before and after conv1 so padding never leaks
    # into neighboring frames through the convolution kernel.
    if padding_mask is not None:
        hidden_states = hidden_states.masked_fill(~padding_mask.bool().unsqueeze(-1), 0.0)
    hidden_states = self.conv1(hidden_states.transpose(1, 2)).transpose(1, 2)
    if padding_mask is not None:
        hidden_states = hidden_states.masked_fill(~padding_mask.bool().unsqueeze(-1), 0.0)
    hidden_states = self.activation_fn(hidden_states)
    hidden_states = self.conv2(hidden_states.transpose(1, 2)).transpose(1, 2)
    hidden_states = self.conv_dropout(hidden_states)
    hidden_states = residual + hidden_states
    hidden_states = self.conv_layer_norm(hidden_states)
    outputs = (hidden_states, present_key_value)
    if output_attentions:
        # Bug fix: "outputs += self_attn_weights" concatenated a tuple with a
        # tensor, raising TypeError; the weights must be wrapped in a 1-tuple.
        outputs += (self_attn_weights,)
    return outputs
Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. padding_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked* or 0 for *masked* output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail.
github-repos
def WriteTaskCompletion(self, aborted=False):
    """Writes task completion information.

    Args:
        aborted (Optional[bool]): True if the session was aborted.

    Raises:
        IOError: if the storage type is not supported or when the storage
            writer is closed.
        OSError: if the storage type is not supported or when the storage
            writer is closed.
    """
    self._RaiseIfNotWritable()
    # Task completion only makes sense for task storage.
    if self._storage_type != definitions.STORAGE_TYPE_TASK:
        raise IOError('Unsupported storage type.')
    self._task.aborted = aborted
    self._storage_file.WriteTaskCompletion(self._task.CreateTaskCompletion())
Writes task completion information. Args: aborted (Optional[bool]): True if the session was aborted. Raises: IOError: if the storage type is not supported or when the storage writer is closed. OSError: if the storage type is not supported or when the storage writer is closed.
codesearchnet
def model(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:
    """Performs a gather and two 2D convolution operations.

    When `is_qat_model` is true, fake-quantization ops are inserted after
    each convolution and on the second convolution's filters, mimicking a
    quantization-aware-trained graph.

    Args:
        input_tensor: Input tensor to perform operation on (indices into
            `self.embedding_w`).

    Returns:
        A map of: output key -> output result.
    """
    out = array_ops.gather_v2(self.embedding_w, input_tensor)
    out = nn_ops.conv2d(out, self.conv_filters, strides=(1, 1, 2, 1), dilations=(1, 1, 1, 1), padding='SAME', data_format='NHWC')
    if is_qat_model:
        # Fake-quantize activations; narrow_range=True only for weights.
        out = array_ops.fake_quant_with_min_max_args(out, min=-0.1, max=0.2, num_bits=8, narrow_range=False)
        second_conv_filters = array_ops.fake_quant_with_min_max_args(self.second_conv_filters, min=-0.1, max=0.2, num_bits=8, narrow_range=True)
    else:
        second_conv_filters = self.second_conv_filters
    out = nn_ops.conv2d(out, second_conv_filters, strides=(1, 1, 2, 1), dilations=(1, 1, 1, 1), padding='SAME', data_format='NHWC')
    if is_qat_model:
        out = array_ops.fake_quant_with_min_max_args(out, min=-0.1, max=0.2, num_bits=8, narrow_range=False)
    return {'output': out}
Performs a gather and a 2D convolution operation. Args: input_tensor: Input tensor to perform operation on. Returns: A map of: output key -> output result.
github-repos
def add_positional_embedding(x, max_length, name=None, positions=None):
    """Adds a learned positional embedding to x.

    Args:
        x: Tensor with shape [batch, length, depth].
        max_length: int representing static maximum size of any dimension.
        name: str name of the embedding tf.Variable.
        positions: optional Tensor with shape [batch, length] giving explicit
            positions to look up; when None, positions 0..length-1 are used.

    Returns:
        Tensor of same shape as x.
    """
    with tf.name_scope("add_positional_embedding"):
        _, length, depth = common_layers.shape_list(x)
        var = tf.cast(tf.get_variable(name, [max_length, depth]), x.dtype)
        if positions is None:
            # Slice the table when length < max_length; otherwise zero-pad it
            # so the addition still broadcasts over the full sequence.
            pad_length = tf.maximum(0, length - max_length)
            sliced = tf.cond(
                tf.less(length, max_length),
                lambda: tf.slice(var, [0, 0], [length, -1]),
                lambda: tf.pad(var, [[0, pad_length], [0, 0]]))
            return x + tf.expand_dims(sliced, 0)
        else:
            return x + tf.gather(var, tf.to_int32(positions))
Adds positional embedding. Args: x: Tensor with shape [batch, length, depth]. max_length: int representing static maximum size of any dimension. name: str representing name of the embedding tf.Variable. positions: Tensor with shape [batch, length]. Returns: Tensor of same shape as x.
juraj-google-style
def _FillEventSourceHeap(
    self, storage_writer, event_source_heap, start_with_first=False):
  """Fills the event source heap with the available written event sources.

  Stops either when the heap is full or when the storage writer has no
  further written event sources.

  Args:
    storage_writer (StorageWriter): storage writer for a session storage.
    event_source_heap (_EventSourceHeap): event source heap.
    start_with_first (Optional[bool]): True if the function should start
        with the first written event source.
  """
  if self._processing_profiler:
    self._processing_profiler.StartTiming('fill_event_source_heap')

  if self._processing_profiler:
    self._processing_profiler.StartTiming('get_event_source')

  if start_with_first:
    event_source = storage_writer.GetFirstWrittenEventSource()
  else:
    event_source = storage_writer.GetNextWrittenEventSource()

  if self._processing_profiler:
    self._processing_profiler.StopTiming('get_event_source')

  while event_source:
    event_source_heap.PushEventSource(event_source)
    if event_source_heap.IsFull():
      break

    # Each retrieval is timed separately so the profiler measures storage
    # access distinct from heap filling.
    if self._processing_profiler:
      self._processing_profiler.StartTiming('get_event_source')

    event_source = storage_writer.GetNextWrittenEventSource()

    if self._processing_profiler:
      self._processing_profiler.StopTiming('get_event_source')

  if self._processing_profiler:
    self._processing_profiler.StopTiming('fill_event_source_heap')
Fills the event source heap with the available written event sources. Args: storage_writer (StorageWriter): storage writer for a session storage. event_source_heap (_EventSourceHeap): event source heap. start_with_first (Optional[bool]): True if the function should start with the first written event source.
juraj-google-style
def stretch_hist_equalize(self, approximate=False):
    """Stretch the current image's colors through histogram equalization.

    Each band (except alpha, which is passed through untouched) is remapped
    so its value distribution becomes approximately uniform.

    Args:
        approximate (bool): Use a faster, less accurate percentile
            calculation. At the time of writing the dask version of
            `percentile` is not as accurate as the numpy version. This will
            likely change in the future. Current dask version 0.17.
    """
    logger.info('Perform a histogram equalized contrast stretch.')
    # Number of histogram bins used for the equalization CDF.
    nwidth = 2048.0
    logger.debug(('Make histogram bins having equal amount of data, ' +
                  'using numpy percentile function:'))

    def _band_hist(band_data):
        # Map the band through its own empirical CDF: percentiles give the
        # bin edges, interp maps each pixel onto [0, 1).
        cdf = da.arange(0.0, 1.0, (1.0 / nwidth), chunks=nwidth)
        if approximate:
            flat_data = band_data.ravel()
            bins = da.percentile(flat_data[da.notnull(flat_data)], (cdf * 100.0))
        else:
            # Delayed numpy percentile for accuracy (see docstring note).
            bins = dask.delayed(np.nanpercentile)(band_data, (cdf * 100.0))
            bins = da.from_delayed(bins, shape=(nwidth,), dtype=cdf.dtype)
        res = dask.delayed(np.interp)(band_data, bins, cdf)
        res = da.from_delayed(res, shape=band_data.shape, dtype=band_data.dtype)
        return res

    band_results = []
    for band in self.data['bands'].values:
        if (band == 'A'):
            continue
        band_data = self.data.sel(bands=band)
        res = _band_hist(band_data.data)
        band_results.append(res)

    # Alpha is appended unchanged so transparency is preserved.
    if ('A' in self.data.coords['bands'].values):
        band_results.append(self.data.sel(bands='A'))
    self.data.data = da.stack(band_results, axis=self.data.dims.index('bands'))
Stretch the current image's colors through histogram equalization. Args: approximate (bool): Use a faster less-accurate percentile calculation. At the time of writing the dask version of `percentile` is not as accurate as the numpy version. This will likely change in the future. Current dask version 0.17.
codesearchnet