code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def evaluate_stacked_ensemble(path, ensemble_id):
    """Evaluate the stacked ensemble and update the database when finished.

    Args:
        path (str): Path to Xcessiv notebook.
        ensemble_id (str): Ensemble ID.
    """
    with functions.DBContextManager(path) as session:
        stacked_ensemble = session.query(models.StackedEnsemble).filter_by(
            id=ensemble_id).first()
        if not stacked_ensemble:
            raise exceptions.UserError('Stacked ensemble {} '
                                       'does not exist'.format(ensemble_id))
        # Mark the job as started before doing any work.
        stacked_ensemble.job_id = get_current_job().id
        stacked_ensemble.job_status = 'started'
        session.add(stacked_ensemble)
        session.commit()
        try:
            # Load and horizontally stack the base learners' meta-features.
            meta_features_list = []
            for base_learner in stacked_ensemble.base_learners:
                mf = np.load(base_learner.meta_features_path(path))
                if len(mf.shape) == 1:
                    mf = mf.reshape(-1, 1)
                meta_features_list.append(mf)
            secondary_features = np.concatenate(meta_features_list, axis=1)
            # Reorder the target so it matches the row order of the
            # meta-features (the concatenated CV test folds).
            extraction = session.query(models.Extraction).first()
            return_splits_iterable = functions.import_object_from_string_code(
                extraction.meta_feature_generation['source'],
                'return_splits_iterable'
            )
            X, y = extraction.return_train_dataset()
            indices_list = [test_index for train_index, test_index
                            in return_splits_iterable(X, y)]
            indices = np.concatenate(indices_list)
            X, y = X[indices], y[indices]
            est = stacked_ensemble.return_secondary_learner()
            # Cross-validate the secondary learner on the meta-features.
            return_splits_iterable_stacked_ensemble = functions.import_object_from_string_code(
                extraction.stacked_ensemble_cv['source'],
                'return_splits_iterable'
            )
            preds = []
            trues_list = []
            for train_index, test_index in return_splits_iterable_stacked_ensemble(secondary_features, y):
                X_train, X_test = secondary_features[train_index], secondary_features[test_index]
                y_train, y_test = y[train_index], y[test_index]
                est = est.fit(X_train, y_train)
                preds.append(
                    getattr(est, stacked_ensemble.base_learner_origin.
                            meta_feature_generator)(X_test)
                )
                trues_list.append(y_test)
            preds = np.concatenate(preds, axis=0)
            y_true = np.concatenate(trues_list)
            # Score the out-of-fold predictions with every configured metric.
            for key in stacked_ensemble.base_learner_origin.metric_generators:
                metric_generator = functions.import_object_from_string_code(
                    stacked_ensemble.base_learner_origin.metric_generators[key],
                    'metric_generator'
                )
                stacked_ensemble.individual_score[key] = metric_generator(y_true, preds)
            stacked_ensemble.job_status = 'finished'
            session.add(stacked_ensemble)
            session.commit()
        except:
            # Record failure details on the ensemble row, then re-raise so
            # the job queue also sees the error.
            session.rollback()
            stacked_ensemble.job_status = 'errored'
            stacked_ensemble.description['error_type'] = repr(sys.exc_info()[0])
            stacked_ensemble.description['error_value'] = repr(sys.exc_info()[1])
            stacked_ensemble.description['error_traceback'] = \
                traceback.format_exception(*sys.exc_info())
            session.add(stacked_ensemble)
            session.commit()
            raise
Evaluates the ensemble and updates the database when finished. Args: path (str): Path to Xcessiv notebook ensemble_id (str): Ensemble ID
juraj-google-style
def tokenize(self, text, never_split=None):
    """Basic tokenization of a piece of text: split on whitespace and
    punctuation only (sub-word tokenization is handled elsewhere).

    Args:
        text (str): The text to tokenize.
        never_split (list, *optional*): Extra tokens that must never be
            split; merged with ``self.never_split``. Kept for backward
            compatibility.

    Returns:
        list: The whitespace/punctuation tokens.
    """
    # Merge call-time never_split entries with the instance-level set.
    never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
    text = self._clean_text(text)
    if self.tokenize_chinese_chars:
        text = self._tokenize_chinese_chars(text)
    orig_tokens = whitespace_tokenize(text)
    split_tokens = []
    for token in orig_tokens:
        if token not in never_split:
            if self.do_lower_case:
                token = token.lower()
                # When lower-casing, accents are stripped unless
                # strip_accents was explicitly set to False.
                if self.strip_accents is not False:
                    token = self._run_strip_accents(token)
            elif self.strip_accents:
                token = self._run_strip_accents(token)
        split_tokens.extend(self._run_split_on_punc(token, never_split))
    # Re-tokenize to collapse any whitespace introduced by the join.
    output_tokens = whitespace_tokenize(' '.join(split_tokens))
    return output_tokens
Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see WordPieceTokenizer. Args: never_split (`List[str]`, *optional*) Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of token not to split.
github-repos
def force_list(val=None):
    """Force a list representation of an object.

    Args:
        val: Object to parse into a list.

    Returns:
        list: ``[]`` for None, ``val.tolist()`` for a pandas Series,
        ``val`` unchanged if already a list, otherwise ``[val]``.
    """
    if val is None:
        return []
    if isinstance(val, pd.Series):
        return val.tolist()
    if isinstance(val, list):
        return val
    return [val]
Force a list representation of an object Args: val: object to parse into a list Returns:
codesearchnet
def render_html_report(summary, report_template=None, report_dir=None):
    """Render an HTML report from a test summary.

    Args:
        summary (dict): Test summary; must contain ``time.start_at``.
        report_template (str): HTML report template path; falls back to
            the bundled default template when omitted.
        report_dir (str): Directory to save the report in; defaults to
            ``<cwd>/reports``.

    Returns:
        str: Path of the generated HTML report.
    """
    if (not report_template):
        report_template = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                       'templates', 'report_template.html')
        logger.log_debug('No html report template specified, use default.')
    else:
        logger.log_info('render with html report template: {}'.format(report_template))
    logger.log_info('Start to render Html report ...')
    report_dir = (report_dir or os.path.join(os.getcwd(), 'reports'))
    if (not os.path.isdir(report_dir)):
        os.makedirs(report_dir)
    # The report file is named after the run's start timestamp.
    start_at_timestamp = int(summary['time']['start_at'])
    summary['time']['start_datetime'] = datetime.fromtimestamp(start_at_timestamp).strftime('%Y-%m-%d %H:%M:%S')
    report_path = os.path.join(report_dir, '{}.html'.format(start_at_timestamp))
    with io.open(report_template, 'r', encoding='utf-8') as fp_r:
        template_content = fp_r.read()
    with io.open(report_path, 'w', encoding='utf-8') as fp_w:
        # loopcontrols enables {% break %}/{% continue %} in templates.
        rendered_content = Template(template_content, extensions=['jinja2.ext.loopcontrols']).render(summary)
        fp_w.write(rendered_content)
    logger.log_info('Generated Html report: {}'.format(report_path))
    return report_path
render html report with specified report name and template Args: report_template (str): specify html report template path report_dir (str): specify html report save directory
codesearchnet
def _lower_if_str(item): try: string_type = basestring except NameError: string_type = str if isinstance(item, string_type): return item.lower() return item
Try to convert item to lowercase, if it is string. Args: item (obj): Str, unicode or any other object. Returns: obj: ``item.lower()`` if `item` is ``str`` or ``unicode``, else just \ `item` itself.
codesearchnet
def _parse_phone(self, val):
    """Parse a vcard phone-number entry into ``self.vars['phone']``.

    Args:
        val (list): The raw vcard field; index 1 is expected to hold a
            dict with the phone ``type`` and index 3 the number string.
    """
    ret = {
        'type': None,
        'value': None
    }
    # The type sub-field is optional/loosely structured; any failure to
    # extract it leaves the default None.
    try:
        ret['type'] = val[1]['type']
    except (IndexError, KeyError, ValueError, TypeError):
        pass
    ret['value'] = val[3].strip()
    try:
        self.vars['phone'].append(ret)
    except AttributeError:
        # NOTE(review): only AttributeError is caught here (e.g. a None
        # placeholder value); a missing 'phone' key would raise KeyError
        # instead — confirm how self.vars is initialized by callers.
        self.vars['phone'] = []
        self.vars['phone'].append(ret)
The function for parsing the vcard phone numbers. Args: val (:obj:`list`): The value to parse.
juraj-google-style
def GetRowCache(self, query):
    """Retrieves the row cache for a specific query.

    The row cache is a set containing hashes of row values; it is used to
    detect duplicate rows when a database and its WAL file are both parsed.

    Args:
        query (str): query.

    Returns:
        set: hashes of the rows that have been parsed.
    """
    # Create the per-query cache lazily on first access.
    return self._row_caches.setdefault(hash(query), set())
Retrieves the row cache for a specific query. The row cache is a set that contains hashes of values in a row. The row cache is used to find duplicate row when a database and a database with a WAL file is parsed. Args: query (str): query. Returns: set: hashes of the rows that have been parsed.
juraj-google-style
def do_usufy(self, query, **kwargs):
    """Verify a usufy query in this platform.

    May be redefined by any class inheriting from Platform.

    Args:
        query: The element to be searched.

    Returns:
        list: i3visio-style profile entities to be appended (empty when
        the query does not resolve on this platform).
    """
    results = []
    test = self.check_usufy(query, **kwargs)
    if test:
        r = {
            "type": "i3visio.profile",
            "value": self.platformName + " - " + query,
            "attributes": []
        }
        # Profile URI attribute.
        aux = {}
        aux["type"] = "i3visio.uri"
        aux["value"] = self.createURL(word=query, mode="usufy")
        aux["attributes"] = []
        r["attributes"].append(aux)
        # Alias attribute.
        aux = {}
        aux["type"] = "i3visio.alias"
        aux["value"] = query
        aux["attributes"] = []
        r["attributes"].append(aux)
        # Platform attribute.
        aux = {}
        aux["type"] = "i3visio.platform"
        aux["value"] = self.platformName
        aux["attributes"] = []
        r["attributes"].append(aux)
        # Platform-specific attributes extracted from the response.
        r["attributes"] += self.process_usufy(test)
        results.append(r)
    return results
Verifying a usufy query in this platform. This might be redefined in any class inheriting from Platform. Args: ----- query: The element to be searched. Return: ------- A list of elements to be appended.
juraj-google-style
def port_list(br):
    """Lists all of the ports within bridge.

    Args:
        br: A string - bridge name.

    Returns:
        List of ports (or empty list), False on failure.
    """
    # Run ovs-vsctl through the salt cmd module and split its stdout.
    result = __salt__['cmd.run_all']('ovs-vsctl list-ports {0}'.format(br))
    return _stdout_list_split(result['retcode'], result['stdout'])
Lists all of the ports within bridge. Args: br: A string - bridge name. Returns: List of ports (or empty list), False on failure. .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' openvswitch.port_list br0
codesearchnet
def _group_similar(items: List[T], comparer: Callable[([T, T], bool)]) -> List[List[T]]: groups = [] used = set() for i in range(len(items)): if (i not in used): group = [items[i]] for j in range((i + 1), len(items)): if ((j not in used) and comparer(items[i], items[j])): used.add(j) group.append(items[j]) groups.append(group) return groups
Combines similar items into groups. Args: items: The list of items to group. comparer: Determines if two items are similar. Returns: A list of groups of items.
codesearchnet
def plugin_class_validation(self, plugin_class):
    """Plugin validation.

    Every workbench plugin must expose a ``dependencies`` list (even if
    empty) and an ``execute`` method.

    Args:
        plugin_class: The loaded plugin class.

    Returns:
        bool: True if both ``dependencies`` and ``execute`` are present,
        else False.
    """
    return all(hasattr(plugin_class, attr) for attr in ('dependencies', 'execute'))
Plugin validation Every workbench plugin must have a dependencies list (even if it's empty). Every workbench plugin must have an execute method. Args: plugin_class: The loaded plugun class. Returns: True if dependencies and execute are present, else False.
juraj-google-style
def handle_error(err, halt=True):
    """Print an error message in bright red and optionally exit.

    Args:
        err (str): The error message to print.
        halt (bool, optional): Defaults to True. If True the script
            exits with status 1.
    """
    print('%s%s%s' % (c.Style.BRIGHT, c.Fore.RED, err))
    if halt:
        sys.exit(1)
Print errors message and optionally exit. Args: err (str): The error message to print. halt (bool, optional): Defaults to True. If True the script will exit.
codesearchnet
def normalize(tensor, mean, std, inplace=False):
    """Normalize a tensor image with mean and standard deviation.

    .. note::
        By default this transform acts out of place, i.e. it does not
        mutate the input tensor.

    Args:
        tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
        inplace (bool): Whether to normalize ``tensor`` in place.

    Returns:
        Tensor: Normalized Tensor image.

    Raises:
        TypeError: If ``tensor`` is not a torch image.
    """
    if not _is_tensor_image(tensor):
        raise TypeError('tensor is not a torch image.')
    if not inplace:
        tensor = tensor.clone()
    mean = torch.as_tensor(mean, dtype=torch.float32, device=tensor.device)
    std = torch.as_tensor(std, dtype=torch.float32, device=tensor.device)
    # Broadcast the per-channel (C,) stats over the (C, H, W) image.
    # The original spelling `mean[(:, None, None)]` was a syntax error.
    tensor.sub_(mean[:, None, None]).div_(std[:, None, None])
    return tensor
Normalize a tensor image with mean and standard deviation. .. note:: This transform acts out of place by default, i.e., it does not mutates the input tensor. See :class:`~torchvision.transforms.Normalize` for more details. Args: tensor (Tensor): Tensor image of size (C, H, W) to be normalized. mean (sequence): Sequence of means for each channel. std (sequence): Sequence of standard deviations for each channel. Returns: Tensor: Normalized Tensor image.
codesearchnet
def _parse_shape(self, space):
    """Get a tensor shape from an OpenAI Gym space.

    Args:
        space: Gym space.

    Raises:
        NotImplementedError: For spaces other than Box and Discrete.

    Returns:
        Shape tuple.
    """
    if isinstance(space, gym.spaces.Box):
        return space.shape
    if isinstance(space, gym.spaces.Discrete):
        # Discrete spaces are scalar-valued.
        return ()
    raise NotImplementedError()
Get a tensor shape from a OpenAI Gym space. Args: space: Gym space. Raises: NotImplementedError: For spaces other than Box and Discrete. Returns: Shape tuple.
juraj-google-style
def _handle_client_error():
    """Handle boto exceptions and convert them to standard IO exceptions.

    Generator body for a context manager: the wrapped code runs at the
    ``yield`` point.

    Raises:
        OSError subclasses: IO error mapped from the boto error code;
        unknown codes are re-raised unchanged.
    """
    try:
        (yield)
    except _ClientError as exception:
        error = exception.response['Error']
        # Known boto error codes map to specific IO exception classes.
        if (error['Code'] in _ERROR_CODES):
            raise _ERROR_CODES[error['Code']](error['Message'])
        raise
Handle boto exception and convert to class IO exceptions Raises: OSError subclasses: IO error.
codesearchnet
def play(env, transpose=True, fps=30, nop_=0):
    """Play the game using the keyboard as a human.

    Args:
        env (gym.Env): the environment to use for playing
        transpose (bool): whether to transpose frame before viewing them
        fps (int): number of steps of the environment to execute every second
        nop_ (any): the object to use as a null op action for the environment

    Returns:
        None
    """
    # Only image observations (grayscale or RGB) can be rendered.
    assert isinstance(env.observation_space, gym.spaces.box.Box)
    obs_s = env.observation_space
    is_bw = len(obs_s.shape) == 2
    is_rgb = len(obs_s.shape) == 3 and obs_s.shape[2] in [1, 3]
    assert is_bw or is_rgb
    # Resolve the keyboard-to-action mapping from the env (or its wrapper).
    if hasattr(env, 'get_keys_to_action'):
        keys_to_action = env.get_keys_to_action()
    elif hasattr(env.unwrapped, 'get_keys_to_action'):
        keys_to_action = env.unwrapped.get_keys_to_action()
    else:
        raise ValueError('env has no get_keys_to_action method')
    relevant_keys = set(sum(map(list, keys_to_action.keys()), []))
    video_size = env.observation_space.shape[0], env.observation_space.shape[1]
    if transpose:
        video_size = tuple(reversed(video_size))
    pressed_keys = []
    running = True
    env_done = True
    flags = pygame.RESIZABLE | pygame.HWSURFACE | pygame.DOUBLEBUF
    screen = pygame.display.set_mode(video_size, flags)
    pygame.event.set_blocked(pygame.MOUSEMOTION)
    if env.spec is not None:
        pygame.display.set_caption(env.spec.id)
    else:
        pygame.display.set_caption('nes-py')
    clock = pygame.time.Clock()
    while running:
        if env_done:
            env_done = False
            obs = env.reset()
        else:
            # Map the currently pressed keys to an action, default no-op.
            action = keys_to_action.get(tuple(sorted(pressed_keys)), nop_)
            obs, rew, env_done, info = env.step(action)
        if obs is not None:
            # Normalize observation to an RGB (H, W, 3) array for display.
            if len(obs.shape) == 2:
                obs = obs[:, :, None]
            if obs.shape[2] == 1:
                obs = obs.repeat(3, axis=2)
            display_arr(screen, obs, video_size, transpose)
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                if event.key in relevant_keys:
                    pressed_keys.append(event.key)
                elif event.key == 27:
                    # ESC quits the play loop.
                    running = False
                elif event.key == ord('e'):
                    env.unwrapped._backup()
                elif event.key == ord('r'):
                    env.unwrapped._restore()
            elif event.type == pygame.KEYUP:
                if event.key in relevant_keys:
                    pressed_keys.remove(event.key)
            elif event.type == pygame.QUIT:
                running = False
        pygame.display.flip()
        clock.tick(fps)
    pygame.quit()
Play the game using the keyboard as a human. Args: env (gym.Env): the environment to use for playing transpose (bool): whether to transpose frame before viewing them fps (int): number of steps of the environment to execute every second nop_ (any): the object to use as a null op action for the environment Returns: None
juraj-google-style
def coordinate_filter(self, query, mongo_query):
    """Add genomic coordinate filters to the mongo query.

    Args:
        query(dict): query filters specified by the user.
        mongo_query(dict): the query that is going to be submitted to
            the database.

    Returns:
        mongo_query(dict): the query extended with coordinate filters.
    """
    LOG.debug('Adding genomic coordinates to the query')
    mongo_query['chromosome'] = query['chrom']
    start = query.get('start')
    end = query.get('end')
    if start and end:
        # Variant must overlap [start, end]: it starts no later than
        # `end` and ends no earlier than `start`.
        mongo_query['position'] = {'$lte': int(end)}
        mongo_query['end'] = {'$gte': int(start)}
    return mongo_query
Adds genomic coordinated-related filters to the query object Args: query(dict): a dictionary of query filters specified by the users mongo_query(dict): the query that is going to be submitted to the database Returns: mongo_query(dict): returned object contains coordinate filters
codesearchnet
def enable_plugin(self, name, timeout=0):
    """Enable an installed plugin.

    Args:
        name (string): The name of the plugin. The ``:latest`` tag is
            optional, and is the default if omitted.
        timeout (int): Operation timeout (in seconds). Default: 0

    Returns:
        ``True`` if successful
    """
    response = self._post(
        self._url('/plugins/{0}/enable', name),
        params={'timeout': timeout},
    )
    self._raise_for_status(response)
    return True
Enable an installed plugin. Args: name (string): The name of the plugin. The ``:latest`` tag is optional, and is the default if omitted. timeout (int): Operation timeout (in seconds). Default: 0 Returns: ``True`` if successful
codesearchnet
def PopAttributeContainer(self):
    """Pops a serialized attribute container from the list.

    Returns:
        bytes: serialized attribute container data, or None when the
        list is empty.
    """
    if not self._list:
        return None
    serialized_data = self._list.pop(0)
    # Keep the tracked total size in sync with the stored data.
    self.data_size -= len(serialized_data)
    return serialized_data
Pops a serialized attribute container from the list. Returns: bytes: serialized attribute container data.
codesearchnet
def files_from_list(*paths):
    """Return a list of all file paths from a list of files or directories.

    For each input path: a file contributes itself, a directory
    contributes every file found in it recursively.

    Arguments:
        paths (list of str): List of file and directory paths.

    Returns:
        list of str: Absolute file paths.

    Raises:
        File404: If any of the paths do not exist.
    """
    collected = []
    for path in paths:
        if isfile(path):
            collected.append(abspath(path))
        elif isdir(path):
            collected.extend(
                f for f in ls(path, abspaths=True, recursive=True) if isfile(f))
        else:
            raise File404(path)
    return collected
Return a list of all file paths from a list of files or directories. For each path in the input: if it is a file, return it; if it is a directory, return a list of files in the directory. Arguments: paths (list of str): List of file and directory paths. Returns: list of str: Absolute file paths. Raises: File404: If any of the paths do not exist.
codesearchnet
def inflate_plugin(self, identifier, definition=None, cls=None):
    """Instantiate a plugin from its identifier, definition and class.

    Args:
        identifier (str): the plugin identifier.
        definition (dict): the kwargs to instantiate the plugin with.
        cls (str): "provider", "checker", or None.

    Returns:
        Provider/Checker: instance of plugin.
    """
    plugin_cls = self.get_plugin(identifier, cls)
    kwargs = definition if definition else {}
    return plugin_cls(**kwargs)
Inflate a plugin thanks to it's identifier, definition and class. Args: identifier (str): the plugin identifier. definition (dict): the kwargs to instantiate the plugin with. cls (str): "provider", "checker", or None. Returns: Provider/Checker: instance of plugin.
codesearchnet
def _CreateStopsFolder(self, schedule, doc):
    """Create a KML Folder containing placemarks for each stop in the schedule.

    If there are no stops in the schedule then no folder is created.

    Args:
        schedule: The transitfeed.Schedule instance.
        doc: The KML Document ElementTree.Element instance.

    Returns:
        The Folder ElementTree.Element instance or None if there are no stops.
    """
    if (not schedule.GetStopList()):
        return None
    stop_folder = self._CreateFolder(doc, 'Stops')
    stop_folder_selection = self._StopFolderSelectionMethod(stop_folder)
    stop_style_selection = self._StopStyleSelectionMethod(doc)
    # Render stops in alphabetical order by name.
    stops = list(schedule.GetStopList())
    stops.sort(key=(lambda x: x.stop_name))
    for stop in stops:
        (folder, pathway_folder) = stop_folder_selection(stop)
        (style_id, pathway_style_id) = stop_style_selection(stop)
        self._CreateStopPlacemark(folder, stop, style_id)
        # When stop hierarchy display is enabled, draw a pathway line
        # from a child stop to its parent station.
        if (self.show_stop_hierarchy and
                (stop.location_type != transitfeed.Stop.LOCATION_TYPE_STATION) and
                stop.parent_station and (stop.parent_station in schedule.stops)):
            placemark = self._CreatePlacemark(pathway_folder, stop.stop_name,
                                              pathway_style_id)
            parent_station = schedule.stops[stop.parent_station]
            coordinates = [(stop.stop_lon, stop.stop_lat),
                           (parent_station.stop_lon, parent_station.stop_lat)]
            self._CreateLineString(placemark, coordinates)
    return stop_folder
Create a KML Folder containing placemarks for each stop in the schedule. If there are no stops in the schedule then no folder is created. Args: schedule: The transitfeed.Schedule instance. doc: The KML Document ElementTree.Element instance. Returns: The Folder ElementTree.Element instance or None if there are no stops.
codesearchnet
def get_by_contract(self, contract_hash):
    """Look up a set of notifications by the contract they are associated with.

    Args:
        contract_hash (UInt160 or str): hash of contract for notifications
            to be retrieved.

    Returns:
        list: a list of SmartContractEvent notifications.

    Raises:
        Exception: When the address is neither a UInt160 nor a
            40-character hex string.
    """
    hash = contract_hash
    # Accept a 40-character hex string as well as a UInt160 instance.
    if isinstance(contract_hash, str) and len(contract_hash) == 40:
        hash = UInt160.ParseString(contract_hash)
    if not isinstance(hash, UInt160):
        raise Exception("Incorrect address format")
    contractlist_snapshot = self.db.prefixed_db(NotificationPrefix.PREFIX_CONTRACT).snapshot()
    results = []
    for val in contractlist_snapshot.iterator(prefix=bytes(hash.Data), include_key=False):
        # Skip placeholder/too-short values.
        if len(val) > 4:
            try:
                event = SmartContractEvent.FromByteArray(val)
                results.append(event)
            except Exception as e:
                # Log and continue; one bad record should not abort the scan.
                logger.error("could not parse event: %s %s" % (e, val))
    return results
Look up a set of notifications by the contract they are associated with Args: contract_hash (UInt160 or str): hash of contract for notifications to be retreived Returns: list: a list of notifications
juraj-google-style
def VerifyStructure(self, parser_mediator, line):
    """Verify that this file is a Mac Wifi log file.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        line (str): line from a text file.

    Returns:
        bool: True if the line is in the expected format, False if not.
    """
    self._last_month = 0
    self._year_use = parser_mediator.GetEstimatedYear()
    # Try the regular header first, then the "turned over" variant.
    key = 'header'
    try:
        structure = self._MAC_WIFI_HEADER.parseString(line)
    except pyparsing.ParseException:
        structure = None
    if (not structure):
        key = 'turned_over_header'
        try:
            structure = self._MAC_WIFI_TURNED_OVER_HEADER.parseString(line)
        except pyparsing.ParseException:
            structure = None
    if (not structure):
        logger.debug('Not a Mac Wifi log file')
        return False
    time_elements_tuple = self._GetTimeElementsTuple(key, structure)
    # Reject headers whose date/time cannot be parsed.
    try:
        dfdatetime_time_elements.TimeElementsInMilliseconds(time_elements_tuple=time_elements_tuple)
    except ValueError:
        logger.debug('Not a Mac Wifi log file, invalid date and time: {0!s}'.format(structure.date_time))
        return False
    # Remember the month to detect year roll-over while parsing lines.
    self._last_month = time_elements_tuple[1]
    return True
Verify that this file is a Mac Wifi log file. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. line (str): line from a text file. Returns: bool: True if the line is in the expected format, False if not.
codesearchnet
def non_trainable_weights(self):
    """List of all non-trainable weights tracked by this layer.

    Non-trainable weights are *not* updated during training. They are
    expected to be updated manually in `call()`.

    Returns:
        A list of non-trainable variables (deduplicated).
    """
    if self.trainable:
        children_weights = self._gather_children_attribute('non_trainable_variables')
        non_trainable_weights = self._non_trainable_weights + children_weights
    else:
        # A frozen layer reports every weight — its own trainable and
        # non-trainable ones plus all child variables — as non-trainable.
        children_weights = self._gather_children_attribute('variables')
        non_trainable_weights = self._trainable_weights + self._non_trainable_weights + children_weights
    return self._dedup_weights(non_trainable_weights)
List of all non-trainable weights tracked by this layer. Non-trainable weights are *not* updated during training. They are expected to be updated manually in `call()`. Returns: A list of non-trainable variables.
github-repos
def defer(target, args=None, kwargs=None, callback=None):
    """Run `target` in a thread, optionally invoking `callback` when done.

    Worker instances are cached until finished so they are not garbage
    collected before the thread completes.

    Arguments:
        target (callable): Method or function to call.
        args (list, optional): Positional arguments for `target`.
        kwargs (dict, optional): Keyword arguments for `target`.
        callback (callable, optional): Called once `target` has finished.

    Returns:
        The started deferred worker object.
    """
    worker = _defer(target, args, kwargs, callback)
    # Self-cleanup once the thread signals completion.
    worker.finished.connect(lambda: _defer_cleanup(worker))
    worker.start()
    _defer_threads.append(worker)
    return worker
Perform operation in thread with callback Instances are cached until finished, at which point they are garbage collected. If we didn't do this, Python would step in and garbage collect the thread before having had time to finish, resulting in an exception. Arguments: target (callable): Method or function to call callback (callable, optional): Method or function to call once `target` has finished. Returns: None
juraj-google-style
def parse(self, values):
    """Override existing hyperparameter values, parsing them from a string.

    See parse_values for more detail on the allowed format for values.

    Args:
        values: String. Comma separated list of `name=value` pairs.

    Returns:
        The `HParams` instance.

    Raises:
        ValueError: If `values` cannot be parsed or names a
            hyperparameter that doesn't exist.
    """
    # Map each known hyperparameter name to its declared type.
    type_map = {name: spec[0] for name, spec in self._hparam_types.items()}
    return self.override_from_dict(parse_values(values, type_map))
Override existing hyperparameter values, parsing new values from a string. See parse_values for more detail on the allowed format for values. Args: values: String. Comma separated list of `name=value` pairs where 'value' must follow the syntax described above. Returns: The `HParams` instance. Raises: ValueError: If `values` cannot be parsed or a hyperparameter in `values` doesn't exist.
codesearchnet
def add_member_to_list(self, username, listname, member_type="USER"):
    """Add a member to an existing list.

    Args:
        username (str): The username of the user to add.
        listname (str): The name of the list to add the user to.
        member_type (str): Normally "USER"; use "LIST" when adding a
            list as a member of another list.
    """
    service = self.client.service
    return service.addMemberToList(listname, username, member_type, self.proxy_id)
Add a member to an existing list. Args: username (str): The username of the user to add listname (str): The name of the list to add the user to member_type (str): Normally, this should be "USER". If you are adding a list as a member of another list, set this to "LIST", instead.
juraj-google-style
def get_enabled_features(self, user_id, attributes=None):
    """Return the list of features that are enabled for the user.

    Args:
        user_id: ID for user.
        attributes: Dict representing user attributes.

    Returns:
        A list of the keys of the features that are enabled for the user;
        empty on invalid datafile or invalid inputs.
    """
    enabled_features = []
    # Bail out (with logging) on an invalid datafile or bad inputs.
    if (not self.is_valid):
        self.logger.error(enums.Errors.INVALID_DATAFILE.format('get_enabled_features'))
        return enabled_features
    if (not isinstance(user_id, string_types)):
        self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
        return enabled_features
    if (not self._validate_user_inputs(attributes)):
        return enabled_features
    # Check every configured feature flag for this user.
    for feature in self.config.feature_key_map.values():
        if self.is_feature_enabled(feature.key, user_id, attributes):
            enabled_features.append(feature.key)
    return enabled_features
Returns the list of features that are enabled for the user. Args: user_id: ID for user. attributes: Dict representing user attributes. Returns: A list of the keys of the features that are enabled for the user.
codesearchnet
def Parse(self, conditions, host_data):
    """Runs probes that evaluate whether collected data has an issue.

    Args:
        conditions: The trigger conditions.
        host_data: A map of artifacts and rdf data.

    Returns:
        Anomalies if an issue exists.

    Raises:
        ProcessingError: If a probe cannot process its artifact data.
    """
    processed = []
    probes = self.triggers.Calls(conditions)
    for p in probes:
        artifact_data = host_data.get(p.artifact)
        # Select the parsed ("PARSER") form or a specific result context.
        if not p.result_context:
            rdf_data = artifact_data["PARSER"]
        else:
            rdf_data = artifact_data.get(str(p.result_context))
        try:
            result = p.Parse(rdf_data)
        except ProcessingError as e:
            # Re-raise with the offending artifact named.
            raise ProcessingError("Bad artifact %s: %s" % (p.artifact, e))
        if result:
            processed.append(result)
    # The matcher decides whether the probe results constitute an anomaly.
    return self.matcher.Detect(probes, processed)
Runs probes that evaluate whether collected data has an issue. Args: conditions: The trigger conditions. host_data: A map of artifacts and rdf data. Returns: Anomalies if an issue exists.
juraj-google-style
def fetch(self, pageNum, itemsPerPage):
    """Fetch one page of alerts using the current status filter.

    Args:
        pageNum (int): Page number.
        itemsPerPage (int): Number of items per page.

    Returns:
        dict: Response payload.
    """
    # Thin pagination wrapper around get_all_alerts.
    status = self.status
    return self.get_all_alerts(status, pageNum, itemsPerPage)
Intermediate fetching Args: pageNum (int): Page number itemsPerPage (int): Number of Users per Page Returns: dict: Response payload
juraj-google-style
def parse_changes(json):
    """Compute successive close-price changes from JSON market data.

    Args:
        json: JSON data as a list of dict dates, where the keys are the
            raw market statistics.

    Returns:
        List of floats of price changes between consecutive entries.
    """
    changes = [json[i]['close'] - json[i - 1]['close']
               for i in range(1, len(json))]
    logger.debug('Market Changes (from JSON):\n{0}'.format(changes))
    return changes
Gets price changes from JSON Args: json: JSON data as a list of dict dates, where the keys are the raw market statistics. Returns: List of floats of price changes between entries in JSON.
juraj-google-style
def reset(self, name):
    """Reset assigned value for field.

    Resetting a field will return it to its default value or None.

    Args:
        name: Name of field to reset.

    Raises:
        AttributeError: If the message has no field (or attribute) with
            that name.
    """
    message_type = type(self)
    try:
        field = message_type.field_by_name(name)
    except KeyError:
        # NOTE(review): when the name is not a field but IS another
        # message attribute, `field` stays unbound and the access below
        # would raise NameError — confirm whether that path is reachable.
        if (name not in message_type.__by_name):
            raise AttributeError(('Message %s has no field %s' % (message_type.__name__, name)))
    if field.repeated:
        # Repeated fields reset to an empty FieldList rather than
        # removing the tag entirely.
        self.__tags[field.number] = FieldList(field, [])
    else:
        self.__tags.pop(field.number, None)
Reset assigned value for field. Resetting a field will return it to its default value or None. Args: name: Name of field to reset.
codesearchnet
def get_entity_group_version(key):
    """Return the version of the entity group containing key.

    Args:
        key: a key for an entity group whose __entity_group__ key you want.

    Returns:
        The version of the entity group containing key, or None if the
        entity group was never written to (or on non-HR datastores).
    """
    entity_group = EntityGroup.key_for_entity_group(key).get()
    return entity_group.version if entity_group else None
Return the version of the entity group containing key. Args: key: a key for an entity group whose __entity_group__ key you want. Returns: The version of the entity group containing key. This version is guaranteed to increase on every change to the entity group. The version may increase even in the absence of user-visible changes to the entity group. May return None if the entity group was never written to. On non-HR datatores, this function returns None.
juraj-google-style
def lf_polarities(L):
    """Return the polarities of each LF based on evidence in a label matrix.

    Args:
        L: an n x m scipy.sparse matrix where L_{i,j} is the label given
            by the jth LF to the ith candidate.

    Returns:
        list: for each LF (column) the sorted list of distinct labels it
        emits, collapsed to a scalar when the LF emits a single label.
    """
    # NOTE: the original spelling `L[(:, i)]` was a syntax error; a
    # column slice is written L[:, i]. `.data` holds the nonzero labels.
    polarities = [sorted(set(L[:, i].data)) for i in range(L.shape[1])]
    return [p[0] if len(p) == 1 else p for p in polarities]
Return the polarities of each LF based on evidence in a label matrix. Args: L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the jth LF to the ith candidate
codesearchnet
def assert_raises(ex_type, func, *args, **kwargs):
    """Check that a function raises an error when given specific arguments.

    Args:
        ex_type (Exception): expected exception type.
        func (callable): live python function.

    Returns:
        True when the expected exception type was raised.

    Raises:
        AssertionError: when no error, or an error of the wrong type,
            is raised.
    """
    raised = None
    try:
        func(*args, **kwargs)
    except Exception as ex:
        raised = ex
    if raised is None:
        raise AssertionError('No error was raised')
    assert isinstance(raised, ex_type), (
        'Raised %r but type should have been %r' % (raised, ex_type))
    return True
r""" Checks that a function raises an error when given specific arguments. Args: ex_type (Exception): exception type func (callable): live python function CommandLine: python -m utool.util_assert assert_raises --show Example: >>> # ENABLE_DOCTEST >>> from utool.util_assert import * # NOQA >>> import utool as ut >>> ex_type = AssertionError >>> func = len >>> # Check that this raises an error when something else does not >>> assert_raises(ex_type, assert_raises, ex_type, func, []) >>> # Check this does not raise an error when something else does >>> assert_raises(ValueError, [].index, 0)
codesearchnet
def _expand_to_beam_size(tensor, beam_size):
    """Tiles a given tensor by beam_size.

    Args:
        tensor: tensor to tile [batch_size, ...].
        beam_size: How much to tile the tensor by.

    Returns:
        Tiled tensor [batch_size, beam_size, ...].
    """
    expanded = tf.expand_dims(tensor, axis=1)
    # Repeat only along the newly inserted beam axis.
    multiples = [1] * expanded.shape.ndims
    multiples[1] = beam_size
    return tf.tile(expanded, multiples)
Tiles a given tensor by beam_size. Args: tensor: tensor to tile [batch_size, ...] beam_size: How much to tile the tensor by. Returns: Tiled tensor [batch_size, beam_size, ...]
codesearchnet
def create_audit_student_enrollment(self, course_id):
    """Create an audit enrollment for the user in a given course.

    Args:
        course_id (str): an edX course id.

    Returns:
        Enrollment: object representing the student enrollment in the
        provided course.
    """
    payload = {
        "mode": "audit",
        "course_details": {"course_id": course_id},
    }
    resp = self.requester.post(
        urljoin(self.base_url, self.enrollment_url),
        json=payload,
    )
    resp.raise_for_status()
    return Enrollment(resp.json())
Creates an audit enrollment for the user in a given course Args: course_id (str): an edX course id Returns: Enrollment: object representing the student enrollment in the provided course
juraj-google-style
def add(self, *l):
    """Add inner to outer.

    Args:
        *l: element(s) that are passed into the Inner init; nested
            iterables are flattened first.
    """
    for a in flatten(l):
        # Wrap each flattened element in Inner and append via _add.
        self._add([self.Inner(a)], self.l)
add inner to outer Args: *l: element that is passed into Inner init
juraj-google-style
def eval_math_expression(expression: str) -> Optional[Union[float, int]]:
    """Safely evaluate a mathematical expression and return its value.

    Args:
        expression (`str`): The expression to evaluate.

    Returns:
        `Optional[Union[float, int]]`: `None` if the evaluation fails in
        any way, the computed value otherwise.
    """
    try:
        tree = ast.parse(expression, mode='eval')
        return eval_node(tree.body)
    except TypeError:
        # Non-numeric operands (or a non-string expression) yield None.
        return None
Evaluate (safely) a mathematial expression and returns its value. Args: expression (`str`): The expression to evaluate. Returns: `Optional[Union[float, int]]`: Returns `None` if the evaluation fails in any way and the value computed otherwise. Example: ```py >>> eval_expr('2^6') 4 >>> eval_expr('2**6') 64 >>> eval_expr('1 + 2*3**(4^5) / (6 + -7)') -5.0 ```
github-repos
def fts_match_any(self, fts, inv):
    """Return `True` if any segment in `inv` matches the features in `fts`.

    Args:
        fts (list): a collection of (value, feature) tuples.
        inv (list): a collection of IPA segments represented as Unicode
            strings.

    Returns:
        bool: `True` if any segment in `inv` matches the features in `fts`.
    """
    for segment in inv:
        if self.fts_match(fts, segment):
            return True
    return False
Return `True` if any segment in `inv` matches the features in `fts` Args: fts (list): a collection of (value, feature) tuples inv (list): a collection of IPA segments represented as Unicode strings Returns: bool: `True` if any segment in `inv` matches the features in `fts`
juraj-google-style
def main():
    """Main script handler.

    Returns:
        int: 0 for success, >1 error code.
    """
    logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s')
    try:
        cli()
    except LocationsError as error:
        print(error)
        return 2
    except RuntimeError as error:
        print(error)
        return 255
    except OSError as error:
        # Propagate the OS-level errno as the exit code.
        return error.errno
    return 0
Main script handler. Returns: int: 0 for success, >1 error code
codesearchnet
def map_placement_transcode_configs(self, placement_feed, transcode_configs_feed, pricing_schedule_feed):
    """Maps sub feeds with the parent feed based on placement id.

    Mutates each placement in place, attaching its matching pricing
    schedules and transcode configs.

    Args:
        placement_feed: Bulkdozer feed representing the placements
            configurations.
        transcode_configs_feed: Bulkdozer feed representing the transcode
            configs.
        pricing_schedule_feed: Bulkdozer feed representing the pricing
            schedules.
    """
    for placement in placement_feed:
        # Attach all pricing schedules that reference this placement id.
        placement['pricing_schedule'] = []
        for pricing_schedule in pricing_schedule_feed:
            if placement.get(FieldMap.PLACEMENT_ID, '') == pricing_schedule.get(FieldMap.PLACEMENT_ID, None):
                placement['pricing_schedule'].append(pricing_schedule)
        # Attach transcode configs matched by transcode id, when present.
        transcode_id = placement.get(FieldMap.TRANSCODE_ID, '')
        placement['transcode_config'] = []
        if transcode_id:
            for transcode_config in transcode_configs_feed:
                if transcode_id == transcode_config.get(FieldMap.TRANSCODE_ID, None):
                    placement['transcode_config'].append(transcode_config)
Maps sub feeds with the parent feed based on placement id. Args: placement_feed: Bulkdozer feed representing the placements configurations. trascode_configs_feed: Bulkdozer feed representing the transcode configs. pricing_schedule_feed: Bulkdozer feed representing the pricing schedules.
github-repos
def CreateCampaign(client, merchant_id, budget_id):
    """Creates a new Display Network Shopping campaign.

    Args:
        client: an AdWordsClient instance.
        merchant_id: a int merchant center ID.
        budget_id: a int budget ID.

    Returns:
        The campaign that was successfully created.
    """
    # Local import: only needed for generating a unique campaign name.
    import uuid
    campaign_service = client.GetService('CampaignService', 'v201809')
    campaign = {
        # NOTE: the original source was corrupted here — the 'name' value
        # and the 'advertisingChannelType' key were fused into one token,
        # which is a syntax error. A unique name also avoids
        # DUPLICATE_CAMPAIGN_NAME errors on repeated runs.
        'name': 'Shopping campaign #%s' % uuid.uuid4(),
        'advertisingChannelType': 'DISPLAY',
        'status': 'PAUSED',
        'budget': {
            'budgetId': budget_id
        },
        'biddingStrategyConfiguration': {
            'biddingStrategyType': 'MANUAL_CPC'
        },
        'settings': [{
            'xsi_type': 'ShoppingSetting',
            'campaignPriority': 0,
            'merchantId': merchant_id,
            'salesCountry': 'ZZ',
            'enableLocal': True,
        }]
    }
    operations = [{
        'operator': 'ADD',
        'operand': campaign
    }]
    return campaign_service.mutate(operations)['value'][0]
Creates a new Display Network campaign. Args: client: an AdWordsClient instance. merchant_id: a int merchant center ID. budget_id: a int budget ID. Returns: The campaign that was successfully created.
juraj-google-style
def version(self):
    """Return the version and revision of the gitlab server.

    The result is cached on the instance: self._server_version and
    self._server_revision are set on the first successful (or failed)
    lookup.

    Returns:
        tuple (str, str): The server version and server revision, or
        ('unknown', 'unknown') if the server doesn't perform as expected.
    """
    if self._server_version is None:
        try:
            data = self.http_get('/version')
            self._server_version = data['version']
            self._server_revision = data['revision']
        except Exception:
            # Any failure (network, bad payload) degrades to 'unknown'.
            self._server_version = self._server_revision = 'unknown'
    return (self._server_version, self._server_revision)
Returns the version and revision of the gitlab server. Note that self.version and self.revision will be set on the gitlab object. Returns: tuple (str, str): The server version and server revision. ('unknown', 'unknwown') if the server doesn't perform as expected.
codesearchnet
def deploy_raiden_contracts(
        self,
        max_num_of_token_networks: Optional[int],
) -> DeployedContracts:
    """Deploy all required raiden contracts and return a dict of contract_name:address.

    Args:
        max_num_of_token_networks (Optional[int]): The max number of tokens
            that can be registered to the TokenNetworkRegistry. If None, the
            argument is omitted from the call to the constructor of
            TokenNetworkRegistry.
    """
    deployed_contracts: DeployedContracts = {
        'contracts_version': self.contract_version_string(),
        'chain_id': int(self.web3.version.network),
        'contracts': {},
    }
    self._deploy_and_remember(CONTRACT_ENDPOINT_REGISTRY, [], deployed_contracts)
    secret_registry = self._deploy_and_remember(
        contract_name=CONTRACT_SECRET_REGISTRY,
        arguments=[],
        deployed_contracts=deployed_contracts,
    )
    # TokenNetworkRegistry's constructor needs the SecretRegistry address.
    token_network_registry_args = [
        secret_registry.address,
        deployed_contracts['chain_id'],
        DEPLOY_SETTLE_TIMEOUT_MIN,
        DEPLOY_SETTLE_TIMEOUT_MAX,
    ]
    if max_num_of_token_networks:
        # Optional constructor argument; omitted when falsy.
        token_network_registry_args.append(max_num_of_token_networks)
    self._deploy_and_remember(
        contract_name=CONTRACT_TOKEN_NETWORK_REGISTRY,
        arguments=token_network_registry_args,
        deployed_contracts=deployed_contracts,
    )
    return deployed_contracts
Deploy all required raiden contracts and return a dict of contract_name:address Args: max_num_of_token_networks (Optional[int]): The max number of tokens that can be registered to the TokenNetworkRegistry. If None, the argument is omitted from the call to the constructor of TokenNetworkRegistry.
juraj-google-style
def write(self, dataset):
    """Writes a dataset of scalar `tf.string` elements to a TFRecord file.

    In graph mode the returned op performs the write when executed; in
    eager mode the write happens as part of this call.

    Args:
        dataset: a `tf.data.Dataset` producing scalar `tf.string` elements.

    Returns:
        An op that writes the dataset contents to the file given at
        construction time.

    Raises:
        TypeError: if `dataset` is not a `tf.data.Dataset`, or if its
            elements are not scalar strings.
    """
    if not isinstance(dataset, data_types.DatasetV2):
        raise TypeError(f'Invalid `dataset.` Expected a `tf.data.Dataset` object but got {type(dataset)}.')
    # The element structure must be a single scalar string tensor.
    if not dataset_ops.get_structure(dataset).is_compatible_with(tensor_spec.TensorSpec([], dtypes.string)):
        raise TypeError(f'Invalid `dataset`. Expected a`dataset` that produces scalar `tf.string` elements, but got a dataset which produces elements with shapes {dataset_ops.get_legacy_output_shapes(dataset)} and types {dataset_ops.get_legacy_output_types(dataset)}.')
    # Carry over any debug options set on the dataset before serializing.
    dataset = dataset._apply_debug_options()
    return gen_experimental_dataset_ops.dataset_to_tf_record(dataset._variant_tensor, self._filename, self._compression_type)
Writes a dataset to a TFRecord file. An operation that writes the content of the specified dataset to the file specified in the constructor. If the file exists, it will be overwritten. Args: dataset: a `tf.data.Dataset` whose elements are to be written to a file Returns: In graph mode, this returns an operation which when executed performs the write. In eager mode, the write is performed by the method itself and there is no return value. Raises: TypeError: if `dataset` is not a `tf.data.Dataset`. TypeError: if the elements produced by the dataset are not scalar strings.
github-repos
def parse_mmtf_header(infile):
    """Parse an MMTF file and return basic header-like information.

    Args:
        infile (str): Path to MMTF file

    Returns:
        dict: Parsed header fields: date, release_date,
        experimental_method, resolution, description, chemicals.
    """
    decoder = mmtf.parse(infile)
    header = {
        'date': decoder.deposition_date,
        'release_date': decoder.release_date,
    }
    try:
        # Some decoders return bytes for the method names; decode to str.
        header['experimental_method'] = [m.decode() for m in decoder.experimental_methods]
    except AttributeError:
        header['experimental_method'] = [m for m in decoder.experimental_methods]
    header['resolution'] = decoder.resolution
    header['description'] = decoder.title
    # Waters and standard peptide-linking residues are not "chemicals".
    skip_names = ['HOH']
    skip_types = ['l-peptide linking', 'peptide linking']
    unique_chemicals = set()
    for group_index in decoder.group_type_list:
        group = decoder.group_list[group_index]
        if group['chemCompType'].lower() in skip_types:
            continue
        if group['groupName'] in skip_names:
            continue
        unique_chemicals.add(group['groupName'])
    header['chemicals'] = list(unique_chemicals)
    return header
Parse an MMTF file and return basic header-like information. Args: infile (str): Path to MMTF file Returns: dict: Dictionary of parsed header Todo: - Can this be sped up by not parsing the 3D coordinate info somehow? - OR just store the sequences when this happens since it is already being parsed.
juraj-google-style
def add_path_argument(cls, group, argname, dest=None, help_=None):
    """Expose a namespaced path argument on an argument group.

    The option name is prefixed with the extension's `argument_prefix`,
    and the mapping from namespaced destination to the bare destination is
    recorded in `cls.path_arguments`.

    Args:
        group: arparse.ArgumentGroup, the extension argument group
        argname: str, the name of the argument, will be namespaced.
        dest: str, similar to the `dest` argument of
            `argparse.ArgumentParser.add_argument`, will be namespaced.
        help_: str, similar to the `help` argument of
            `argparse.ArgumentParser.add_argument`.
    """
    prefixed = '{}-{}'.format(cls.argument_prefix, argname)
    if dest is None:
        dest = prefixed.replace('-', '_')
        # Strip "<prefix>_" to recover the bare destination name.
        final_dest = dest[len(cls.argument_prefix) + 1:]
    else:
        final_dest = dest
        dest = '{}_{}'.format(cls.argument_prefix, dest)
    group.add_argument('--' + prefixed, action='store', dest=dest, help=help_)
    cls.path_arguments[dest] = final_dest
Subclasses may call this to expose a path argument. Args: group: arparse.ArgumentGroup, the extension argument group argname: str, the name of the argument, will be namespaced. dest: str, similar to the `dest` argument of `argparse.ArgumentParser.add_argument`, will be namespaced. help_: str, similar to the `help` argument of `argparse.ArgumentParser.add_argument`.
codesearchnet
def _serialize_container_factory(suffix, container_map): def serialize(ion_event): if not ion_event.ion_type.is_container: raise TypeError('Expected container type') return container_map[ion_event.ion_type] serialize.__name__ = '_serialize_container_' + suffix return serialize
Returns a function that serializes container start/end. Args: suffix (str): The suffix to name the function with. container_map (Dictionary[core.IonType, bytes]): Maps each container IonType to the bytes emitted for it. Returns: function: The closure for serialization.
juraj-google-style
def get(self, po):
    """Lookup the value for a PluginOption instance.

    Dispatches on the option's type to a `_get_<typ>` handler method and
    resolves defaults (REQUIRED, INHERIT_GLOBAL) for missing options.

    Args:
        po: PluginOption

    Returns:
        Converted value, as produced by the type handler.

    Raises:
        ValueError: if no handler exists for the option's type.
        NameError: if a REQUIRED option is missing from the section.
    """
    name = po.name
    typ = po.typ
    default = po.default
    handler = getattr(self, '_get_{}'.format(typ), None)
    if handler is None:
        raise ValueError(typ)
    # Record which options have been consulted.
    self.seen.add(name)
    if not self.parser.has_option(self.section, name):
        if default is REQUIRED:
            raise NameError(self.section, name)
        if isinstance(default, INHERIT_GLOBAL):
            # Fall back to the 'global' section with the inherited default.
            return handler('global', name, default.default)
    # NOTE(review): reconstructed from a collapsed record — the final call
    # is taken to run both when the option is present and when it is
    # absent with a plain default; confirm against upstream.
    return handler(self.section, name, default)
Lookup value for a PluginOption instance Args: po: PluginOption Returns: converted value
juraj-google-style
def transform(self, sents):
    """Convert lists of tokens into a Tensor of embedding indices.

    Sentences are sorted longest-first before right-padding.

    Args:
        sents: A list of lists of tokens (sentences), already marked with
            the mark_entities() helper.

    Returns:
        X: A LongTensor of shape (num_items, max_seq_len)
    """
    if self.vocab is None:
        raise Exception('Must run .fit() for .fit_transform() before calling .transform().')

    def to_indices(tokens):
        indices = [self.vocab.stoi[token] for token in tokens]
        return torch.tensor(indices, dtype=torch.long)

    # Stable sort by descending length.
    ordered = sorted((to_indices(s) for s in sents), key=len, reverse=True)
    return torch.LongTensor(pad_sequence(ordered, batch_first=True))
Converts lists of tokens into a Tensor of embedding indices. Args: sents: A list of lists of tokens (representing sentences) NOTE: These sentences should already be marked using the mark_entities() helper. Returns: X: A Tensor of shape (num_items, max_seq_len)
codesearchnet
def compute(self, x, yerr):
    """Compute and factorize the covariance matrix.

    Args:
        x (ndarray[nsamples, ndim]): The independent coordinates of the
            data points.
        yerr (ndarray[nsamples] or float): The Gaussian uncertainties on
            the data points at coordinates ``x``; added in quadrature to
            the diagonal of the covariance matrix.
    """
    cov = self.kernel.get_value(x)
    # Add measurement variance to the diagonal in place.
    cov[np.diag_indices_from(cov)] += yerr ** 2
    upper = cholesky(cov, overwrite_a=True, lower=False)
    self._factor = (upper, False)
    # log|K| = 2 * sum(log(diag(U))) for the upper Cholesky factor U.
    self.log_determinant = 2 * np.sum(np.log(np.diag(upper)))
    self.computed = True
Compute and factorize the covariance matrix. Args: x (ndarray[nsamples, ndim]): The independent coordinates of the data points. yerr (ndarray[nsamples] or float): The Gaussian uncertainties on the data points at coordinates ``x``. These values will be added in quadrature to the diagonal of the covariance matrix.
juraj-google-style
def _act(self, utterance: str) -> list: if self.stateful: utterance = [[utterance], [self.key]] else: utterance = [[utterance]] agent_response: list = self.agent(*utterance) return agent_response
Infers DeepPavlov agent with raw user input extracted from Alexa request. Args: utterance: Raw user input extracted from Alexa request. Returns: response: DeepPavlov agent response.
codesearchnet
def _run_function_for_calibration_graph_mode(sess: session.Session, signature_def: meta_graph_pb2.SignatureDef, representative_dataset: rd.RepresentativeDataset) -> None:
    """Runs the representative dataset through a function for calibration.

    NOTE: This is intended to be run in graph mode (TF1). The function is
    identified by the SignatureDef.

    Args:
        sess: The Session object to run the function in.
        signature_def: A SignatureDef that identifies a function by
            specifying the inputs and outputs.
        representative_dataset: The representative dataset to run through
            the function.
    """
    # Fetch every declared output so all relevant ops are executed.
    output_tensor_names = [output_tensor_info.name for output_tensor_info in signature_def.outputs.values()]
    # Each sample must provide exactly the declared input keys.
    sample_validator = _create_sample_validator(expected_input_keys=signature_def.inputs.keys())
    for sample in map(sample_validator, _log_sample_num_for_calibration(representative_dataset)):
        feed_dict = rd.create_feed_dict_from_input_data(sample, signature_def)
        sess.run(output_tensor_names, feed_dict=feed_dict)
Runs the representative dataset through a function for calibration. NOTE: This is intended to be run in graph mode (TF1). The function is identified by the SignatureDef. Args: sess: The Session object to run the function in. signature_def: A SignatureDef that identifies a function by specifying the inputs and outputs. representative_dataset: The representative dataset to run through the function.
github-repos
def log_deprecated(name='', text='', eos=''):
    """Log a deprecation warning.

    Args:
        name (str): name of the deprecated item.
        text (str, optional): information about the deprecation.
        eos (str, optional): end-of-service date such as "YYYY-MM-DD".
    """
    assert (name or text)
    if eos:
        # Render "YYYY-MM-DD" as e.g. "after 21 Mar" for the message.
        eos = ('after ' + datetime(*map(int, eos.split('-'))).strftime('%d %b'))
    if name:
        if eos:
            warn_msg = ('%s will be deprecated %s. %s' % (name, eos, text))
        else:
            warn_msg = ('%s was deprecated. %s' % (name, text))
    else:
        warn_msg = text
        if eos:
            warn_msg += (' Legacy period ends %s' % eos)
    # Fixed: Logger.warn() is a deprecated alias of Logger.warning().
    logger.warning(('[Deprecated] ' + warn_msg))
Log deprecation warning. Args: name (str): name of the deprecated item. text (str, optional): information about the deprecation. eos (str, optional): end of service date such as "YYYY-MM-DD".
codesearchnet
def clear(self, name=None):
    """Clears the staging area.

    Args:
        name: A name for the operation (optional); defaults to
            "<staging_area_name>_clear".

    Returns:
        The created op.
    """
    op_name = '%s_clear' % self._name if name is None else name
    return self._clear_fn(
        shared_name=self._name,
        name=op_name,
        dtypes=self._dtypes,
        capacity=self._capacity,
        memory_limit=self._memory_limit,
    )
Clears the staging area. Args: name: A name for the operation (optional) Returns: The created op
github-repos
def dimension_values(self, dimension, expanded=True, flat=True):
    """Return the values along the requested dimension.

    Args:
        dimension: The dimension to return values for
        expanded (bool, optional): Whether to expand values
        flat (bool, optional): Whether to flatten array

    Returns:
        NumPy array of values along the requested dimension
    """
    dim_idx = self.get_dimension_index(dimension)
    # Dimensions 0 and 1 are the control-point coordinates stored locally;
    # anything else is resolved by the parent class.
    if dim_idx not in (0, 1):
        return super(Spline, self).dimension_values(dimension)
    coords = [point[dim_idx] for point in self.data[0]]
    return np.array(coords)
Return the values along the requested dimension. Args: dimension: The dimension to return values for expanded (bool, optional): Whether to expand values flat (bool, optional): Whether to flatten array Returns: NumPy array of values along the requested dimension
juraj-google-style
def CheckOperatorSpacing(filename, clean_lines, linenum, error):
    """Checks for horizontal spacing around operators.

    Args:
        filename: The name of the current file.
        clean_lines: A CleansedLines instance containing the file.
        linenum: The number of the line to check.
        error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]
    # Mask out overloaded operator names (e.g. operator<<) so their symbol
    # characters are not mistaken for real operators below.
    while True:
        match = Match('^(.*\\boperator\\b)(\\S+)(\\s*\\(.*)$', line)
        if match:
            line = ((match.group(1) + ('_' * len(match.group(2)))) + match.group(3))
        else:
            break
    # Bare '=' touching a word char, excluding comparisons, compound
    # assignments, control-flow conditions and operator=.
    if ((Search('[\\w.]=', line) or Search('=[\\w.]', line)) and (not Search('\\b(if|while|for) ', line)) and (not Search('(>=|<=|==|!=|&=|\\^=|\\|=|\\+=|\\*=|\\/=|\\%=)', line)) and (not Search('operator=', line))):
        error(filename, linenum, 'whitespace/operators', 4, 'Missing spaces around =')
    match = Search('[^<>=!\\s](==|!=|<=|>=|\\|\\|)[^<>=!\\s,;\\)]', line)
    if match:
        error(filename, linenum, 'whitespace/operators', 3, ('Missing spaces around %s' % match.group(1)))
    # NOTE(review): the following fragment is garbled in this record — the
    # `elif` condition was truncated mid-expression (upstream cpplint guards
    # with `elif not Match(r'#.*include', line):` before the '<' check).
    # Kept byte-identical; restore from upstream before use.
    elif (not Match(' match = Match('^(.*[^\\s<])<[^\\s=<,]', line)
    if match:
        (_, _, end_pos) = CloseExpression(clean_lines, linenum, len(match.group(1)))
        if (end_pos <= (- 1)):
            error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around <')
    match = Match('^(.*[^-\\s>])>[^\\s=>,]', line)
    if match:
        (_, _, start_pos) = ReverseCloseExpression(clean_lines, linenum, len(match.group(1)))
        if (start_pos <= (- 1)):
            error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around >')
    # '<<' is allowed un-spaced between digits (e.g. 10<<20) but not
    # when used as a stream operator.
    match = Search('(operator|[^\\s(<])(?:L|UL|ULL|l|ul|ull)?<<([^\\s,=<])', line)
    if (match and (not (match.group(1).isdigit() and match.group(2).isdigit())) and (not ((match.group(1) == 'operator') and (match.group(2) == ';')))):
        error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around <<')
    match = Search('>>[a-zA-Z_]', line)
    if match:
        error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around >>')
    # Unary !, ~, --, ++ should hug their operand, not be spaced away.
    match = Search('(!\\s|~\\s|[\\s]--[\\s;]|[\\s]\\+\\+[\\s;])', line)
    if match:
        error(filename, linenum, 'whitespace/operators', 4, ('Extra space for operator %s' % match.group(1)))
Checks for horizontal spacing around operators. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
codesearchnet
def resume_training(self, train_data, model_path, valid_data=None):
    """Resume training of a classifier from a saved checkpoint.

    Reloads the appropriate state via the checkpointer, then continues
    training with the model in train mode.

    Args:
        train_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader
            of X (data) and Y (labels) for the train split
        model_path: the path to the saved checkpoint for resuming training
        valid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader
            of X (data) and Y (labels) for the dev split
    """
    restore_state = self.checkpointer.restore(model_path)
    loss_fn = self._get_loss_fn()
    # Switch to train mode before resuming optimization.
    self.train()
    self._train_model(
        train_data=train_data,
        loss_fn=loss_fn,
        valid_data=valid_data,
        restore_state=restore_state,
    )
Resume training of a classifier by reloading the appropriate state_dicts for each model. Args: train_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of X (data) and Y (labels) for the train split model_path: the path to the saved checkpoint for resuming training valid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of X (data) and Y (labels) for the dev split
codesearchnet
def transform(self, transform, desc=None):
    """Create a copy of this query, transformed by `transform`.

    Args:
        transform (callable): Callable that takes an iterable of values
            and returns an iterable of transformed values.

    Keyword Args:
        desc (str): A description of the transform, to use in log
            messages. Defaults to "transform(<function name>)".

    Returns:
        Query
    """
    if desc is None:
        desc = u'transform({})'.format(getattr(transform, '__name__', ''))
    return self.replace(
        transforms=self.transforms + [transform],
        desc_stack=self.desc_stack + [desc],
    )
Create a copy of this query, transformed by `transform`. Args: transform (callable): Callable that takes an iterable of values and returns an iterable of transformed values. Keyword Args: desc (str): A description of the transform, to use in log messages. Defaults to the name of the `transform` function. Returns: Query
codesearchnet
def resize(self, image: 'torch.Tensor', size: SizeDict, interpolation: 'F.InterpolationMode'=None, antialias: bool=True, **kwargs) -> 'torch.Tensor':
    """Resize an image according to the fields set on `size`.

    Args:
        image (`torch.Tensor`): Image to resize.
        size (`SizeDict`): Target size; one of (shortest_edge [+
            longest_edge]), (max_height, max_width) or (height, width)
            must be set.
        interpolation (`InterpolationMode`, *optional*, defaults to
            `InterpolationMode.BILINEAR`): Filter used when resizing.
        antialias (`bool`, *optional*, defaults to `True`): Whether to
            apply antialiasing.

    Returns:
        `torch.Tensor`: The resized image.

    Raises:
        ValueError: if `size` has none of the supported key combinations.
    """
    interpolation = interpolation if interpolation is not None else F.InterpolationMode.BILINEAR
    # Branch on which size fields are present, most specific first.
    if size.shortest_edge and size.longest_edge:
        # Resize the image so that the shortest edge or the longest edge is of the given size
        # while maintaining the aspect ratio of the original image.
        new_size = get_size_with_aspect_ratio(image.size()[-2:], size.shortest_edge, size.longest_edge)
    elif size.shortest_edge:
        new_size = get_resize_output_image_size(image, size=size.shortest_edge, default_to_square=False, input_data_format=ChannelDimension.FIRST)
    elif size.max_height and size.max_width:
        new_size = get_image_size_for_max_height_width(image.size()[-2:], size.max_height, size.max_width)
    elif size.height and size.width:
        new_size = (size.height, size.width)
    else:
        raise ValueError(f"Size must contain 'height' and 'width' keys, or 'max_height' and 'max_width', or 'shortest_edge' key. Got {size}.")
    return F.resize(image, new_size, interpolation=interpolation, antialias=antialias)
Resize an image to `(size["height"], size["width"])`. Args: image (`torch.Tensor`): Image to resize. size (`SizeDict`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`): `InterpolationMode` filter to use when resizing the image e.g. `InterpolationMode.BICUBIC`. Returns: `torch.Tensor`: The resized image.
github-repos
def _maybe_add_default_serving_output(export_outputs):
    """Add a default serving output to the export_outputs if not present.

    Args:
        export_outputs: dict describing the output signatures to be
            exported to `SavedModel` and used during serving.

    Returns:
        The export_outputs dict (mutated in place), with the default
        serving signature aliased to the single entry when necessary.

    Raises:
        ValueError: if multiple export_outputs were provided without a
            default serving key.
    """
    default_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
    if len(export_outputs) == 1:
        only_key, only_value = next(iter(export_outputs.items()))
        if only_key != default_key:
            # Alias the lone signature as the default as well.
            export_outputs[default_key] = only_value
    if len(export_outputs) > 1 and default_key not in export_outputs:
        raise ValueError('Multiple `export_outputs` were provided, but none of them are specified as the default. Use`tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY` to specify a default.')
    return export_outputs
Add a default serving output to the export_outputs if not present. Args: export_outputs: Describes the output signatures to be exported to `SavedModel` and used during serving. Should be a dict. Returns: export_outputs dict with default serving signature added if necessary Raises: ValueError: if multiple export_outputs were provided without a default serving key.
github-repos
def register_hook(self, hook, priority='NORMAL'):
    """Register a hook into the hook list, keeping it sorted by priority.

    Args:
        hook (:obj:`Hook`): The hook to be registered.
        priority (int or str or :obj:`Priority`): Hook priority. Lower
            value means higher priority.
    """
    assert isinstance(hook, Hook)
    if hasattr(hook, 'priority'):
        raise ValueError('"priority" is a reserved attribute for hooks')
    hook.priority = get_priority(priority)
    # Scan backwards and insert after the last hook whose priority is not
    # larger, so registration order is preserved among equal priorities.
    for idx in range(len(self._hooks) - 1, -1, -1):
        if hook.priority >= self._hooks[idx].priority:
            self._hooks.insert(idx + 1, hook)
            return
    self._hooks.insert(0, hook)
Register a hook into the hook list. Args: hook (:obj:`Hook`): The hook to be registered. priority (int or str or :obj:`Priority`): Hook priority. Lower value means higher priority.
codesearchnet
def EnterClassType(self, node):
    """Fills in a class type.

    Args:
        node: A ClassType. This node will have a name, which we use for
            lookup.

    Returns:
        The same ClassType. We will have done our best to fill in its
        "cls" attribute. Call VerifyLookup() on your tree if you want to
        be sure that all of the cls pointers have been filled in.
    """
    # Breadth-first walk over alias chains, guarding against cycles.
    nodes = [node]
    seen = set()
    while nodes:
        cur_node = nodes.pop(0)
        if cur_node in seen:
            continue
        seen.add(cur_node)
        for prefix, cls in self._Lookup(cur_node):
            # Aliases to NothingType are deliberately skipped.
            if isinstance(cls, pytd.Alias) and isinstance(cls.type, pytd.NothingType):
                continue
            if isinstance(cls, pytd.Alias) and isinstance(cls.type, pytd.ClassType):
                if cls.type.cls:
                    # The alias target is already resolved; follow it.
                    cls = cls.type.cls
                else:
                    # Unresolved alias target: queue it for a later pass.
                    nodes.append(cls.type)
            if isinstance(cls, pytd.Class):
                node.cls = cls
                return
            else:
                logging.warning("Couldn't resolve %s: Not a class: %s", prefix + node.name, type(cls))
Fills in a class type. Args: node: A ClassType. This node will have a name, which we use for lookup. Returns: The same ClassType. We will have done our best to fill in its "cls" attribute. Call VerifyLookup() on your tree if you want to be sure that all of the cls pointers have been filled in.
github-repos
def enforce_epsilon_and_compute_hash(dataset_batch_dir, adv_dir, output_dir, epsilon):
    """Enforce the perturbation size on images and compute their hashes.

    Each adversarial image is clipped into the L-infinity ball of radius
    `epsilon` around its clean counterpart, written to `output_dir`, and
    hashed.

    Args:
        dataset_batch_dir: directory with the images of a dataset batch.
        adv_dir: directory with generated adversarial images.
        output_dir: directory where clipped results are written.
        epsilon: size of the allowed perturbation.

    Returns:
        Dictionary mapping image ID (filename without extension) to the
        SHA-1 hash of the clipped image.
    """
    dataset_images = [f for f in os.listdir(dataset_batch_dir) if f.endswith('.png')]
    image_hashes = {}
    resize_warning = False
    for img_name in dataset_images:
        # Missing adversarial outputs are logged and skipped.
        if not os.path.exists(os.path.join(adv_dir, img_name)):
            logging.warning('Image %s not found in the output', img_name)
            continue
        image = np.array(
            Image.open(os.path.join(dataset_batch_dir, img_name)).convert('RGB'))
        # int32 so the +/- epsilon arithmetic below cannot wrap around.
        image = image.astype('int32')
        image_max_clip = np.clip(image + epsilon, 0, 255).astype('uint8')
        image_min_clip = np.clip(image - epsilon, 0, 255).astype('uint8')
        adv_image = Image.open(os.path.join(adv_dir, img_name)).convert('RGB')
        # PIL size is (width, height); numpy shape is (height, width).
        if adv_image.size[::-1] != image.shape[:2]:
            resize_warning = True
            adv_image = adv_image.resize((image.shape[1], image.shape[0]), Image.BICUBIC)
        adv_image = np.array(adv_image)
        # Per-pixel clamp into [clean - eps, clean + eps].
        clipped_adv_image = np.clip(adv_image, image_min_clip, image_max_clip)
        Image.fromarray(clipped_adv_image).save(os.path.join(output_dir, img_name))
        image_hashes[img_name[:-4]] = hashlib.sha1(
            clipped_adv_image.view(np.uint8)).hexdigest()
    if resize_warning:
        logging.warning('One or more adversarial images had incorrect size')
    return image_hashes
Enforces size of perturbation on images, and compute hashes for all images. Args: dataset_batch_dir: directory with the images of specific dataset batch adv_dir: directory with generated adversarial images output_dir: directory where to copy result epsilon: size of perturbation Returns: dictionary with mapping form image ID to hash.
juraj-google-style
def _decorator(func):
    """Attach the appropriate docstring to the assertion op for symbol `sym`.

    `sym` and `sym_name` are taken from the enclosing scope; the function's
    own name supplies `opname` for the generated documentation.

    Args:
        func: Function for a TensorFlow op.

    Returns:
        The same function, with documentation attached.
    """
    opname = func.__name__
    cap_sym_name = sym_name.capitalize()
    # The template below is runtime data (becomes func.__doc__); kept verbatim.
    func.__doc__ = '\n    Assert the condition `x {sym}` holds element-wise.\n\n    When running in graph mode, you should add a dependency on this operation\n    to ensure that it runs. Example of adding a dependency to an operation:\n\n    ```python\n    with tf.control_dependencies([tf.debugging.{opname}(x, y)]):\n      output = tf.reduce_sum(x)\n    ```\n\n    {sym_name} means, for every element `x[i]` of `x`, we have `x[i] {sym}`.\n    If `x` is empty this is trivially satisfied.\n\n    Args:\n      x: Numeric `Tensor`.\n      data: The tensors to print out if the condition is False. Defaults to\n        error message and first few entries of `x`.\n      summarize: Print this many entries of each tensor.\n      message: A string to prefix to the default message.\n      name: A name for this operation (optional). Defaults to "{opname}".\n\n    Returns:\n      Op that raises `InvalidArgumentError` if `x {sym}` is False.\n    @compatibility(eager)\n    returns None\n    @end_compatibility\n\n    Raises:\n      InvalidArgumentError: if the check can be performed immediately and\n      `x {sym}` is False. The check can be performed immediately during\n      eager execution or if `x` is statically known.\n    '.format(sym=sym, sym_name=cap_sym_name, opname=opname)
    return func
Generated decorator that adds the appropriate docstring to the function for symbol `sym`. Args: func: Function for a TensorFlow op Returns: Version of `func` with documentation attached.
github-repos
def build_image(image_path, image_name, build_args=None, dockerfile_path=None):
    """Build a docker image.

    Args:
        image_path (str): the path to the image directory
        image_name (str): image 'name:tag' to build
        build_args (dict, optional): dict of docker build arguments
        dockerfile_path (str, optional): path to dockerfile relative to
            image_path if not `image_path/Dockerfile`.
    """
    cmd = ['docker', 'build', '-t', image_name, image_path]
    if dockerfile_path:
        cmd += ['-f', dockerfile_path]
    for key, value in (build_args or {}).items():
        cmd.extend(['--build-arg', '{}={}'.format(key, value)])
    # Raises CalledProcessError if the docker build fails.
    check_call(cmd)
Build an image Args: image_path (str): the path to the image directory image_name (str): image 'name:tag' to build build_args (dict, optional): dict of docker build arguments dockerfile_path (str, optional): path to dockerfile relative to image_path if not `image_path/Dockerfile`.
juraj-google-style
def brightness(x, severity=1):
    """Change brightness of images.

    Args:
        x: numpy array, uncorrupted image, assumed to have uint8 pixels
            in [0,255].
        severity: integer in [1, 5], severity of corruption.

    Returns:
        numpy array, image with uint8 pixels in [0,255] and changed
        brightness.
    """
    c = [0.1, 0.2, 0.3, 0.4, 0.5][severity - 1]
    x = np.array(x) / 255.0
    # Shift the value channel in HSV space so hue/saturation are preserved.
    x = tfds.core.lazy_imports.skimage.color.rgb2hsv(x)
    # Fixed: `x[(:, :, 2)]` was invalid syntax; slices must be written as
    # `x[:, :, 2]`.
    x[:, :, 2] = np.clip(x[:, :, 2] + c, 0, 1)
    x = tfds.core.lazy_imports.skimage.color.hsv2rgb(x)
    x_clip = np.clip(x, 0, 1) * 255
    return around_and_astype(x_clip)
Change brightness of images. Args: x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255]. severity: integer, severity of corruption. Returns: numpy array, image with uint8 pixels in [0,255]. Changed brightness.
codesearchnet
def get_njobs_in_queue(self, username=None):
    """Return the number of jobs in the queue for `username`.

    Delegates to the scheduler-specific `_get_njobs_in_queue`
    implementation (typically a subprocess call to a command like
    'qstat').

    Args:
        username: (str) the username of the jobs to count (default is to
            autodetect via getpass.getuser()).

    Returns:
        The number of queued jobs, or None when it cannot be determined.
    """
    if username is None:
        username = getpass.getuser()
    njobs, process = self._get_njobs_in_queue(username=username)
    if process is not None and process.returncode != 0:
        # Fixed: the two message fragments were previously concatenated
        # without a separator ("...queueThe error response...").
        err_msg = ('Error trying to get the number of jobs in the queue. '
                   'The error response reads:\n {}'.format(process.stderr.read()))
        logger.critical(err_msg)
    # Shell adapters run locally, so the queue count is not interesting.
    if not isinstance(self, ShellAdapter):
        logger.info('The number of jobs currently in the queue is: {}'.format(njobs))
    return njobs
Returns the number of jobs in the queue, probably using subprocess or shutil to call a command like 'qstat'. Returns None when the number of jobs cannot be determined. Args: username: (str) the username of the jobs to count (default is to autodetect)
juraj-google-style
def value_to_message(self, value):
    """Convert a value instance to a message for transmission.

    Used by serializers to convert Python user types to underlying
    messages. The base implementation only validates the type and returns
    the value unchanged.

    Args:
        value: A value of type self.type.

    Returns:
        The value itself, guaranteed to be an instance of self.type.

    Raises:
        EncodeError: if `value` is not an instance of self.type.
    """
    if isinstance(value, self.type):
        return value
    raise EncodeError(('Expected type %s, got %s: %r' % (self.type.__name__, type(value).__name__, value)))
Convert a value instance to a message. Used by serializers to convert Python user types to underlying messages for transmission. Args: value: A value of type self.type. Returns: An instance of type self.message_type.
codesearchnet
def multilayer_fully_connected(images, labels):
    """Creates a multi layer network of fully_connected layers.

    Each layer is 100 neurons. Please change this to experiment with
    architectures.

    Args:
        images: The input images.
        labels: The labels as dense one-hot vectors.

    Returns:
        A softmax result.
    """
    images = pt.wrap(images)
    # These defaults apply to every layer built inside this scope.
    with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):
        return (images.flatten().fully_connected(100).fully_connected(100)
                .softmax_classifier(10, labels))
Creates a multi layer network of fully_connected layers. Each layer is 100 neurons. Please change this to experiment with architectures. Args: images: The input images. labels: The labels as dense one-hot vectors. Returns: A softmax result.
juraj-google-style
def AddSpecification(self, specification):
    """Adds a format specification to the store.

    Every signature in the specification is assigned an identifier of the
    form "<specification identifier>:<index>" and registered in the
    signature map.

    Args:
        specification (FormatSpecification): format specification.

    Raises:
        KeyError: if the store already contains a specification or
            signature with the same identifier.
    """
    if specification.identifier in self._format_specifications:
        raise KeyError(
            'Format specification {0:s} is already defined in store.'.format(
                specification.identifier))

    self._format_specifications[specification.identifier] = specification

    for signature in specification.signatures:
        # The index is derived from the global signature map size, not the
        # position within this specification.
        signature_index = len(self._signature_map)
        signature_identifier = '{0:s}:{1:d}'.format(
            specification.identifier, signature_index)
        if signature_identifier in self._signature_map:
            raise KeyError('Signature {0:s} is already defined in map.'.format(
                signature_identifier))
        signature.SetIdentifier(signature_identifier)
        self._signature_map[signature_identifier] = specification
Adds a format specification. Args: specification (FormatSpecification): format specification. Raises: KeyError: if the store already contains a specification with the same identifier.
codesearchnet
def delete_attachment(cls, session, attachment):
    """Delete an attachment.

    Args:
        session (requests.sessions.Session): Authenticated session.
        attachment (helpscout.models.Attachment): The attachment to be
            deleted.

    Returns:
        NoneType: Nothing.
    """
    # Attachments are removed through a dedicated endpoint rather than
    # the default conversations one.
    endpoint = '/attachments/%s.json' % attachment.id
    return super(Conversations, cls).delete(
        session,
        attachment,
        endpoint_override=endpoint,
        out_type=Attachment,
    )
Delete an attachment. Args: session (requests.sessions.Session): Authenticated session. attachment (helpscout.models.Attachment): The attachment to be deleted. Returns: NoneType: Nothing.
codesearchnet
def frame(self, frame):
    """Return a path to the given frame in the sequence.

    Integer or string digits are treated as a frame number and padding is
    applied; all other values are passed through.

    Examples:
        >>> seq.frame(1)
        /foo/bar.0001.exr
        >>> seq.frame("#")
        /foo/bar.#.exr

    Args:
        frame (int or str): the desired frame number or a char to pass
            through (ie. #)

    Returns:
        str:
    """
    try:
        padded = str(int(frame)).zfill(self._zfill)
    except ValueError:
        # Non-numeric tokens (e.g. "#") pass through; an unpadded
        # sequence drops the token entirely.
        padded = '' if self._zfill == 0 else frame
    return ''.join((self._dir, self._base, padded, self._ext))
Return a path to the given frame in the sequence. Integer or string digits are treated as a frame number and padding is applied, all other values are passed through. Examples: >>> seq.frame(1) /foo/bar.0001.exr >>> seq.frame("#") /foo/bar.#.exr Args: frame (int or str): the desired frame number or a char to pass through (ie. #) Returns: str:
codesearchnet
def row_splits(self):
    """The row-split indices for this ragged tensor's `values`.

    The values for row `rt[i]` live in the slice
    `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`.

    Returns:
        A 1-D integer `Tensor` of length `nrows + 1`, sorted ascending.
    """
    # The RowPartition owns the splits; simply delegate.
    return self._row_partition.row_splits()
The row-split indices for this ragged tensor's `values`. `rt.row_splits` specifies where the values for each row begin and end in `rt.values`. In particular, the values for row `rt[i]` are stored in the slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`. Returns: A 1-D integer `Tensor` with shape `[self.nrows+1]`. The returned tensor is non-empty, and is sorted in ascending order. `self.row_splits[0]` is zero, and `self.row_splits[-1]` is equal to `self.values.shape[0]`. #### Example: >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []]) >>> print(rt.row_splits) # indices of row splits in rt.values tf.Tensor([0 4 4 7 8 8], shape=(6,), dtype=int64)
github-repos
def tokens(self, tokenset='internal'):
    """Return a YyTokenLattice for the requested token set.

    YY-format strings and JSON-derived lists are deserialized; a missing
    token set yields None, and any other value is returned unchanged.

    Args:
        tokenset (str): return `'initial'` or `'internal'` tokens
            (default: `'internal'`)

    Returns:
        :class:`YyTokenLattice`, or None when the token set is absent.
    """
    lattice = self.get('tokens', {}).get(tokenset)
    if lattice is None:
        return None
    if isinstance(lattice, stringtypes):
        return YyTokenLattice.from_string(lattice)
    if isinstance(lattice, Sequence):
        return YyTokenLattice.from_list(lattice)
    return lattice
Deserialize and return a YyTokenLattice object for the initial or internal token set, if provided, from the YY format or the JSON-formatted data; otherwise return the original string. Args: tokenset (str): return `'initial'` or `'internal'` tokens (default: `'internal'`) Returns: :class:`YyTokenLattice`
juraj-google-style
def _process_update(self, item, feed_item):
    """Updates an ad based on the values from the feed.

    Args:
        item: Object representing the ad to be updated, this object is
            updated directly.
        feed_item: Feed item representing ad values from the Bulkdozer
            feed.
    """
    campaign = self._campaign_dao.get(feed_item, required=True)
    item['active'] = feed_item.get(FieldMap.AD_ACTIVE, True)
    # An ad can only go active once all its creatives are activated.
    if item['active']:
        self._wait_all_creative_activation(feed_item)
    self._setup_rotation_strategy(item['creativeRotation'], feed_item)
    # Explicit assignments in the feed replace all existing assignments.
    if feed_item['creative_assignment']:
        item['creativeRotation']['creativeAssignments'] = []
    item['placementAssignments'] = []
    item['eventTagOverrides'] = []
    self._process_assignments(feed_item, item['creativeRotation'].get('creativeAssignments', []), item['placementAssignments'], item['eventTagOverrides'], campaign)
    if 'deliverySchedule' in item:
        item['deliverySchedule']['priority'] = feed_item.get(FieldMap.AD_PRIORITY, None)
    if feed_item.get(FieldMap.AD_HARDCUTOFF, '') != '':
        if not 'deliverySchedule' in item:
            item['deliverySchedule'] = {}
        item['deliverySchedule']['hardCutoff'] = feed_item.get(FieldMap.AD_HARDCUTOFF)
    item['archived'] = feed_item.get(FieldMap.AD_ARCHIVED, False)
    # Date-only values are expanded to full datetimes; end dates default
    # to the last second of the day, start dates to midnight.
    if 'T' in feed_item.get(FieldMap.AD_END_DATE, None):
        item['endTime'] = feed_item.get(FieldMap.AD_END_DATE, None)
    else:
        item['endTime'] = StringExtensions.convertDateStrToDateTimeStr(feed_item.get(FieldMap.AD_END_DATE, None), '23:59:59')
    if 'T' in feed_item.get(FieldMap.AD_START_DATE, None):
        item['startTime'] = feed_item.get(FieldMap.AD_START_DATE, None)
    else:
        item['startTime'] = StringExtensions.convertDateStrToDateTimeStr(feed_item.get(FieldMap.AD_START_DATE, None))
    item['name'] = feed_item.get(FieldMap.AD_NAME, None)
    self._process_landing_page(item, feed_item)
Updates an ad based on the values from the feed. Args: item: Object representing the ad to be updated, this object is updated directly. feed_item: Feed item representing ad values from the Bulkdozer feed.
github-repos
def open(self, path, mime_type='application/octet-stream', compression_type=CompressionTypes.AUTO):
    """Returns a read channel for the given file path.

    Args:
        path: string path of the file object to be read
        mime_type: MIME type to specify the type of content in the file
            object
        compression_type: Type of compression to be used for this object

    Returns:
        file handle with a close function for the user to use
    """
    # Delegate to the shared opener in binary-read mode.
    return self._path_open(path, 'rb', mime_type, compression_type)
Returns a read channel for the given file path. Args: path: string path of the file object to be read mime_type: MIME type to specify the type of content in the file object compression_type: Type of compression to be used for this object Returns: file handle with a close function for the user to use
github-repos
def find(self, title):
    """Fetch and return the first spreadsheet with the given title.

    Args:
        title(str): title/name of the spreadsheet to return

    Returns:
        SpreadSheet: new SpreadSheet instance

    Raises:
        KeyError: if no spreadsheet with the given ``title`` is found
    """
    matches = backend.iterfiles(self._drive, name=title)
    for file_id, _ in matches:
        # Only the first match is wanted.
        return self[file_id]
    raise KeyError(title)
Fetch and return the first spreadsheet with the given title. Args: title(str): title/name of the spreadsheet to return Returns: SpreadSheet: new SpreadSheet instance Raises: KeyError: if no spreadsheet with the given ``title`` is found
juraj-google-style
def _make_patterns(patterns):
    """Create a ScreenPatternList from the given pattern texts.

    Args:
        patterns (str list): the patterns, one multi-line string each

    Returns:
        mpdlcd.display_pattern.ScreenPatternList: the patterns, split into
        lines and registered against a shared field registry.
    """
    registry = display_fields.FieldRegistry()
    pattern_list = display_pattern.ScreenPatternList(field_registry=registry)
    for pattern_text in patterns:
        pattern_list.add(pattern_text.split('\n'))
    return pattern_list
Create a ScreenPatternList from a given pattern text. Args: pattern_txt (str list): the patterns Returns: mpdlcd.display_pattern.ScreenPatternList: a list of patterns from the given entries.
juraj-google-style
def launch_batch_workflow(self, batch_workflow):
    """Launches a GBDX batch workflow.

    Args:
        batch_workflow (dict): Dictionary specifying batch workflow tasks.

    Returns:
        Batch Workflow id (str), or None when the launch fails with a
        TypeError (which is logged at debug level).
    """
    url = '%(base_url)s/batch_workflows' % {'base_url': self.base_url}
    try:
        response = self.gbdx_connection.post(url, json=batch_workflow)
        return response.json()['batch_workflow_id']
    except TypeError as e:
        # Best-effort: log the failure and fall through to return None.
        self.logger.debug('Batch Workflow not launched, reason: {0}'.format(e))
Launches GBDX batch workflow. Args: batch_workflow (dict): Dictionary specifying batch workflow tasks. Returns: Batch Workflow id (str).
codesearchnet
def replace(table, columns, values):
    """Replace one or more table rows.

    Args:
        table: Name of the table to be modified.
        columns: Name of the table columns to be modified.
        values: Values to be modified.
    """
    num_rows = len(values)
    num_cells = len(columns) * len(values)
    mutation = Mutation(replace=batch._make_write_pb(table, columns, values))
    return _Mutator(
        mutation=mutation,
        operation=WriteMutation._OPERATION_REPLACE,
        rows=num_rows,
        cells=num_cells,
        kwargs={'table': table, 'columns': columns, 'values': values},
    )
Replace one or more table rows. Args: table: Name of the table to be modified. columns: Name of the table columns to be modified. values: Values to be modified.
github-repos
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
    """Build model inputs for sequence classification by adding special tokens.

    A SqueezeBERT sequence has the following format:

    - single sequence: `[CLS] X [SEP]`
    - pair of sequences: `[CLS] A [SEP] B [SEP]`

    Args:
        token_ids_0 (`List[int]`): List of IDs to which the special tokens
            will be added.
        token_ids_1 (`List[int]`, *optional*): Optional second list of IDs
            for sequence pairs.

    Returns:
        `List[int]`: List of [input IDs](../glossary#input-ids) with the
        appropriate special tokens.
    """
    sequence = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
    if token_ids_1 is not None:
        sequence += token_ids_1 + [self.sep_token_id]
    return sequence
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A SqueezeBERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
github-repos
def __init__(self, channel):
    """Constructor.

    Wires up one unary-unary gRPC callable per PredictionService method,
    pairing each request serializer with its response deserializer.

    Args:
        channel: A grpc.Channel.
    """
    self.Classify = channel.unary_unary(
        '/tensorflow.serving.PredictionService/Classify',
        request_serializer=tensorflow__serving_dot_apis_dot_classification__pb2.ClassificationRequest.SerializeToString,
        response_deserializer=tensorflow__serving_dot_apis_dot_classification__pb2.ClassificationResponse.FromString,
    )
    self.Regress = channel.unary_unary(
        '/tensorflow.serving.PredictionService/Regress',
        request_serializer=tensorflow__serving_dot_apis_dot_regression__pb2.RegressionRequest.SerializeToString,
        response_deserializer=tensorflow__serving_dot_apis_dot_regression__pb2.RegressionResponse.FromString,
    )
    self.Predict = channel.unary_unary(
        '/tensorflow.serving.PredictionService/Predict',
        request_serializer=tensorflow__serving_dot_apis_dot_predict__pb2.PredictRequest.SerializeToString,
        response_deserializer=tensorflow__serving_dot_apis_dot_predict__pb2.PredictResponse.FromString,
    )
    self.GetModelMetadata = channel.unary_unary(
        '/tensorflow.serving.PredictionService/GetModelMetadata',
        request_serializer=tensorflow__serving_dot_apis_dot_get__model__metadata__pb2.GetModelMetadataRequest.SerializeToString,
        response_deserializer=tensorflow__serving_dot_apis_dot_get__model__metadata__pb2.GetModelMetadataResponse.FromString,
    )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
    """Extracts events from a Windows Registry AppCompatCache key.

    Parses the binary 'AppCompatCache' value, dispatching to a
    format-specific cached-entry parser, and produces one event per
    recorded timestamp of each cached entry.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        registry_key (dfwinreg.WinRegistryKey): Windows Registry key.

    Raises:
        ParseError: if the cached entry data type map cannot be determined.
    """
    value = registry_key.GetValueByName('AppCompatCache')
    if not value:
        return
    value_data = value.data
    value_data_size = len(value.data)
    # Determine the on-disk format from the value's signature.
    format_type = self._CheckSignature(value_data)
    if not format_type:
        parser_mediator.ProduceExtractionWarning(
            'Unsupported signature in AppCompatCache key: {0:s}'.format(
                registry_key.path))
        return
    header_object = self._ParseHeader(format_type, value_data)
    # No cached entries follow the header; nothing to extract.
    if value_data_size <= header_object.header_size:
        return
    cached_entry_offset = header_object.header_size
    self._cached_entry_data_type_map = self._GetCachedEntryDataTypeMap(
        format_type, value_data, cached_entry_offset)
    if not self._cached_entry_data_type_map:
        raise errors.ParseError('Unable to determine cached entry data type.')
    # Select the per-format entry parser. NOTE(review): if _CheckSignature
    # ever returns a format not covered below, this stays None and the loop
    # raises TypeError — presumably _CheckSignature only returns these
    # formats; confirm.
    parse_cached_entry_function = None
    if format_type == self._FORMAT_TYPE_XP:
        parse_cached_entry_function = self._ParseCachedEntryXP
    elif format_type == self._FORMAT_TYPE_2003:
        parse_cached_entry_function = self._ParseCachedEntry2003
    elif format_type == self._FORMAT_TYPE_VISTA:
        parse_cached_entry_function = self._ParseCachedEntryVista
    elif format_type == self._FORMAT_TYPE_7:
        parse_cached_entry_function = self._ParseCachedEntry7
    elif format_type == self._FORMAT_TYPE_8:
        parse_cached_entry_function = self._ParseCachedEntry8
    elif format_type == self._FORMAT_TYPE_10:
        parse_cached_entry_function = self._ParseCachedEntry10
    cached_entry_index = 0
    while cached_entry_offset < value_data_size:
        cached_entry_object = parse_cached_entry_function(
            value_data, cached_entry_offset)
        event_data = AppCompatCacheEventData()
        # Entry indexes are reported 1-based.
        event_data.entry_index = cached_entry_index + 1
        event_data.key_path = registry_key.path
        event_data.offset = cached_entry_offset
        event_data.path = cached_entry_object.path
        if cached_entry_object.last_modification_time is not None:
            # A zero FILETIME is recorded as a semantic "Not set" time.
            if not cached_entry_object.last_modification_time:
                date_time = dfdatetime_semantic_time.SemanticTime('Not set')
            else:
                date_time = dfdatetime_filetime.Filetime(
                    timestamp=cached_entry_object.last_modification_time)
            # NOTE(review): this uses a raw string while the last-update
            # branch below uses a definitions.* constant — looks
            # inconsistent; confirm against the definitions module.
            event = time_events.DateTimeValuesEvent(
                date_time, 'File Last Modification Time')
            parser_mediator.ProduceEventWithEventData(event, event_data)
        if cached_entry_object.last_update_time is not None:
            if not cached_entry_object.last_update_time:
                date_time = dfdatetime_semantic_time.SemanticTime('Not set')
            else:
                date_time = dfdatetime_filetime.Filetime(
                    timestamp=cached_entry_object.last_update_time)
            event = time_events.DateTimeValuesEvent(
                date_time, definitions.TIME_DESCRIPTION_LAST_RUN)
            parser_mediator.ProduceEventWithEventData(event, event_data)
        cached_entry_offset += cached_entry_object.cached_entry_size
        cached_entry_index += 1
        # Stop once the header's declared entry count is reached
        # (0 means the count is unknown and the loop runs to the end
        # of the data).
        if (header_object.number_of_cached_entries != 0 and
                cached_entry_index >= header_object.number_of_cached_entries):
            break
Extracts events from a Windows Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key. Raises: ParseError: if the value data could not be parsed.
juraj-google-style
def unban_user(self, room_id, user_id):
    """Perform POST /rooms/$room_id/unban.

    Args:
        room_id (str): The room ID.
        user_id (str): The user ID of the banee(sic).
    """
    endpoint = "/rooms/" + room_id + "/unban"
    payload = {"user_id": user_id}
    return self._send("POST", endpoint, payload)
Perform POST /rooms/$room_id/unban Args: room_id (str): The room ID user_id (str): The user ID of the banee(sic)
juraj-google-style
def Read(self, path, length=None, offset=0, fh=None):
    """Reads data from a file.

    Args:
        path: The path to the file to read.
        length: How many bytes to read; defaults to the object's full size.
        offset: Offset in bytes from which reading should start.
        fh: A file handler. Not used.

    Returns:
        A string containing the file contents requested.

    Raises:
        FuseOSError: If we try and read a directory or if we try and read
            an object that doesn't support reading.
    """
    del fh
    if self._IsDir(path):
        raise fuse.FuseOSError(errno.EISDIR)
    fd = aff4.FACTORY.Open(self.root.Add(path), token=self.token)
    # The original eagerly evaluated callable(fd.Read) inside a tuple, which
    # raised AttributeError (not EIO) when the attribute was missing. Use a
    # short-circuit chain so hasattr() guards the callable() checks.
    readable = (hasattr(fd, "Read") and hasattr(fd, "Seek") and
                callable(fd.Read) and callable(fd.Seek))
    if not readable:
        raise fuse.FuseOSError(errno.EIO)
    if length is None:
        # Default to reading the whole object.
        length = fd.Get(fd.Schema.SIZE)
    fd.Seek(offset)
    return fd.Read(length)
Reads data from a file. Args: path: The path to the file to read. length: How many bytes to read. offset: Offset in bytes from which reading should start. fh: A file handler. Not used. Returns: A string containing the file contents requested. Raises: FuseOSError: If we try and read a directory or if we try and read an object that doesn't support reading.
juraj-google-style
def remove_by_threshold(self, threshold=5):
    """Remove all words at, or below, the provided threshold.

    Args:
        threshold (int): Words whose count is <= this value are removed.
    """
    # Collect the victims first so we never mutate the dict while
    # iterating it; a single pass over items() avoids the extra
    # per-key lookups of the original pop() loop.
    doomed = [word for word, count in self._dictionary.items()
              if count <= threshold]
    for word in doomed:
        del self._dictionary[word]
    self._update_dictionary()
Remove all words at, or below, the provided threshold Args: threshold (int): The threshold at which a word is to be \ removed
juraj-google-style
def _bind_topics(self, topics): self.client.subscribe(topics.status, self._on_status_message) self.client.subscribe(topics.tracing, self._on_trace) self.client.subscribe(topics.streaming, self._on_report) self.client.subscribe(topics.response, self._on_response_message)
Subscribe to all the topics we need to communication with this device Args: topics (MQTTTopicValidator): The topic validator for this device that we are connecting to.
juraj-google-style
def add_edge(self, source, target):
    """Create and register a new edge connecting source and target vertices.

    Args:
        source: The source Vertex.
        target: The target Vertex.

    Returns:
        A new Edge linking source to target.
    """
    # The new edge's index is its position in the edge list.
    new_edge = Edge(len(self.edges))
    new_edge.source = source.idx
    new_edge.target = target.idx
    self.edges.append(new_edge)
    source.out_edges.append(new_edge.idx)
    target.in_edges.append(new_edge.idx)
    return new_edge
Returns a new edge connecting source and target vertices. Args: source: The source Vertex. target: The target Vertex. Returns: A new Edge linking source to target.
juraj-google-style
def create_grid_samples(order, dim=1):
    """Create samples from a regular grid.

    Args:
        order (int): The order of the grid. Defines the number of samples
            along each axis.
        dim (int): The number of dimensions in the grid.

    Returns (numpy.ndarray):
        Regular grid with ``shape == (dim, order)``.
    """
    # Evenly spaced interior points on (0, 1): 1/(order+1), ..., order/(order+1).
    axis = numpy.arange(1, order + 1) / (order + 1.0)
    grid = chaospy.quad.combine([axis] * dim)
    return grid.T
Create samples from a regular grid. Args: order (int): The order of the grid. Defines the number of samples. dim (int): The number of dimensions in the grid Returns (numpy.ndarray): Regular grid with ``shape == (dim, order)``.
codesearchnet
def _InsertEvent(self, event, force_flush=False): if event: event_document = {'index': { '_index': self._index_name, '_type': self._document_type}} event_values = self._GetSanitizedEventValues(event) self._event_documents.append(event_document) self._event_documents.append(event_values) self._number_of_buffered_events += 1 if force_flush or self._number_of_buffered_events > self._flush_interval: self._FlushEvents()
Inserts an event. Events are buffered in the form of documents and inserted to Elasticsearch when either forced to flush or when the flush interval (threshold) has been reached. Args: event (EventObject): event. force_flush (bool): True if buffered event documents should be inserted into Elasticsearch.
juraj-google-style
def ingress(self, envelope, http_headers, operation):
    """Overrides the ingress function for response logging.

    Args:
        envelope: An Element with the SOAP request data.
        http_headers: A dict of the current http headers.
        operation: The SoapOperation instance.

    Returns:
        A tuple of the envelope and headers.
    """
    if self._logger.isEnabledFor(logging.DEBUG):
        self._logger.debug(_RESPONSE_XML_LOG_LINE,
                           etree.tostring(envelope, pretty_print=True))
    if self._logger.isEnabledFor(logging.WARN):
        warn_data = {}
        header = envelope.find(_HEADER_XPATH)
        fault = envelope.find(_FAULT_XPATH)
        if fault is not None:
            warn_data['faultMessage'] = fault.find('faultstring').text
        if header is not None:
            # Strip the namespace prefix from each header child's tag.
            header_data = {re.sub(_REMOVE_NS_REGEXP, '', child.tag): child.text
                           for child in header[0]}
            warn_data.update(header_data)
        if 'serviceName' not in warn_data:
            # Fall back to the first WSDL service name. dict views are not
            # subscriptable on Python 3, so .keys()[0] (the original form)
            # raises TypeError there; take the first key via an iterator.
            warn_data['serviceName'] = next(
                iter(operation.binding.wsdl.services))
        if 'methodName' not in warn_data:
            warn_data['methodName'] = operation.name
        self._logger.warn('Error summary: %s', warn_data)
    return (envelope, http_headers)
Overrides the ingress function for response logging. Args: envelope: An Element with the SOAP request data. http_headers: A dict of the current http headers. operation: The SoapOperation instance. Returns: A tuple of the envelope and headers.
codesearchnet
def _scale_tensor(tensor, range_min, range_max, scale_min, scale_max):
    """Linearly rescale a tensor from [range_min, range_max] to
    [scale_min, scale_max].

    Args:
        tensor: input tensor. Should be a numerical tensor.
        range_min: min expected value for this feature/tensor.
        range_max: max expected value.
        scale_min: new expected min value.
        scale_max: new expected max value.

    Returns:
        Scaled tensor.
    """
    # Degenerate input range: scaling is undefined, return unchanged.
    if range_min == range_max:
        return tensor
    as_float = tf.to_float(tensor)
    output_span = tf.constant(float(scale_max - scale_min))
    input_span = tf.constant(float(range_max - range_min))
    normalized = tf.divide(
        tf.subtract(as_float, range_min) * output_span, input_span)
    return normalized + tf.constant(float(scale_min))
Scale a tensor to scale_min to scale_max. Args: tensor: input tensor. Should be a numerical tensor. range_min: min expected value for this feature/tensor. range_max: max expected Value. scale_min: new expected min value. scale_max: new expected max value. Returns: scaled tensor.
codesearchnet
def _build_instruction_ds(instructions):
    """Create a dataset containing an individual instruction for each shard.

    Each instruction is a dict:
    ```
    {
        "filepath": tf.Tensor(shape=(), dtype=tf.string),
        "mask_offset": tf.Tensor(shape=(), dtype=tf.int64),
        "mask": tf.Tensor(shape=(100,), dtype=tf.bool),
    }
    ```

    Args:
        instructions: `list[dict]`, the list of instruction dicts.

    Returns:
        instruction_ds: The dataset containing the instructions. The dataset
            size is the number of shards.
    """
    tensor_inputs = {}
    for key, values in utils.zip_dict(*instructions):
        # Offsets must be int64 arrays; everything else stays a plain list.
        if key == 'mask_offset':
            tensor_inputs[key] = np.array(values, dtype=np.int64)
        else:
            tensor_inputs[key] = list(values)
    return tf.data.Dataset.from_tensor_slices(tensor_inputs)
Create a dataset containing individual instruction for each shard. Each instruction is a dict: ``` { "filepath": tf.Tensor(shape=(), dtype=tf.string), "mask_offset": tf.Tensor(shape=(), dtype=tf.int64), "mask": tf.Tensor(shape=(100,), dtype=tf.bool), } ``` Args: instructions: `list[dict]`, the list of instruction dict Returns: instruction_ds: The dataset containing the instruction. The dataset size is the number of shard.
codesearchnet
def add_folder(self, path, title, description=None, language=None, thumbnail=None, source_id=None, **node_data):
    """add_folder: Creates folder in csv.

    Args:
        path: (str) where in zip to write folder
        title: (str) content's title
        description: (str) description of content (optional)
        language: (str) language of content (optional)
        thumbnail: (str) path to thumbnail in zip (optional)
        source_id: (str) content's original id (optional)

    Returns: None
    """
    self._parse_path(path)
    # Append the title to the path unless it is already the last component.
    if not path.endswith(title):
        path = "{}/{}".format(path, title)
    self._commit(path, title, description=description, language=language,
                 thumbnail=thumbnail, source_id=source_id)
add_folder: Creates folder in csv Args: path: (str) where in zip to write folder title: (str) content's title source_id: (str) content's original id (optional) description: (str) description of content (optional) language (str): language of content (optional) thumbnail (str): path to thumbnail in zip (optional) Returns: None
juraj-google-style
def _on_connect(self, sequence, topic, message):
    """Process a request to connect to an IOTile device.

    A connection message triggers an attempt to connect to a device; any
    error checking is done by the DeviceManager that is actually managing
    the devices.

    Args:
        sequence (int): The sequence number of the packet received.
        topic (string): The topic this message was received on.
        message (dict): The message itself.
    """
    slug = None
    try:
        # The device slug is the third-from-last path component.
        slug = topic.split('/')[-3]
        uuid = self._extract_device_uuid(slug)
    except Exception:
        self._logger.exception('Error parsing slug from connection request (slug=%s, topic=%s)', slug, topic)
        return
    if not messages.ConnectCommand.matches(message):
        self._logger.warn('Unknown message received on connect topic=%s, message=%s', topic, message)
        return
    self._loop.add_callback(self._connect_to_device, uuid, message['key'], message['client'])
Process a request to connect to an IOTile device. A connection message triggers an attempt to connect to a device; any error checking is done by the DeviceManager that is actually managing the devices. A disconnection message is checked to make sure its key matches what we expect for this device and is either discarded or forwarded on to the DeviceManager. Args: sequence (int): The sequence number of the packet received topic (string): The topic this message was received on message (dict): The message itself
codesearchnet
def parse_range_header(self, header, resource_size):
    """Parse an HTTP Range header into a list of two-tuples (start, stop).

    `start` is the starting byte of the range (inclusive) and `stop` is
    the ending byte position of the range (exclusive).

    Args:
        header (str): The HTTP_RANGE request header.
        resource_size (int): The size of the file in bytes.

    Returns:
        A list of (start, stop) tuples, or None if the value of the header
        is not syntactically valid. (The original raised ValueError for
        non-numeric bounds such as "bytes=abc-def"; those now also return
        None, matching the documented contract.)
    """
    if not header or '=' not in header:
        return None

    ranges = []
    units, range_ = header.split('=', 1)
    units = units.strip().lower()

    if units != 'bytes':
        return None

    for val in range_.split(','):
        val = val.strip()
        if '-' not in val:
            return None

        try:
            if val.startswith('-'):
                # Suffix range: the final N bytes of the resource.
                start = resource_size + int(val)
                if start < 0:
                    start = 0
                stop = resource_size
            else:
                start, stop = val.split('-', 1)
                start = int(start)
                # An empty stop means "through the end of the resource".
                stop = int(stop) + 1 if stop else resource_size
        except ValueError:
            # Non-numeric bounds make the header syntactically invalid.
            return None

        if start >= stop:
            return None

        ranges.append((start, stop))

    return ranges
Parses a range header into a list of two-tuples (start, stop) where `start` is the starting byte of the range (inclusive) and `stop` is the ending byte position of the range (exclusive). Args: header (str): The HTTP_RANGE request header. resource_size (int): The size of the file in bytes. Returns: None if the value of the header is not syntactically valid.
juraj-google-style
def repr(self, changed_widgets=None):
    """Represents the widget in HTML format, packing all the attributes,
    children and so on.

    Args:
        changed_widgets (dict): A dictionary containing a collection of
            widgets that have to be updated. The Widget that has to be
            updated is the key, and the value is its textual repr.
    """
    widgets_to_update = {} if changed_widgets is None else changed_widgets
    return super(Widget, self).repr(widgets_to_update)
Represents the widget in HTML format, packs all the attributes, children and so on. Args: changed_widgets (dict): A dictionary containing a collection of widgets that have to be updated. The Widget that has to be updated is the key, and the value is its textual repr.
juraj-google-style