Columns: content (string, lengths 35 – 762k) · sha1 (string, length 40) · id (int64, 0 – 3.66M)
def _filename_pattern(ext):
    """Returns an re matching native or tfrecord files of format `ext`."""
    return r".*\.{}(\.tfrecord)?(\.gz)?".format(ext)
6ec5a86dbba2432293451ca7dff0a0d1d5091bf0
8,600
def assemble_remote_url():
    """
    Assemble the target server URL, i.e. generate the value of parse.remote_url
    :rtype: str
    """
    if parse.is_external_domain:
        # the request targets an external domain
        scheme = 'https://' if parse.is_https else 'http://'
        return urljoin(scheme + parse.remote_domain, parse.remote_path_query)
    else:
        # the request targets the main domain, or a domain treated as an alias of it
        return urljoin(target_scheme + target_domain, parse.remote_path_query)
f0e14ddb42636f12f4fafa31af4a87b3f91a4e05
8,601
def register_blueprints(app):
    """Register Flask blueprints."""
    app.register_blueprint(public.views.blueprint)
    app.register_blueprint(drawbot.views.blueprint)
    app.register_blueprint(user.views.blueprint)
    return None
936c17a95ddc013ec9f0c6c232a689245fc313d0
8,602
def write_attribute(xml_elem, elem: str = None, attrib: str = None, txt: str = None):
    """ Write new text to a xml attribute.

    Elem can be used to refer to a subelement of the current xml_elem

    Args:
        xml_elem: The current xml element
        elem (str): The requested element tag name
        attrib (str): The attribute name
        txt (str): The new text for the element
    Returns:
        xml_elem: The modified xml element
    """
    if xml_elem is not None:
        if elem is not None:
            xml_elem = try_get_single_element_from_xml(elem=elem, xml_elem=xml_elem)
        if xml_elem is not None:
            xml_elem.set(attrib, txt)
    return xml_elem
d2ee296b6926a71ef2ffaf9fd9d47128f66e8806
8,603
def _ndarray_feature(x: np.ndarray) -> tf.train.Feature:
    """Create an ndarray feature stored as bytes."""
    x_bytes = x.tostring()
    feature = tf.train.Feature(bytes_list=tf.train.BytesList(value=[x_bytes]))
    return feature
03ad22f7d943d24574c92a494c915c28611a8d12
8,604
import re


def get_img_compliance_level(profile):
    """ Try to figure out the IIIF Image API compliance level given the
        `profile` value from a info.json.
    """
    patt_iiif = re.compile('level([0-2])\.json$')
    patt_stan = re.compile('#level([0-2])$')

    def get_from_str(s):
        m = None
        if 'http://iiif.io/api/image/2/' in s:
            m = patt_iiif.search(s)
        elif 'http://library.stanford.edu/iiif/image-api/' in s:
            m = patt_stan.search(s)
        if m:
            return int(m.group(1))
        return -1

    lvl = -1
    if type(profile) == str:
        lvl = get_from_str(profile)
    elif type(profile) == list:
        for p in [x for x in profile if type(x) == str]:
            found = get_from_str(p)
            if found != -1:
                lvl = found
                break
    if lvl == -1:
        log('Could not find compliance level in info.json.')
    return lvl
7970a795ea1b79bfea3df0e5a306e2d0286a61de
8,605
def _extract_protocol_layers(deserialized_data):
    """
    Removes unnecessary values from packets dictionaries.
    :param deserialized_data: Deserialized data from tshark.
    :return: List of filtered packets in dictionary format.
    """
    packets_filtered = []
    for packet in deserialized_data:
        packets_filtered.append(packet["_source"]["layers"])
    return packets_filtered
3c3a899909c5278b29ffb402ccb4d8dde24fce3a
8,606
from typing import Optional
from operator import gt


def calculate_affinity(
    adata: AnnData,
    level: int = 1,
    block_key: Optional[str] = 'nsbm',
    group_by: Optional[str] = None,
    state: Optional = None,
    neighbors_key: Optional[str] = 'neighbors',
    adjacency: Optional[sparse.spmatrix] = None,
    directed: bool = False,
    use_weights: bool = False,
    obsp: Optional[str] = None,
    back_prob: bool = False,
    copy: bool = False
) -> Optional[AnnData]:
    """\
    Calculate cell affinity given a partition scheme. It can be used for
    partitions calculated using schist or for any partition scheme, given
    for example by cell annotations.

    Parameters
    ----------
    adata:
        The AnnData object. Should have been already processed with schist
    level:
        The level to calculate affinity. This parameter is effective
        only for Nested partitions
    block_key:
        The prefix for partitions. This parameter is ignored if the state
        is not gt.NestedBlockState
    group_by:
        The key for group names used for calculations. Setting this will
        override level and block_key. This is effective only for
        NestedBlockState partitions
    state:
        Optionally calculate affinities on this state.
    neighbors_key
        Use neighbors connectivities as adjacency.
        If not specified, leiden looks .obsp['connectivities'] for
        connectivities (default storage place for pp.neighbors).
        If specified, leiden looks
        .obsp[.uns[neighbors_key]['connectivities_key']] for connectivities.
    adjacency
        Sparse adjacency matrix of the graph, defaults to neighbors
        connectivities.
    directed
        Whether to treat the graph as directed or undirected.
    use_weights
        If `True`, edge weights from the graph are used in the computation
        (placing more emphasis on stronger edges).
    copy:
        Return a new object or do everything in place

    Returns
    -------
    Depending on `copy`, returns or updates `adata` with affinity values
    in adata.obsm[f'CA_{block_key}_level_{level}']
    """

    matrix_key = f'CA_{block_key}_level_{level}'  # the default name of the matrix
    if group_by:
        logg.info(f'Calculating cell affinity to {group_by}')
    else:
        logg.info(f'Calculating cell affinity to level {level}')

    if not state:
        # if no state is provided, use the default to retrieve graph
        if 'schist' in adata.uns and 'blocks' in adata.uns['schist'][f'{block_key}']:
            params = adata.uns['schist'][f'{block_key}']['params']
            if 'neighbors_key' in params:
                neighbors_key = params['neighbors_key']
            if 'use_weights' in params:
                use_weights = params['use_weights']
            if 'deg_corr' in params:
                deg_corr = params['deg_corr']
            state = state_from_blocks(
                adata,
                state_key=block_key,
                neighbors_key=neighbors_key,
                adjacency=adjacency,
                directed=directed,
                use_weights=use_weights,
                deg_corr=deg_corr
            )
            g = state.g
        elif not neighbors_key:
            # no state and no adjacency provided, raise an error
            raise ValueError("A state or an adjacency matrix should be given"
                             "Otherwise a graph cannot be computed")
        else:
            # get the graph from the adjacency
            adjacency = _choose_graph(adata, obsp, neighbors_key)
            g = get_igraph_from_adjacency(adjacency, directed=directed)
            g = g.to_graph_tool()
            gt.remove_parallel_edges(g)
            state = gt.BlockState(g)
    else:
        g = state.g

    if group_by:
        matrix_key = f'CA_{group_by}'
        # if groups are given, we generate a new BlockState and work on that
        if group_by in adata.obs.columns and adata.obs[group_by].dtype.name == 'category':
            partitions = adata.obs[group_by].cat.codes.values
            state = gt.BlockState(g, b=partitions)
            if back_prob:
                ca_matrix = get_cell_back_p(state)
            else:
                ca_matrix = get_cell_loglikelihood(state, as_prob=True)
        else:
            raise ValueError(f"{group_by} should be a categorical entry in adata.obs")
    else:
        # use precomputed blocks and states
        if type(state) == gt.NestedBlockState:
            if back_prob:
                p0 = get_cell_back_p(state, level=0)
            else:
                p0 = get_cell_loglikelihood(state, level=0, as_prob=True)
            group_col = None
            if group_by and group_by in adata.obs.columns:
                group_col = group_by
            else:
                g_name = f'{block_key}_level_{level}'
                if g_name in adata.obs.columns:
                    group_col = g_name
            if not group_col:
                raise ValueError("The provided groups or level/blocks do not exist")

            g0 = pd.Categorical(state.project_partition(0, 0).a)
            cross_tab = pd.crosstab(g0, adata.obs[group_col], normalize='index')
            ca_matrix = (p0 @ cross_tab).values

        elif type(state) == gt.PPBlockState:
            if back_prob:
                ca_matrix = get_cell_back_p(state)
            else:
                ca_matrix = get_cell_loglikelihood(state, as_prob=True)
            matrix_key = 'CA_ppbm'

    adata.obsm[matrix_key] = ca_matrix

    return adata if copy else None
e2eec0e9f45199d6cc1559d71dfbf629dba61621
8,607
import asyncio


async def scrape_website(client):
    """
    :param client: client bot is connected to
    :return: only if there's an issue

    Type '!scrape' to restart the scraping process.

    Note: this function is executed on bot_ready, so I have to work around
    not having a convenient guild object.
    """
    debug_channel = utils.get_bot_commands_channel(client)
    await debug_channel.send(f"Started web scraping.")
    print(f"Web scraper starting...")

    # This loop will always run indefinitely.
    while True:
        # During this web scraping, first check if there was
        # any commands issued to force stop this functionality.
        if scraper.issued_off:
            game_lab_channel = utils.get_game_lab_channel(client)
            print(f"Successfully turned off webscraper")
            await debug_channel.send(f"Successfully turned off scraper.\n\nPlease go to {game_lab_channel.mention} and verify this action by comparing its edited timestamp.")
            scraper.issued_off = False
            scraper.is_scraping = False
            return

        # Secondly, check if the embeds exist.
        # It's possible someone may have deleted them mid-process.
        if not await validators.validate_pc_availability_embeds(client):
            print(f"...web scraping ending prematurely- embeds are missing! (This can be restarted with !scrape)")
            await debug_channel.send(f"ERROR: Machine availability panels must first exist in the channel `#{debug_channel}`! You can add these panels by entering `!gamelab` inside the channel, then start auto-updating PC availability with `!scrape`.")
            return

        scraper.is_scraping = True
        pc_statuses = await _get_scraped_pc_availability()
        if pc_statuses is None:
            print("Game Lab Availability is offline. Unable to get PC statuses. Restart bot with !restart.")
            break

        print(f"Updating PC availability with the following statuses:\n\t{pc_statuses}")
        await update_machine_availability_embed(client, pc_statuses)
        print(F"Trying again in 5 seconds")
        await asyncio.sleep(5)
    return None
0a27b9fafd607e0a1816c5a4575209051e32929a
8,608
def numpy_dtypes_for_minmax(request):
    """
    Fixture of numpy dtypes with min and max values used for testing
    cummin and cummax
    """
    dtype = request.param
    min_val = (
        np.iinfo(dtype).min if np.dtype(dtype).kind == "i" else np.finfo(dtype).min
    )
    max_val = (
        np.iinfo(dtype).max if np.dtype(dtype).kind == "i" else np.finfo(dtype).max
    )

    return (dtype, min_val, max_val)
d2ad3676549f427c134b38106a93145c54114052
8,609
def solve(topics):
    """Solve."""
    a_words, b_words = get_dicts(topics)
    candidates = []
    original = []
    duplicates = []
    for a, b in topics:
        # print(a, b)
        # print(a_words[a], b_words[b])
        if not (a_words[a] == 1 or b_words[b] == 1):
            candidates.append((a, b))
        else:
            original.append((a, b))
    a_words_org, b_words_org = get_dicts(original)
    while len(candidates) > 0:
        l_candidates = []
        for a, b in candidates:
            if a_words_org[a] >= 1 and b_words_org[b] >= 1:
                duplicates.append((a, b))
            else:
                l_candidates.append((a, b))
        candidates = l_candidates[:]
        # print(candidates)
    return len(candidates)
bb78da10ff6bb939bc0de9e0cc51a036c2a0e8b9
8,610
def get_package_plugin(package_type):
    """
    Get a plugin for a specific package

    Parameters
    ----------
    package_type: str
        The package type to fetch

    Returns
    -------
    InvirtualEnvPlugin:
        The invirtualenv plugin for the specific package_type
    """
    for plugin in installed_plugins():
        if package_type in plugin.package_formats:
            return plugin
a1d97a6d1c4248f7a1bfeade8e734bcc0af3aceb
8,611
import ast from typing import Tuple from typing import Optional import sys def _type_annotation( node: ast.AST, atok: asttokens.ASTTokens ) -> Tuple[Optional[TypeAnnotation], Optional[Error]]: """Parse the type annotation.""" if isinstance(node, ast.Name): return AtomicTypeAnnotation(identifier=Identifier(node.id), node=node), None elif isinstance(node, ast.Constant): if not isinstance(node.value, str): return ( None, Error( node.value, f"Expected a string literal " f"if the type annotation is given as a constant, " f"but got: " f"{node.value!r} (as {type(node.value)})", ), ) return AtomicTypeAnnotation(identifier=Identifier(node.value), node=node), None elif isinstance(node, ast.Subscript): if not isinstance(node.value, ast.Name): return ( None, Error( node.value, f"Expected a name to define " f"a subscripted type annotation," f"but got: {atok.get_text(node.value)}", ), ) # NOTE (mristin, 2022-01-22): # There were breaking changes between Python 3.8 and 3.9 in ``ast`` module. # Relevant to this particular piece of parsing logic is the deprecation of # ``ast.Index`` and ``ast.ExtSlice`` which is replaced with their actual value # and ``ast.Tuple``, respectively. # # Hence we need to switch on Python version and get the underlying slice value # explicitly. # # See deprecation notes just at the end of: # https://docs.python.org/3/library/ast.html#ast.AST if isinstance(node.slice, ast.Slice): return ( None, Error( node.slice, f"Expected an index to define a subscripted type annotation, " f"but got a slice: {atok.get_text(node.slice)}", ), ) # noinspection PyUnresolvedReferences if (sys.version_info < (3, 9) and isinstance(node.slice, ast.ExtSlice)) or ( sys.version_info >= (3, 9) and isinstance(node.slice, ast.Tuple) and any(isinstance(elt, ast.Slice) for elt in node.slice.elts) ): return ( None, Error( node.slice, f"Expected an index to define a subscripted type annotation, " f"but got an extended slice: {atok.get_text(node.slice)}", ), ) # NOTE (mristin, 2022-01-22): # Please see the note about the deprecation of ``ast.Index`` above. index_node = None # type: Optional[ast.AST] if sys.version_info < (3, 9): # noinspection PyUnresolvedReferences if isinstance(node.slice, ast.Index): index_node = node.slice.value else: return ( None, Error( node.slice, f"Expected an index to define a subscripted type annotation, " f"but got: {atok.get_text(node.slice)}", ), ) else: index_node = node.slice assert index_node is not None subscripts = [] # type: List[TypeAnnotation] if isinstance(index_node, ast.Tuple): for elt in index_node.elts: subscript_annotation, error = _type_annotation(node=elt, atok=atok) if error is not None: return None, error assert subscript_annotation is not None subscripts.append(subscript_annotation) elif isinstance(index_node, (ast.Name, ast.Subscript, ast.Constant)): subscript_annotation, error = _type_annotation(node=index_node, atok=atok) if error is not None: return None, error assert subscript_annotation is not None subscripts.append(subscript_annotation) else: return ( None, Error( index_node, f"Expected a tuple, a name, a subscript or a string literal " f"for a subscripted type annotation, " f"but got: {atok.get_text(index_node)}", ), ) return ( SubscriptedTypeAnnotation( identifier=Identifier(node.value.id), subscripts=subscripts, node=node, ), None, ) else: return ( None, Error( node, f"Expected either atomic type annotation (as name or string literal) " f"or a subscripted one (as a subscript), " f"but got: {atok.get_text(node)} (as {type(node)})", ), )
cc8955f094da748855985b277f5cb0876c1ac2a3
8,612
def validate_basic_message(msg):
    """Validate basic messages.

    This example just uses basic assertions but you could easily use a schema
    library to get more sophisticated validators.
    """
    assert msg.type == TYPE
    assert "~l10n" in msg
    assert "sent_time" in msg
    assert "content" in msg
    return msg
df6b1541adf86a295e6592f26d72ab2109617f6b
8,613
def _filter_event_queryset(queryset, params, srs=None): """ Filter events queryset by params (e.g. self.request.query_params in EventViewSet) """ # Filter by string (case insensitive). This searches from all fields # which are marked translatable in translation.py val = params.get('text', None) if val: val = val.lower() # Free string search from all translated fields fields = EventTranslationOptions.fields # and these languages languages = [x[0] for x in settings.LANGUAGES] qset = Q() for field in fields: for lang in languages: kwarg = {field + '_' + lang + '__icontains': val} qset |= Q(**kwarg) queryset = queryset.filter(qset) val = params.get('last_modified_since', None) # This should be in format which dateutil.parser recognizes, e.g. # 2014-10-29T12:00:00Z == 2014-10-29T12:00:00+0000 (UTC time) # or 2014-10-29T12:00:00+0200 (local time) if val: dt = parse_time(val, is_start=False) queryset = queryset.filter(Q(last_modified_time__gte=dt)) val = params.get('start', None) if val: dt = parse_time(val, is_start=True) queryset = queryset.filter(Q(end_time__gt=dt) | Q(start_time__gte=dt)) val = params.get('end', None) if val: dt = parse_time(val, is_start=False) queryset = queryset.filter(Q(end_time__lt=dt) | Q(start_time__lte=dt)) val = params.get('bbox', None) if val: bbox_filter = build_bbox_filter(srs, val, 'position') places = Place.geo_objects.filter(**bbox_filter) queryset = queryset.filter(location__in=places) # Filter by data source, multiple sources separated by comma val = params.get('data_source', None) if val: val = val.split(',') queryset = queryset.filter(data_source_id__in=val) # Negative filter by data source, multiple sources separated by comma val = params.get('data_source!', None) if val: val = val.split(',') queryset = queryset.exclude(data_source_id__in=val) # Filter by location id, multiple ids separated by comma val = params.get('location', None) if val: val = val.split(',') queryset = queryset.filter(location_id__in=val) # Filter by keyword id, multiple ids separated by comma val = params.get('keyword', None) if val: val = val.split(',') queryset = queryset.filter(keywords__pk__in=val) # Filter only super or sub events if recurring has value val = params.get('recurring', None) if val: val = val.lower() if val == 'super': queryset = queryset.filter(is_recurring_super=True) elif val == 'sub': queryset = queryset.filter(is_recurring_super=False) val = params.get('max_duration', None) if val: dur = parse_duration_string(val) cond = 'end_time - start_time <= %s :: interval' queryset = queryset.extra(where=[cond], params=[str(dur)]) val = params.get('min_duration', None) if val: dur = parse_duration_string(val) cond = 'end_time - start_time >= %s :: interval' queryset = queryset.extra(where=[cond], params=[str(dur)]) val = params.get('publisher', None) if val: queryset = queryset.filter(publisher__id=val) return queryset
35103268d301239d4c884d50ab5321ebb22ed235
8,614
import re


def process_user(enrollment, section):
    """Handle getting assignments for a single user

    Args:
        enrollment (canvasapi.enrollment.Enrollment): Canvas <Enrollment> object
        section (canvasapi.section.Section): Canvas <Section> object

    Returns:
        [list]: formatted list for writing to the CSV
    """
    missing = get_user_missing(section, enrollment.user["id"])

    login = course.get_user(enrollment.user["id"]).login_id
    regex = re.compile("@")

    if regex.search(login) is None:
        email = f"{login}@elkhart.k12.in.us"
    else:
        email = login

    return [
        enrollment.user["sortable_name"],
        email,
        section.name,
        enrollment.last_activity_at,
        len(missing),
        ", ".join(missing),
    ]
88b471433d99c659eabac82a797530def3baf8f2
8,615
def op(name, data, bucket_count=None, display_name=None, description=None,
       collections=None):
    """Create a histogram summary op.

    Arguments:
      name: A unique name for the generated summary node.
      data: A `Tensor` of any shape. Must be castable to `float64`.
      bucket_count: Optional positive `int`. The output will have this
        many buckets, except in two edge cases. If there is no data, then
        there are no buckets. If there is data but all points have the
        same value, then there is one bucket whose left and right
        endpoints are the same.
      display_name: Optional name for this summary in TensorBoard, as a
        constant `str`. Defaults to `name`.
      description: Optional long-form description for this summary, as a
        constant `str`. Markdown is supported. Defaults to empty.
      collections: Optional list of graph collections keys. The new
        summary op is added to these collections. Defaults to
        `[Graph Keys.SUMMARIES]`.

    Returns:
      A TensorFlow summary op.
    """
    if display_name is None:
        display_name = name
    summary_metadata = metadata.create_summary_metadata(
        display_name=display_name, description=description)
    with tf.name_scope(name):
        tensor = _buckets(data, bucket_count=bucket_count)
        return tf.summary.tensor_summary(name='histogram_summary',
                                         tensor=tensor,
                                         collections=collections,
                                         summary_metadata=summary_metadata)
368b5f0af59352e5f283566d9008c413d38692c9
8,616
def read_as_str(file):
    """
    Read the file and return its contents.
    """
    try:
        with open(file, 'r') as f:
            return f.read()
    except IOError:
        return ""
934bec1e47e0f9af09be7f4d695a8ddf09004f3f
8,617
def has_xml_header(filepath):
    """
    Return True if the first line of the file is <?xml
    :param filepath:
    :return:
    """
    return True
21fdbdf36cf08ca18d8a0f0d7f7d2201b243c558
8,618
def shikaku(givens):
    """Solver for Shikaku minipuzzles."""
    sym = grilops.make_number_range_symbol_set(0, SIZE * SIZE - 1)
    sg = grilops.SymbolGrid(LATTICE, sym)
    rc = grilops.regions.RegionConstrainer(
        LATTICE,
        solver=sg.solver,
        rectangular=True
    )
    shifter = Shifter(sg.solver)

    for p in LATTICE.points:
        sg.solver.add(sg.cell_is(p, rc.region_id_grid[p]))
        given = givens[p.y][p.x]
        if given > 0:
            given = shifter.given(p, given)
            sg.solver.add(rc.parent_grid[p] == grilops.regions.R)
            sg.solver.add(rc.region_size_grid[p] == given)
        else:
            sg.solver.add(rc.parent_grid[p] != grilops.regions.R)

    assert sg.solve()
    sg.print()
    print()
    shifter.print_shifts()
    print()
    return shifter.eval_binary()
e380ce634b342a19ecd466576e8e5d3ff28ccc25
8,619
import argparse


def parse_arguments(args_to_parse):
    """Parse the command line arguments.

    Parameters
    ----------
    args_to_parse: list of str
        Arguments to parse (split on whitespaces).
    """
    description = "PyTorch implementation of CNN's for Human Activity Recognition"
    default_config = get_config_section([CONFIG_FILE], "Preset")

    parser = argparse.ArgumentParser(description=description,
                                     formatter_class=FormatterNoDuplicate)

    # Learning options
    training = parser.add_argument_group('Training specific options')
    training.add_argument('-d', '--dataset', help="Path to training data.",
                          default=default_config['dataset'], choices=DATASETS)
    training.add_argument('-b', '--batch-size', type=int,
                          default=default_config['batch_size'],
                          help='Batch size for training.')
    training.add_argument('--lr', type=float, default=default_config['lr'],
                          help='Learning rate.')
    training.add_argument('-e', '--epochs', type=int,
                          default=default_config['epochs'],
                          help='Maximum number of epochs to run for.')
    training.add_argument('-s', '--is_standardized', type=bool,
                          default=default_config['is_standardized'],
                          help='Whether to standardize the data.')

    # Model Options
    model = parser.add_argument_group('Model specific options')
    model.add_argument('-m', '--model-type',
                       default=default_config['model'], choices=MODELS,
                       help='Type of encoder to use.')

    # General options
    general = parser.add_argument_group('General options')
    general.add_argument('-n', '--name', type=str, default=default_config['name'],
                         help="Name of the model for storing and loading purposes.")

    # Evaluation options
    evaluation = parser.add_argument_group('Evaluation specific options')
    evaluation.add_argument('--is-eval-only', action='store_true',
                            default=default_config['is_eval_only'],
                            help='Whether to only evaluate using precomputed model `name`.')
    evaluation.add_argument('--no-test', action='store_true',
                            default=default_config['no_test'],
                            help="Whether or not to compute the test losses.`")

    args = parser.parse_args(args_to_parse)
    return args
17266dedd3a8086c7382652363a55b5c1067cdc7
8,620
def invert_qgniw(qh, phi, phih, k, l, f0):
    """ Calculate the streamfunction given the potential vorticity.

    The algorithm is:
        1) Calculate wave potential vorticity
        2) Invert for wave, pw, and vortex stremfunctions, pv.
        3) Calculate the geostrophic stremfunction, p = pv+pw.
    """

    wv2 = k**2 + l**2
    wv2i = 1./wv2
    wv2i[0,0] = 0

    phih = np.fft.fft2(phi)
    phix, phiy = np.fft.ifft2(1j*k*phih), np.fft.ifft2(1j*l*phih)
    jach = np.fft.fft2((1j*(np.conj(phix)*phiy - np.conj(phiy)*phix)).real)
    jach[0,0] = 0

    # the wavy PV
    phi2 = np.abs(phi)**2
    gphi2h = -wv2*np.fft.fft2(phi2)
    qwh = 0.5*(0.5*gphi2h + jach)/f0

    # invert for psi
    pw = np.fft.ifft2((wv2i*qwh)).real
    pv = np.fft.ifft2(-(wv2i*qh)).real
    p = pv+pw
    ph = np.fft.fft2(p)

    return ph
b90c5f76c2be93d0a45b8c260e7a7228094ff4c0
8,621
def package_ref_key(package_name, ref):
    """Returns ndb.Key corresponding to particular PackageRef."""
    assert is_valid_package_ref(ref), ref
    return ndb.Key(PackageRef, ref, parent=package_key(package_name))
270b69baa1309bf29a054736ca6f898f23839ee3
8,622
def conv2d_backprop_input(dout, x_size, weight, stride=1, pad=0):
    """Backpropagation input for conv2d."""
    filter_num, _, filter_h, filter_w = weight.shape
    dout = dout.transpose(0, 2, 3, 1).reshape(-1, filter_num)
    col_w = weight.reshape(filter_num, -1).T
    dcol = np.dot(dout, col_w.T)
    dx = col2im(dcol, x_size, filter_h, filter_w, stride, pad)
    return dx
3749398101de25ec4f7b83c8a52754d18d0e8872
8,623
def get_feeds_from_url(url: str) -> list:
    """
    Try to parse the URL and find any RSS feeds in the webpage
    Adapted from: https://gist.github.com/alexmill/9bc634240531d81c3abe
    """
    logger.info(f"Attempting to find RSS feeds from {url}...")

    # If the URL itself is a proper RSS feed, just return it
    if is_rss_feed(url):
        logger.debug("URL is already a proper RSS feed")
        return [url]

    html = get_html(url)
    possible_feeds = get_feeds_from_links(html) + get_feeds_from_atags(url, html)
    return [url for url in set(possible_feeds) if is_rss_feed(url)]
0a8b10af257b2acfc4dbf4da886e96118cc5c32f
8,624
def fine_license_ratio(license_data, fine_data, column_name1=None, column_name2=None, year=None):
    """Get ratio of fines to licenses issued in a given year

    Parameters:
    -----------
    license_data: DataFrame
        Any subset of the Professional and Occupational Licensing dataframe
    fine_data: DataFrame
        Any subset of the Disciplinary Actions dataframe
    year: int
        Year to use to subset your data
    column_name1: Series
        Column containing years in license_data dataset
    column_name2: Series
        Column containing years in fine_data dataset

    Returns:
    --------
    tuple
        A tuple with license percentage as the first entry and fine
        percentage as the second (year, ratio)
    """
    int(year)
    str(column_name1)
    str(column_name2)
    if year not in license_data[column_name1].unique() or year not in fine_data[column_name2].unique():
        raise Exception(str(year) + " not a valid year for this dataset"
                        + "\n----------------------------------------")
        return "No Data for " + str(year)
    else:
        license_data = license_data[license_data[column_name1] == year]
        fine_data = fine_data[fine_data[column_name2] == year]
        try:
            license_count = len(license_data)
            fine_count = len(fine_data)
            fine_percentage = fine_count/license_count * 100
            license_percentage = 100 - fine_percentage
            return license_percentage, fine_percentage, license_count, fine_count
        except ZeroDivisionError:
            print("Hmmm...It looks like there is are no licenses yet for the year " + str(year))
97dad8c4f5c78b1291016e1cd9fa9c5f8ef20beb
8,625
def import_obj(obj_path, hard=False):
    """ import_obj imports an object by uri, example::

        >>> import_obj("module:main")
        <function main at x>

    :param obj_path: a string represents the object uri.
    :param hard: a boolean value indicates whether to raise an exception on
                 import failures.
    """
    try:
        # ``__import__`` of Python 2.x could not resolve unicode, so we need
        # to ensure the type of ``module`` and ``obj`` is native str.
        module, obj = str(obj_path).rsplit(':', 1)
        m = __import__(module, globals(), locals(), [obj], 0)
        return getattr(m, obj)
    except (ValueError, AttributeError, ImportError):
        if hard:
            raise
fe6bc0cd8fff5c0d5b1ba9fc0e153b2004b09755
8,626
def Get_Histogram_key(qubitOperator):
    """
    Function to obtain histogram key string for Cirq Simulator.

    e.g.
        PauliWord = QubitOperator('X0 Z2 Y3', 0.5j)
        returning: histogram_string = '0,2,3'

    Args:
        qubitOperator (openfermion.ops._qubit_operator.QubitOperator): QubitOperator

    Returns:
        histogram_string (str): Returns string corresponding to histogram key
                                (required for Cirq simulator)
    """
    qubit_No, PauliStr = zip(*list(*qubitOperator.terms.keys()))
    histogram_string = ','.join([str(i) for i in qubit_No])
    return histogram_string
f574f7b3f6c43de7b3121d4e49240a84a4bcfdfc
8,627
def get_organizations():
    """
    Queries API for a list of all basketball organizations registered with
    Basketbal Vlaanderen.

    :return: list of basketball organizations
    :rtype: [Organization]
    """
    organizations = []
    for organization_data in get_list():
        organizations.append(Organization(organization_data))
    return list(sorted(organizations, key=lambda o: o.guid))
bcf29925465cde99399214cbe44648bbfd136e1b
8,628
def logout():
    """Logout."""
    logout_user()
    flash('您已成功登出', 'info')  # "You have been logged out successfully"
    return redirect(url_for('public.home'))
e816d67e4084bad0549d0b932ec806de55cfc41d
8,629
def get_column_labels():
    """
    This function generates a list of column names for the extracted features
    that are returned by the get_features function.
    """
    # list the names of the extracted features
    feature_labels = ["amplitude_envelope", "root_mean_square_energy",
                      "zero_crossing_rate", "band_energy_ratio",
                      "spectral_centroid", "spectral_bandwidth",
                      "spectral_contrast", "spectral_flatness",
                      "spectral_rolloff", "spectral_rolloff_99",
                      "spectral_rolloff_01"]

    # list the names of the used descriptive statistics
    measure_suffixes = ["_mean", "_min", "_max", "_std"]

    # create a list to append the generated column names to
    columns = ["row_index"]

    # generate some labels and append them to the list
    columns.extend([l+s for l in feature_labels for s in measure_suffixes])

    # append labels for the distributed AE
    columns.extend(["amplitude_envelope_f1", "amplitude_envelope_f2",
                    "amplitude_envelope_f3", "amplitude_envelope_f4",
                    "amplitude_envelope_f5"])

    # append labels for the distributed RMS
    columns.extend(["root_mean_square_energy_f0", "root_mean_square_energy_f1",
                    "root_mean_square_energy_f2", "root_mean_square_energy_f3",
                    "root_mean_square_energy_f4", "root_mean_square_energy_f5",
                    "root_mean_square_energy_f6", "root_mean_square_energy_f7",
                    "root_mean_square_energy_f8", "root_mean_square_energy_f9",
                    "root_mean_square_energy_f10"])

    # append labels for the distributed ZCR
    columns.extend(["zero_crossing_rate_f0", "zero_crossing_rate_f1",
                    "zero_crossing_rate_f2", "zero_crossing_rate_f3",
                    "zero_crossing_rate_f4", "zero_crossing_rate_f5",
                    "zero_crossing_rate_f6", "zero_crossing_rate_f7",
                    "zero_crossing_rate_f8", "zero_crossing_rate_f9",
                    "zero_crossing_rate_f10"])

    return columns
c140ced9c4344bd7a4029d331d50ebe0750fac0a
8,630
def corr_finder(X, threshold):
    """ For each variable, find the independent variables that are equal to
        or more highly correlated than the threshold with the curraent variable

    Parameters
    ----------
    X : pandas Dataframe
        Contains only independent variables and desired index
    threshold: float < 1
        Minimum level of correlation to search for

    Returns
    -------
    Dictionary with the key's as independent variavble indices and values as a
    list of variables with a correlation greater to or equal than the threshold.

    Correlation Matrix
    """
    corr_matrix = X.corr(method='kendall')  # create the correlation matrix
    corr_dic = {}
    for row_name, ser in corr_matrix.iterrows():  # search through each row
        corr_list = []  # list of variables past/at the threshold
        for idx, val in ser.iteritems():  # search through the materials of each row
            if (abs(val) > threshold) and (abs(val) != 1):
                # if the variable correlates past/at the threshold
                corr_list.append(idx)
        if len(corr_list) > 0:
            corr_dic[row_name] = corr_list
    return corr_dic, corr_matrix
3b32a3eacb721ff09f6b5614c0ada82df814d5fa
8,631
def magic_file(filename):
    """
    Returns tuple of (num_of_matches, array_of_matches)
    arranged highest confidence match first.

    :param filename: path to file
    :return: list of possible matches, highest confidence first
    """
    head, foot = _file_details(filename)
    if not head:
        raise ValueError("Input was empty")
    try:
        info = _identify_all(head, foot, ext_from_filename(filename))
    except PureError:
        info = []
    info.sort(key=lambda x: x[3], reverse=True)
    return info
3fc625006c5589b14c73fff501d48a523d1bce5b
8,632
def plot_sentiment(
    df: pd.DataFrame, title: str = None, height: int = 300, label_col: str = "label"
) -> Figure:
    """
    Plot the predicted sentiment of the sentences.

    Args:
        df (pd.DataFrame): Dataframe with the outputs of a sentiment analysis model.
        title (str): Title of the plot.
        height (int): Height of the plot.
        label_col (str): Column name of the sentiment.

    Returns:
        Figure: Plotly figure with the percentage of hate speech.
    """
    sentiments_count = get_counts(df, label_col=label_col)
    labels_order = ["neutro", "positivo", "negativo"]
    fig = px.bar(
        x=labels_order,
        y=[
            float(sentiments_count[sentiments_count[label_col] == label].percent)
            for label in labels_order
        ],
        title=title,
    )
    fig.update_traces(
        marker_color=["gray", "green", "red"],
        hovertemplate="%{y:.1f}%<extra></extra>",
    )
    fig.update_layout(
        xaxis_title="Sentimento",
        yaxis_title="Percentagem de frases",
        margin=dict(l=0, r=0, b=0, t=0, pad=0),
        height=height,
    )
    return fig
bf5f7f65fa4cbee6b0abfc77d1f47b6f175ed8f9
8,633
def subf(pattern, format, string, count=0, flags=0):  # noqa A002
    """Apply `sub` with format style replace."""
    is_replace = _is_replace(format)
    is_string = isinstance(format, (_util.string_type, _util.binary_type))
    if is_replace and not format.use_format:
        raise ValueError("Compiled replace is not a format object!")

    pattern = compile_search(pattern, flags)
    rflags = FORMAT if is_string else 0
    return _re.sub(
        pattern,
        (compile_replace(pattern, format, flags=rflags) if is_replace or is_string else format),
        string, count, flags
    )
7ef105eeafb5ab4e6c3405206d850520d3489314
8,634
def independent_connections(fn):
    """Target must support simultaneous, independent database connections."""

    # This is also true of some configurations of UnixODBC and probably win32
    # ODBC as well.
    return _chain_decorators_on(
        fn,
        no_support('sqlite', 'Independent connections disabled when '
                   ':memory: connections are used'),
        exclude('mssql', '<', (9, 0, 0),
                'SQL Server 2005+ is required for independent connections'),
    )
cf11838e5b32cc2a6c165fda38baf4d680beda4a
8,635
def Route(template, handler):
    """Make a Route whose placeholders accept only allowable map IDs or labels."""
    return webapp2.Route(template.replace('>', r':[\w-]+>'), handler)
2ec563ed4db815ee98d050e8e9a672a7a53ca010
8,636
def values_iterator(dictionary):
    """Add support for python2 or 3 dictionary iterators."""
    try:
        v = dictionary.itervalues()  # python 2
    except:
        v = dictionary.values()  # python 3
    return v
e4fef48fd1b2a9189d81465fec259efe102c5b75
8,637
def _standardize_bicluster(bicluster):
    """Standardize a bicluster by subtracting the mean and dividing by
    standard deviation.

    Ref.:
        Pontes, B., Girldez, R., & Aguilar-Ruiz, J. S. (2015). Quality measures
        for gene expression biclusters. PloS one, 10(3), e0115497.

    Note that UniBic synthetic data was generated with mean 0 and standard
    deviation 1, so it is already standardized.

    Args:
        bicluster (array-like): The bicluster data values.

    Returns:
        (float): The standardized bicluster.
    """
    _bicluster = np.copy(bicluster)

    row_std = np.std(_bicluster, axis=0)
    row_std[row_std == 0] = 1

    row_mean = np.mean(_bicluster, axis=0)

    return (_bicluster - row_mean) / row_std
371adc72f64bec4039e0fab65e8acb77e37063d8
8,638
def get_deployment_polarion_id():
    """
    Determine the polarion_id of the deployment or upgrade

    Returns:
        str: polarion_id of the deployment or upgrade
    """
    polarion_config = config.REPORTING.get('polarion')
    if polarion_config:
        if config.UPGRADE.get('upgrade'):
            if config.DEPLOYMENT.get('subscription_plan_approval') == 'Manual':
                return polarion_config.get('upgrade_manual_id')
            else:
                return polarion_config.get('upgrade_auto_id')
        else:
            return polarion_config.get('deployment_id')
475689b0adac68fdaf60d77af88f5b6c3e229003
8,639
import shlex


def parse_command(message) -> ParsedStatusCommand:
    """Parsing command arguments to arguments list"""
    LOGGER.debug('Got message: %s', message)
    try:
        _, target, *args = shlex.split(message)
        return ParsedStatusCommand(target, *args)
    except ValueError as ex:
        raise CommandParsingError('Incorrect `/status` command') from ex
    except TypeError as ex:
        raise CommandParsingError('Too many arguments for `/status` command') from ex
868841d0b218a02f7b59b9e4302d22bd5d6ed57e
8,640
import io
import traceback


def mail_on_fail(func: callable):
    """Send an email when something fails. Use this as a decorator."""
    @wraps(func)
    def _wrap(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            # Handle recursive error handling.
            # This way if a task wrapped in `@mail_on_fail` sends an email, we
            # don't sent multiple emails.
            if getattr(e, '__email_sent__', False):
                raise e
            # Get the stack trace
            f = io.StringIO()
            traceback.print_exc(file=f)
            f.seek(0)
            # Render the email body
            html = render_template(
                'mail/error.html',
                stack_trace=f.read(),
                func_name=getattr(func, '__name__', repr(func))
            )
            # Send the email
            msg = ErrorEmail(html=html)
            mail.send(msg)
            # Mark as sent
            e.__email_sent__ = True
            # Raise the error
            raise e
    return _wrap
6672ec78551f26e875002b24ef21f331ab171540
8,641
def base_conv(num, base):
    """Write a Python program to converting an Integer to a string in any base"""
    _list = []
    if num//base == 0:
        return str(num%base)
    else:
        return (base_conv(num//base, base) + str(num%base))
9fcc28ccfe8ba80d974cc4012aad456bfb8c9544
8,642
def handle_log(request): """ Handle streaming logs to a client """ params = request.match_info log_dir = py.path.local('data').join( params['project_slug'], params['job_slug'], ) # Handle .log ext for DockCI legacy data log_path_bare = log_dir.join(params['stage_slug']) log_path_ext = log_dir.join('%s.log' % params['stage_slug']) log_path = None if log_path_bare.check(): log_path = log_path_bare elif log_path_ext.check(): log_path = log_path_ext if log_path is None: return web.Response(status=404) byte_seek = try_qs_int(request, 'seek') line_seek = try_qs_int(request, 'seek_lines') bytes_count = try_qs_int(request, 'count') lines_count = try_qs_int(request, 'count_lines') if byte_seek and line_seek: return web.Response( body="byte_seek and line_seek are mutually exclusive".encode(), status=400, ) if bytes_count and lines_count: return web.Response( body="bytes_count and lines_count are mutually exclusive".encode(), status=400, ) response = web.StreamResponse(status=200, headers={ 'content-type': 'text/plain', }) yield from response.prepare(request) with log_path.open('rb') as handle: if byte_seek is not None: _seeker_bytes(handle, byte_seek) if line_seek is not None: _seeker_lines(handle, line_seek) if bytes_count is not None: gen = _reader_bytes(handle, bytes_count) elif lines_count is not None: gen = _reader_lines(handle, lines_count) else: gen = _reader_bytes(handle) for data in gen: response.write(data) yield from response.drain() return response
4d5b4bd14ff759cd62b72224c0a2d1c99b7dc786
8,643
def get_scheme(patterns, config):
    """Returns the encoding scheme specified by the given config object

    Args:
        patterns (list(list)): List of input patterns
        config (dict): The config object
    """
    assert(type(patterns) == list and len(patterns) > 0)
    assert(type(config) == dict)

    min_max_values = utils.get_min_max_values(patterns)
    pattern_dims = len(patterns[0])

    scheme = None
    method = config["method"]
    if method == "quantize":
        bits_per_attr = config["quantize"]["bits_per_attr"]
        bits_set_per_attr = config["quantize"]["bits_set_per_attr"]
        assert(type(bits_per_attr) == list and len(bits_per_attr) == pattern_dims)
        assert(type(bits_set_per_attr) == list and len(bits_set_per_attr) == pattern_dims)
        scheme = schemes.QuantizationEncoder(min_max_values, bits_per_attr, bits_set_per_attr)
    elif method == "donothing":
        bits_set = config["donothing"]["bits_set"]
        scheme = schemes.DoNothingEncoder(bits_set)
    elif method == "som":
        som_path = config["som"]["som_file_path"]
        scheme = schemes.SOMEncoder(som_path)
    elif method == "baum":
        segment_sizes = config["baum"]["segment_sizes"]
        scheme = schemes.BaumEncoder(segment_sizes)
    else:
        raise ValueError("Unrecognized encoding method: " + method)

    return scheme
de9cc88bed0446854903832fb7bc64c24cc37144
8,644
def open_signatures_window(*args):
    """
    open_signatures_window() -> TWidget *

    Open the signatures window ( 'ui_open_builtin' ).

    @return: pointer to resulting window
    """
    return _ida_kernwin.open_signatures_window(*args)
e699df0192755b28d3c1c324c485ca4486cab98e
8,645
def get_subscription_id(_ctx=ctx):
    """
    Gets the subscription ID from either the node or
    the provider context
    """
    return get_credentials(_ctx=_ctx).subscription_id
23af53f6f807e14ad629e60ea79e21e8ed3eeef5
8,646
def origin_trial_function_call(feature_name, execution_context=None):
    """Returns a function call to determine if an origin trial is enabled."""
    return 'RuntimeEnabledFeatures::{feature_name}Enabled({context})'.format(
        feature_name=feature_name,
        context=execution_context if execution_context else "execution_context")
201dbe8449373dbad0144633350d3e6adbb58b80
8,647
def get_bit(byteval, index) -> bool:
    """retrieve bit value from byte at provided index"""
    return (byteval & (1 << index)) != 0
1fe020449ae2ae2513073835db6f75b24e558fdb
8,648
def upsert_target(data, analyst): """ Add/update target information. :param data: The target information. :type data: dict :param analyst: The user adding the target. :type analyst: str :returns: dict with keys "success" (boolean) and "message" (str) """ if 'email_address' not in data: return {'success': False, 'message': "No email address to look up"} target = Target.objects(email_address__iexact=data['email_address']).first() is_new = False if not target: is_new = True target = Target() target.email_address = data['email_address'] bucket_list = False ticket = False if 'department' in data: target.department = data['department'] if 'division' in data: target.division = data['division'] if 'organization_id' in data: target.organization_id = data['organization_id'] if 'firstname' in data: target.firstname = data['firstname'] if 'lastname' in data: target.lastname = data['lastname'] if 'note' in data: target.note = data['note'] if 'title' in data: target.title = data['title'] if 'bucket_list' in data: bucket_list = data.get(form_consts.Common.BUCKET_LIST_VARIABLE_NAME) if 'ticket' in data: ticket = data.get(form_consts.Common.TICKET_VARIABLE_NAME) if bucket_list: target.add_bucket_list(bucket_list, analyst) if ticket: target.add_ticket(ticket, analyst) try: target.save(username=analyst) target.reload() if is_new: run_triage(target, analyst) return {'success': True, 'message': "Target saved successfully", 'id': str(target.id)} except ValidationError, e: return {'success': False, 'message': "Target save failed: %s" % e}
4baa064c52bbeacdc18323196c1762cabd9607aa
8,649
import torch


def batch_data(data, batch_size):
    """
    data is a dict := {'x': [numpy array], 'y': [numpy array]} (on one client)
    returns x, y, which are both numpy array of length: batch_size
    """
    data_x = data["x"]
    data_y = data["y"]

    # randomly shuffle data
    np.random.seed(100)
    rng_state = np.random.get_state()
    np.random.shuffle(data_x)
    np.random.set_state(rng_state)
    np.random.shuffle(data_y)

    # loop through mini-batches
    batch_data = list()
    for i in range(0, len(data_x), batch_size):
        batched_x = data_x[i : i + batch_size]
        batched_y = data_y[i : i + batch_size]
        batched_x = torch.from_numpy(np.asarray(batched_x)).float()
        batched_y = torch.from_numpy(np.asarray(batched_y)).long()
        batch_data.append((batched_x, batched_y))
    return batch_data
58cfde03668dd61e23bdb8b96527ae17176c4872
8,650
import ast import sys def ast_parse_node(node): """ :param ast.Node node: an ast node representing an expression of variable :return ast.Node: an ast node for: _watchpoints_obj = var if <var is a local variable>: # watch(a) _watchpoints_localvar = "a" elif <var is a subscript>: # watch(a[3]) _watchpoints_parent = a _watchpoints_subscr = 3 elif <var is an attribute>: # watch(a.b) _watchpoints_parent = a _watchpoints_attr = "b" """ root = ast.Module( body=[ ast.Assign( targets=[ ast.Name(id="_watchpoints_obj", ctx=ast.Store()) ], value=node ) ], type_ignores=[] ) if type(node) is ast.Name: root.body.append( ast.Assign( targets=[ ast.Name(id="_watchpoints_localvar", ctx=ast.Store()) ], value=ast.Constant(value=node.id) ) ) elif type(node) is ast.Subscript: root.body.append( ast.Assign( targets=[ ast.Name(id="_watchpoints_parent", ctx=ast.Store()) ], value=node.value ) ) if sys.version_info.minor <= 8 and type(node.slice) is ast.Index: value_node = node.slice.value elif sys.version_info.minor >= 9 and type(node.slice) is not ast.Slice: value_node = node.slice else: raise ValueError("Slice is not supported!") root.body.append( ast.Assign( targets=[ ast.Name(id="_watchpoints_subscr", ctx=ast.Store()) ], value=value_node ) ) elif type(node) is ast.Attribute: root.body.append( ast.Assign( targets=[ ast.Name(id="_watchpoints_parent", ctx=ast.Store()) ], value=node.value ) ) root.body.append( ast.Assign( targets=[ ast.Name(id="_watchpoints_attr", ctx=ast.Store()) ], value=ast.Constant(value=node.attr) ) ) ast.fix_missing_locations(root) return root
22b3b6fed61e18ed6dc742040a365ebca8847fd5
8,651
def simplify_board_name(board_name: str) -> str:
    """Removes the following from board names:
    - `x86-`, e.g. `x86-mario`
    - `_he`, e.g. `x86-alex_he`
    - `&` - e.g. `falco & falco_II`
    - ',' - e.g. `hoho, but substitute a dp to vga chip` (why)

    Args:
        board_name: the board name to simplify

    Returns:
        str: a simplified board name
    """
    if '&' in board_name:
        # Always try to extract the first of two. For the time being,
        # only legacy devices have this format and the second element
        # is always the 'II' one.
        board_name = board_name.split('&')[0].strip()
    if ',' in board_name:
        # hohoho
        board_name = board_name.split(',')[0].strip()
    return TO_REMOVE.sub('', board_name.lower())
bd6a9756aa6e6725b9727825f52ba544e2e4a97d
8,652
import argparse


def parse_args():
    """
    Parse CLI arguments.

    Returns
    -------
    argparse.Namespace
        Parsed arguments
    """
    parser = argparse.ArgumentParser(
        description="Optimize model for inference")
    parser.add_argument("-m", "--model", dest="model_type", help="Model type",
                        choices=["large"], type=str, default=None, required=False)
    parser.add_argument("--config", dest="config_override",
                        help="Path to model config override file",
                        type=str, default=None, required=False)
    parser.add_argument("--frvsr-weights", help="Path to FRVSR weights",
                        type=str, default=None, required=False)
    parser.add_argument("--gan-weights", help="Path to GAN weights",
                        type=str, default=None, required=False)
    parser.add_argument("output", help="Output", type=str, default=None)
    args = parser.parse_args()
    if args.frvsr_weights is None and args.gan_weights is None:
        parser.error("should specify FRVSR or GAN weights")
    return args
71eb3b1a567cdc7705fd6a2120c1ed23c9e8afd8
8,653
def delete_news_site(user_id, news_name):
    """
    Delete subscription to user list
    Params:
        - user_id: The user email
        - news_name: The name of news provider
    Return: void
    """
    user_info = get_user_by_email(user_id)
    user_info = user_info.to_dict()
    list_news = user_info['news_sites']
    if list_news.count(news_name) != 0:
        list_news.remove(news_name)
    else:
        # The user is not subscribed to the currently passed news_name
        return True
    user_info['news_sites'] = list_news
    db.collection('users').document(user_id).update(user_info)
02f4ea485b2822c1a614e39dae3ef3aa924596b0
8,654
import datetime


def get_time_str(dt: datetime.datetime = None, tz_default=LocalTimeZone):
    """
    @param dt: when None, the current time is used
    @param tz_default: default time zone to assume when dt carries no tzinfo
    """
    if not dt:
        dt = datetime.datetime.now()
    dt = convert_zone(dt, tz_default=tz_default)
    time_str = dt.isoformat().split('+')[0]
    return time_str + 'Z'
41f9f1465fe88e35450569995a14dfce6ebc9bc5
8,655
def plotLikesTablePair( likesTableFNs, plotFile, nonNormedStats = (), includeSpecialBins = True, getio = None ): """Visually plot a likes table. """ if getio: return dict( depends_on = likesTableFNs, creates = plotFile, attrs = dict( piperun_short = True ) ) likesTable = map( LoadLikesTable, likesTableFNs ) hitsLikes = [ IDotData( likesTable[ i ].hitsLikes ) for i in range( 2 ) ] missLikes = [ IDotData( likesTable[ i ].missLikes ) for i in range( 2 ) ] regionLikes = [ IDotData( likesTable[ i ].regionLikes ) for i in range( 2 ) ] pp.figure( figsize = ( 16, 18 ) ) stat_start, stat_end, stat_nbins = LoadBins( likesTable[0].likesBins ) stat_start1, stat_end1, stat_nbins1 = LoadBins( likesTable[1].likesBins ) assert( stat_start == stat_start1 ) assert( stat_end == stat_end1 ) assert( stat_nbins == stat_nbins1 ) assert( hitsLikes[0].headings == hitsLikes[1].headings ) assert( missLikes[0].headings == missLikes[1].headings ) assert( regionLikes[0].headings == regionLikes[1].headings ) regionLine = None for statNum, stat in enumerate( hitsLikes[0].headings ): rawStep = 1.0 / len( hitsLikes[0].headings ) * 0.93 rawBottom = rawStep * statNum rawTop = rawBottom + rawStep r = ( 0.1, 0.05 + rawBottom, 0.8, rawStep * 0.6 ) dbg( 'r' ) pp.axes( r ) pp.title( stat + ( ' (non-normed)' if stat in nonNormedStats else '' ) ) assert len( hitsLikes[0] ) == len( missLikes[0] ) == stat_nbins[ stat ] + CMSBins.maxSpecialBins binSize = ( stat_end[stat] - stat_start[stat] ) / stat_nbins[stat] binStarts = [ stat_start[stat] + binSize * i for i in range( stat_nbins[ stat ] + ( CMSBins.stat_numSpecialBins[ stat ] if includeSpecialBins else 0 ) ) ] pp.gca().set_xticks( binStarts ) pp.gca().set_xticklabels( [ '%.2f' % b for b in binStarts[: stat_nbins[stat] ] ] + ( list( DictGet( CMSBins.stat_specialBinNames, stat, () ) ) if includeSpecialBins else [] ), rotation = 'vertical' ) # pp.gca().set_xticklabels( map( str, binStarts ) + [ 's%d' % i for i in range( CMSBins.stat_numSpecialBins[ stat ] ) ] ) dbg( 'stat binStarts' ) hitsLine = [ None, None ] missLine = [ None, None ] regionLine = [ None, None ] for i, style in ( ( 0, '-' ), ( 1, ':' ) ): hitsLine[i], = pp.plot( binStarts , hitsLikes[i][ stat ][:len( binStarts )], 'r' + style ) missLine[i], = pp.plot( binStarts , missLikes[i][ stat ][:len( binStarts )], 'g' + style ) regionLine[i], = pp.plot( binStarts, regionLikes[i][ stat ][:len(binStarts)], 'b' + style ) pp.figlegend( filter( None, ( hitsLine[0], missLine[0], regionLine[0], hitsLine[1], missLine[1], regionLine[1] ) ), ( 'selected SNPs 1', 'neutral SNPs in neutral regions 1', 'region snps 1', 'selected SNPs 2', 'neutral SNPs in neutral regions 2', 'region snps 2', ), 'upper center' ) pp.savefig( plotFile )
6671877a21749747ce45a020d7a87eec86280d8c
8,656
import os
import math


def get_res_details(f):
    """ extracts bmaj, bmin, bpa and coordinate increment"""
    cmd = "prthd in=%s 2>/dev/null" % (f)
    pcmd = os.popen(cmd)
    output = pcmd.read()
    output = output.split('\n')
    #print(output)
    for lin in output:
        if 'Beam Size' in lin:
            print(lin)
            bmaj = float(lin.split()[2])
            bmin = float(lin.split()[4])
        if 'Position ang' in lin:
            print(lin.split())
            bpa = float(lin.split()[2])
        if lin.startswith("RA"):
            inc = math.fabs(float(lin.split()[4]))
    return bmaj, bmin, bpa, inc
5a95c08f494beffb85b2942e90561837bc82fbd0
8,657
def del_api_msg():
    """
    @api {post} /v1/interfaceapimsg/del InterfaceApiImsg_delete API message info
    @apiName interfaceApiImsgDel
    @apiGroup Interface
    @apiDescription Delete API message information
    @apiParam {int} apiMsgId  API message id
    @apiParamExample {json} Request-Example:
    {
        "apiMsgId": 1,
    }
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "msg": "删除成功",
        "status": 1
    }
    """
    data = request.json
    api_msg_id = data.get('apiMsgId')
    jsondata = InterfaceApiMsgBusiness.del_api_msg(api_msg_id)
    return jsondata
fabd5a2fc257219e2568991f9520587d4053c909
8,658
def find_nearest(array, value):
    """
    Find nearest value of interest in array (used for frequencies,
    no double value issues)

    Parameters
    ----------
    array: array
        Give the array in which you want to find index of value nearest-by
    value: int or float
        The value of interest

    Return
    ------
    idx: int
        Index of value nearest by value of interest
    """
    array = np.asarray(array)
    idx = (np.abs(array - value)).argmin()
    return idx
e96a87b5b857a8cafbc0c6371b395040dde48e8d
8,659
import math


def tube_light_generation_by_func(k, b, alpha, beta, wavelength, w=400, h=400):
    """Description:
        This function generates a tube light (light beam) with the given
        parameters, in which k and b represent the function y = k*x + b
        # TODO: Test k, b range

    Args:
        k (int): y = k*x + b
        b (int): y = k*x + b
        alpha (int): An integer (0,1] denotes the illumination intensity.
        beta (int): Attenuation factor. Depends on the attenuation function,
            currently beta/distance^2
        wavelength (integer): An integer (380, 750) denotes the wavelength of the light.
        w (int, optional): Width. Defaults to 400.
        h (int, optional): Height. Defaults to 400.

    Returns:
        tube light: a numpy array with shape (w,h,3)
    """
    tube_light = np.zeros((w, h, 3))
    full_light_end_y = int(math.sqrt(beta) + 0.5)
    light_end_y = int(math.sqrt(beta * 20) + 0.5)
    c = wavelength_to_rgb(wavelength)
    for x in range(w):
        for y in range(h):
            distance = abs(k*x - y + b) / math.sqrt(1 + k*k)
            if distance < 0:
                print(distance)
            if distance <= full_light_end_y:
                tube_light[y, x, 0] = c[0] * alpha
                tube_light[y, x, 1] = c[1] * alpha
                tube_light[y, x, 2] = c[2] * alpha
            elif distance > full_light_end_y and distance <= light_end_y:
                attenuation = beta/(distance * distance)
                tube_light[y, x, 0] = c[0] * alpha * attenuation
                tube_light[y, x, 1] = c[1] * alpha * attenuation
                tube_light[y, x, 2] = c[2] * alpha * attenuation
    return tube_light
c4fe0e817233f7e983e2bc513377e63379e23f93
8,660
def get_book(isbn):
    """
    Retrieve a specific book record by it's ISBN
    ---------------------------------------------
    Endpoints:
        GET /books/isbn
        GET /books/isbn?act=(borrow|handback)
    @QueryParams:
        act: (optional) specific action on book
             Possible values: borrow, handback
    @Response:
        200: return book record
    """
    try:
        book = Book.objects.get(isbn=isbn)
        if request.args.get("act") == "borrow":
            if book["available"] > 0:
                book["available"] -= 1
            else:
                return "This book is unavailable"
        elif request.args.get("act") == "handback":
            if book["available"] < book["copies"]:
                book["available"] += 1
            else:
                return "You can't adda new copy"
        book.save()
        return jsonify(book)
    except:
        return "We don't carry this book"
fd1471234f6c73062569fea0ae489da3dc9af8ac
8,661
def toint16(i):
    """ Convert a number to a hexadecimal string of length 2 """
    return f'{i:02x}'
3effd2b3f011a962beac19682ad29e930eb0f057
8,662
def is_phone(text):
    """
    Check whether a string is a landline phone number.

    :param text: the string to check
    :return: True if it matches, False otherwise
    """
    return check_string(text, '\(?0\d{2,3}[) -]?\d{7,8}$')
a90e8d28737b94f02381ed6e959e0a155628eaae
8,663
def get_loc(frameInfo, bbox_type):
    """Return GeoJSON bbox."""
    bbox = np.array(frameInfo.getBBox()).astype(np.float)
    print("get_loc bbox: %s" % bbox)
    if bbox_type == "refbbox":
        bbox = np.array(frameInfo.getReferenceBBox()).astype(np.float)
    coords = [
        [bbox[0, 1], bbox[0, 0]],
        [bbox[1, 1], bbox[1, 0]],
        [bbox[2, 1], bbox[2, 0]],
        [bbox[3, 1], bbox[3, 0]],
        [bbox[0, 1], bbox[0, 0]],
    ]
    print("get_loc coords : [%s]" % coords)
    return {
        "type": "Polygon",
        "coordinates": [coords]
    }
a95a9eb6ae9e33b5d69451fb1b34e19c7b0be8d3
8,664
def load_df(input_path, fname, ext):
    """Read chain as Pandas DataFrame"""
    fname = os.path.join(input_path, fname + ext)
    print('loading %s' % fname)
    assert(os.path.isabs(fname))
    X = pd.DataFrame.from_csv(fname)
    return X
be4c0d82bdb8881d3ad555b215469df7e8daaefe
8,665
from typing import Set


def _load_order_component(comp_name: str, load_order: OrderedSet,
                          loading: Set) -> OrderedSet:
    """Recursive function to get load order of components.

    Async friendly.
    """
    component = get_component(comp_name)

    # If None it does not exist, error already thrown by get_component.
    if component is None:
        return OrderedSet()

    loading.add(comp_name)

    for dependency in getattr(component, 'DEPENDENCIES', []):
        # Check not already loaded
        if dependency in load_order:
            continue

        # If we are already loading it, we have a circular dependency.
        if dependency in loading:
            _LOGGER.error("Circular dependency detected: %s -> %s",
                          comp_name, dependency)
            return OrderedSet()

        dep_load_order = _load_order_component(dependency, load_order, loading)

        # length == 0 means error loading dependency or children
        if not dep_load_order:
            _LOGGER.error("Error loading %s dependency: %s",
                          comp_name, dependency)
            return OrderedSet()

        load_order.update(dep_load_order)

    load_order.add(comp_name)
    loading.remove(comp_name)

    return load_order
c9d2adc8dbcf392e3d904b1e5f9d47f623e5646e
8,666
def clean_english_str_tf(input_str):
    """Clean English string with tensorflow oprations."""
    # pylint: disable=anomalous-backslash-in-string
    string = tf.regex_replace(input_str, r"[^A-Za-z0-9(),!?\'\`<>/]", " ")
    string = tf.regex_replace(string, "\'s", " \'s")
    string = tf.regex_replace(string, "\'ve", " \'ve")
    string = tf.regex_replace(string, "n\'t", " n\'t")
    string = tf.regex_replace(string, "\'re", " \'re")
    string = tf.regex_replace(string, "\'d", " \'d")
    string = tf.regex_replace(string, "\'ll", " \'ll")
    string = tf.regex_replace(string, ",", " , ")
    string = tf.regex_replace(string, "!", " ! ")
    string = tf.regex_replace(string, "\(", " ( ")
    string = tf.regex_replace(string, "\)", " ) ")
    string = tf.regex_replace(string, "\?", " ? ")
    string = tf.regex_replace(string, "\s{2,}", " ")
    string = tf.string_strip(string)
    string = py_x_ops.str_lower(string)
    return string
6439f708dea8566d5706968811aed7478b1c107c
8,667
def _square_eqt(x, y, x0, y0, angle):
    """simple equation for a square.

    this returns: max(np.dstack([abs(x0 - x), abs(y0 -y)]), 2). this should
    then be compared to the "radius" of the square (half the width)

    the equation comes from this post:
    http://polymathprogrammer.com/2010/03/01/answered-can-you-describe-a-square-with-1-equation/

    x, y: either one number or arrays of the same size (as returned by meshgrid)

    angle: angle in degrees. should lie in [-45, 45)
    """
    x = np.array(x)
    y = np.array(y)
    vals = np.max(np.dstack([np.abs(x0 - x), np.abs(y0 - y)]), 2)
    if x.ndim == 2:
        # only rotate the image if x is 2d. in that case, we're returning a
        # rotated image of the square. if x is 1d, then we just want the
        # distance to the origin (which we don't rotate) -- the "radius" of
        # the square will need to be rotated
        vals = ndimage.rotate(vals, angle)
        vals = _reshape_rotated_image(vals, x.shape)
    return vals.reshape(x.shape)
4000bf329399dfc8b842c2a496cdea193dd47fc6
8,668
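A minimal sketch of how _square_eqt is typically used: evaluate it on a meshgrid and threshold against the square's "radius" (half its width) to obtain a binary mask. The grid size and radius below are arbitrary.

import numpy as np

x, y = np.meshgrid(np.arange(128), np.arange(128))
vals = _square_eqt(x, y, x0=64, y0=64, angle=0)
radius = 20              # half the side length
mask = vals <= radius    # True inside a 40x40 axis-aligned square centred at (64, 64)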
def multinomial(x, num_samples=1, replacement=False, name=None): """ This OP returns a Tensor filled with random values sampled from a Multinomical distribution. The input ``x`` is a tensor with probabilities for generating the random number. Each element in ``x`` should be larger or equal to 0, but not all 0. ``replacement`` indicates whether it is a replaceable sample. If ``replacement`` is True, a category can be sampled more than once. Args: x(Tensor): A tensor with probabilities for generating the random number. The data type should be float32, float64. num_samples(int, optional): Number of samples, default is 1. replacement(bool, optional): Whether it is a replaceable sample, default is False. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: A Tensor filled with sampled category index after ``num_samples`` times samples. Examples: .. code-block:: python import paddle paddle.seed(100) # on CPU device x = paddle.rand([2,4]) print(x) # [[0.5535528 0.20714243 0.01162981 0.51577556] # [0.36369765 0.2609165 0.18905126 0.5621971 ]] paddle.seed(200) # on CPU device out1 = paddle.multinomial(x, num_samples=5, replacement=True) print(out1) # [[3 3 0 0 0] # [3 3 3 1 0]] # out2 = paddle.multinomial(x, num_samples=5) # InvalidArgumentError: When replacement is False, number of samples # should be less than non-zero categories paddle.seed(300) # on CPU device out3 = paddle.multinomial(x, num_samples=3) print(out3) # [[3 0 1] # [3 1 0]] """ assert core.is_compiled_with_rocm() == False, ( "multinomial op is not supported on ROCM yet.") if in_dygraph_mode(): return _C_ops.multinomial(x, 'num_samples', num_samples, 'replacement', replacement) check_variable_and_dtype(x, "x", ["float32", "float64"], "multinomial") helper = LayerHelper("multinomial", **locals()) out = helper.create_variable_for_type_inference( dtype=convert_np_dtype_to_dtype_('int64')) helper.append_op( type='multinomial', inputs={"X": x}, outputs={'Out': out}, attrs={'num_samples': num_samples, 'replacement': replacement}) out.stop_gradient = True return out
6412cf815aaf8b9175946c501beacb716deb0c5c
8,669
def run_experiment( max_epochs, log=None, evaluate=True, projection=True, save_directory=".", save_file=None, save_interval=1, **configuration, ): """Runs the Proof of Constraint experiment with the given configuration :param max_epochs: number of epochs to run the experiment :param log: function to use for logging. None supresses logging :param evaluate: whether to run the evaluator once over the training data at the end of an epoch :param projection: whether to run the projection engine once over the testing data at the end of an epoch :param save_directory: optional directory to save checkpoints into. Defaults to the directory that the main script was called from :param save_file: base filename for checkpointing. If not provided, then no checkpointing will be performed :param save_interval: frequency of saving out model checkpoints. Defaults to every epoch :param configuration: kwargs for various settings. See default_configuration for more details :returns: the configuration dictionary, a tuple of all engines (first will be the training engine), and a corresponding tuple of all monitors """ # Determine the parameters of the analysis should_log = log is not None should_checkpoint = save_file is not None kwargs = default_configuration() kwargs.update(configuration) if should_log: log(kwargs) # Get the data train_dl, test_dl = get_data(kwargs) # Build the model, optimizer, loss, and constraint model, opt, proj_opt = build_model_and_optimizer(kwargs) loss, constraint = get_loss_and_constraint(kwargs) # Setup Monitors and Checkpoints training_monitor = TrainingMonitor("training") evaluation_monitor = TrainingMonitor("evaluation") if evaluate else None projection_monitor = ProjectionMonitor() if projection else None prediction_logger = PredictionLogger(model) if should_checkpoint: checkpointer = ModelAndMonitorCheckpointer( save_directory, save_file, kwargs, [training_monitor, evaluation_monitor, projection_monitor], prediction_logger, save_interval=save_interval, ) else: checkpointer = None # This is the trainer because we provide the optimizer trainer = create_engine( model, loss, constraint, opt, projection=False, monitor=training_monitor, regularization_weight=kwargs["regularization_weight"], error_fn=kwargs["error_fn"], device=kwargs["device"], tolerance=kwargs["tolerance"], max_iterations=kwargs["max_iterations"], ) # These are not trainers simply because we don't provide the optimizer if evaluate: evaluator = create_engine( model, loss, constraint, optimizer=None, projection=False, monitor=evaluation_monitor, regularization_weight=kwargs["regularization_weight"], error_fn=kwargs["error_fn"], device=kwargs["device"], tolerance=kwargs["tolerance"], max_iterations=kwargs["max_iterations"], ) else: evaluator = None if projection: projector = create_engine( model, loss, constraint, proj_opt, projection=True, monitor=projection_monitor, regularization_weight=kwargs["regularization_weight"], error_fn=kwargs["error_fn"], device=kwargs["device"], tolerance=kwargs["tolerance"], max_iterations=kwargs["max_iterations"], ) else: projector = None prediction_logger.attach(trainer, projector) # Ensure evaluation happens once per epoch @trainer.on(Events.EPOCH_COMPLETED) def run_evaluation(trainer): if training_monitor is not None and should_log: summary = training_monitor.summarize() log( f"Epoch[{trainer.state.epoch:05d}] Training Summary - {summary}" ) if evaluate: if should_log: log( f"Epoch[{trainer.state.epoch:05d}] - Evaluating on training data..." 
) evaluator.run(train_dl) if evaluation_monitor is not None and should_log: summary = evaluation_monitor.summarize() log( f"Epoch[{trainer.state.epoch:05d}] Evaluation Summary - {summary}" ) # Handle projection if projection: if should_log: log(f"Epoch[{trainer.state.epoch:05d}] - Projecting...") projector.run(test_dl, max_epochs=kwargs["max_iterations"]) if projection_monitor is not None and should_log: summary = projection_monitor.summarize() log( f"Epoch[{trainer.state.epoch:05d}] Generalization Summary - {summary}" ) if should_checkpoint: checkpointer(trainer) # Handle projection summary if projection: @projector.on(Events.EPOCH_COMPLETED) def projection_summary(projector): if projection_monitor is not None and should_log: summary = projection_monitor.summarize(during_projection=True) log( f"Epoch[{trainer.state.epoch:05d}-{projector.state.epoch:05d}] Projection Summary - {summary}" ) @projector.on(Events.EPOCH_COMPLETED) def projection_stop(projector): if projection_monitor is not None: if projection_monitor.should_stop_projection( kwargs["tolerance"] ): projector.terminate() @projector.on(Events.COMPLETED) def projection_unterminate(projector): # Unblock the projector so it can resume later projector.should_terminate = False if should_log: @trainer.on(Events.ITERATION_COMPLETED) def log_batch_summary(trainer): log( "Epoch[{:05d}] - Total loss: {:.5f}, Data Loss: {:.5f}, Constraint Error: {:.5f}".format( trainer.state.epoch, trainer.state.total_loss.cpu().item(), trainer.state.mean_loss.cpu().item(), trainer.state.constraints_error.cpu().item(), ) ) trainer.run(train_dl, max_epochs=max_epochs) # Save final model and monitors if should_checkpoint: checkpointer.retrieve_and_save(trainer) return ( kwargs, (trainer, evaluator, projector), (training_monitor, evaluation_monitor, projection_monitor), )
7ebfc72dc3ebe7047e708ffa0903f24de67d8134
8,670
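A hypothetical invocation of run_experiment, assuming the surrounding module's default configuration and data helpers are importable; every keyword shown is one of the documented parameters above.

config, engines, monitors = run_experiment(
    max_epochs=50,
    log=print,                  # any logging callable, None to silence
    evaluate=True,
    projection=True,
    save_directory="checkpoints",
    save_file="poc",            # enables checkpointing every save_interval epochs
    save_interval=5,
)
trainer, evaluator, projector = engines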
from functools import wraps
from typing import List
from typing import Callable
def compose_decorators(decorators: List[Callable]) -> Callable:
    """Compose multiple decorators into one.

    Helper function for combining multiple instrumentation
    decorators into one.

    :param list(Callable) decorators: A list of instrumentation decorators to be
        combined into a single decorator.
    """
    def composed(func: Callable, **dkwargs) -> Callable:
        @wraps(func)
        def wrapper(*args, **kwargs):
            wrapped_func = func
            for decorator in decorators:
                wrapped_func = decorator(wrapped_func, **dkwargs)
            return wrapped_func(*args, **kwargs)
        return wrapper
    return composed
14d8ecbf5af598419906ba9776bb40be6271279f
8,671
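A small self-contained sketch of compose_decorators: the two toy decorators below are hypothetical and exist only to show the composition order (the last decorator in the list ends up outermost at call time).

def shout(func):
    def inner(*args, **kwargs):
        return func(*args, **kwargs).upper()
    return inner

def exclaim(func):
    def inner(*args, **kwargs):
        return func(*args, **kwargs) + "!"
    return inner

instrumented = compose_decorators([shout, exclaim])

@instrumented
def greet(name):
    return f"hello {name}"

print(greet("ada"))  # "HELLO ADA!"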
import torch def xyz_to_polar(sphere_points): """ (B,3,N) -> theta, phi (B,2,N), r (B) x = r*cos(theta)*sin(phi) y = r*sin(theta)*sin(phi) z = r*cos(phi) """ r = torch.sqrt(torch.sum(sphere_points*sphere_points, dim=1)) theta = torch.atan2(sphere_points[:,1,:], sphere_points[:,0,:]) z = sphere_points[:,2,:]/r z.clamp_(-1.0+1e-5, 1.0-1e-5) phi = torch.acos(z) phi = phi.masked_fill(z==1, 0.0) r = torch.mean(r, dim=-1) assert(check_values(phi)) assert(check_values(theta)) return torch.stack([theta,phi], dim=1), r
3332240df5230d801800ab3601873d26872326fc
8,672
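For reference, a hedged sketch of the inverse mapping implied by the docstring above (angles (theta, phi) of shape (B,2,N) and radius r of shape (B) back to points of shape (B,3,N)); this helper is not part of the original module.

import torch

def polar_to_xyz(angles, r):
    theta, phi = angles[:, 0, :], angles[:, 1, :]
    r = r.unsqueeze(-1)                       # (B) -> (B,1), broadcasts over N
    x = r * torch.cos(theta) * torch.sin(phi)
    y = r * torch.sin(theta) * torch.sin(phi)
    z = r * torch.cos(phi)
    return torch.stack([x, y, z], dim=1)      # (B,3,N)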
def get_cpu_cores():
    """Get the model name of every CPU core.

    Returns:
        On success, a tuple:
            the first element is a list with one entry (the model name) per CPU core;
            the second element is the length of that list, i.e. the total number of cores.
        Returns None if no core information could be read.
    """
    cpu_cores = []
    with open('/proc/cpuinfo') as f:
        for line in f:
            info = line.strip()
            if info.startswith('model name'):
                model_name = info.split(':')[1].strip()
                cpu_cores.append(model_name)

    if cpu_cores:
        return cpu_cores, len(cpu_cores)
    return None
ad66faac3a956b1922173263415890bc543e0bba
8,673
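Usage is straightforward on any Linux host exposing /proc/cpuinfo:

result = get_cpu_cores()
if result is not None:
    models, count = result
    print("%d cores, first model: %s" % (count, models[0]))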
from typing import Union
from typing import Tuple
def itk_resample(image: sitk.Image, spacing: Union[float, Tuple[float, float, float]], *,
                 interpolation: str = "nearest", pad_value: int) -> sitk.Image:
    """
    Resample a SimpleITK image to a new spacing.

    Args:
        image: sitk image
        spacing: new spacing, either a scalar or a tuple of three scalars.
        interpolation: interpolation method, "linear", "nearest" or "cosine".
        pad_value: pad value for pixels that fall outside the original image.

    Returns:
        sitk.Image: the resampled image.
    """
    if check_scalar(spacing):
        spacing: Tuple[float, float, float] = (spacing, spacing, spacing)  # noqa
    ori_spacing = image.GetSpacing()
    ori_size = image.GetSize()
    # keep the physical extent constant: size scales inversely with spacing
    new_size = (round(ori_size[0] * (ori_spacing[0] / spacing[0])),
                round(ori_size[1] * (ori_spacing[1] / spacing[1])),
                round(ori_size[2] * (ori_spacing[2] / spacing[2])))
    interp = {"linear": sitk.sitkLinear, "nearest": sitk.sitkNearestNeighbor,
              "cosine": sitk.sitkCosineWindowedSinc}[interpolation]
    return sitk.Resample(image, new_size, sitk.Transform(), interp,
                         image.GetOrigin(), spacing, image.GetDirection(),
                         pad_value, image.GetPixelID())
37636c42e3f28c09dc0d3ef511c483eec0d3b3e2
8,674
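A minimal usage sketch; the file path is hypothetical, and sitk refers to the SimpleITK package the function above already relies on.

import SimpleITK as sitk

image = sitk.ReadImage("ct_volume.nii.gz")          # hypothetical input volume
resampled = itk_resample(image, spacing=(1.0, 1.0, 1.0),
                         interpolation="linear", pad_value=-1024)
print(resampled.GetSize(), resampled.GetSpacing())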
def gen_anchor_targets(
        anchors,
        image,
        bboxes,
        labels,
        num_classes,
        negative_overlap=0.4,
        positive_overlap=0.5
):
    """ Generate anchor targets for bbox detection.

    @author: Eli
    This is a version of anchor_targets_bbox that takes tensors for images, bboxes, and labels
    to play nice with tensorflow.

    Args
        anchors: np.array of annotations of shape (N, 4) for (x1, y1, x2, y2).
        image: a single image.
        bboxes: np.array(n, x1, y1, x2, y2)
        labels: np.array(n)
        num_classes: Number of classes to predict.
        negative_overlap: IoU overlap for negative anchors (all anchors with overlap < negative_overlap are negative).
        positive_overlap: IoU overlap for positive anchors (all anchors with overlap > positive_overlap are positive).

    Returns
        labels_target: batch that contains labels & anchor states (np.array of shape (batch_size, N, num_classes + 1),
                       where N is the number of anchors for an image and the last column defines the anchor state
                       (-1 for ignore, 0 for bg, 1 for fg).
        regression_target: batch that contains bounding-box regression targets for an image & anchor states
                       (np.array of shape (batch_size, N, 4 + 1), where N is the number of anchors for an image,
                       the first 4 columns define regression targets for (x1, y1, x2, y2) and the last column defines
                       anchor states (-1 for ignore, 0 for bg, 1 for fg).
    """
    regression_target = np.zeros(
        (anchors.shape[0], 4 + 1), dtype=np.float32)
    labels_target = np.zeros(
        (anchors.shape[0], num_classes + 1), dtype=np.float32)

    # compute labels and regression targets
    if bboxes.shape[0]:
        # obtain indices of ground truth annotations with the greatest overlap
        positive_indices, ignore_indices, argmax_overlaps_inds = utils.anchors.compute_gt_annotations(
            anchors, bboxes, negative_overlap, positive_overlap)

        labels_target[ignore_indices, -1] = -1
        labels_target[positive_indices, -1] = 1

        regression_target[ignore_indices, -1] = -1
        regression_target[positive_indices, -1] = 1

        # compute target class labels
        labels_target[positive_indices, labels
                      [argmax_overlaps_inds[positive_indices]].astype(int)] = 1

        regression_target[:, : -1] = utils.anchors.bbox_transform(
            anchors, bboxes[argmax_overlaps_inds, :])

    # ignore annotations outside of image
    anchors_centers = np.vstack(
        [(anchors[:, 0] + anchors[:, 2]) / 2,
         (anchors[:, 1] + anchors[:, 3]) / 2]).T

    outside_indices = np.logical_or(
        anchors_centers[:, 0] >= image.shape[1],
        anchors_centers[:, 1] >= image.shape[0])

    # -1 means ignore
    labels_target[outside_indices, -1] = -1
    regression_target[outside_indices, -1] = -1

    return regression_target, labels_target
6ac0b5602a6d3aa2d1905da09f457ca44193b02c
8,675
def parameters_to_weights(parameters: Parameters) -> Weights: """Convert parameters object to NumPy weights.""" return [bytes_to_ndarray(tensor) for tensor in parameters.tensors]
e235fee46ad9ffcc31eea86f2491a9ac305d3ac5
8,676
from multiprocessing import Pool
def get_iou(data_list, class_num, save_path=None):
    """
    Compute per-class and mean IoU from a list of (ground truth, prediction) pairs.

    Args:
        data_list: a list whose elements are [gt, output] pairs
        class_num: the number of classes
        save_path: optional path; if given, the results are also written to this file
    """
    ConfM = ConfusionMatrix(class_num)
    f = ConfM.generateM
    pool = Pool()
    m_list = pool.map(f, data_list)
    pool.close()
    pool.join()

    for m in m_list:
        ConfM.addM(m)

    aveJ, j_list, M = ConfM.jaccard()
    # print(j_list)
    # print(M)
    # print('meanIOU: ' + str(aveJ) + '\n')

    if save_path:
        with open(save_path, 'w') as f:
            f.write('meanIOU: ' + str(aveJ) + '\n')
            f.write(str(j_list) + '\n')
            f.write(str(M) + '\n')
    return aveJ, j_list
cce5b270a34700eed592e9a47d0c56a8b43027ff
8,677
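A hedged usage sketch: each element of data_list is a [gt, prediction] pair of label arrays, matching what ConfusionMatrix.generateM in the same module is assumed to expect; the shapes and class count below are made up.

import numpy as np

gt = np.random.randint(0, 21, size=(256, 256))     # fake ground-truth labels
pred = np.random.randint(0, 21, size=(256, 256))   # fake predictions
data_list = [[gt.flatten(), pred.flatten()]]

mean_iou, per_class_iou = get_iou(data_list, class_num=21, save_path="iou.txt")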
import torch def hook_modules(module): """ Temporarily adds the hooks to a `nn.Module` for tracing """ hooks = [] def register_submodule_tracer(module): def _submodule_pre_tracer(module, input): log.debug(f'pre tracer in _submodule_pre_tracer in {type(module).__name__}') lock(True) def _submodule_tracer(module, inputs, outputs): log.debug(f'tracer in _submodule_tracer in {type(module).__name__}') lock(False) node = TraceNode(module) add_forward_node(node, inputs, outputs) module_unique_name = current_graph().module_unique_name_dict[id(module)] if module_unique_name in current_graph().traced_modules: log.debug(f"module {module_unique_name} is traced") return None related = False if id(module) in module_constructor_traced: if id(module) in module_constructor_lines: related = True else: if type(module) in overridable_modules: related = True else: for m in overridable_modules: if isinstance(module, m): related = True break if related: hooks.append(module.register_forward_pre_hook(_submodule_pre_tracer)) hooks.append(module.register_forward_hook(_submodule_tracer)) current_graph().traced_modules.append(module_unique_name) return None def _model_pre_tracer(module, inputs): log.debug('pre tracer in _model_pre_tracer') for i in inputs: node = TraceNode(TraceFunction("input")) add_input_node(node, i) def _model_tracer(module, inputs, outputs): log.debug('tracer in _model_tracer') if type(outputs) == torch.Tensor: node = TraceNode(TraceFunction("output")) add_output_node(node, outputs) elif type(outputs) in (list, tuple): for i in outputs: if type(i) == torch.Tensor or (type(i) in (list, tuple) and all((type(x) == torch.Tensor for x in i))): node = TraceNode(TraceFunction("output")) add_output_node(node, i) else: log.warning( "Only tensors or list, tuple of tensors are supported when nested in a class, dict, list or tuple") elif type(outputs) == dict: for k, v in outputs.items(): if type(v) == torch.Tensor or (type(v) in (list, tuple) and all((type(x) == torch.Tensor for x in v))): node = TraceNode(TraceFunction("output")) add_output_node(node, v) else: log.warning( "Only tensors or list, tuple of tensors are supported when nested in a class, dict, list or tuple") else: log.warning(f'Output type is not supported: {type(outputs).__name__}, try to extract tensors from it') for k in outputs.__dir__(): v = getattr(outputs, k) if type(v) == torch.Tensor or (type(v) in (list, tuple) and all((type(x) == torch.Tensor for x in v))): node = TraceNode(TraceFunction("output")) add_output_node(node, v) log.debug('trace: apply register_submodule_tracer') module.apply(register_submodule_tracer) log.debug('trace: add hooks') hooks.append(module.register_forward_pre_hook(_model_pre_tracer)) hooks.append(module.register_forward_hook(_model_tracer)) yield module for hook in hooks: hook.remove()
1a165b4a49f3179485811eced8b114e0b0a6da8b
8,678
def bridge_forward_delay(brname): """Read a bridge device's forward delay timer. :returns ``int``: Bridge forward delay timer. :raises: OSError, IOError (ENOENT) if the device doesn't exist. """ return int(_get_dev_attr(brname, 'bridge/forward_delay'))
ba164ba85f1e1e3c5f82e28f38413cb8ca9e5090
8,679
def RF(X, y, X_ind, y_ind, is_reg=False): """Cross Validation and independent set test for Random Forest model Arguments: X (ndarray): Feature data of training and validation set for cross-validation. m X n matrix, m is the No. of samples, n is the No. of fetures y (ndarray): Label data of training and validation set for cross-validation. m-D vector, and m is the No. of samples. X_ind (ndarray): Feature data of independent test set for independent test. It has the similar data structure as X. y_ind (ndarray): Feature data of independent set for for independent test. It has the similar data structure as y out (str): The file path for saving the result data. is_reg (bool, optional): define the model for regression (True) or classification (False) (Default: False) Returns: cvs (ndarray): cross-validation results. The shape is (m, ), m is the No. of samples. inds (ndarray): independent test results. It has similar data structure as cvs. """ if is_reg: folds = KFold(5).split(X) alg = RandomForestRegressor else: folds = StratifiedKFold(5).split(X, y) alg = RandomForestClassifier cvs = np.zeros(y.shape) inds = np.zeros(y_ind.shape) for i, (trained, valided) in enumerate(folds): model = alg(n_estimators=500, n_jobs=1) model.fit(X[trained], y[trained]) if is_reg: cvs[valided] = model.predict(X[valided]) inds += model.predict(X_ind) else: cvs[valided] = model.predict_proba(X[valided])[:, 1] inds += model.predict_proba(X_ind)[:, 1] return cvs, inds / 5
c8ab9aa7cf6bbe159be172cdea82bc970b896914
8,680
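A quick sketch with synthetic data; shapes follow the docstring above (X: samples x features, y: labels), and the sizes are arbitrary.

import numpy as np

X = np.random.rand(200, 10)
y = np.random.randint(0, 2, size=200)        # binary labels for classification
X_ind = np.random.rand(50, 10)
y_ind = np.random.randint(0, 2, size=50)

cvs, inds = RF(X, y, X_ind, y_ind, is_reg=False)
print(cvs.shape, inds.shape)                 # (200,), (50,)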
import struct def keystring2list(s): """convert a string of keys to a list of keys.""" if len(s) == 0: return [] keys = [] i = 0 while i < len(s): keylength = struct.unpack(data.MESSAGE_KEY_LENGTH_FORMAT, s[i:i + data.MESSAGE_KEY_LENGTH_SIZE])[0] i += data.MESSAGE_KEY_LENGTH_SIZE key = s[i:i + keylength] keys.append(key) i += keylength return keys
b580d4062be1f5e99f5264aeb5c0a7e4cb70bbd2
8,681
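For context, a hedged sketch of the inverse operation; the length format is assumed here to be a 4-byte unsigned int, whereas the real values live in the module's data constants.

import struct

def list2keystring(keys, length_format="!I"):
    """Serialize a list of byte-string keys in the framing keystring2list expects."""
    parts = []
    for key in keys:
        parts.append(struct.pack(length_format, len(key)))
        parts.append(key)
    return b"".join(parts)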
def binary_fmt(num, suffix='B'): """A binary pretty-printer.""" if num == 0.0: return '0 %s' % suffix for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']: if abs(num) < 1024.0: return '%.3g %s%s' % (num, unit, suffix) num /= 1024.0 return '%.3g %s%s' % (num, 'Yi', suffix)
70ae3ee429dd80e8d9cb3a1a3c6eeba09f7ea77a
8,682
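Example outputs, computed directly from the function above:

print(binary_fmt(0))             # '0 B'
print(binary_fmt(1536))          # '1.5 KiB'
print(binary_fmt(10**6))         # '977 KiB'
print(binary_fmt(-3 * 1024**3))  # '-3 GiB'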
def transformMatrices( translation = (0,0,0), center = (0,0,0), rotation = (0,1,0,0), scale = (1,1,1), scaleOrientation = (0,1,0,0), parentMatrix = None, ): """Calculate both forward and backward matrices for these parameters""" T,T1 = transMatrix( translation ) C,C1 = transMatrix( center ) R,R1 = rotMatrix( rotation ) SO,SO1 = rotMatrix( scaleOrientation ) S,S1 = scaleMatrix( scale ) return ( compressMatrices( parentMatrix, T,C,R,SO,S,SO1,C1 ), compressMatrices( parentMatrix, C,SO, S1, SO1, R1, C1, T1) )
0d1ae564b0e27000ce1c8d0a3e5aa04ec02fa19f
8,683
async def get_region(country=None, id=None): """ `linode_region` provides details about a specific Linode region. """ __args__ = dict() __args__['country'] = country __args__['id'] = id __ret__ = await pulumi.runtime.invoke('linode:index/getRegion:getRegion', __args__) return GetRegionResult( country=__ret__.get('country'))
09bd2e83496b6a38d477a24e3bc70a72a8bea8a7
8,684
def entry_type(entry, default):
    """Return the type of an entry, falling back to `default` when no attribute is set."""
    if entry.attribute is None:
        return default
    return entry.attribute.get('entry_type', default)
04825e225e86bbb98808d0d18633032c022e4870
8,685
def build_expression(backend, arrays, expr): """Build an expression, based on ``expr`` and initial arrays ``arrays``, that evaluates using backend ``backend``. """ return CONVERT_BACKENDS[backend](arrays, expr)
da10481741b2dae18e47a7b203dc548cc6d78a0e
8,686
import argparse
def build_parser(args):
    """Build the argparse parser and parse `args` (None falls back to sys.argv)."""
    parser = argparse.ArgumentParser(usage='$ python verdict.py',
                                     description='''Downloads, filters and re-publishes the Google sheet.''',
                                     epilog='')
    parser.add_argument("-v", "--verbose", dest="verbose", default=False, action="store_true")
    return parser.parse_args(args)
e28ee8a0e9ebbb614802f4cb59b7064138a87fa7
8,687
from mlalchemy.parser import parse_query as mlalchemy_parse_query from typing import OrderedDict def parse_query(qd, session, config): """Parses the given query dictionary to produce a BaseQuery object.""" defaults = { "limit": config["default_limit"], "backref_limit": config["default_backref_limit"], "backref_depth": config["default_backref_depth"], "join_depth": config["default_join_depth"], "exclude": [], "include": [], } qd.setdefault("limit", defaults["limit"]) full_qd = merge_dicts(defaults, qd) if qd["limit"] in (None, False): qd.pop("limit") if isinstance(full_qd["exclude"], str): full_qd["exclude"] = [full_qd["exclude"]] full_qd["exclude"] = list(set(full_qd["exclude"] + config["global_exclude"])) if isinstance(full_qd["include"], str): full_qd["include"] = [full_qd["include"]] mlquery = mlalchemy_parse_query(qd) query = mlquery.to_query(session, session.bind._db.models) order_by = full_qd.pop("order-by", None) if order_by: full_qd["order_by"] = order_by qd_key_sort = [ "from", "where", "order_by", "offset", "limit", "backref_limit", "backref_depth", "join_depth", "exclude", "include", ] if full_qd["include"]: full_qd["join_depth"] = full_qd["backref_depth"] = None else: full_qd["join_depth"] = full_qd["join_depth"] or 0 full_qd["backref_depth"] = full_qd["backref_depth"] or 0 query.query_dict = OrderedDict( sorted(full_qd.items(), key=lambda x: qd_key_sort.index(x[0])) ) query = query.with_loaded_relations( full_qd["join_depth"], full_qd["backref_depth"], full_qd["exclude"], full_qd["include"], ) query = mlquery.apply_filters(query) query.session.parsed_query = query return query
17001d60365451375939fd902a6720b4d5889a7c
8,688
from typing import Dict
def get_covid19_us_bears(
        url_root=CSV_URL_ROOT,
        file_prefix=CSV_FILE_PREFIX,
        file_suffix=CSV_FILE_SUFFIX,
        encoding=CSV_ENCODING) -> Dict[str, Dict[str, Bears]]:
    """Converts USAFACTS confirmed and deaths CSV files to state and county
    `Bears` in a dictionary of dictionaries.

    Args:
        url_root (str): URL prefix for the CSV
        file_prefix (str): CSV file prefix
        file_suffix (str): CSV file suffix
        encoding (str): CSV encoding

    Returns:
        Dict[str, Dict[str, Bears]]: ::

            {'confirmed': {'counties': Bears, 'states': Bears},
             'deaths': {'counties': Bears, 'states': Bears}}
    """
    covid19 = {'confirmed': {'counties': None, 'states': None},
               'deaths': {'counties': None, 'states': None}}
    for db_type in ['confirmed', 'deaths']:
        covid19[db_type]['counties'] = Usafacts(
            from_csv=True,
            csv_specs=CsvSpecs(
                url=stitch_time_series_csv_url(
                    db_type=db_type, url_root=url_root,
                    file_prefix=file_prefix, file_suffix=file_suffix),
                uid_col_label=CSV_COL_UID,
                encoding=encoding))
    for db_type in ['confirmed', 'deaths']:
        counties = covid19[db_type]['counties']
        covid19[db_type]['states'] = Usafacts(
            dataframe=counties2states_df(counties.df, counties.datetime_index))
    return covid19
2cdc1b3112cde9d589388666484cf17a0f6055af
8,689
from typing import Optional from typing import Union from typing import Tuple import json def jsonify_promise( future_obj: Input[Jsonable], indent: Input[Optional[Union[int, str]]]=None, separators: Input[Optional[Tuple[str, str]]]=None ) -> Output[str]: """Convert a Promise object to a Promise to jsonify the result of that Promise. An asyncronous (Promise) version of json.dumps() that operates on Pulumi output values that have not yet been evaluated. Sorts keys to provide stability of result strings. The result is another Pulumi output value that when evaluated will generate the json string associated with future_obj Args: future_obj(Input[Jsonable]): A Pulumi Input Jsonable value that is not yet evaluated Returns: Output[str] A Pulumi "output" value that will resolve to the json string corresponding to future_obj """ def gen_json( obj: Jsonable, indent: Optional[Union[int, str]], separators: Optional[Tuple[str, str]] ) -> str: return json.dumps(obj, sort_keys=True, indent=indent, separators=separators) # "pulumi.Output.all(*future_args).apply(lambda args: sync_func(*args))"" is a pattern # provided by pulumi. It waits until all promises in future_args have been satisfied, # then invokes sync_func with the realized values of all the future_args as *args. Finally # it wraps the synchronous function as a promise and returns the new promise as the result. # this allows you to write synchronous code in pulumi that depends on future values, and # turn it into asynchronous code result = Output.all(future_obj, indent, separators).apply(lambda args: gen_json(*args)) # type: ignore[arg-type] return result
bc0769d6897c771c4a04b76ace11b90c13bde844
8,690
def randnums(start, stop, n_samples):
    """Return an array of `n_samples` random integers drawn uniformly from
    [start, stop] (inclusive); used to select real samples and generate fake samples.
    """
    ix = []
    for i in range(n_samples):
        ix.append(randint(start, stop))
    ix = np.array(ix)
    return ix
da2e06527e56e9a971a904fee176428bef2b536a
8,691
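The function above assumes `randint` from the standard random module (inclusive bounds). A vectorised NumPy equivalent is shown for comparison; note that np.random.randint excludes its upper bound, hence the +1.

import numpy as np

ix = randnums(0, 99, n_samples=16)             # 16 indices in [0, 99]
ix_np = np.random.randint(0, 99 + 1, size=16)  # same distribution, vectorised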
def shift_1_spectra(spectra, shift):
    """
    Apply a linear phase shift to a single spectrum.

    The analytic signal of the spectrum (via the Hilbert transform) is multiplied by
    exp(j * x * shift); the real part is kept and rescaled so its maximum matches the
    input spectrum's maximum.

    Args:
        :param spectra: OCT spectrum of a mirror.
        :type spectra: list
        :param shift: phase slope applied per sample.

    Return:
        :rname: shifted_spectra: the phase-shifted spectrum.
        :rtype: list
    """
    L = len(spectra)
    mean = np.max(spectra)
    x = np.arange(L)
    j = complex(0, 1)

    shifted_spectra = np.real(hilbert(spectra) * np.exp(j * x * shift))
    shift_mean = np.max(shifted_spectra)
    shifted_spectra = (shifted_spectra / shift_mean) * mean

    return shifted_spectra
b76616a064da9eefb9199088ffba50950c9f160a
8,692
import pandas import types def hpat_pandas_series_div(self, other, level=None, fill_value=None, axis=0): """ Pandas Series method :meth:`pandas.Series.div` and :meth:`pandas.Series.truediv` implementation. .. only:: developer Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5 Parameters ---------- self: :class:`pandas.Series` input arg other: :obj:`pandas.Series`, :obj:`int` or :obj:`float` input arg level: :obj:`int` or name *unsupported* fill_value: :obj:`float` or None, default None *unsupported* axis: default 0 *unsupported* Returns ------- :obj:`pandas.Series` returns :obj:`pandas.Series` object """ _func_name = 'Method div() or truediv().' if not isinstance(self, SeriesType): raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self)) if level is not None or fill_value is not None or axis != 0: raise TypingError( '{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value, axis)) if isinstance(other, SeriesType): def hpat_pandas_series_div_impl(self, other): """ Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5 """ return pandas.Series(self._data / other._data) return hpat_pandas_series_div_impl if isinstance(other, types.Integer) or isinstance(other, types.Float): def hpat_pandas_series_div_number_impl(self, other): """ Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_integer_scalar Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_float_scalar """ return pandas.Series(self._data / other) return hpat_pandas_series_div_number_impl raise TypingError('{} The object must be a pandas.series or scalar. Given other: {}'.format(_func_name, other))
25fdfed169738ee0a7d1faabba7b52217736cbe9
8,693
def alias(self, arg):
    """ set the new alias to magic
    *alias alias1 string*
    alias1 is added into magic command
    """
    if arg == '' or arg.lower() == 'help':
        return dbhelp(self, 'alias')
    name, fstring = arg.split(" ", 1)
    print("new alias: %s <%s>" % (DBPRINT.msg_green(name), fstring))
    __alias_table__[name] = fstring
    func, params = fstring.split(" ", 1)

    def afunc(self, arg):
        """replacing func"""
        DBPRINT.print_blue(fstring)
        IP.magic("%%%s" % fstring)
    IP.expose_magic(name, afunc)
aaf4c3d72b740888b2282258b6138c80827e8665
8,694
def _transform_cat_options(metadata: dict) -> pd.DataFrame: """Transform category options metadata into a formatted DataFrame.""" df = pd.DataFrame.from_dict(metadata.get("categoryOptions")) df = df[["id", "code", "shortName", "name"]] df.columns = ["co_uid", "co_code", "co_shortname", "co_name"] return df
b1e9ac9ac578c8c0253ee7a0ece58a090d134385
8,695
def idaview(request, idadb, idadf): """ IdaDataFrame fixture to be used for the whole testing session. Open a view based on idadf fixture. """ def fin(): try: idadb.drop_view("TEST_VIEW_ibmdbpy") idadb.commit() except: pass request.addfinalizer(fin) if idadb.exists_view("TEST_VIEW_ibmdbpy"): idadb.drop_view("TEST_VIEW_ibmdbpy") idadb._create_view(idadf, "TEST_VIEW_ibmdbpy") return ibmdbpy.IdaDataFrame(idadb, "TEST_VIEW_ibmdbpy")
6540f4e844b8709b4b8338b15aa913e3ed67d4da
8,696
import sys import os def get_openmp_flag(compiler): """Returns list of flags for using OpenMP depending on compiler and platform. Parameters ---------- compiler : numpy.distutils.compiler Compiler used when invoking setup.py build """ if hasattr(compiler, 'compiler'): compiler = compiler.compiler[0] else: compiler = compiler.__class__.__name__ if sys.platform == "win32" and ('icc' in compiler or 'icl' in compiler): return ['/Qopenmp'] elif sys.platform == "win32": return ['/openmp'] elif sys.platform in ("darwin", "linux") and "icc" in compiler: return ['-qopenmp'] elif sys.platform == "darwin" and 'openmp' in os.getenv('CPPFLAGS', ''): return ['-openmp'] # Default flag for GCC and clang: return ['-fopenmp']
f8dd4ad0cda24f517746828a6d879abd6d6ced4d
8,697
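A sketch of how such a helper is commonly wired into a build, assuming a setuptools build_ext subclass; the class name and the extension list it operates on are placeholders.

from setuptools.command.build_ext import build_ext

class build_ext_openmp(build_ext):
    def build_extensions(self):
        openmp_flag = get_openmp_flag(self.compiler)
        for ext in self.extensions:
            ext.extra_compile_args += openmp_flag
            ext.extra_link_args += openmp_flag
        super().build_extensions()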
def heuristical_lengths(items):
    """
    heuristical_lengths tries to derive the lengths of the content of items.
    It always returns a list.
      a) If typeof(items) is a string, it'll return [len(items)]
      b) If typeof(items) is a dict, it'll return [len(items)]
      c) If typeof(items) is either list or tuple, it'll best case try to iterate
        over each element and record those lengths and return them all flattened.
        If it can't retrieve the lengths yet len(items) > 0, then it will return [len(items)]
      d) If items has the '__len__' attribute, it'll return [len(items)]
      e) If items is otherwise iterable, it'll recurse over each element and return
        the flattened lengths
      f) Otherwise if it can't derive the type, it'll return []
    """
    if items is None:
        return []
    elif isinstance(items, str):
        return [len(items)]
    elif isinstance(items, dict):
        return [len(items)]
    elif isinstance(items, tuple) or isinstance(items, list):
        lengths = []
        for item in items:
            i_lengths = heuristical_lengths(item)
            lengths.extend(i_lengths)
        # In the best case, if len(lengths) == 0
        # yet len(items) > 0, just use len(items)
        if len(lengths) == 0 and len(items) > 0:
            lengths = [len(items)]
        return lengths
    elif hasattr(items, '__len__'):
        return [len(items)]
    elif hasattr(items, '__iter__'):
        lengths = []
        itr = iter(items)
        for it in itr:
            it_lengths = heuristical_lengths(it)
            lengths.extend(it_lengths)
        return lengths
    else:
        return []
94a0759bcdc2e57431e8524f164a51f2091b6e61
8,698
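Example outputs, computed directly from the function above:

print(heuristical_lengths("abc"))                 # [3]
print(heuristical_lengths({"a": 1, "b": 2}))      # [2]
print(heuristical_lengths([["ab", "c"], "def"]))  # [2, 1, 3]
print(heuristical_lengths(None))                  # []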
import subprocess def tifpages(file_id, filename, db_cursor): """ Check if TIF has multiple pages """ p = subprocess.Popen(['identify', '-format', '%n\\n', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = p.communicate() try: if int(len(out.split())) == 1: pages_vals = 0 no_pages = str(int(len(out.split()))) + " page" else: pages_vals = 1 no_pages = str(int(len(out.split()))) + " pages" except Exception as e: no_pages = "Unknown ({})".format(e) pages_vals = 1 db_cursor.execute(queries.file_check, {'file_id': file_id, 'file_check': 'tifpages', 'check_results': pages_vals, 'check_info': no_pages}) db_cursor.execute(queries.insert_log, {'project_id': settings.project_id, 'file_id': file_id, 'log_area': 'tifpages', 'log_text': db_cursor.query.decode("utf-8")}) return True
f126370866b421b120b56e8b6e84087d617e31c8
8,699