content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import pyarrow as pa


def load_arrow(file) -> pa.Table:
    """Load batched data written to file back out into a table again

    Example
    -------
    >>> t = pa.Table.from_pandas(df)  # doctest: +SKIP
    >>> with open("myfile", mode="wb") as f:  # doctest: +SKIP
    ...     for batch in t.to_batches():  # doctest: +SKIP
    ...         dump_batch(batch, f, schema=t.schema)  # doctest: +SKIP

    >>> with open("myfile", mode="rb") as f:  # doctest: +SKIP
    ...     t = load_arrow(f)  # doctest: +SKIP

    See Also
    --------
    dump_batch
    """
    try:
        sr = pa.RecordBatchStreamReader(file)
        return sr.read_all()
    except Exception as e:
        # Keep the EOFError contract callers depend on, but chain the
        # underlying pyarrow error instead of silently discarding it.
        raise EOFError from e
d599caff58d48efefa9b4367ec9844a538081b1e
44,889
def random_min_var(pbcids, actual_votes, xs, nonsample_sizes):
    """
    Choose the county to audit by choosing the county that minimizes the
    variance for the estimated number of final votes for A in that county.

    This formula uses the normal approximation as seen in Rivest's
    estimation audit paper: in particular, we estimate the variance of
    n_a, using the formula (n^2*p_a*q_a) / (s-1).
    """
    best_pbcid, best_var = None, None
    for pbcid in actual_votes:
        # First candidate key is used as "candidate A" for the variance formula.
        candidate = list(actual_votes[pbcid].keys())[0]
        # Total ballots actually sampled so far in this county.
        actual_sample_size = sum([actual_votes[pbcid][k] for k in actual_votes[pbcid]])
        new_unsampled = nonsample_sizes[pbcid] - xs[pbcid]
        # Is this reasonable? Currently not being triggered, since all counties have
        # 40 ballots sampled to start with.
        if actual_sample_size <= 1:
            return pbcid
        stratum_size = new_unsampled + actual_sample_size + xs[pbcid]
        frac_vote = float(actual_votes[pbcid][candidate]) / actual_sample_size
        # Extend fractional vote linearly
        var_before = (stratum_size**2 * frac_vote * (1.-frac_vote)) / (actual_sample_size + xs[pbcid] - 1)
        updated_sample_size = actual_sample_size + 1
        # NOTE(review): var_after reuses the same frac_vote as var_before, so the
        # difference reflects only the +1 in the denominator — presumably the
        # intended "gain from one more sample"; confirm against the paper.
        var_after = (stratum_size**2 * frac_vote * (1.-frac_vote)) / (updated_sample_size + xs[pbcid] - 1)
        # Pick the county whose variance would drop the most (most negative delta).
        if best_pbcid is None or (var_after - var_before) < best_var:
            best_pbcid = pbcid
            best_var = (var_after - var_before)
    return best_pbcid
4af8c280137d4d2d608710d5879ff9448e802067
44,890
import torch
import math


def count_qubits_gate_matrix(gate: torch.Tensor):
    """Return the number of qubits a gate matrix acts on.

    A gate matrix has shape (*optional_batch_dims, 2^k, 2^k); this returns k.
    Raises RuntimeError when the trailing dimension is not a power of two.
    """
    dim = gate.size()[-1]
    qubits = round(math.log2(dim))
    # Guard against sizes that are not an exact power of two.
    if 2 ** qubits != dim:
        raise RuntimeError(f'Given gate matrix has size {gate.size()} which '
                           f'is not consistent with any number of qubits.')
    return qubits
def1082c7e598589cf09aa3dfe67ce6598a93990
44,891
from typing import List
from typing import Tuple


def logits_to_span_score(start_logits, end_logits, context_length, max_span_length=30):
    """
    This is batched operation that convert start logits and end logits to a
    list of scored spans. Each example of the two logits is truncated to its
    own context length.

    :param start_logits: # [B, T] tensor of span-start scores
    :param end_logits: # [B, T] tensor of span-end scores
    :param context_length: # [B] tensor of valid lengths per example
    :param max_span_length: maximum span width; None means effectively unlimited
    :return: per example, a list of (start_score, end_score, (start, end))
        tuples sorted by start_score + end_score, descending
    """
    batch_size = start_logits.size(0)
    # Move everything to plain Python lists once, up front.
    s_logits = start_logits.tolist()
    e_logits = end_logits.tolist()
    c_l = context_length.tolist()
    b_scored_span_list: List[List[Tuple[float, float, Tuple[int, int]]]] = []
    if max_span_length is None:
        # Sentinel large width: spans longer than any realistic context.
        max_span_length = 1000
    for b_i in range(batch_size):
        cur_l = c_l[b_i]
        # Only positions inside this example's context are candidates.
        cur_s_logits = s_logits[b_i][:cur_l]
        cur_e_logits = e_logits[b_i][:cur_l]
        cur_span_list: List[Tuple[float, float, Tuple[int, int]]] = []
        for s_i in range(len(cur_s_logits)):
            # End index ranges over [s_i, s_i + max_span_length), clipped to context.
            for e_i in range(s_i, min(len(cur_e_logits), s_i + max_span_length)):
                s_score = cur_s_logits[s_i]
                e_score = cur_e_logits[e_i]
                assert s_i <= e_i
                cur_span_list.append((s_score, e_score, (s_i, e_i)))
        # Rank spans by total score; ties keep enumeration order (sorted is stable).
        cur_span_list = sorted(cur_span_list, key=lambda x: x[0] + x[1], reverse=True)  # Start + End logits
        b_scored_span_list.append(cur_span_list)
    return b_scored_span_list
55216cd67933d8d6150097dd62e0a209958dd243
44,893
def greatest_common_superclass(*types):
    """
    Finds the greatest common superclass of the given *types.

    Returns None if the types are unrelated (i.e. share only ``object``).

    Args:
        *types (type):

    Returns:
        type or None
    """
    if len(types) == 1:
        return types[0]
    first_mro, *other_mros = (t.__mro__ for t in types)
    # The first MRO is walked most-derived-first, so the first entry that
    # appears in every other MRO is the greatest common superclass.
    common = next(
        (cls for cls in first_mro if all(cls in mro for mro in other_mros)),
        None,
    )
    if common is None or common is object:
        return None
    return common
dedbb52f3927a8d53408c72073a7fb04a4c5435f
44,894
def most_affected_area(affected_areas_count):
    """Find most affected area and the number of hurricanes it was involved in."""
    # Default matches the historical behavior for an empty mapping.
    winner, winner_count = 'Central America', 0
    for name, hits in affected_areas_count.items():
        if hits > winner_count:
            winner, winner_count = name, hits
    return winner, winner_count
e7abbf76f2f7cf10183fefadca943bdac83c33be
44,895
import re


def capitalize(nom):
    """Return the proper capitalization for a region name."""
    nom = nom.lower()
    # Split on non-word characters, keeping the separators themselves.
    fragments = re.split(r"(\W)", nom)
    pieces = []
    for frag in fragments:
        if re.match(r"\W", frag):
            pieces.append(frag)
        elif frag:  # skip empty strings produced by the split
            pieces.append(frag[0].upper() + frag[1:])
    return "".join(pieces)
f5c21d308c6168a94cd769a3fa121374ddebf416
44,900
import numpy


def nNans(arr, column=0):
    """
    Count the number of occurrences of NaN in one column of a 2-d array.

    Parameters
    ----------
    arr : numpy.ndarray
        2-d array to inspect.
    column : int, optional
        Index of the column to scan (default 0).

    Returns
    -------
    int
        Number of NaN entries in that column.

    Raises
    ------
    RuntimeError
        If ``arr`` is not 2-dimensional.
    """
    if len(arr.shape) != 2:
        raise RuntimeError('nNans works only on 2-d arrays, this array had shape %s' % (str(arr.shape)))
    # NaN compares unequal to everything, including itself, so the original
    # `arr[:, column] == numpy.nan` was always all-False. isnan() is the
    # correct elementwise test.
    nans = numpy.isnan(arr[:, column])
    return int(numpy.count_nonzero(nans))
776d1a41e9cdf2ce008c58d29bc7c9f3d2a3de5a
44,901
import re


def word_list(raw):
    """
    Converts raw sentences to list of words.

    :param raw: sentence to be cleaned up
    :return: list of words
    """
    # Replace every non-letter with a space, then split on whitespace.
    letters_only = re.sub(r"[^a-zA-Z]", ' ', raw)
    return letters_only.split()
25bd3d04c4dca62cbe724da677e0da54f883ec4e
44,904
def FindPart(part: dict, mime_type: str):
    """
    Recursively parses the parts of an email and returns the first part with
    the requested mime_type.

    :param part: Part of the email to parse (generally called on the payload).
    :param mime_type: MIME Type to look for.
    :return: The part of the email with the matching type, or None.
    """
    if part['mimeType'] == mime_type:
        return part
    # Only multipart containers have children worth descending into.
    if 'multipart' not in part['mimeType']:
        return None
    for child in part['parts']:
        found = FindPart(child, mime_type)
        if found is not None:
            return found
    return None
ab557300860b6030acc5b851aa5bdb10ae2850cc
44,905
def shape_select(dict_sizes, max_size):
    """takes a dict in parameter, returns list of shapes\
 of blocker of at most max size"""
    # A None max_size matches nothing, mirroring the per-item guard.
    if max_size is None:
        return []
    return [item
            for size, items in dict_sizes.items()
            if size is not None and size <= max_size
            for item in items]
be209776c349df756ce37b34f6ea9bb819fc4313
44,906
from functools import reduce


def scalar_prod(scalars_list):
    """
    Return the product of the list of scalars given as input.

    An empty sequence yields 1, the multiplicative identity (the previous
    implementation raised TypeError from reduce in that case).

    Parameters
    ----------
    scalars_list : list[int|float|complex] | tuple[int|float|complex]

    Returns
    -------
    complex|float|int
    """
    # Supplying the initializer handles empty and single-element inputs
    # uniformly, so no special-casing is needed.
    return reduce(lambda x, y: x * y, scalars_list, 1)
b6ab3834e8e8eb175c8b3da92c042fd83458fbf3
44,907
def get_column_headers(df):
    """Return column headers for all count colums in dataframe."""
    # Substring match: any column whose name contains 'ct'.
    return [col for col in df.columns if 'ct' in col]
3e08ce7d11b2cd911cc412f780806c7301b085af
44,908
def strategy(history, memory):
    """
    Defect every few turns, based on the fibonacci sequence.
    i.e., defect turn 2 (1), turn 3 (1), turn 5 (2), turn 8 (3), turn 13 (5)
    """
    if memory is None:
        last_defection_turn, prev_fib, cur_fib = 0, 1, 1
    else:
        last_defection_turn, prev_fib, cur_fib = memory
    turn = history.shape[1]
    if turn == last_defection_turn + cur_fib:
        # Time to defect: record the turn and advance the fibonacci window.
        last_defection_turn = turn
        prev_fib, cur_fib = cur_fib, prev_fib + cur_fib
        choice = 0
    else:
        choice = 1
    return choice, (last_defection_turn, prev_fib, cur_fib)
009710f3fb9eb4c5802b3beb0239afe2de6acdfb
44,909
def trim_words(s, max_chars, separator=' '):
    """ Trim sentence at last word preceding max_chars """
    if not (max_chars and len(s) >= max_chars):
        return s
    head, _, tail = s[:max_chars].rpartition(separator)
    # When no separator is found, head is empty and the whole prefix is tail.
    trimmed = head if head else tail
    return trimmed + '...'
1ef9a79734d81c9ce72beba4251d41358a4f2bbe
44,910
def isDmzProxySecurityLevelValid( level ):
    """Indicates whether the supplied level is valid for secure proxy security."""
    # Membership test over the closed set of accepted levels.
    return level in ('high', 'medium', 'low')
ea00bd321e6e3aaf955cfbb017638cfbc93ce578
44,911
def create_cifar_filter_func(classes, get_val):
    """Create filter function that takes in the class label and filters if it
    is not one of classes. If get_val is True, subtracts 50 from class label
    first (since validation for CIFAR-50 is done on last 50 classes).
    """
    if get_val:
        def accepts(label):
            return (label - 50) in classes
    else:
        def accepts(label):
            return label in classes
    return accepts
db2cb98798636a76ad6515bc5bbd00507e6f9fa8
44,912
def slice_function_graph(function_graph, cfg_slice_to_sink):
    """
    Slice a function graph, keeping only the nodes present in the
    <CFGSliceToSink> representation.

    Because the <CFGSliceToSink> is built from the CFG, and the function graph
    is *NOT* a subgraph of the CFG, edges of the function graph will not be
    present in the <CFGSliceToSink> transitions. However, if there is an edge
    between two nodes in the function graph, then there must exist a path
    between these two nodes in the slice (the slice is backward and
    recursively constructed, so every predecessor of a kept node is kept).
    Removing only the nodes absent from the slice therefore yields a function
    graph representing (a higher view of) the flow in the slice.

    *Note* that this function mutates the graph passed as an argument.

    :param networkx.DiGraph function_graph: The graph to slice.
    :param CFGSliceToSink cfg_slice_to_sink: The representation of the slice,
        containing the data to update the CFG from.
    :return networkx.DiGraph: The sliced graph (the same, mutated object).
    """
    kept_addrs = cfg_slice_to_sink.nodes
    doomed = [node for node in function_graph.nodes()
              if node.addr not in kept_addrs]
    function_graph.remove_nodes_from(doomed)
    return function_graph
d88f6784ebae21e5a8d2132542daf662e8cb250f
44,913
from collections import Counter


def repeating_bits(n: list[str], index: int) -> tuple:
    """
    Find most and least repeating bit in given index.

    :param n: list of numbers as strings
    :param index: index of bit to find
    :return: tuple of (most common bit, least common bit)
    """
    counts = Counter(int(s[index]) for s in n)
    # Rank once instead of calling most_common() twice; also import the real
    # collections.Counter rather than the deprecated typing alias.
    ranked = counts.most_common()
    return ranked[0][0], ranked[-1][0]
b002c35d38b793c173a0449e4b89f8dd459afd7d
44,914
import csv


def load_asdp_ordering(orderfile):
    """
    Loads an ASDP ordering produced by JEWEL from a CSV file

    Parameters
    ----------
    orderfile: str
        path to order CSV file

    Returns
    -------
    ordering: list
        list of dicts containing entries for the following fields:
            - asdp_id
            - initial_sue
            - final_sue
            - initial_sue_per_byte
            - final_sue_per_byte
            - size_bytes
            - timestamp
    """
    with open(orderfile, 'r') as fh:
        rows = [dict(row) for row in csv.DictReader(fh)]
    return rows
430bdea030c6399e258dd49548a954069fe89851
44,915
def show_admin_tools(on=0):
    """Show Administrative Tools in the Control Panel.

    DESCRIPTION
        This entry lets you define whether the "Administrative Tools" applet
        is shown in the Control Panel.

    COMPATIBILITY
        Windows XP

    MODIFIED KEYS
        {D20EA4E1-3957-11d2-A40B-0C5020524153} : Must be deleted/created to
        hide/show the Administrative Tools.
    """
    key = ('HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows\\'
           'CurrentVersion\\Explorer\\ControlPanel\\NameSpace\\'
           '{D20EA4E1-3957-11d2-A40B-0C5020524153}')
    # A leading '-' in a .reg file marks the key for deletion.
    prefix = '' if on else '-'
    return '[' + prefix + key + ']'
1835fbcf93c21940e209a3b71bf91731395631a3
44,916
def funcname(fn, rep='-'):
    """Return the name of a function object with underscores replaced.

    :param fn: function whose name to extract
    :param rep: replacement string for underscores (default '-')
    """
    # `fn.func_name` was the Python 2 spelling and raises AttributeError on
    # Python 3; `__name__` works on both.
    return fn.__name__.replace('_', rep)
c82b678e82cba0d4ad4bc598c08079717469027f
44,917
import sys


def sol(arr, n, v):
    """
    Minimum number of coins from arr (first n entries) summing to v.

    If V == 0, then 0 coins required.
    If V > 0: minCoins(coins[0..m-1], V) = min {1 + minCoins(V - coin[i])}
    where i varies from 0 to m-1 and coin[i] <= V.
    Returns -1 when v cannot be formed.
    """
    INF = sys.maxsize
    # dp[a] = fewest coins summing to amount a; 1-d table suffices because we
    # only need counts, not the coins themselves.
    dp = [INF] * (v + 1)
    dp[0] = 0
    for amount in range(1, v + 1):
        for coin in arr[:n]:
            if coin <= amount and dp[amount - coin] != INF:
                dp[amount] = min(dp[amount], dp[amount - coin] + 1)
    return dp[v] if dp[v] != INF else -1
4df621e1821a89ec28601949b52fad568569ca17
44,919
def get_metric_BASE_T(map_dict, metric=None):
    """Look up the collection period for a metric in a parsed mapping.

    :param map_dict: Parsed mapping.json as a dict
    :param metric: meter-type name to look for; None returns None immediately
    :return: the period (int) whose 'meter_type' list contains `metric`,
        or None when not found
    :raises TypeError: when map_dict is not a dict
    """
    if not isinstance(map_dict, dict):
        # The original used a bare `raise` with no active exception, which
        # surfaces as an unhelpful RuntimeError; raise an explicit error.
        raise TypeError("map_dict must be a dict")
    if metric is None:
        return
    for period in map_dict['period_colls']:
        # Period keys are stored as strings in the mapping.
        metrics = map_dict.get(str(period))
        if not metrics:
            continue
        elif metric in metrics['meter_type']:
            return int(period)
    return
f7ba0da4b80db796ee6bfb0150a5783bcf1f31bc
44,921
def day_humidity_chart(qs_list, plot_func, width, height, colors):
    """
    Returns a URL which produces one line for each queryset.
    """
    # One plot line per queryset; missing records stay as None gaps.
    plot_list = [
        [rec.humidity if rec is not None else None for rec in date_qs]
        for date_qs in qs_list
    ]
    # Humidity is a percentage, hence the fixed 0..100 axis range.
    return plot_func(plot_list, 0, 100, width, height, colors, [4, 2, 2])
e84aa381b69fd9cf7bdf6a69db2b612268f00df3
44,922
def _to_mumps_number(v):
    """Given a value, attempt to coerce it to either an integer or float."""
    sign = 1
    ndec = 0  # number of decimal points seen so far
    try:
        # Fast path: the value is already a clean number (or numeric string).
        tmp = float(v)
        if tmp.is_integer():
            return int(tmp)
        else:
            return tmp
    except ValueError:
        v = str(v)
        n = []
        # Build a number based on the MUMPS numeric conversion rules
        for c in v:
            # Look for numeric characters (digits, decimal, or sign)
            if c.isnumeric() or c in ('.', '+', '-'):
                # Make sure we only add one decimal; a second '.' ends parsing
                if c == '.':
                    if ndec >= 1:
                        break
                    else:
                        ndec += 1
                # Correctly swap the sign
                if c == '-':
                    sign *= -1
                    continue
                # Ignore the plus signs
                if c == '+':
                    continue
                # If we made it this far, this is a valid numeric character
                n.append(c)
            else:
                # First non-numeric character ends the conversion (MUMPS rule).
                break
        # Re-assemble the digits and attempt to convert it.
        # NOTE(review): if no numeric characters were collected (e.g. "abc"),
        # float("") raises ValueError here — presumably callers guarantee a
        # leading digit; confirm against call sites.
        n = float("".join(n)) * sign
        return n if not n.is_integer() else int(n)
410246c14e55de56209e6b19c7a94102ca03893b
44,923
from re import sub


def unmodify(peptide):
    """Strip modification annotations from a peptide string.

    >>> from re import sub
    >>> sub(r'\[(\-|\d|\.)+\]', '', 'A[-12.34]B')
    'AB'
    """
    # Remove bracketed mass modifications such as "[-12.34]".
    peptide = sub(r'\[(\-|\d|\.)+\]', '', peptide)
    # Remove the flanking-residue prefix "X." / suffix ".X" and terminal
    # markers. NOTE(review): this also deletes EVERY lowercase 'n' or 'c'
    # anywhere in the string — presumably safe because residues are
    # uppercase and n/c only appear as terminus annotations; confirm.
    peptide = sub(r'^.\.|\..$|n|c', '', peptide)
    return peptide
7565d4c8f5b67ce138aeaea5d6d5c759768af618
44,924
def indent_lines(message):
    """Indent all lines except the first by 7 spaces.

    Trailing whitespace is stripped from the result.
    """
    # Make the 7-space contract from the docstring explicit in the code.
    return message.replace("\n", "\n" + " " * 7).rstrip()
2315eff60b737d7c418d73e2fdd97851a1db6ff1
44,925
def generate_shard_args(outfiles, num_examples):
    """Generate start and end indices per outfile."""
    num_shards = len(outfiles)
    per_shard = num_examples // num_shards
    starts = [shard * per_shard for shard in range(num_shards)]
    # Each shard ends where the next begins; the last one absorbs the
    # remainder by ending at num_examples.
    ends = starts[1:] + [num_examples]
    return zip(starts, ends, outfiles)
8f059536a8ab2e36c89e5f6a6fe8dc4d827a782d
44,927
import os


def get_signatures_with_results(vcs):
    """Returns the list of signatures for which test results are saved.

    Args:
        vcs (easyci.vcs.base.Vcs)

    Returns:
        List[str]
    """
    results_dir = os.path.join(vcs.private_dir(), 'results')
    if not os.path.exists(results_dir):
        return []
    # Only subdirectories count as saved results; stray files are ignored.
    return [entry for entry in os.listdir(results_dir)
            if os.path.isdir(os.path.join(results_dir, entry))]
e561dde58e9df5b540c2ea51b59472dbbefb4753
44,929
def find_biggest_pattern_in_patterns(dict):
    """
    dict: dictionary of translation vector->pattern

    Returns the biggest pattern and its corresponding translation vector.
    """
    # NOTE: the parameter shadows the builtin `dict`; the name is kept for
    # interface compatibility with existing callers.
    best_pattern, best_vector = None, None
    best_len = -1
    for vector, candidate in dict.items():
        if len(candidate) > best_len:
            best_len = len(candidate)
            best_vector = vector
            best_pattern = candidate
    return best_pattern, best_vector
34e658369145ccb30d25ba5d1d80085a61f68413
44,931
def _installable(args): """ Return True only if the args to pip install indicate something to install. >>> _installable(['inflect']) True >>> _installable(['-q']) False >>> _installable(['-q', 'inflect']) True >>> _installable(['-rfoo.txt']) True >>> _installable(['projects/inflect']) True >>> _installable(['~/projects/inflect']) True """ return any( not arg.startswith('-') or arg.startswith('-r') or arg.startswith('--requirement') for arg in args )
1a374c75fca3289f0f6f86321cc7b76eee4c7d3b
44,932
import random


def simulated_multi_experiment(design_thing, models_to_fit, response_model):
    """Simulate an experiment where we have one response model, but have
    multiple models which we do parameter estimation with and get designs
    from."""
    if response_model.θ_true is None:
        raise ValueError("response_model must have θ_true values set")
    n_models = len(models_to_fit)
    # Hard cap on trials; the loop normally exits earlier when no design remains.
    for trial in range(666):
        # get the design from a random model
        m = random.randint(0, n_models - 1)
        design = design_thing.get_next_design(models_to_fit[m])
        if design is None:
            break
        print(f"trial {trial}, design from model: {m}")
        # get response from response model
        response = response_model.simulate_y(design)
        design_thing.enter_trial_design_and_response(design, response)
        # update beliefs of all models (list comprehension used purely for
        # its side effects; the resulting list is discarded)
        [model.update_beliefs(design_thing.data) for model in models_to_fit]
    return models_to_fit, design_thing
e459db2a9dc99fd0f8e53430fd8c9289b75db63e
44,933
import os
import sys


def command_name():
    """Return a cleaned up name of this program."""
    program_path = sys.argv[0]
    # Strip any directory components, keeping only the executable name.
    return os.path.basename(program_path)
366484a0511366181b584c0c2eac6277381fcb52
44,935
def gen_none():
    """Trivially produce the null value: always returns None."""
    return None
c1d9e506e7e4af8a987e71875a20d4bac9dc338d
44,937
from typing import Counter


def sum_counters(counters):
    """Aggregate collections.Counter objects by summing counts.

    :param counters: list/tuple of counters to sum
    :return: aggregated counter with counts summed
    """
    total = Counter()
    for counter in counters:
        total = total + counter
    return total
c73f374639851548a4e11249ae7faf0dc1b80956
44,939
import re


def remove_special_chars(headline_list):
    """
    Returns list of headlines with all non-alphabetical characters removed.
    """
    # Compile once; spaces and ASCII letters are kept, everything else dropped.
    pattern = re.compile('[^ A-Za-z]+')
    return [pattern.sub("", headline) for headline in headline_list]
641a8fd8083771386ec83f5f5c27c38b6bbebf0e
44,940
import time
import calendar


def gen_date_list(begin_date, end_date):
    """Generates a list of dates of the form yyyymmdd from a begin date to end date
    Inputs:
      begin_date -- such as "20070101"
      end_date -- such as "20070103"
    Returns:
      date_list -- such as ["20070101","20070102","20070103"]
    """
    begin_tv = calendar.timegm(time.strptime(begin_date, "%Y%m%d"))
    end_tv = calendar.timegm(time.strptime(end_date, "%Y%m%d"))
    # Step a day (86400 s) at a time over UTC timestamps, inclusive of the end.
    return [time.strftime("%Y%m%d", time.gmtime(tv))
            for tv in range(begin_tv, end_tv + 86400, 86400)]
7663b5329e0a7ac65910c1b2df39205758c75c58
44,941
def n(indexes):
    """Return n ranking index sets in colexicographical order.

    >>> [n(ind) for ind in ((), (0,), (1,), (0, 1), (2,))]
    [0, 1, 2, 3, 4]
    """
    # Sum (not OR) preserves the original behavior for duplicate indexes.
    total = 0
    for i in indexes:
        total += 1 << i
    return total
b4317c17173df5bd164ad34e8e9761b6afcad9ba
44,943
def frames_to_time(frames, framerate):
    """Convert frame count to time (using framerate)."""
    seconds = frames / framerate
    return seconds
398ce55c09706c286a682c86452f10d9bd8e1140
44,944
def int2verilog(val, vw):
    """
    :param val: A signed integer to convert to a verilog literal.
    :param vw: The word length of the constant value.
    """
    # Verilog signed-decimal literal: [-]<width>'sd<magnitude>
    prefix = '-' if val < 0 else ''
    return f"{prefix}{vw}'sd{abs(val)}"
42e82203cb5cfd4015664e49b94091d027979d23
44,945
def difference(f_inverted_index, s_inverted_index) -> list:
    """
    List difference of two posting lists: entries of the first list with one
    occurrence removed per matching entry of the second list. (The original
    docstring mislabeled this as Operator "OR"; the implementation computes a
    difference, not a union.)

    :type f_inverted_index: list
    :type s_inverted_index: list
    :return: a new list; the inputs are not mutated
    """
    # Empty first list (covers both-empty too): nothing can remain.
    if not f_inverted_index:
        return []
    if not s_inverted_index:
        return f_inverted_index
    differences = f_inverted_index[:]
    for index in s_inverted_index:
        # Remove at most one occurrence per entry of the second list.
        if index in differences:
            differences.remove(index)
    return differences
fc8e5f840f6c09c93f8409624639d5c800b331d9
44,946
from pathlib import Path


def project_dir():
    """Return the root directory of the project."""
    this_file = Path(__file__).resolve()
    # parents[1] is two levels up from this script (script -> pkg -> root).
    return this_file.parents[1]
85b6d50222773ec5d67ac02493faa79357f81571
44,947
def get_index_names(df):
    """
    Get names from either single or multi-part index
    """
    # A named single index takes precedence; otherwise collect the
    # non-None names of a (possibly multi-part) index.
    if df.index.name is not None:
        return [df.index.name]
    return [name for name in df.index.names if name is not None]
143f7e86594d39ccb19ab1e4e36cd9933cb07304
44,949
import json


def parseinputquery(query):
    """
    Checks naively whether the input could be a MongoDB query-clause.
    If not, returns a MongoDB text search query-clause with the given input
    as the search term.
    """
    if isinstance(query, dict):
        return query
    parsed = None
    try:
        parsed = json.loads(query)
    except ValueError:
        # Not valid JSON: fall through to the text-search clause.
        pass
    if parsed is None:
        return {"$text": {"$search": query}}
    return parsed
cf537cc66189b4beea08127ad2e75d3d3b6acc75
44,951
import re


def sanitize_path(path):
    """Replace illegal path characters and spaces in path."""
    underscored = path.replace(" ", "_")
    # Keep only letters, digits, underscore, hyphen, slash, and dot.
    return "".join(re.findall(r"[a-zA-Z0-9_\-/\.]", underscored))
c74db40399524f6deedc023ca76e828fc3d4019e
44,952
def get_coordinates(region):
    """
    Define coordinates chr, start pos and end positions from region string
    chrX:start-end. Return coordinate list [chromosome, begin, end].
    """
    chromosome = region.split(":")[0]
    positions = region.split(":")[1].split("-")
    return [chromosome, int(positions[0]), int(positions[1])]
3e76420ad607d5dfb195992fb5466b118bd67bcd
44,953
def getSocketFamily(socket):
    """
    Return the family of the given socket.

    @param socket: The socket to get the family of.
    @type socket: L{socket.socket}

    @rtype: L{int}
    """
    family = socket.family
    return family
e67095574949dc12022676b2795010aff3a12446
44,956
def import_param_space(filename):
    """
    gets the variable param_space from a file without executing its __main__ section
    """
    # Accumulate the file's source up to (but excluding) the __main__ guard.
    content = ""
    with open(filename) as f:
        lines = f.readlines()
    for l in lines:
        if "if __name__ ==" in l:
            # beware: we assume that the __main__ execution block is the
            # last one in the file
            break
        content += l
    vars = {}
    # SECURITY: exec() runs arbitrary code from `filename` — only use on
    # trusted files. Raises KeyError if the file defines no `param_space`.
    exec(content, vars)
    return vars["param_space"]
2f40f7c400b911acfc49726ae508cfee434652b5
44,957
def rename_part(oEditor, oldname, newname):
    """
    Rename a part.

    Parameters
    ----------
    oEditor : pywin32 COMObject
        The HFSS editor in which the operation will be performed.
    oldname : str
        The name of the part to rename
    newname : str
        The new name to assign to the part

    Returns
    -------
    None
    """
    # HFSS expects a flat name/value array for the rename operation.
    params = [
        "Name:Rename Data",
        "Old Name:=", oldname,
        "New Name:=", newname,
    ]
    return oEditor.RenamePart(params)
cb255d54a11aa37a48958be651a93b3f80e3b85b
44,958
import requests
import json


def address(coordinate, key, poitype=None, radius=None):
    """Query AMap reverse geocoding for building and street information
    around the given coordinate."""
    url = "https://restapi.amap.com/v3/geocode/regeo?parameters"
    params = {
        "location": coordinate,   # longitude,latitude coordinate string
        "key": key,               # AMap web API key
        "radius": radius if radius else 100,  # search radius, default 100 m
        "poitype": poitype,       # POI type filter
        "extensions": "all"       # verbose result control
    }
    response = requests.get(url, params)
    return json.loads(response.text)
90e0b86ae9281d30593d1c8c1a51e9dc38a5861a
44,960
def get_shape_points2(cur, shape_id):
    """
    Given a shape_id, return its shape-sequence (as a dict of lists).
    get_shape_points function returns them as a list of dicts

    Parameters
    ----------
    cur: sqlite3.Cursor
        cursor to a GTFS database
    shape_id: str
        id of the route

    Returns
    -------
    shape_points: dict of lists
        dict contains keys 'seq', 'lat', 'lon', and 'd'(istance) of the shape
    """
    cur.execute('''SELECT seq, lat, lon, d FROM shapes where shape_id=? ORDER BY seq''', (shape_id,))
    shape_points = {'seqs': [], 'lats': [], 'lons': [], 'd': []}
    # Transpose the row stream into four parallel columns.
    for seq, lat, lon, dist in cur:
        shape_points['seqs'].append(seq)
        shape_points['lats'].append(lat)
        shape_points['lons'].append(lon)
        shape_points['d'].append(dist)
    return shape_points
7e7708a155f2f04510844565054fb32d0b770e1a
44,961
def get_matching_card(card_list, card_to_match):
    """
    This function returns the card that matches the one passed in
    (same value, different object), or None when no such card exists.
    """
    for candidate in card_list:
        # Same value but not the very same card object.
        if candidate.value == card_to_match.value and candidate != card_to_match:
            return candidate
    return None
7c67fa95465c131ef11703096c3bb8ac77531ed1
44,962
def generate_sorted(degree):
    """
    Generates list of numbers in ascending order: 0 .. 2**degree - 1.
    """
    return list(range(2 ** degree))
1dcf27801f4e253a87294dcb4aa742fdd12c525f
44,963
def indices(A):
    """
    Return a list containing all the indices for elements in A.

    >>> indices([6, 3, 2, 9, 10])
    [0, 1, 2, 3, 4]
    """
    # The doctest promises a list; returning a bare `range` (as before)
    # makes the doctest fail on Python 3 and surprises callers that index
    # or mutate the result.
    return list(range(len(A)))
da06e01da439144851b005fb22ec050932d8c5f0
44,964
def topic_word_set(topic):
    """topic_word_set. Takes a topic from an LDA model and returns a set of
    top words for the topic.

    Parameters
    ----------
    topic : (int, [ (str, float)])
        A topic from a LDA model. Input should be one element of the list
        returned by get_topics.
    """
    # topic[1] is the (word, weight) list; only the words are kept.
    return {word for word, _weight in topic[1]}
e9ca50712155769bc699af9634fb79986965d91d
44,967
from typing import List def _erase_elements_from(items: List, start_i: int): """ Erase from the given 'i' onward >>> _erase_elements_from([1, 2, 3], 0) [None, None, None] >>> _erase_elements_from([1, 2, 3], 1) [1, None, None] >>> _erase_elements_from([1, 2, 3], 2) [1, 2, None] >>> _erase_elements_from([1, 2, 3], 3) [1, 2, 3] """ items[start_i:] = [None] * (len(items) - start_i) # Return the list just for convenience in doctest. It's actually mutable. return items
5e3694272bca02dadbbf154a4132ea9dfdda8097
44,968
def get_ods_by_race_3rentan(xml):
    """Parse a trifecta (3連単) odds page and return the odds.

    Returns a list of dicts {'no': [first, second, third], 'ods': float},
    one per trifecta combination found in the page.
    """
    ods_3rentan = []
    tag_tables = xml.find_all('table', \
        attrs={'class': 'santanOddsHyo'})
    for tag_table in tag_tables:
        # Some pages wrap rows in <tbody>, some don't — handle both.
        tag_tbody = tag_table.find('tbody', recursive=False)
        if tag_tbody is None:
            tag_trs = tag_table.find_all('tr', recursive=False)
        else:
            tag_trs = tag_tbody.find_all('tr', recursive=False)
        uma1 = 0
        uma2 = []
        for i, tag_tr in enumerate(tag_trs):
            if i == 0:
                # Row 0: first-place horse number
                tag_th = tag_tr.find('th', attrs={'class': 'ubn2'})
                uma1 = int(tag_th.text)
            elif i == 1:
                # Row 1: candidate second-place horse numbers (one per column)
                tag_ths = tag_tr.find_all('th', \
                    attrs={'class': 'ubn2'})
                for tag_th in tag_ths:
                    uma2.append(int(tag_th.text))
            else:
                # Remaining rows: third-place horses with odds, one <td>
                # per second-place column (index j pairs with uma2[j]).
                tag_td_uma2s = tag_tr.find_all('td', \
                    attrs={'class': 'jikubetuOdds'})
                for j, tag_td_uma2 in enumerate(tag_td_uma2s):
                    tag_trs_uma3s = tag_td_uma2.find_all('tr')
                    for tag_tr_uma3 in tag_trs_uma3s:
                        tag_th_uma3 = tag_tr_uma3.find('th', \
                            attrs={'class': 'ubn3'})
                        uma3 = int(tag_th_uma3.text)
                        try:
                            tag_td_odds = tag_tr_uma3.find('td', \
                                attrs={'class': 'oddsData'})
                            ods = float(tag_td_odds.text)
                            uma_info = {
                                'no': [uma1, uma2[j], uma3],
                                'ods': ods
                            }
                            ods_3rentan.append(uma_info)
                        except:
                            # Best-effort scrape: cells without numeric odds
                            # (e.g. cancelled combinations) are skipped.
                            pass
    return ods_3rentan
f4cb6a5f370721432d561a0b200d12923e020edb
44,969
def mapping(n, start1, stop1, start2, stop2):
    """Linearly re-map *n* from the range [start1, stop1] to [start2, stop2].

    :param n: value to re-map
    :param start1: lower bound of the source range
    :param stop1: upper bound of the source range
    :param start2: lower bound of the target range
    :param stop2: upper bound of the target range
    :returns: the proportionally mapped value; not clamped, so values of *n*
        outside the source range extrapolate. Raises ZeroDivisionError when
        start1 == stop1.
    """
    return ((n - start1) / (stop1 - start1)) * (stop2 - start2) + start2
0910df41bcc986ac87c5d5d35597905118a91adc
44,970
def cyclic_rotation_2(nums: list, k: int) -> list:
    """
    Rotate nums right by k positions. This modifies the array nums in place
    (and also returns it).
    """
    size = len(nums)
    if size == 0:
        return nums
    shift = k % size
    if shift:
        # Slice-assign so the rotation happens in place.
        nums[:] = nums[-shift:] + nums[:-shift]
    return nums
306ab9bbf02067aeea6513accd346c56b1ba2c5f
44,971
import re


def hump_to_underline(hump_str):
    """
    Transfer hump (camelCase) to underline (snake_case).

    Args:
        hump_str(string): hump code string
    """
    # Insert '_' between a lowercase letter/digit and the following uppercase
    # letter, then lowercase everything.
    pattern = re.compile(r'([a-z]|\d)([A-Z])')
    return pattern.sub(r'\1_\2', hump_str).lower()
9f91ccfbc3dcac284e75a40de88f0e668f113fdf
44,972
import requests


def get_json(url, payload=None, verbose=True):
    """
    Get JSON from an online service with optional URL parameters.
    Prints the fetched URL when verbose, and the status code on non-200.
    """
    response = requests.get(url, params=payload)
    if verbose:
        print("Fetched: {}".format(response.url))
    if response.status_code != 200:
        print("Error: {}.".format(response.status_code))
    return response.json()
02ec9a3648946c8cc9999a61573378bbb12174c1
44,973
def unpack(s):
    """Convenience function to get a list as a string without the braces.

    Parameters
    ----------
    s : list
        The list to be turned into a string.

    Returns
    -------
    string
        The list as a string.
    """
    return " ".join(str(item) for item in s)
fa5933cb9f67ebff699ab5751f8b188f9764f693
44,974
def group_instance_count_json(obj):
    """
    Test lifecycle parser

    >>> group_instance_count_json(json.loads('{ "AutoScalingGroups": [ { "Instances": [ { "LifecycleState": "InService" } ] } ] }'))
    1
    """
    groups = obj['AutoScalingGroups']
    if not groups:
        return 0
    # Only the first group's instances are counted.
    return sum(1 for inst in groups[0]['Instances']
               if inst['LifecycleState'] == 'InService')
141c165f11d9582e50c5e35cf2b4925b4d382692
44,975
import timeit


def tree_complexity(tree, words):
    """Estimate complexity of lookup with trie.

    Returns the wall-clock seconds taken to search every word.
    """
    started = timeit.default_timer()
    for word in words:
        tree.search(word)
    finished = timeit.default_timer()
    return finished - started
cb3fbf63100854a50c783db43b64c8fcb6b41cc0
44,976
import re
import json


def read_jsonc(file: str) -> dict:
    """Read a JSON file that may contain C-style comments.

    Args:
        file (str): path to the JSON(C) file

    Returns:
        dict: the parsed JSON object
    """
    with open(file, encoding='utf-8') as fh:
        raw = fh.read()
    # Strip /* block */ and // line comments before parsing.
    stripped = re.sub(r'/\*[\s\S]*?\*/|//.*', '', raw)
    return json.loads(stripped)
1405783375dc7281a743f7d5bfd83734fa7e4c4c
44,977
def chi_squared(*choices):
    """Calculates the chi squared statistic over the given bandit choices.

    Each choice exposes `.plays` and `.rewards`; expected counts come from
    the pooled success rate across all choices.
    """
    def term(expected, observed):
        # max(expected, 1) guards against division by zero.
        return float((expected - observed) ** 2) / max(expected, 1)

    total_plays = max(sum([c.plays for c in choices]), 1)
    mean_success_rate = float(sum([c.rewards for c in choices])) / total_plays
    mean_failure_rate = 1 - mean_success_rate
    return sum([
        term(mean_success_rate * c.plays, c.rewards)
        + term(mean_failure_rate * c.plays, c.plays - c.rewards)
        for c in choices
    ])
c519a8fa9a05d669d18b11e802a47d847dac5f63
44,978
def getOutputsNames(net):
    """
    Get the names of the output layers (layers with unconnected outputs).

    :param net: an OpenCV dnn network.
    :return: list of layer-name strings.
    """
    # Get the names of all the layers in the network
    layersNames = net.getLayerNames()
    names = []
    for i in net.getUnconnectedOutLayers():
        # OpenCV >= 4.5.4 returns plain integer indices; older versions
        # return 1-element arrays, so `i[0]` alone breaks on new builds.
        try:
            idx = int(i)
        except TypeError:
            idx = int(i[0])
        # Layer indices are 1-based.
        names.append(layersNames[idx - 1])
    return names
72470cf88735729313a153737963ac5eaa7e255d
44,979
import os


def relative_module_path(module_file, relative_path):
    """Returns path relative to current python module."""
    module_dir = os.path.dirname(os.path.realpath(module_file))
    return os.path.join(module_dir, relative_path)
48a1ca6f4b93175ce587cd973157db30bcd8694b
44,980
import os


def find_image_paths(path, files):
    """Find per-sample image ("Registration") folders under *path*.

    For each name in *files*, probe candidate sub-folder layouts (built with
    Windows-style "\\" separators) and record the first that exists:

    1. path\\<name>\\Registration
    2. path\\<name>\\<name>\\Registration
    3. path\\<name>\\<name>_Rec
    4. fallback: just the basename of path\\<name>

    Args:
        path: root directory containing one folder per sample.
        files: list of sample folder names.

    Returns:
        list of resolved paths (or bare basenames for the fallback case).
    """
    # Loop for all files
    file_paths = []
    for k in range(len(files)):
        # Data path
        try:
            # Layout 1 — os.listdir raises FileNotFoundError if absent.
            os.listdir(path + "\\" + files[k] + "\\" + "Registration")
            file_paths.append(path + "\\" + files[k] + "\\" + "Registration")
        except FileNotFoundError:
            # Case: Unusable folder
            try:
                # Layout 2 — sample name repeated twice.
                os.listdir(path + "\\" + files[k] + "\\" + files[k] + "\\" + "Registration")
                file_paths.append(path + "\\" + files[k] + "\\" + files[k] + "\\" + "Registration")
            except FileNotFoundError:
                try:
                    # Layout 3 — reconstructed-volume folder "<name>_Rec".
                    os.listdir(path + "\\" + files[k] + '\\' + files[k] + '_Rec')
                    file_paths.append(path + "\\" + files[k] + '\\' + files[k] + '_Rec')
                except FileNotFoundError:
                    try:
                        # Fallback — store the basename only.
                        # NOTE(review): os.path.basename never raises
                        # FileNotFoundError, so the except below appears
                        # unreachable — confirm intent.
                        pth = os.path.basename(path + '\\' + files[k])
                        file_paths.append(pth)
                    except FileNotFoundError:
                        # Case: Unusable folder
                        print('Skipping folder {0}'.format(files[k]))
                        continue
    return file_paths
ee4dc85992cce83f549dc8fbccabb0d4081829c2
44,981
from functools import reduce import operator def _iterated_domfronts(cfg): """Compute the iterated dominance frontiers (DF+ in literatures). Returns a dictionary which maps block label to the set of labels of its iterated dominance frontiers. """ domfronts = {k: set(vs) for k, vs in cfg.dominance_frontier().items()} keep_going = True while keep_going: keep_going = False for k, vs in domfronts.items(): inner = reduce(operator.or_, [domfronts[v] for v in vs], set()) if inner.difference(vs): vs |= inner keep_going = True return domfronts
8fdacf245bf0a47ba2f08f03900245f19bad6a2c
44,982
def _is_short_code_start(block): """Start with SHORT_CODE_""" if block.type == "text": if str(block.title).startswith("<!-- SHORT_CODE_"): return True return False
d68c06888a6e3f135b8b633841b312bdc60100d6
44,984
import imghdr
import os


def get_real_ext(image_path, return_is_same=False):
    """@Image Utils — get the real (content-based) extension of an image file.

    The true type is sniffed from the file's magic bytes via ``imghdr``;
    if the file is not a recognized image, the real extension is None.
    This does NOT verify that the image data is complete.

    NOTE(review): ``imghdr`` is deprecated and removed in Python 3.13 —
    confirm the target runtime before upgrading.

    Args:
        image_path: path of the file to inspect.
        return_is_same: whether to also return ``is_same``.

    Returns:
        ext_real, or (ext_real, is_same) when *return_is_same* is True.
        ``is_same`` treats 'jpg' and 'jpeg' as equivalent.
    """
    # Current extension, without the leading dot.
    ext_cur = os.path.splitext(image_path)[1]
    if ext_cur.startswith('.'):
        ext_cur = ext_cur[1:]
    # Real extension from the file content (None if not an image).
    ext_real = imghdr.what(image_path)
    if return_is_same:
        # 'jpg' and 'jpeg' count as the same type.
        is_same = ext_cur == ext_real or {ext_cur, ext_real} == {'jpg', 'jpeg'}
        return ext_real, is_same
    return ext_real
227dbb7c1bf524c5044791a89a7049aac7004cf3
44,985
def _msd_anom_1d(time, D_alpha, alpha): """1d anomalous diffusion function.""" return 2.0*D_alpha*time**alpha
ba4b2e3ec597f2a936fca73423e31642403a4b55
44,986
import sys


def filter_nan_cols(df, col):
    """Drop rows where *col* is NaN, warning on stderr when any are found."""
    has_nan = df[col].isnull().values.any()
    if has_nan:
        warning = 'WARNING: nan values found in column: "{}". Filtering these nan rows\n'
        sys.stderr.write(warning.format(col))
    return df.dropna(subset=[col])
fa80fe54c13ae6c5c88eb67c03deae16868a9f5a
44,987
def get_output_size(dataloaders):
    """
    Infer the expected output size and task to perform.

    Args:
        dataloaders: dict of torch.dataloader objects

    Returns:
        an int, inferred output size
        a str, task to carry out
    """
    # Track the largest label-set size across all partitions.
    # (The unused `labels_data_type` variable was removed.)
    nb_labels = 0
    for dataloader in dataloaders.values():
        labels = list(set(dataloader.dataset.labels))
        if isinstance(labels[0], float):
            # Float labels mean a regression target: single output unit.
            return 1, "regression"
        nb_labels = max(len(labels), nb_labels)
    # Two classes -> a single (sigmoid) output; otherwise one unit per class.
    out_size = 1 if nb_labels == 2 else nb_labels
    task = "binary" if nb_labels == 2 else "multilabel"
    return out_size, task
1886e2b1ca9279846b761bc8efe259fc66550cac
44,988
import numpy as np


def str_list_2_num(str_list):
    """
    Map each string in *str_list* to the index of its class.

    Classes are the sorted unique values (as returned by ``np.unique``),
    so equal strings map to the same integer and indices follow sorted
    order — the same result as the previous flag-driven scan, but with an
    O(1) dictionary lookup per element instead of an inner loop.

    Args:
        str_list: sequence of comparable values (typically strings).

    Returns:
        np.ndarray of int indices, same length as *str_list*.
    """
    classes = np.unique(str_list)
    index_of = {cls: idx for idx, cls in enumerate(classes)}
    return np.array([index_of[s] for s in str_list])
5bf41f18298728a98f889f7f828f0eecb499e7a1
44,989
def list_flavors(flavor_table, level):
    """
    Return the distinct non-empty tags found at *level* of the flavor wheel.

    *flavor_table* is a list of dicts, each mapping keys like
    ``level_1`` / ``level_2`` / ``level_3`` to a tag (possibly empty).
    Calling ``list_flavors(flavor_table, 1)`` yields e.g.
    ``['Nutty/Cocoa', 'Sour/Fermented', ..., 'Fruity']`` (order arbitrary).

    :param: list of dictionaries, int (level of flavor wheel)
    :returns: list of unique tags at that level
    """
    key_str = 'level_{}'.format(level)
    tags = {entry[key_str] for entry in flavor_table if entry[key_str]}
    return list(tags)
6266b09700f56f2bd01bf1a06ab30c22241b531b
44,991
def create_basename(args):
    """
    Name to characterize the data. Can be used for dir name and file name.

    Joins the drug and cell feature names with '.', prefixed by the
    '_'-joined source names when ``args.src`` is given, and by 'data.'
    always.
    """
    feature_names = args.drug_fea + args.cell_fea
    if args.src is None:
        parts = feature_names
    else:
        parts = ['_'.join(args.src)] + feature_names
    return 'data.' + '.'.join(parts)
0588972eeed81cef2ce9cb143da45bce444a0229
44,992
from pathlib import Path def _validate_ignore_cells_with_warning(actual: str, test_nb_path: Path) -> bool: """Validate the results of notebooks with warnings.""" expected_out = [ "cell_2:3:1: F401 'glob' imported but unused", ] expected = "".join(f"{str(test_nb_path)}:{i}\n" for i in expected_out) return expected == actual
9a505973a63cb04761951e4edf996e2ce27fa09e
44,993
from pathlib import Path


def base_path():
    """
    Use the Path library to define the file path to the test folder.
    To be used when loading or saving test data. This works for almost all
    tests except for saving models in FCpredictor and XGBpredictor. For
    those use root_dir.
    """
    this_file = Path(__file__)
    return this_file.parent
f1836333477acedd6a8b1d11b0d1b3517006c0bc
44,995
import importlib
import operator


def _evaluate(object, property=None):
    """Take a dotted string and return the object it is referencing.

    Used by the Forward types.

    Tries to import *object* as a module path; if that fails, the last
    dotted component is peeled off into *property* and the shorter prefix
    is retried recursively until an importable module is found, at which
    point the accumulated attribute path is resolved with attrgetter.
    (Note: the parameter names shadow the builtins ``object`` and
    ``property``.)
    """
    try:
        # Attempt to treat the whole dotted string as a module path.
        object = importlib.import_module(object)
        if property is None:
            return object
    except ModuleNotFoundError:
        # Not a module: move the trailing component into the attribute
        # path and retry with the shorter module prefix.
        module, _, next = object.rpartition(".")
        property = next if property is None else f"{next}.{property}"
    else:
        # Import succeeded and an attribute path was accumulated: resolve it.
        return operator.attrgetter(property)(object)
    return _evaluate(module, property)
dd000f0992896093acb1ffebd8ef8b433f401cab
44,997
import pickle as pkl
from typing import Any
import os


def unpickle(path: str) -> Any:
    """Load and return the object stored in the pickle file at *path*.

    Arguments:
        path: File path to a valid pickle file.

    Returns:
        The unpickled object.

    Raises:
        FileNotFoundError: if *path* does not point to an existing file.
    """
    if not os.path.isfile(path):
        raise FileNotFoundError(path)
    with open(path, 'rb') as handle:
        return pkl.load(handle)
e1258dde3620533e3b9deb3271769ba170b4e3c8
44,998
def parse_pkg_list(output):
    """
    :param output: show install active/inactive summary
    :return: a list of active/inactive packages
    """
    packages = []
    in_section = False
    for line in (ln for ln in output.split('\n') if ln):
        if 'ctive Packages:' in line:
            in_section = True
            continue
        if not in_section:
            continue
        if line[2].isalnum():
            # A line that is not indented past column 2 ends the section.
            break
        packages.append(line.strip())
    return packages
221cb19b05f7e3b88695a722b59de9bba3b17b40
44,999
def is_permutation(n, d):
    """Checks to see if n is a permutation of the digits 0-d.

    A missing leading zero is tolerated: when n has exactly d digits, a
    zero is assumed to have been dropped from the front.
    """
    digits = [int(ch) for ch in str(n)]
    if len(digits) < d or len(digits) > d + 1:
        return False
    if len(digits) == d:
        digits.insert(0, 0)
    # A permutation of 0..d has d+1 distinct digits, none exceeding d.
    return len(set(digits)) == len(digits) and max(digits) <= d
7b06ba08c9efa07b82d3d51e9792b3e0499887f6
45,000
def m3c_config_str(config):
    """Normalize m3c mapping parameters and render them as a config string.

    Integer parameters are cast with int(); string parameters are wrapped
    in single quotes.  Parameters whose default is 'required' must be
    present in *config*.

    Args:
        config: mapping of parameter name -> raw value.

    Returns:
        str: "key = value" lines, one per parameter, newline-terminated.

    Raises:
        ValueError: if a required parameter is missing from *config*.
    """
    int_parameters = {
        'overlap': 6,
        'r1_left_cut': 10,
        'r1_right_cut': 10,
        'r2_left_cut': 10,
        'r2_right_cut': 10,
        'quality_threshold': 20,
        'length_threshold': 30,
        'total_read_pairs_min': 1,
        'total_read_pairs_max': 6000000,
        'mapq_threshold': 10,
        'num_upstr_bases': 0,
        'num_downstr_bases': 2,
        'compress_level': 5,
        'split_left_size': 40,
        'split_right_size': 40,
        'split_middle_min_size': 30,
        'min_gap': 2500,
        'trim_on_both_end': 5
    }
    str_parameters = {
        'mode': 'mc',
        'barcode_version': 'required',
        'r1_adapter': 'AGATCGGAAGAGCACACGTCTGAAC',
        'r2_adapter': 'AGATCGGAAGAGCGTCGTGTAGGGA',
        'bismark_reference': 'required',
        'reference_fasta': 'required',
        'mc_stat_feature': 'CHN CGN CCC',
        'mc_stat_alias': 'mCH mCG mCCC',
        'chrom_size_path': 'required'
    }

    def resolve(defaults, cast):
        # Shared required-key handling for both parameter groups (the
        # original duplicated this loop body verbatim).
        for key, default in defaults.items():
            if key in config:
                yield key, cast(config[key])
            elif default != 'required':
                yield key, cast(default)
            else:
                raise ValueError(f'Required parameter {key} not found in config. '
                                 f'You can print the newest mapping config template via "yap default-mapping-config".')

    typed_config = {}
    typed_config.update(resolve(int_parameters, int))
    typed_config.update(resolve(str_parameters, lambda v: f"'{v}'"))
    return ''.join(f"{k} = {v}\n" for k, v in typed_config.items())
d73daa7d10d6913c0fdc5f7180d365d024a87ee4
45,001
def bubble_sort(items):
    """Sort *items* in place (ascending) and return it.

    Bubble sort with an early exit once a full pass makes no swaps.
    """
    n = len(items)
    for _ in range(n):
        made_swap = False
        for idx in range(n - 1):
            if items[idx] > items[idx + 1]:
                items[idx], items[idx + 1] = items[idx + 1], items[idx]
                made_swap = True
        if not made_swap:
            break
    return items
16f392bfea089c146a8a59f6e886283893f0ea51
45,003
def verify_account_available(email):
    """
    Check to see if this email is already registered.

    Placeholder: always reports the address as available.
    (Run a query, use an ORM, use Twilio to call someone and ask them :-))
    """
    available = True
    return available
e29af1103af6e7764e372d46ef4f118ac6e4ac14
45,004
async def remaining(github):
    """Helper to calculate the remaining calls to github.

    Returns 0 when the rate-limit lookup fails or reports no remainder.
    """
    try:
        limits = await github.get_ratelimit()
    except:  # pylint: disable=broad-except
        return 0
    return int(limits.remaining) if limits.remaining else 0
c1614b51e21c4f1dd807ce35ba2196a0a9067bc1
45,007
def guard_duty_all(*_):
    """
    author: spiper
    description: Alert on GuardDuty events
    playbook: (a) identify the AWS account in the log
      (b) identify what resource(s) are impacted
      (c) contact the point-of-contact for the account
    testing: From the GuardDuty AWS page
      (https://console.aws.amazon.com/guardduty/home) click the button to
      "Generate Sample Findings"
    """
    # Match-all rule: every GuardDuty event triggers an alert.
    should_alert = True
    return should_alert
19e943eb0c292197b899c70cefe9c01e0bec2898
45,009
def getMinMaxOfRangeList(ranges):
    """Return (min, max) over a list of (low, high) ranges."""
    lows = (r[0] for r in ranges)
    highs = (r[1] for r in ranges)
    return min(lows), max(highs)
b7fe06dab71df72a54873401f2c0955910ef8d7c
45,010
from typing import List
from typing import Dict
import sys


def build_variable_table(definitions: List[str]) -> Dict[str, float]:
    """Parse the list of `-d` switches and return a dictionary associating
    variable names with their values.

    Each definition must look like ``NAME:VALUE`` with a floating-point
    VALUE.  On a malformed definition or a non-numeric value, an error is
    printed and the program exits with status 1.
    """
    variables = {}
    for declaration in definitions:
        parts = declaration.split(":")
        if len(parts) != 2:
            print(f"error, the definition «{declaration}» does not follow the pattern NAME:VALUE")
            sys.exit(1)
        name, value = parts
        try:
            value = float(value)
        except ValueError:
            print(f"invalid floating-point value «{value}» in definition «{declaration}»")
            # Bug fix: previously execution fell through and stored the raw
            # string; abort like the malformed-definition case above.
            sys.exit(1)
        variables[name] = value
    return variables
260267c85aff0f64e10ac7c773a980d30901b4ed
45,011
import os
import errno


def mkdir(directory):
    """
    Recursively create *directory* if it does not already exist.

    :param directory: path to the directory to be created
    :return: the absolute path of the directory
    """
    abs_dir = os.path.abspath(directory)
    try:
        os.makedirs(abs_dir)
    except OSError as exc:
        # An already-existing path is fine; re-raise anything else.
        if exc.errno != errno.EEXIST:
            raise
    return abs_dir
7176d1e3cf9d252095deae76d680b0d6bfc8a091
45,012
import functools


def ensure_key_exists(func):
    """Decorator that checks the ``key`` argument against ``self._defaults``.

    The wrapped method must receive ``key`` as its first positional argument
    (after ``self``) or as a keyword argument.

    Raises:
        KeyError: when the key is not in the configurations.
    """
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        self = args[0]
        # Bug fix: the old `kwargs.get('key', args[1])` evaluated args[1]
        # eagerly and raised IndexError whenever `key` was passed by
        # keyword only.
        key = kwargs['key'] if 'key' in kwargs else args[1]
        if key not in self._defaults:
            raise KeyError(f"The specified key is not in configurations: '{key}'")
        return func(*args, **kwargs)
    return wrapped
6311784120d1c401e006713ea1c41330bb51800c
45,014
import numpy


def inv_mass(pt1, eta1, phi1, pt2, eta2, phi2):
    """
    Invariant mass of a two-particle system from (pt, eta, phi).

    Assumes massless particles (ok for photons/eles, probably need to
    update for heavier particles):

        m^2 = 2 * pt1 * pt2 * (cosh(eta1 - eta2) - cos(phi1 - phi2))
    """
    # The previous `mass = float(0)` initializer was dead code and removed.
    return numpy.sqrt(2 * pt1 * pt2 * (numpy.cosh(eta1 - eta2) - numpy.cos(phi1 - phi2)))
ae30336c889ab621c5b722345685c78e28840710
45,015
def unique_list(l, preserve_order=True):
    """Return the unique elements of *l* without modifying it.

    With ``preserve_order=True`` (default), keeps the first occurrence of
    each element in its original position; otherwise the result order is
    whatever ``set`` yields.  Non-hashable elements are handled by a
    quadratic fallback that always preserves order.

    >>> unique_list([1, 2, 4, 3, 2, 3, 1, 0])
    [1, 2, 4, 3, 0]
    >>> unique_list([[1], [2], [2], [1], [3]])
    [[1], [2], [3]]

    See Also
    --------
    http://www.peterbe.com/plog/uniqifiers-benchmark
    """
    try:
        if preserve_order:
            # dict preserves insertion order; keys are first occurrences.
            return list(dict.fromkeys(l))
        return list(set(l))
    except TypeError:
        # Special case for non-hashable types
        out = []
        for item in l:
            if item not in out:
                out.append(item)
        return out
dbc4c1a16538a6be8c114abb7411622eecb5b98c
45,018
import json


def response_error400(err):
    """Build a 400 Bad Request lambda-style response for *err*."""
    body = {
        "error": err,
        "code": 400,
    }
    return {"statusCode": 400, "body": json.dumps(body)}
af0b6d536e054971a79cd95e65afac6730baeaf9
45,020
import re


def extract_ref_numbers_from_bbl(df, filename=None):
    """Extract reference numbers from .bbl file and add them to df.

    Args:
        df (pd.DataFrame): dataframe containing the data items spreadsheet.

    Keyword Args:
        filename (str): path to the .bbl file (created when compiling the
            main tex file).  Defaults to '../data/output.bbl'.

    Returns:
        (pd.DataFrame): dataframe with new column 'ref_nb'.
    """
    if filename is None:
        # Bug fix: the argument used to be unconditionally overwritten by
        # this hard-coded path, making the parameter useless.
        filename = '../data/output.bbl'
    with open(filename, 'r', encoding='ISO-8859-1') as f:
        text = ''.join(f.readlines())
    # Bibliography keys appear in order, so position = reference number.
    ref_nbs = re.findall(r'\\bibitem\{(.*)\}', text)
    ref_dict = {ref: i + 1 for i, ref in enumerate(ref_nbs)}
    df['ref_nb'] = df['Citation'].apply(lambda x: '[{}]'.format(ref_dict[x]))
    return df
db518cd569f65ac715a8121c9a1c4b8311101458
45,021
def get_periodicity(self):
    """Gives periodicity of the axis.

    An 'antiperiod' symmetry greater than 1 takes precedence over a
    'period' symmetry; values of 1 or less leave the default (1, False).

    Parameters
    ----------
    self: Data1D
        a Data1D object

    Returns
    -------
    (per, is_antiper): tuple of int and bool
    """
    symmetries = self.symmetries
    if "antiperiod" in symmetries:
        antiper = symmetries["antiperiod"]
        if antiper > 1:
            return (antiper, True)
    elif "period" in symmetries and symmetries["period"] > 1:
        return (symmetries["period"], False)
    return (1, False)
bf53aadca7c75ea1e1760b8c6eb149f5f9450ee2
45,023
def mocked_wikiloader_response():
    """Return a fake wikiloader payload for use in tests."""
    fake_payload = {
        "anecdote": "Une anecdote",
        "url": "un url",
    }
    return fake_payload
ebdb53c1370c96ee2ce5774998391b407afef922
45,024