content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
from typing import Tuple


def convert_tuple_to_prayer_dict(tuple_data: Tuple) -> dict:
    """Convert a tuple record from the DB into a prayer dictionary.

    :param tuple_data: Prayer data from the DB, ordered (title, name, prayer).
    :return: Dict with keys 'Title', 'Name' and 'Prayer'.
    """
    title, name, prayer = tuple_data[0], tuple_data[1], tuple_data[2]
    return {"Title": title, "Name": name, "Prayer": prayer}
172643341e7b94ee91d977706f9e21b1023664ed
44,827
def is_num(val):
    """Return True if *val* is a number, i.e. an int or a float.

    Note: ``bool`` is a subclass of ``int``, so ``is_num(True)`` is True
    (unchanged from the original behavior).
    """
    # Single isinstance call with a tuple is the idiomatic form of
    # ``isinstance(val, int) or isinstance(val, float)``.
    return isinstance(val, (int, float))
e2177b70c090701f03d517bece2b3bc3d86edcac
44,828
import torch def _softplus(x): """Implements the softplus function.""" return torch.nn.functional.softplus(x, beta=1, threshold=10000)
65f83d32b949eccbf8ba5e4b2471852ffa5b9c6b
44,832
def _filter_distance(barcodes, candidate, min_dist, distance): """Test whether {candidate} can be added to {barcodes} based on the minimum distance between {candidate} and all barcodes in {barcodes}. :arg list barcodes: List of barcodes. :arg str candidate: Candidate barcode. :arg int min_dist: Minimum distance between the barcodes. :arg function distance: Distance function. :returns bool: True if the barcode is clean, False otherwise. """ for i in barcodes: if distance(i, candidate) < min_dist: return False return True
d61643d1bffe21a713466b2e7bd44891b2149a33
44,833
def read_list(filename):
    """Read a list of strings from a file, one per line.

    :param filename: Path of the text file to read.
    :return: List of lines with surrounding whitespace stripped.
    """
    # ``with`` guarantees the file handle is closed; the original leaked it.
    with open(filename, 'r') as fd:
        return [line.strip() for line in fd]
f717a5bcc0015ff39adfb131cd3e611f77e0924c
44,839
def create_fan_string(fan_id):
    """Build a SQL filter clause for the given fan id.

    param: fan_id = '1234'
    returns: " AND youtube_fan_id = 1234"

    NOTE(review): fan_id is interpolated directly into SQL text; if it can
    come from untrusted input this is an injection risk — prefer a
    parameterized query at the call site.
    """
    return f" AND youtube_fan_id = {fan_id}"
7f444e6e60445a4347850f58f32030e06df67621
44,840
def _CredentialFrom(messages, credential_data): """Translate a dict of credential data into a message object. Args: messages: The API message to use. credential_data: A dict containing credential data. Returns: An Credential message object derived from credential_data. """ basic_auth = messages.BasicAuth( password=credential_data['basicAuth']['password'], user=credential_data['basicAuth']['user']) return messages.Credential(basicAuth=basic_auth)
c96acdf28dab1ba60acc0c5b1bee73585c20106f
44,843
def int_lin(y1, y2, x1, x2, score):
    """Linearly interpolate *score* from range (x1, x2) onto (y1, y2)."""
    slope = (y2 - y1) / (x2 - x1)
    return y1 + slope * (score - x1)
2b188ec699cbadd0431bc159187b6674c3ac7f89
44,845
def display(
    function=None, *, boolean=None, ordering=None, description=None, empty_value=None
):
    """
    Conveniently add display attributes to an admin display function::

        @admin.display(
            boolean=True,
            ordering='-publish_date',
            description='Is Published?',
        )
        def is_published(self, obj):
            return obj.publish_date is not None

    Equivalent to setting ``boolean``, ``admin_order_field``,
    ``short_description`` and ``empty_value_display`` on the function
    directly. ``boolean`` and ``empty_value`` are mutually exclusive.
    Usable both as ``@display`` and as ``@display(...)``.
    """
    def decorator(func):
        if boolean is not None and empty_value is not None:
            raise ValueError(
                "The boolean and empty_value arguments to the @display "
                "decorator are mutually exclusive."
            )
        # Map each keyword onto the attribute name the admin reads.
        for attr, value in (
            ("boolean", boolean),
            ("admin_order_field", ordering),
            ("short_description", description),
            ("empty_value_display", empty_value),
        ):
            if value is not None:
                setattr(func, attr, value)
        return func

    return decorator if function is None else decorator(function)
10e1a0b44ea34c705754e148370b0a24baf80323
44,847
def getMode(mode):
    """
    Map an EAST mode code to the closest-matching Reveal class.

    :param mode: EAST mode code
    :return: Reveal class string ("fragment" when unrecognized)
    """
    reveal_classes = {
        "incremental_allume": "fragment highlight-red",
        "incremental_ombre": "fragment highlight-blue",
        "pliage": "fragment grow",
        "accordeon": "fragment current-visible",
    }
    return reveal_classes.get(mode, "fragment")
9bb6222c0aa8be51b28323e981bc1521f89bdb8e
44,849
def do_intersect(bb1, bb2):
    """Return True if the two (x, y, w, h) bounding boxes overlap (touching
    edges count as overlap)."""
    x_overlap = bb1[0] + bb1[2] >= bb2[0] and bb2[0] + bb2[2] >= bb1[0]
    y_overlap = bb1[1] + bb1[3] >= bb2[1] and bb2[1] + bb2[3] >= bb1[1]
    return x_overlap and y_overlap
12b2797254f0bdfebcc646019e261bdc21474602
44,858
import torch


def class_reduce(num: torch.Tensor, denom: torch.Tensor, weights: torch.Tensor,
                 class_reduction: str = 'none') -> torch.Tensor:
    """Reduce per-class classification metrics of the form num / denom * weights.

    E.g. for accuracy, ``num`` is true positives per class, ``denom`` the
    support per class, and ``weights`` a tensor of ones.

    Args:
        num: numerator tensor
        denom: denominator tensor
        weights: weights for each class
        class_reduction: 'micro' (global), 'macro' (unweighted mean),
            'weighted' (weights-proportional mean), or 'none' (per-class).
    """
    valid_reduction = ('micro', 'macro', 'weighted', 'none')
    if class_reduction == 'micro':
        return torch.sum(num) / torch.sum(denom)

    # Classes with zero support produce NaN fractions; zero them out so the
    # macro/weighted means stay finite.
    per_class = num / denom
    per_class[torch.isnan(per_class)] = 0
    if class_reduction == 'macro':
        return torch.mean(per_class)
    if class_reduction == 'weighted':
        return torch.sum(per_class * (weights / torch.sum(weights)))
    if class_reduction == 'none':
        return per_class

    raise ValueError(f'Reduction parameter {class_reduction} unknown.'
                     f' Choose between one of these: {valid_reduction}')
d790cee3cf30f20a0eaeedaa15a6099dbe4b2508
44,860
def repr_opcode(opcode):
    """Returns a textual representation for the given opcode.

    @type opcode: int
    @rtype: str
    """
    names = {
        1: "open",
        2: "refresh",
        3: "update",
        4: "notify",
        5: "status",
        6: "delete",
    }
    return names.get(opcode, "unknown (%d)" % opcode)
31346162db7439ddc33192b63de74081ec04febc
44,869
def pip_to_rez_package_name(dist_name):
    """Convert a pip distribution name to a rez-compatible package name.

    Hyphens are invalid in rez package names (they would be read as the
    start of a version: my-pkg-1.2 parses as package 'my', version
    'pkg-1.2'), so every hyphen becomes an underscore.

    Args:
        dist_name (str): Distribution name to convert.

    Returns:
        str: Rez-compatible package name.
    """
    return "_".join(dist_name.split("-"))
6a817d5dc11b072d3b44abb56c29e853c3722873
44,872
def check_parse_errors(options, args):
    """Validate command line options; return an error message or None."""
    if not options.language:
        return "language parameter not informed."
    if not args:
        return "base path not informed."
    return None
cefa9608bc37b551d8ca3f93a196ff072f67b451
44,873
def strip_characters(text: str, *args: str) -> str:
    """Return *text* with every occurrence of each given character removed."""
    result = text
    for unwanted in args:
        result = result.replace(unwanted, "")
    return result
a44788dcba864aa97b8165c6668008f692a2a45b
44,874
import re
import html


def format_type(expr):
    """
    Format a type expression for a popup.

    Wraps type names in span class 'type', type variables in 'tyvar' and
    arrows/constraint operators in 'operator'; everything else is
    HTML-escaped as-is.
    """
    if not expr:
        return expr
    match = re.search(r'([a-zA-Z]\w*)|(->|=>|::|\u2192|\u21d2|\u2237)', expr)
    if not match:
        return html.escape(expr, quote=False)

    token = expr[match.start():match.end()]
    css_class = ''
    if match.group(1):
        # Identifiers: uppercase initial means a concrete type name.
        css_class = 'type' if token[0].isupper() else 'tyvar'
    elif match.group(2):
        css_class = 'operator'
    span = '<span class="{0}">{1}</span>'.format(
        css_class, html.escape(token, quote=False))
    prefix = html.escape(expr[0:match.start()], quote=False)
    return prefix + span + format_type(expr[match.end():])
967fbfffe751cccb51a0bb7384180d2813efaafa
44,875
def null_guess(atomic, params=None):
    """Default guess type for testing; ignores its inputs and returns an
    empty list.

    :param atomic: Ignored.
    :param params: Optional parameter dict; defaults to an empty dict.
    :return: An empty list.
    """
    # The original used a mutable default ``params={}``; a None sentinel
    # avoids the shared-mutable-default pitfall while keeping the interface.
    if params is None:
        params = {}
    return []
2e2b9cec0759833f635b4f639f41c14e1c19a30e
44,880
def reduce_func(nodes):
    """Collect messages and update node representations.

    Sums the incoming messages over the message axis (axis 1).

    Parameters
    ----------
    nodes : NodeBatch
        A batch of nodes; messages are read from ``nodes.mailbox['m']``.

    Returns
    -------
    dict
        Maps 'hv_new' to the summed messages (per-node updated
        representations).
    """
    summed_messages = nodes.mailbox['m'].sum(1)
    return {'hv_new': summed_messages}
647f0bda0ea6a9b77972ce571f2bd868de0a032a
44,881
import torch


def hsv_to_rgb(input_hsv_tensor):
    """
    Differentiable HSV to RGB conversion function.
    :param input_hsv_tensor: Batch of HSV images [batch_size, 3, height, width]
    :return: Batch of RGB images [batch_size, 3, height, width]
    """
    assert len(input_hsv_tensor.shape) == 4 and input_hsv_tensor.shape[1] == 3
    hues = input_hsv_tensor[:, 0, :, :]
    sats = input_hsv_tensor[:, 1, :, :]
    vals = input_hsv_tensor[:, 2, :, :]
    c = sats * vals
    x = c * (1 - torch.abs((hues * 6.0) % 2.0 - 1.0))
    m = vals - c

    def sector_select(sources):
        """Assemble one channel by hue sector: sources[k] is the tensor used
        for hues in sector [k/6, (k+1)/6) (None means zero). The final
        sector is closed at 1.0."""
        out = torch.zeros_like(hues)
        remaining = hues.clone()
        for k, src in enumerate(sources):
            bound = (k + 1) / 6.0
            mask = remaining <= bound if k == 5 else remaining < bound
            if src is not None:
                out[mask] = src[mask]
            # Push handled pixels out of range so later sectors skip them.
            remaining[mask] += 10.0
        return out

    r_hat = sector_select([c, x, None, None, x, c])
    g_hat = sector_select([x, c, c, x, None, None])
    b_hat = sector_select([None, None, x, c, c, x])

    batch, _, height, width = input_hsv_tensor.shape
    r = (r_hat + m).view(batch, 1, height, width)
    g = (g_hat + m).view(batch, 1, height, width)
    b = (b_hat + m).view(batch, 1, height, width)
    return torch.cat([r, g, b], dim=1)
68a802433cf42d4f2d67931db7fe4a82ddb270a8
44,885
def get_profile_info(org_vm, inst):
    """
    Extract (org, name, version) from a profile instance.

    The organization value is translated through *org_vm*.
    """
    return (org_vm.tovalues(inst['RegisteredOrganization']),
            inst['RegisteredName'],
            inst['RegisteredVersion'])
1206c4aec45b33bf6f865c80a0e354125d42c554
44,886
import pyarrow as pa


def load_arrow(file) -> pa.Table:
    """Reassemble record batches previously streamed to *file* into a table.

    See Also
    --------
    dump_batch
    """
    try:
        return pa.RecordBatchStreamReader(file).read_all()
    except Exception:
        # Any failure while reading the stream is surfaced as end-of-file.
        raise EOFError
d599caff58d48efefa9b4367ec9844a538081b1e
44,889
import torch
import math


def count_qubits_gate_matrix(gate: torch.Tensor):
    """Return k such that the gate matrix has shape (..., 2**k, 2**k).

    By convention a k-qubit gate matrix has trailing dimensions
    (2**k, 2**k); leading batch dimensions are allowed. Raises
    RuntimeError when the trailing size is not a power of two.
    """
    dim = gate.size()[-1]
    num_bits = round(math.log2(dim))
    if 2 ** num_bits != dim:
        raise RuntimeError(f'Given gate matrix has size {gate.size()} which '
                           f'is not consistent with any number of qubits.')
    return num_bits
def1082c7e598589cf09aa3dfe67ce6598a93990
44,891
def most_affected_area(affected_areas_count):
    """Find the most affected area and its hurricane count.

    Returns ('Central America', 0) when the mapping is empty or has no
    positive counts, matching the original default.
    """
    best_area, best_count = 'Central America', 0
    for area, count in affected_areas_count.items():
        if count > best_count:
            best_area, best_count = area, count
    return best_area, best_count
e7abbf76f2f7cf10183fefadca943bdac83c33be
44,895
import re


def word_list(raw):
    """
    Convert a raw sentence to a list of words.

    Every non-letter character is treated as a separator.

    :param raw: sentence to be cleaned up
    :return: list of words
    """
    letters_only = re.sub(r"[^a-zA-Z]", ' ', raw)
    return letters_only.split()
25bd3d04c4dca62cbe724da677e0da54f883ec4e
44,904
def FindPart(part: dict, mime_type: str):
    """
    Depth-first search the email part tree for the first part whose
    mimeType equals *mime_type*.

    :param part: Part of the email to parse (generally the payload).
    :param mime_type: MIME type to look for.
    :return: The matching part dict, or None when absent.
    """
    if part['mimeType'] == mime_type:
        return part
    if 'multipart' in part['mimeType']:
        for child in part['parts']:
            found = FindPart(child, mime_type)
            if found is not None:
                return found
    # Implicit None when no part matches.
ab557300860b6030acc5b851aa5bdb10ae2850cc
44,905
from functools import reduce


def scalar_prod(scalars_list):
    """
    Return the product of a list/tuple of scalars.

    Parameters
    ----------
    scalars_list : list[int|float|complex] | tuple[int|float|complex]

    Returns
    -------
    complex|float|int
    """
    if len(scalars_list) == 1:
        return scalars_list[0]
    return reduce(lambda acc, factor: acc * factor, scalars_list)
b6ab3834e8e8eb175c8b3da92c042fd83458fbf3
44,907
def get_column_headers(df):
    """Return the headers of all count columns in the dataframe.

    NOTE(review): this is a substring match on 'ct', so unrelated names
    containing 'ct' (e.g. 'factor') also qualify — confirm intended.
    """
    return [header for header in df.columns if 'ct' in header]
3e08ce7d11b2cd911cc412f780806c7301b085af
44,908
def strategy(history, memory):
    """
    Defect every few turns, spaced by the Fibonacci sequence:
    turn 2 (1), turn 3 (1), turn 5 (2), turn 8 (3), turn 13 (5), ...

    Memory is (last_defection_turn, prev_fibonacci, current_fibonacci).
    Returns (choice, updated_memory); choice 0 defects, 1 cooperates.
    """
    if memory is None:
        last_defection_turn, prev_fib, cur_fib = 0, 1, 1
    else:
        last_defection_turn, prev_fib, cur_fib = memory

    turn = history.shape[1]
    if turn == last_defection_turn + cur_fib:
        # Time to defect; advance the Fibonacci gap.
        last_defection_turn = turn
        prev_fib, cur_fib = cur_fib, prev_fib + cur_fib
        choice = 0
    else:
        choice = 1
    return choice, (last_defection_turn, prev_fib, cur_fib)
009710f3fb9eb4c5802b3beb0239afe2de6acdfb
44,909
def isDmzProxySecurityLevelValid(level):
    """Indicates whether the supplied level is valid for secure proxy security."""
    return level in ('high', 'medium', 'low')
ea00bd321e6e3aaf955cfbb017638cfbc93ce578
44,911
def create_cifar_filter_func(classes, get_val):
    """Create a filter that keeps only labels belonging to *classes*.

    When *get_val* is True, 50 is first subtracted from the label, since
    CIFAR-50 validation uses the last 50 classes.
    """
    offset = 50 if get_val else 0
    return lambda label: (label - offset) in classes
db2cb98798636a76ad6515bc5bbd00507e6f9fa8
44,912
def slice_function_graph(function_graph, cfg_slice_to_sink):
    """
    Prune *function_graph* down to the nodes present in the
    <CFGSliceToSink> representation.

    The slice is built backwards and recursively from the CFG, so any
    function-graph edge between two surviving nodes is still backed by a
    path inside the slice; removing the out-of-slice nodes (and thus
    their edges) yields a higher-level view of the flow in the slice.

    *Note* that this function mutates the graph passed as an argument.

    :param networkx.DiGraph function_graph: The graph to slice.
    :param CFGSliceToSink cfg_slice_to_sink: The representation of the
        slice, containing the data to update the CFG from.

    :return networkx.DiGraph: The sliced (mutated) graph.
    """
    stale_nodes = [
        node for node in function_graph.nodes()
        if node.addr not in cfg_slice_to_sink.nodes
    ]
    function_graph.remove_nodes_from(stale_nodes)
    return function_graph
d88f6784ebae21e5a8d2132542daf662e8cb250f
44,913
from collections import Counter


def repeating_bits(n: list[str], index: int) -> tuple:
    """
    Find the most and least repeating bit at a given index.

    :param n: list of binary numbers as strings
    :param index: index of the bit to inspect
    :return: tuple (most common bit, least common bit)
    """
    # Counter comes from collections: importing it from ``typing`` (as the
    # original did) is deprecated and only intended for annotations.
    counts = Counter(int(number[index]) for number in n)
    ranked = counts.most_common()
    return ranked[0][0], ranked[-1][0]
b002c35d38b793c173a0449e4b89f8dd459afd7d
44,914
import csv


def load_asdp_ordering(orderfile):
    """
    Load an ASDP ordering produced by JEWEL from a CSV file.

    Parameters
    ----------
    orderfile: str
        path to order CSV file

    Returns
    -------
    ordering: list
        list of dicts, one per CSV row, keyed by the CSV header fields
        (asdp_id, initial_sue, final_sue, initial_sue_per_byte,
        final_sue_per_byte, size_bytes, timestamp)
    """
    with open(orderfile, 'r') as f:
        return [row for row in csv.DictReader(f)]
430bdea030c6399e258dd49548a954069fe89851
44,915
def _to_mumps_number(v):
    """Given a value, attempt to coerce it to either an integer or float.

    Values that parse as plain floats are returned directly (as int when
    integral). Otherwise the string form is scanned left-to-right using
    MUMPS numeric conversion rules: leading signs and digits accumulate,
    at most one decimal point is accepted, and scanning stops at the
    first non-numeric character.

    NOTE(review): an input with no leading numeric characters at all
    (e.g. "abc") reaches float("") and raises ValueError — confirm
    callers never pass such values.
    """
    sign = 1      # flipped by each '-' encountered
    ndec = 0      # number of decimal points consumed so far

    try:
        tmp = float(v)
        # Integral floats collapse to int, mirroring MUMPS canonic numbers.
        if tmp.is_integer():
            return int(tmp)
        else:
            return tmp
    except ValueError:
        v = str(v)
        n = []

        # Build a number based on the MUMPS numeric conversion rules
        for c in v:
            # Look for numeric characters (digits, decimal, or sign)
            if c.isnumeric() or c in ('.', '+', '-'):
                # Make sure we only add one decimal
                if c == '.':
                    if ndec >= 1:
                        break
                    else:
                        ndec += 1

                # Correctly swap the sign
                if c == '-':
                    sign *= -1
                    continue

                # Ignore the plus signs
                if c == '+':
                    continue

                # If we made it this far, this is a valid numeric character
                n.append(c)
            else:
                # First non-numeric character ends the scan
                break

        # Re-assemble the digits and attempt to convert it
        n = float("".join(n)) * sign
        return n if not n.is_integer() else int(n)
410246c14e55de56209e6b19c7a94102ca03893b
44,923
def generate_shard_args(outfiles, num_examples):
    """Generate (start index, end index, outfile) triples, one per shard.

    The last shard absorbs any remainder from integer division.
    """
    num_shards = len(outfiles)
    per_shard = num_examples // num_shards
    starts = [shard * per_shard for shard in range(num_shards)]
    ends = starts[1:] + [num_examples]
    return zip(starts, ends, outfiles)
8f059536a8ab2e36c89e5f6a6fe8dc4d827a782d
44,927
def find_biggest_pattern_in_patterns(dict):
    """
    dict: dictionary of translation vector -> pattern

    Return the longest pattern and its corresponding translation vector.
    Returns (None, None) when the dictionary is empty.
    """
    best_pattern, best_vector, best_len = None, None, -1
    for vector, pattern in dict.items():
        if len(pattern) > best_len:
            best_len = len(pattern)
            best_vector = vector
            best_pattern = pattern
    return best_pattern, best_vector
34e658369145ccb30d25ba5d1d80085a61f68413
44,931
def _installable(args): """ Return True only if the args to pip install indicate something to install. >>> _installable(['inflect']) True >>> _installable(['-q']) False >>> _installable(['-q', 'inflect']) True >>> _installable(['-rfoo.txt']) True >>> _installable(['projects/inflect']) True >>> _installable(['~/projects/inflect']) True """ return any( not arg.startswith('-') or arg.startswith('-r') or arg.startswith('--requirement') for arg in args )
1a374c75fca3289f0f6f86321cc7b76eee4c7d3b
44,932
from collections import Counter


def sum_counters(counters):
    """Aggregate collections.Counter objects by summing counts.

    :param counters: list/tuple of counters to sum
    :return: aggregated Counter with counts summed

    >>> sum_counters([Counter('ab'), Counter('b')])
    Counter({'b': 2, 'a': 1})
    """
    # Counter comes from collections: importing it from ``typing`` (as the
    # original did) is deprecated and only intended for annotations.
    return sum(counters, Counter())
c73f374639851548a4e11249ae7faf0dc1b80956
44,939
import time
import calendar


def gen_date_list(begin_date, end_date):
    """Generate the inclusive list of yyyymmdd dates between two dates.

    Inputs:
       begin_date -- such as "20070101"
       end_date   -- such as "20070103"

    Returns:
       date_list -- such as ["20070101", "20070102", "20070103"]
    """
    start_tv = calendar.timegm(time.strptime(begin_date, "%Y%m%d"))
    stop_tv = calendar.timegm(time.strptime(end_date, "%Y%m%d"))
    # Step one day (86400 s) at a time, end date included.
    return [time.strftime("%Y%m%d", time.gmtime(tv))
            for tv in range(start_tv, stop_tv + 86400, 86400)]
7663b5329e0a7ac65910c1b2df39205758c75c58
44,941
def frames_to_time(frames, framerate):
    """Convert a frame count into seconds using *framerate*."""
    seconds = frames / framerate
    return seconds
398ce55c09706c286a682c86452f10d9bd8e1140
44,944
def int2verilog(val, vw):
    """
    Render a signed integer as a Verilog sized signed-decimal literal.

    :param val: A signed integer to convert to a verilog literal.
    :param vw: The word length of the constant value.
    """
    sign = '-' if val < 0 else ''
    magnitude = abs(val)
    return f"{sign}{vw}'sd{magnitude}"
42e82203cb5cfd4015664e49b94091d027979d23
44,945
def get_index_names(df):
    """Return the index level names from a single or multi-part index.

    Unnamed levels (None) are dropped for multi-part indexes.
    """
    if df.index.name is not None:
        return [df.index.name]
    return [name for name in df.index.names if name is not None]
143f7e86594d39ccb19ab1e4e36cd9933cb07304
44,949
import re


def sanitize_path(path):
    """Replace spaces with underscores and drop illegal path characters
    (anything but letters, digits, '_', '-', '/', '.')."""
    underscored = path.replace(" ", "_")
    return re.sub(r"[^a-zA-Z0-9_\-/\.]", "", underscored)
c74db40399524f6deedc023ca76e828fc3d4019e
44,952
def get_coordinates(region):
    """
    Parse a region string "chrX:start-end" into its coordinates.

    Returns the list [chromosome, start, end] with integer positions.
    """
    parts = region.split(":")
    chromosome = parts[0]
    bounds = parts[1].split("-")
    return [chromosome, int(bounds[0]), int(bounds[1])]
3e76420ad607d5dfb195992fb5466b118bd67bcd
44,953
def getSocketFamily(socket):
    """
    Return the family of the given socket.

    @param socket: The socket to get the family of.
    @type socket: L{socket.socket}

    @rtype: L{int}
    """
    family = socket.family
    return family
e67095574949dc12022676b2795010aff3a12446
44,956
def rename_part(oEditor, oldname, newname):
    """
    Rename a part via the HFSS editor COM interface.

    Parameters
    ----------
    oEditor : pywin32 COMObject
        The HFSS editor in which the operation will be performed.
    oldname : str
        The name of the part to rename.
    newname : str
        The new name to assign to the part.

    Returns
    -------
    None
    """
    rename_args = [
        "Name:Rename Data",
        "Old Name:=", oldname,
        "New Name:=", newname,
    ]
    return oEditor.RenamePart(rename_args)
cb255d54a11aa37a48958be651a93b3f80e3b85b
44,958
def get_shape_points2(cur, shape_id):
    """
    Given a shape_id, return its shape-sequence as a dict of parallel
    lists (get_shape_points returns a list of dicts instead).

    Parameters
    ----------
    cur: sqlite3.Cursor
        cursor to a GTFS database
    shape_id: str
        id of the route

    Returns
    -------
    shape_points: dict of lists
        keys 'seqs', 'lats', 'lons', and 'd' (cumulative distance)
    """
    cur.execute('''SELECT seq, lat, lon, d FROM shapes where shape_id=? ORDER BY seq''', (shape_id,))
    points = {'seqs': [], 'lats': [], 'lons': [], 'd': []}
    for seq, lat, lon, dist in cur:
        points['seqs'].append(seq)
        points['lats'].append(lat)
        points['lons'].append(lon)
        points['d'].append(dist)
    return points
7e7708a155f2f04510844565054fb32d0b770e1a
44,961
def get_matching_card(card_list, card_to_match):
    """Return the first card in *card_list* with the same value as
    *card_to_match* but that is not the same object; None when absent."""
    for candidate in card_list:
        if candidate.value == card_to_match.value and candidate != card_to_match:
            return candidate
    return None
7c67fa95465c131ef11703096c3bb8ac77531ed1
44,962
def generate_sorted(degree):
    """Generate the list 0 .. 2**degree - 1 in ascending order."""
    return list(range(2 ** degree))
1dcf27801f4e253a87294dcb4aa742fdd12c525f
44,963
def indices(A):
    """
    Return a list containing all the indices for elements in A.

    >>> indices([6, 3, 2, 9, 10])
    [0, 1, 2, 3, 4]
    """
    # Materialized as a list so the doctest above actually holds: a bare
    # ``range`` would repr as ``range(0, 5)`` and fail the doctest, while a
    # list remains a drop-in sequence for iteration and indexing.
    return list(range(len(A)))
da06e01da439144851b005fb22ec050932d8c5f0
44,964
from typing import List def _erase_elements_from(items: List, start_i: int): """ Erase from the given 'i' onward >>> _erase_elements_from([1, 2, 3], 0) [None, None, None] >>> _erase_elements_from([1, 2, 3], 1) [1, None, None] >>> _erase_elements_from([1, 2, 3], 2) [1, 2, None] >>> _erase_elements_from([1, 2, 3], 3) [1, 2, 3] """ items[start_i:] = [None] * (len(items) - start_i) # Return the list just for convenience in doctest. It's actually mutable. return items
5e3694272bca02dadbbf154a4132ea9dfdda8097
44,968
import requests


def get_json(url, payload=None, verbose=True):
    """Fetch JSON from an online service, with optional URL parameters.

    Prints the fetched URL when *verbose*, and the status code on any
    non-200 response (the body is still returned and parsed).
    """
    response = requests.get(url, params=payload)
    if verbose:
        print("Fetched: {}".format(response.url))
    if response.status_code != 200:
        print("Error: {}.".format(response.status_code))
    return response.json()
02ec9a3648946c8cc9999a61573378bbb12174c1
44,973
def unpack(s):
    """Return the elements of *s* as a space-separated string, without
    brackets.

    Parameters
    ----------
    s : list
        The list to be turned into a string.

    Returns
    -------
    string
        The list as a string.
    """
    return " ".join(str(element) for element in s)
fa5933cb9f67ebff699ab5751f8b188f9764f693
44,974
import re
import json


def read_jsonc(file: str) -> dict:
    """Load a JSON file that may contain C-style comments.

    Both ``/* ... */`` block comments and ``// ...`` line comments are
    stripped before parsing.

    Args:
        file (str): Path of the JSON file.

    Returns:
        dict: The parsed JSON object.
    """
    with open(file, encoding='utf-8') as f:
        raw = f.read()
    stripped = re.sub(r'/\*[\s\S]*?\*/|//.*', '', raw)
    return json.loads(stripped)
1405783375dc7281a743f7d5bfd83734fa7e4c4c
44,977
def getOutputsNames(net):
    """
    Get the names of the network's output layers, i.e. the layers with
    unconnected outputs (indices from the net are 1-based).
    """
    layer_names = net.getLayerNames()
    return [layer_names[idx[0] - 1] for idx in net.getUnconnectedOutLayers()]
72470cf88735729313a153737963ac5eaa7e255d
44,979
def _msd_anom_1d(time, D_alpha, alpha): """1d anomalous diffusion function.""" return 2.0*D_alpha*time**alpha
ba4b2e3ec597f2a936fca73423e31642403a4b55
44,986
def get_output_size(dataloaders):
    """
    Infer the expected output size and the task to perform.

    Args:
        dataloaders: dict of torch dataloader objects (each exposing
            ``.dataset.labels``)

    Return:
        an int, inferred output size
        a str, one of "regression", "binary" or "multilabel"
    """
    nb_labels = 0
    for _, loader in dataloaders.items():
        unique_labels = list(set(loader.dataset.labels))
        if isinstance(unique_labels[0], float):
            # Float labels imply a regression task.
            return 1, "regression"
        nb_labels = max(len(unique_labels), nb_labels)

    if nb_labels == 2:
        return 1, "binary"
    return nb_labels, "multilabel"
1886e2b1ca9279846b761bc8efe259fc66550cac
44,988
def create_basename(args):
    """Build a name characterizing the data; usable for dir or file names.

    Drug and cell feature names (preceded by the joined source names when
    given) are dot-joined and prefixed with 'data.'.
    """
    features = args.drug_fea + args.cell_fea
    if args.src is None:
        stem = '.'.join(features)
    else:
        stem = '.'.join(['_'.join(args.src)] + features)
    return 'data.' + stem
0588972eeed81cef2ce9cb143da45bce444a0229
44,992
from pathlib import Path def _validate_ignore_cells_with_warning(actual: str, test_nb_path: Path) -> bool: """Validate the results of notebooks with warnings.""" expected_out = [ "cell_2:3:1: F401 'glob' imported but unused", ] expected = "".join(f"{str(test_nb_path)}:{i}\n" for i in expected_out) return expected == actual
9a505973a63cb04761951e4edf996e2ce27fa09e
44,993
def bubble_sort(items):
    """Sort a list of items in ascending order, in place.

    Stops early on a full pass with no swaps; the (mutated) list is also
    returned.
    """
    length = len(items)
    for _ in range(length):
        made_swap = False
        for idx in range(length - 1):
            if items[idx] > items[idx + 1]:
                items[idx], items[idx + 1] = items[idx + 1], items[idx]
                made_swap = True
        if not made_swap:
            break
    return items
16f392bfea089c146a8a59f6e886283893f0ea51
45,003
def getMinMaxOfRangeList(ranges):
    """Return the overall (min, max) across a list of (low, high) ranges."""
    lows = (r[0] for r in ranges)
    highs = (r[1] for r in ranges)
    return min(lows), max(highs)
b7fe06dab71df72a54873401f2c0955910ef8d7c
45,010
def unique_list(l, preserve_order=True):
    """Return the unique elements of *l* without mutating it.

    With ``preserve_order`` (default) the first-occurrence order is kept;
    otherwise the result order is whatever ``set`` yields. Non-hashable
    elements fall back to a quadratic membership scan.

    >>> unique_list([1, 2, 4, 3, 2, 3, 1, 0])
    [1, 2, 4, 3, 0]
    >>> unique_list([[1], [2], [2], [1], [3]])
    [[1], [2], [3]]

    See Also
    --------
    http://www.peterbe.com/plog/uniqifiers-benchmark
    """
    try:
        if not preserve_order:
            return list(set(l))
        seen = set()
        ordered = []
        for item in l:
            if item not in seen:
                seen.add(item)
                ordered.append(item)
        return ordered
    except TypeError:
        # Non-hashable elements: quadratic scan, first occurrence kept.
        result = []
        for item in l:
            if item not in result:
                result.append(item)
        return result
dbc4c1a16538a6be8c114abb7411622eecb5b98c
45,018
def _get_docks_available(sta): """Given a GBFS station status blob, return the number of docks""" return sta['num_docks_available']
f74eb148af688f86a5d1f1960ce18cabac8d3cfb
45,025
import math


def dist(a, b):
    """Euclidean distance between points *a* and *b* (same dimension)."""
    squared = sum((coord - b[i]) ** 2 for i, coord in enumerate(a))
    return math.sqrt(squared)
fe5f2ad25bc7297e441986c78a950f06966ad0d7
45,031
def __calc_year(entered_year: int, beginning_year: int) -> int: """ Calculates the year as a single digit (0 for first year, 2 for 3rd year) (+1 because it's zero indexed) """ return entered_year - beginning_year + 1
9a471b5d8893d6a848320f494ff5acc51786df3f
45,032
def atom(text):
    """Parse *text* into an int (when integral), a float, or leave it as str."""
    try:
        number = float(text)
    except ValueError:
        return text
    rounded = round(number)
    return rounded if rounded == number else number
f8d3856c7864a1f6a07ad0c9e8a6cd7f2f16ac8b
45,033
def _WrapUnaryOp(op_fn, inner, ctx, item): """Wrapper for unary operator functions. """ return op_fn(inner(ctx, item))
69bd9088211a341a0245005b2ab2ea9f173cd096
45,034
from typing import Any def _convert_sql_format(value: Any) -> str: """ Given a Python value, convert to string representation of the equivalent SQL datatype. :param value: A value, ie: a literal, a variable etc. :return: The string representation of the SQL equivalent. >>> _convert_sql_format(1) "1" >>> _convert_sql_format("John Smith") '"John Smith"' """ if value is None: return "NULL" elif isinstance(value, str): return f'"{value}"' elif isinstance(value, bytes): return '"' + str(value).replace("b'", "")[:-1] + '"' else: return str(value)
7e1728c19fb8698694ac194e60d76b2bddaf9c41
45,036
def _format_digest(digest, scheme, encoding): """Formats the arguments to a string: {scheme[.encoding]}digest.""" if not encoding: return "{%s}%s" % (scheme, digest) return "{%s.%s}%s" % (scheme, encoding, digest)
ed73e06c9ff2a1c3a96b7b0296de553e5b29596d
45,037
def _read_text_file(path): """ Read and return all the contents of the text-file with the given path. It is returned as a single string where all lines are concatenated. """ with open(path, 'rt') as file: # Read a list of strings. lines = file.readlines() # Concatenate to a single string. text = " ".join(lines) return text
0e343f5ee18b277483fb45c5f6e1221b0688c13b
45,043
def get_model_shortcode(model_list):
    """
    Get shortcode for the models, passed in as a list of strings
    """
    known = {
        "tuned_localboosting": "tK",
        "tuned_cfsv2pp": "tC",
        "tuned_climpp": "tD",
        "perpp": "L",
        "multillr": "M",
        "tuned_salient2": "tS",
    }
    # Unknown models fall back to their capitalized first letter.
    codes = [known[m] if m in known else m[0].upper() for m in model_list]
    return "".join(codes)
6cb089ab9e7f83ff89d02448b1700760c7b5fe80
45,044
def preamble_for_label(label):
    """
    Return the preamble for the documentation block for the given label.

    :param label: The label to use as the paragraph title.
    :return: The string that should be preamble of the new section.
    """
    indent = " " * 4
    parts = [
        "",
        f"{indent}@par {label}:",
        "",
        f"{indent}@htmlonly",
        "",
        "",
    ]
    return "\n".join(parts)
876a242a1a3807a1298753389d8619115d498ed8
45,046
def isbn13_checksum (isbn_str):
    """
    Return the checksum over the coding (first 12 digits) of an ISBN-13.

    :Parameters:
        isbn_str : string
            An ISBN-13 without the trailing checksum digit.

    :Returns:
        The checksum character, ``0`` to ``9``.

    :Raises ValueError: if ``isbn_str`` is not exactly 12 characters.

    For example:

    >>> isbn13_checksum ("978094001673")
    '6'
    >>> isbn13_checksum ("979123456789")
    '6'
    """
    ## Preconditions & preparation:
    # Real exception instead of ``assert`` so validation survives ``python -O``.
    if len (isbn_str) != 12:
        raise ValueError ('expecting a 12-digit string')
    ## Main:
    # ISBN-13 weights digits alternately 1, 3, 1, 3, ...
    csum = sum (int (isbn_str[i]) + 3 * int (isbn_str[i+1])
        for i in range (0, 12, 2))
    cdigit = (10 - csum % 10) % 10
    ## Return:
    return str (cdigit)
d9eb185f14e35508d993924ebc37d32442e8329b
45,048
import re
def get_output_path(manifest, regex):
    """
    lists status.nodes in an argo manifest, and grabs intermediary output files
    paths using the node tree represented by status.nodes[*].name.
    Keeps only nodes of type 'Pod' and phase 'succeeded'.

    Parameters
    ----------
    manifest : dict
    regex : str
        regular expression syntax str to filter nodes based on which templates
        were executed within a given node and before that given node in the tree.

    Returns
    ------
    dict:
        path : str, the path to the intermediary output file
        nodeId: the id of the manifest node that outputted this file
    """
    out_zarr_path = None
    nodeId = None
    # Count of nodes matching the filter; more than one match is an error
    # because the caller expects exactly one producing node.
    i = 0
    # status.nodes is a mapping keyed by node id; iterate the keys and
    # look each entry up.
    for node in manifest['status']['nodes']:
        this_node = manifest['status']['nodes'][node]
        # Keep only finished Pods whose tree-path name matches the regex.
        if this_node['type'] == 'Pod' and this_node['phase'] == 'Succeeded' and re.search(regex, this_node['name']):
            i = i + 1
            if i > 1:
                # Ambiguous regex: two distinct nodes matched.
                raise Exception('I could not identify a unique node in the manifest for regex : ' + regex + '\n' + '. Id of the first match : ' + nodeId + '\n' + 'Id of second match : ' + this_node['id'])
            nodeId = this_node['id']
            # Pull the 'out-zarr' output parameter, if the node declared one.
            if 'outputs' in this_node and 'parameters' in this_node['outputs']:
                for param in this_node['outputs']['parameters']:
                    if param['name'] == 'out-zarr':
                        out_zarr_path = param['value']
    # NOTE(review): a node can match without exposing 'out-zarr', in which
    # case 'path' is returned as None while 'nodeId' is set.
    if out_zarr_path is None and nodeId is None:
        raise Exception('I could not identify any node in the manifest')
    return ({'path': out_zarr_path, 'nodeId': nodeId})
13c5cc55e1e3d212436babe0aaf2c4fee33d3f3b
45,054
import logging
def setup_logger(log_path):
    """Create and return a logger writing to both console and a file.

    The console handler emits at a custom level 21 (just above INFO=20),
    with a short timestamp; the file handler at ``log_path`` receives
    INFO and above with a full timestamp.

    :param log_path: path of the log file.
    :return: the configured ``logging.Logger`` handle.

    NOTE(review): calling this more than once attaches duplicate handlers
    to the same module-level logger.
    """
    # Create a custom logger
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)  # numeric 10, named constant for clarity

    # Console handler: custom level 21 sits between INFO (20) and WARNING (30),
    # so plain INFO records are kept off the console.
    c_handler = logging.StreamHandler()
    c_handler.setLevel(21)

    # File handler
    f_handler = logging.FileHandler(log_path)
    f_handler.setLevel(logging.INFO)

    # Create formatters and add it to handlers
    c_format = logging.Formatter('%(asctime)s - %(message)s',
                                 datefmt = '%Y-%m-%d %H:%M')
    f_format = logging.Formatter('%(asctime)s - %(message)s')
    c_handler.setFormatter(c_format)
    f_handler.setFormatter(f_format)

    # Add handlers to the logger
    logger.addHandler(c_handler)
    logger.addHandler(f_handler)
    return logger
6f19656cdd61f46f75d171703cf46381646a5291
45,057
def to_string(pairs):
    """Converts a series of (int, int) tuples to a time series string."""
    formatted = ["%i:%i" % (first, second) for first, second in pairs]
    return " ".join(formatted)
db720677504e254b9fe81d5d9e41004b63304de0
45,058
def get_members(module):
    """Get all public members from a module."""
    return [
        getattr(module, name)
        for name in dir(module)
        if not name.startswith("_")
    ]
1ae8893c7a3b7a32ba01e9ffd1c41651998a84af
45,059
def get_icon_info(family_name, icons, host, asset_url_pattern):
    """Returns a list containing tuples of icon names and their URLs"""
    icon_info = []
    for icon in icons:
        # Skip icons that explicitly exclude this family.
        if family_name in icon['unsupported_families']:
            continue
        name = icon['name']
        path = asset_url_pattern.format(
            family=family_name,
            icon=name,
            version=icon['version'],
            asset='24px.xml',
        )
        icon_info.append((name, 'http://' + host + path))
    return icon_info
e43b0edd9c4421fac338a41fb245946cc6335463
45,062
def get_epsg_srid(srs_name):
    """Parse a given srs name in different possible formats

    WFS 1.1.0 supports (see 9.2, page 36):

    * EPSG:<EPSG code>
    * URI Style 2
    * urn:EPSG:geographicCRS:<epsg code>

    :param srs_name: the Coordinate reference system. Examples:

      * EPSG:<EPSG code>
      * http://www.opengis.net/def/crs/EPSG/0/<EPSG code> (URI Style 1)
      * http://www.opengis.net/gml/srs/epsg.xml#<EPSG code> (URI Style 2)
      * urn:EPSG:geographicCRS:<epsg code>
      * urn:ogc:def:crs:EPSG::4326
      * urn:ogc:def:crs:EPSG:4326

    :return: the authority and the srid; either may be ``None`` for
        unrecognised input (e.g. the three-part URN case below).
    :rtype: tuple
    """
    authority = None
    srid = None
    values = srs_name.split(':')
    if srs_name.find('/def/crs/') != -1:  # URI Style 1
        # e.g. .../def/crs/EPSG/0/4326 -> segment 5 is the authority,
        # the final segment is the code.
        vals = srs_name.split('/')
        authority = vals[5].upper()
        srid = int(vals[-1])
    elif srs_name.find('#') != -1:  # URI Style 2
        # e.g. .../epsg.xml#4326 -> authority from the file stem,
        # code after the '#'.
        vals = srs_name.split('#')
        authority = vals[0].split('/')[-1].split('.')[0].upper()
        srid = int(vals[-1])
    elif len(values) > 2:  # it's a URN style
        if len(values) == 3:  # bogus
            # Three-part URNs are not a recognised form; fall through
            # with authority/srid left as None.
            pass
        else:
            authority = values[4].upper()
        # code is always the last value
        try:
            srid = int(values[-1])
        except Exception:
            # Deliberate best-effort: keep non-numeric codes as strings.
            srid = values[-1]
    elif len(values) == 2:  # it's an authority:code code
        authority = values[0].upper()
        try:
            srid = int(values[1])
        except Exception:
            # Same best-effort fallback as above.
            srid = values[1]
    return authority, srid
e6fa3c65dcfe0a76858118bb6575a45ebcdfe0a3
45,063
import re
def strip_item(s):
    """
    removes all types of brackets and quotation marks from string object
    used to strip name objects
    """
    cleaned = re.sub(r'[()\[\],"]', '', s)  # drop brackets and quotes
    # Normalize: trim surrounding whitespace and lowercase.
    return cleaned.strip().lower()
51b86f29fff31ad4cc035720a709d9ae21f68cdb
45,065
def team_year_key(*args):
    """
    Create a key string to identify a combination of team and year.

    If 2 arguments are passed, it assumes it must construct a key from a
    pair of team and year.  If 1 argument is passed, it assumes it is a
    key and must de-construct it into a team and year pair.
    """
    n_args = len(args)
    if n_args == 2:
        team, year = args
        return team + ':' + str(year)
    elif n_args == 1:
        team, year = args[0].split(':')
        return team, year if year == 'all_time' else int(year)
    raise ValueError("Don't know what to do with %i elements" % n_args)
1f266149a8397db16a9041dcd406b0cb2c31e69e
45,066
def c2m(pos, scale=100):
    """Convert screen/pixel coordinates to map coordinates"""
    # Same arithmetic as before, split into a named intermediate so the
    # fixed pixel-to-unit ratio is visible.
    base = pos * 40.85 * 100 / 2048
    return base / scale + 1
22ea2638e6e5cf26c21c2a689690ceeabc7c03bc
45,068
def build_label(vertex_id, agents):
    """Builds the emoji representation of all agents on a single vertex"""
    emojis = [agent.emojify() for agent in agents]
    return f"{vertex_id}" + "".join(emojis)
25e32e246e8893335958f03d5fbdcd36aae9106f
45,070
def wrap_msg(msg):
    """
    Wrap a log message with '=' marks.
    Necessary for making cluster load background logging distinguishable

    Args:
        msg (str): The log message to wrap

    Returns:
        str: The wrapped log message
    """
    # Cap the ruler at 150 chars so very long messages don't explode it.
    width = min(len(msg), 150)
    marks = "=" * width
    return f"\n{marks}\n{msg}\n{marks}"
b0fd02b0a7a75a05bf4578cb2ca1c186d725da08
45,073
def Tail(filename, n_lines):
    """Return the last several lines of a file.

    If the file does not exist, an empty string is returned.

    Args:
      filename: Name of the file to read.
      n_lines: Number of lines to return.

    Returns:
      String containing the file data.
    """
    try:
        with open(filename, 'r') as f:
            lines = f.readlines()
    except IOError:
        # Missing/unreadable file -> empty result, same as before.
        return ''
    return ''.join(lines[-n_lines:])
a5ec45629cea9c6dae2e7de3d168c7c071fe42d3
45,075
def sec_to_time(sec: int) -> str:
    """Convert a second count to a human readable HH:MM:SS string."""
    hours, remainder = divmod(sec, 3600)
    minutes, seconds = divmod(remainder, 60)
    return '{:02}:{:02}:{:02}'.format(hours, minutes, seconds)
879f93dbbf1bf4388e3824c7c02cffda80f76d98
45,079
import re
from typing import OrderedDict
def expose_extr(hrefs):
    """Takes soup element of one search page of immowelt and returns all
    the exposes as list of str

    Args:
        hrefs (soup element): search page of immowelt

    Returns:
        list: exposes as strings
    """
    found = []
    for href in hrefs:
        matches = re.findall("\/expose\/(\w+)", href)
        if matches:
            found.append(matches[0])
    # De-duplicate while keeping first-occurrence order.
    return list(OrderedDict.fromkeys(found))
79aed3d1c121658766721fad023e4cfd1cf0ca7d
45,085
import sympy
def symbol(name, real=True):
    """
    Create symbolic variables

    Thin wrapper over :func:`sympy.symbols`.

    :param name: symbol names; a space- or comma-separated string yields a
        tuple of symbols, and range syntax such as ``'q_:6'`` expands to
        numbered symbols
    :type name: str
    :param real: assume variable is real, defaults to True
    :type real: bool, optional
    :return: SymPy symbols
    :rtype: sympy

    .. runblock:: pycon

        >>> from spatialmath.base.symbolic import *
        >>> theta = symbol('theta')
        >>> theta
        >>> theta, psi = symbol('theta psi')
        >>> theta
        >>> psi
        >>> q = symbol('q_:6')
        >>> q

    .. note:: In Jupyter symbols are pretty printed.

        - symbols named after greek letters will appear as greek letters
        - underscore means subscript as it does in LaTex, so the symbols
          ``q`` above will be subscripted.

    :seealso: :func:`sympy.symbols`
    """
    return sympy.symbols(name, real=real)
15b3f80eef4da60bd1240e344a922f6107bc42d2
45,091
def bucketed_list(l, bucket_size):
    """Breaks an input list into multiple lists with a certain bucket size.

    Arguments:
      l: A list of items
      bucket_size: The size of buckets to create (values below 1 are
        treated as 1).

    Returns:
      A list of lists, where each entry contains a subset of items from the
      input list.
    """
    step = bucket_size if bucket_size > 1 else 1
    return [l[start:start + step] for start in range(0, len(l), step)]
ba56f5ab2c58c231129cdfc34983965034ef1f3b
45,093
import functools
def assert_indexed(func):
    """
    Decorator to ensure surface has been indexed prior to calling function.

    Raises:
        RuntimeError: if ``self.indexed`` is still ``None`` when the
            wrapped method is invoked.
    """
    @functools.wraps(func)  # keep the wrapped method's name/docstring
    def asserted(self, *args, **kwargs):
        if self.indexed is None:
            # Single concatenated message: the original passed two args to
            # RuntimeError, which produced a tuple instead of a sentence.
            raise RuntimeError("Surface must be indexed before calling " +
                               func.__name__)
        return func(self, *args, **kwargs)
    return asserted
4f39d071224a4870b676fee8a3bfb890a3025091
45,102
def get_credit_card_full_po_lines_from_date(alma_api_client, date):
    """Get a list of full PO line records for credit card purchases
    (acquisition_method = EXCHANGE) from the specified date."""
    full_po_lines = []
    for brief in alma_api_client.get_brief_po_lines("EXCHANGE"):
        # Keep only lines created on the requested date (API appends 'Z')
        # that actually carry a PO line number.
        if brief.get("created_date") != f"{date}Z":
            continue
        number = brief.get("number")
        if number is None:
            continue
        full_po_lines.append(alma_api_client.get_full_po_line(number))
    return full_po_lines
7d2840b28c0ac4ca9e8f0af00997cb5771ed3815
45,103
def pkcs_1_5(b: bytes, size: int) -> int:
    """
    PKCS#1.5 padding.

    Create a block of the form:

    00 || BT || PS || 00 || b

    Here BT is 0x02 (the encryption block type) and PS is a run of 0xff
    bytes sized so the whole block is filled.  (The old docstring said
    "BT is usually 0x01", which describes signature padding, not what
    this code emits.)

    NOTE(review): standard PKCS#1 v1.5 type-2 padding uses *random*
    non-zero PS bytes; the deterministic 0xff fill is kept here so
    existing outputs stay reproducible.

    The length of b must be less than the size of the block minus 3
    (the fixed 00 and BT bytes plus the 00 separator).

    :param b: A buffer of bytes.
    :param size: The block size.
    :return: The padded buffer (as int).
    :raises ValueError: if ``b`` is too long for the block
        (real exception instead of ``assert``, so it survives ``python -O``).
    """
    if len(b) >= size - 3:
        raise ValueError("input too long for PKCS#1.5 block of size %d" % size)
    padded = bytearray((0x00, 0x02))
    padded += bytearray(0xff for _ in range(size - 3 - len(b)))
    padded += bytearray((0x00,))
    padded += b
    return int.from_bytes(padded, byteorder="big")
340aea6cf6f4f9640c4b0ed15f1525caa89de167
45,104
from typing import List
def spatial_to_serial_order(hole_sequence: List[int], holes: List[int]) -> List[int]:
    """
    Converts a temporal sequence of spatial holes into a list of serial
    order positions.

    For each queried hole, look up its (1-based) position within the
    presented sequence.

    Args:
        hole_sequence: ordered list of spatial hole numbers presented in
            the first phase of the task, e.g. [3, 1, 4].
        holes: spatial hole numbers to be enquired about, e.g. [4, 3].

    Returns:
        list of serial order positions (in this example: [3, 1]).
    """
    positions = []
    for hole in holes:
        positions.append(hole_sequence.index(hole) + 1)
    return positions
2f1d474d310ee9c89c710f875612dd97d4153479
45,106
def _find_formatter(formatters): """Returns a formatter that takes x, and applies formatter based on types. Args: formatters: map from type to formatter Returns: function: x -> displayable output """ def formatter(x): for type_, formatter_for_type in formatters.items(): if isinstance(x, type_): return formatter_for_type(x) return x return formatter
00961fe103fd1888b85184245ce19bf025759c46
45,109
def _f2s(number, dec=4): """ Return string representation of ``number``. Returned string is: * without trailing decimal zeros, * with at most ``dec`` decimal places. """ if not isinstance(number, (int, float)): return number return '{{:.{:d}f}}'.format(dec).format(number).rstrip('0').rstrip('.')
20cbf5e5bf26e35b075b2785a7da15260bb92974
45,118
def generate_script(group, entry_point, header, template):
    """Generate the script based on the template.

    :param str group: The entry-point group name, e.g., "console_scripts".
    :param entry_point: The entry point (``attrs``/``module_name``) to render.
    :param str header: The first line of the script, e.g., "!#/usr/bin/env python".
    :param str template: The script template.
    :returns: The templated script content
    :rtype: str
    :raises ValueError: if the entry point has no attrs or more than two.
    """
    attrs = entry_point.attrs
    if not attrs or len(attrs) > 2:
        raise ValueError("Script targets must be of the form "
                         "'func' or 'Class.class_method'.")
    script_text = template % {
        'group': group,
        'module_name': entry_point.module_name,
        'import_target': attrs[0],
        'invoke_target': '.'.join(attrs),
    }
    return header + script_text
847871bc7344dcfda994a9e985e9a541c96fff81
45,123
def determine_band_channel(kal_out):
    """Return band, channel, target frequency from kal output.

    Scans the output once; if several lines match, the last one wins
    (matching the original single-pass behavior).  Returns
    ``(None, None, None)`` when no line matches — the old
    ``while band is None`` wrapper looped forever in that case.

    :param kal_out: raw ``kal`` process output (bytes).
    :return: (band, channel, tgt_freq) tuple of strings, or Nones.
    """
    band = None
    channel = None
    tgt_freq = None
    for line in kal_out.splitlines():
        text = line.decode("utf-8")
        # Expected shape: "Using <band> channel <n> (<freq>)"
        if "Using " in text and " channel " in text:
            parts = text.split()
            band = parts[1]
            channel = parts[3]
            tgt_freq = parts[4].replace("(", "").replace(")", "")
    return (band, channel, tgt_freq)
abb38ee2b889283e13ef5597a145dd3c503ba638
45,124
import random
import string
def random_string(length: int = 8) -> str:
    """
    Returns a random string (just letters) of a specified length

    :param length: the length
    :return: the random string
    """
    letters = string.ascii_letters
    chosen = [random.choice(letters) for _ in range(length)]
    return "".join(chosen)
65f80033cd0265205f1369dbdef52c71a2035559
45,126
def f_rise(t_half):
    """Davenport+ 2014 Eqn. 1 — quartic polynomial in ``t_half``.

    Evaluated term by term in the published coefficient order so the
    floating-point result matches the written-out expression exactly.
    """
    coefficients = (1.941, -0.175, -2.246, -1.125)
    result = 1.0
    for power, coeff in enumerate(coefficients, start=1):
        result += coeff * t_half ** power
    return result
633696c351dc3c3b8ff5a9947ea6b14a102f1b1f
45,127