content (string, 39–14.9k chars)
sha1 (string, 40 chars)
id (int64, 0–710k)
def rev(arr):
    """Reverse array-like if not None"""
    if arr is None:
        return None
    else:
        return arr[::-1]
62cb3acd57ffa19c903d819f2599bec891eae69e
58,858
def get_dpv(statvar: dict, config: dict) -> list:
    """A function that goes through the statvar dict and the config and
    returns a list of properties to ignore when generating the dcid.

    Args:
        statvar: A dictionary of prop:values of the statvar
        config: A dict which maps constraint props to the statvar based on
            values in the CSV. See scripts/fbi/hate_crime/config.json for an
            example. The 'dpv' key is used to identify dependent properties.

    Returns:
        A list of properties to ignore when generating the dcid
    """
    ignore_props = []
    for spec in config['dpv']:
        if spec['cprop'] in statvar:
            dpv_prop = spec['dpv']['prop']
            dpv_val = spec['dpv']['val']
            if dpv_val == statvar.get(dpv_prop, None):
                ignore_props.append(dpv_prop)
    return ignore_props
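A quick usage sketch with hypothetical data (the real config schema lives in scripts/fbi/hate_crime/config.json), assuming the function above is in scope:

# Hypothetical example: 'victimType' is dependent on 'biasMotivation'.
config = {'dpv': [{'cprop': 'biasMotivation',
                   'dpv': {'prop': 'victimType', 'val': 'Person'}}]}
statvar = {'biasMotivation': 'race', 'victimType': 'Person'}
print(get_dpv(statvar, config))  # ['victimType']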
a05b41a5452255f49574311526a69a191fa66c18
58,861
def compile_dir(dbt_project_file):
    """Return a path to the directory with compiled files."""
    d = dbt_project_file.parent
    return d / "target" / "compiled" / "test" / "models"
b9f4f561c2e3d3da6522b2cd02534a34684b587d
58,862
def encode_label(label: str) -> int:
    """
    Encodes a label into a number

    If there is no label, the number is 0. Otherwise, the number is the index
    in the sequence:
    ```
    A1, B1, C1, D1, E1, A2, B2, C2, ...
    ```
    A, B, C, D, E are interpreted as A1, B1, C1, D1, E1, respectively.
    """
    if not label:
        return 0
    # part after letter if it has a number, otherwise 1
    index = int(label[1:]) if len(label) > 1 else 1
    # A = 1, B = 2, ... E = 5
    offset = ord(label[0]) - ord("A") + 1
    # compute label number
    return (index - 1) * 5 + offset
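A few spot checks of the encoding, assuming encode_label above:

assert encode_label("") == 0
assert encode_label("A") == 1    # bare letter is treated as A1
assert encode_label("E1") == 5
assert encode_label("A2") == 6   # second block starts after E1
assert encode_label("C3") == 13  # (3 - 1) * 5 + 3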
c1d38dbfa240d257bd2610c9d1de6652456b4a02
58,866
import torch


def quaternion_to_rotmat_jac(q):
    """
    Converts batched quaternions q of shape (batch, 4) to the jacobian of the
    corresponding rotation matrix w.r.t. q of shape (batch, 9, 4)
    """
    qr = q[:, 0:1]
    qi = q[:, 1:2]
    qj = q[:, 2:3]
    qk = q[:, 3:4]
    z = torch.zeros_like(qk)
    r1 = 2. * torch.cat((z, z, -2. * qj, -2. * qk), dim=1)
    r2 = 2. * torch.cat((-qk, qj, qi, -qr), dim=1)
    r3 = 2. * torch.cat((qj, qk, qr, qi), dim=1)
    r4 = 2. * torch.cat((qk, qj, qi, qr), dim=1)
    r5 = 2. * torch.cat((z, -2 * qi, z, -2 * qk), dim=1)
    r6 = 2. * torch.cat((-qi, -qr, qk, qj), dim=1)
    r7 = 2. * torch.cat((-qj, qk, -qr, qi), dim=1)
    r8 = 2. * torch.cat((qi, qr, qk, qj), dim=1)
    r9 = 2. * torch.cat((z, -2 * qi, -2 * qj, z), dim=1)
    return torch.cat((r1.unsqueeze(1), r2.unsqueeze(1), r3.unsqueeze(1),
                      r4.unsqueeze(1), r5.unsqueeze(1), r6.unsqueeze(1),
                      r7.unsqueeze(1), r8.unsqueeze(1), r9.unsqueeze(1)),
                     dim=1)
0b97c5232b4b11f1feabf27df39e38db135443d7
58,868
def _parameter_present(query_string, key, value=None):
    """
    Check whether the given key/value pair is present in the query string.
    If the value is blank, it simply checks for the presence of the key
    """
    already_there = False
    if query_string:
        for p in query_string.split('&'):
            k, v = p.split('=')
            if str(k) == str(key):
                if value:
                    if str(v) == str(value):
                        already_there = True
                else:
                    already_there = True
    return already_there
81f92197ba3caefbe9957102c8d29bfe48a255c8
58,872
import torch


def weighted_dice_loss(
    prediction,
    target_seg,
    weighted_val: float = 1.0,
    reduction: str = "mean",
    eps: float = 1e-8,
):
    """
    Weighted version of Dice Loss

    Args:
        prediction: prediction
        target_seg: segmentation target
        weighted_val: values of k positives,
        reduction: 'none' | 'mean' | 'sum'
                   'none': No reduction will be applied to the output.
                   'mean': The output will be averaged.
                   'sum' : The output will be summed.
        eps: the minimum eps,
    """
    target_seg_fg = target_seg == 1
    target_seg_bg = target_seg == 0
    target_seg = torch.stack([target_seg_bg, target_seg_fg], dim=1).float()

    n, _, h, w = target_seg.shape
    prediction = prediction.reshape(-1, h, w)
    target_seg = target_seg.reshape(-1, h, w)
    prediction = torch.sigmoid(prediction)
    prediction = prediction.reshape(-1, h * w)
    target_seg = target_seg.reshape(-1, h * w)

    # calculate dice loss
    loss_part = (prediction ** 2).sum(dim=-1) + (target_seg ** 2).sum(dim=-1)
    loss = 1 - 2 * (target_seg * prediction).sum(dim=-1) / torch.clamp(loss_part, min=eps)
    # normalize the loss
    loss = loss * weighted_val

    if reduction == "sum":
        loss = loss.sum() / n
    elif reduction == "mean":
        loss = loss.mean()
    return loss
d7163342c7280b60287307b4445eded624b1b01b
58,875
import json


def parse_file(filepath: str):
    """Loads a JSON file and returns a Python object.

    Args:
        filepath (str): path to json file

    Returns:
        a python object
    """
    with open(filepath) as f:
        data = json.load(f)
    return data
c56f3a8870a3583fd895b01dc3ca142263600dcb
58,877
def onOffFromBool(b):
    """Return 'ON' if `b` is `True` and 'OFF' if `b` is `False`

    Parameters
    ----------
    b : boolean
        The value to evaluate

    Return
    ------
    'ON' for `True`, 'OFF' for `False`
    """
    return "ON" if b else "OFF"
2c031a39bc86a7d0827ab0ff9f078d2fca0d5c60
58,878
from datetime import datetime


def parse_yyyy_mm_dd(d):
    """
    :param d: str
        Date in the form yyyy-mm-dd to parse
    :return: datetime
        Date parsed
    """
    d = str(d).strip()  # discard gibberish
    return datetime.strptime(d, "%Y-%m-%d")
9c67865960cebe87269362d49668e54950607b6a
58,886
import re


def has_diacritics(string: str) -> bool:
    """
    Check if the string contains diacritics.

    :param string: String to be checked
    :type string: str
    :return:
    :rtype: bool
    """
    return bool(re.search("[゙゚゛゜]", string))
39922e93d4eca923c0f384e6015c6e662330e8db
58,887
import collections


def calc_topic_uniqueness(top_words_idx_all_topics):
    r"""
    This function calculates topic uniqueness scores for a given list of topics.

    For each topic, the uniqueness is calculated as:

        (\sum_{i=1}^n 1/cnt(i)) / n

    where n is the number of top words in the topic and cnt(i) is the counter
    for the number of times the word appears in the top words of all the topics.

    :param top_words_idx_all_topics: a list, each element is a list of top word indices for a topic
    :return: a dict, key is topic_id (starting from 0), value is topic_uniqueness score
    """
    n_topics = len(top_words_idx_all_topics)

    # build word_cnt_dict: number of times the word appears in top words
    word_cnt_dict = collections.Counter()
    for i in range(n_topics):
        word_cnt_dict.update(top_words_idx_all_topics[i])

    uniqueness_dict = dict()
    for i in range(n_topics):
        cnt_inv_sum = 0.0
        for ind in top_words_idx_all_topics[i]:
            cnt_inv_sum += 1.0 / word_cnt_dict[ind]
        uniqueness_dict[i] = cnt_inv_sum / len(top_words_idx_all_topics[i])

    return uniqueness_dict
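A small worked example, assuming the function above: two topics share word index 7, so it contributes 1/2 while unique words contribute 1 each.

topics = [[7, 1, 2], [7, 3, 4]]
scores = calc_topic_uniqueness(topics)
# each topic: (0.5 + 1 + 1) / 3 = 0.8333...
print(scores)  # {0: 0.8333..., 1: 0.8333...}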
577b68e57a99734f2fb4fcb537cba42ae63773a9
58,889
def cmmdc(x, y):
    """Computes CMMDC (the greatest common divisor) for two numbers."""
    if y == 0:
        return x
    return cmmdc(y, x % y)
f1d731ca0e1942e33b4fdf2d792e16c8ccddd13c
58,891
import io


def bufferize(f, buf):
    """Bufferize a fileobject using buf."""
    if buf is None:
        return f
    else:
        if (buf.__name__ == io.BufferedWriter.__name__ or
                buf.__name__ == io.BufferedReader.__name__):
            return buf(f, buffer_size=10 * 1024 ** 2)
        return buf(f)
cd97a221e70ebf5611fa5091dc0183dff43ca663
58,894
def select_features_by_type(features: list, feature_type: str) -> list:
    """
    Selects features from a list of BitomeFeatures that have the provided
    type as their .type attribute

    :param List[BitomeFeature] features: the list of BitomeFeature objects
        (or sub-objects thereof) from which to extract features of a given type
    :param str feature_type: the type of feature to select
    :return list selected_features: the features selected from the list by type
    """
    return [feature for feature in features if feature.type == feature_type]
fa6aa39f35ccbc152c54fb7b201181b361f75573
58,896
import json


def read_json(filepath):
    """Read a JSON file"""
    try:
        with open(filepath, "r") as file:
            return json.load(file)
    except IOError:
        exit()
e948cf4a68c93dfd58267ce012117178f5622574
58,897
def drop_labels(events, min_pct=.05):
    """
    Snippet 3.8 page 54

    This function recursively eliminates rare observations.

    :param events: (data frame) events
    :param min_pct: (float) a fraction used to decide if the observation occurs less than that fraction
    :return: (data frame) of events
    """
    # Apply weights, drop labels with insufficient examples
    while True:
        df0 = events['bin'].value_counts(normalize=True)
        if df0.min() > min_pct or df0.shape[0] < 3:
            break
        # idxmin() returns the label of the rarest bin; in modern pandas,
        # Series.argmin() returns a positional index instead
        print('dropped label: ', df0.idxmin(), df0.min())
        events = events[events['bin'] != df0.idxmin()]
    return events
49a93a0ad4ed81bd86733f275cd199f0f56d9f34
58,898
def parse_date(yyyymmdd):
    """Convert yyyymmdd string to tuple (yyyy, mm, dd)"""
    return (yyyymmdd[:-4], yyyymmdd[-4:-2], yyyymmdd[-2:])
b9ad26ac7bbe1218e8c880738d7e9e4de540cf3d
58,899
import operator


def sortModules(modules):
    """Sorts a given list of module objects by their stage"""
    return sorted(modules, key=operator.attrgetter("stage"))
fc1fa5871f3ab4bb2c02b45ed5ee6af389f476a0
58,901
def get_shape(coordinates):
    """
    Return shape of coordinates

    Parameters
    ----------
    coordinates : :class:`xarray.DataArrayCoordinates`
        Coordinates located on a regular grid.

    Return
    ------
    shape : tuple
        Tuple containing the shape of the coordinates
    """
    return tuple(coordinates[i].size for i in coordinates.dims)
a3e261b401c54951ff2ccf78050f50faf0bbe772
58,907
def CreateIndexMessage(messages, index):
    """Creates a message for the given index.

    Args:
        messages: The Cloud Firestore messages module.
        index: The index ArgDict.

    Returns:
        GoogleFirestoreAdminV1beta2Index
    """
    # Currently all indexes are COLLECTION-scoped
    query_scope = (messages.GoogleFirestoreAdminV1beta2Index.
                   QueryScopeValueValuesEnum.COLLECTION)
    # Since this is a single-field index there will only be 1 field
    index_fields = [messages.GoogleFirestoreAdminV1beta2IndexField(
        arrayConfig=index.get('array-config'),
        order=index.get('order'))]
    return messages.GoogleFirestoreAdminV1beta2Index(
        queryScope=query_scope, fields=index_fields)
561565404bf0e7eba637b3517391ece1bd8764e9
58,911
def has_block_sibling(item):
    """
    Test if passed node has block-level sibling element
    @type item: ZenNode
    @return: bool
    """
    return item.parent and item.parent.has_block_children()
f06849f71c6a99e8f4004a038ae424106845c5ff
58,912
def a2p(sep, m1, m2):
    """
    Computes the orbital period given m1 (Msun), m2 (Msun) and the
    separation sep (Rsun). Returns the period in years and in days.
    """
    yeardy = 365.24
    AURsun = 214.95
    p = ((sep / AURsun) ** 3. / (m1 + m2)) ** (0.5)
    return p, p * yeardy
f01ed1038645f921aed0bd73d29a18896a829876
58,916
from typing import List


def at_least_one(annotations: List[bool]):
    """Returns True if at least one of the annotations is True.

    Args:
        - annotations: A list of annotations.

    Returns:
        - True if at least one of the annotations is True.
    """
    return any(annotations)
d5579de2d289abcc3718da22282216139678529b
58,917
def is_n_pandigital(n):
    """Tests if n is 1-len(n) pandigital."""
    if len(str(n)) > 9:
        return False
    if len(str(n)) != len(set(str(n))):
        return False
    m = len(str(n))
    digits = list(range(1, m + 1))
    filtered = [d for d in str(n) if int(d) in digits]
    return len(str(n)) == len(filtered)
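Spot checks, assuming the function above:

assert is_n_pandigital(2143) is True    # uses each of 1-4 exactly once
assert is_n_pandigital(1233) is False   # repeated digit
assert is_n_pandigital(135) is False    # 5 is outside the range 1-3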
50b35365341b9ed5a31176942057e02a843f485b
58,918
def isWallTri(sheet_mesh, is_wall_vtx):
    """
    Determine which triangles are part of a wall (triangles made of three
    wall vertices).
    """
    return is_wall_vtx[sheet_mesh.triangles()].all(axis=1)
524f3220fc8e6db8163e64ee6a41058441a2817d
58,920
import ast


def get_dependencies_from_ast(tree):
    """Return list with imported modules found in tree."""
    imports = []
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for name in node.names:
                imports.append(name.name)
        if isinstance(node, ast.ImportFrom):
            imports.append(node.module)
    return imports
ce29e6ca108550d17566aac3bc09703b296ae424
58,924
import re


def normalize_whitespace(text):
    """
    This function normalizes whitespace, removing duplicates.
    """
    corrected = str(text)
    corrected = re.sub(r'\s', ' ', corrected)
    corrected = re.sub(r"( )\1+", r"\1", corrected)
    return corrected.strip(" ")
8add22789cde45893649c4ddd439534af48e8adb
58,929
def after_request(response):
    """
    Set headers X-Content-Type-Options=nosniff and X-Frame-Options=deny
    on the response.
    """
    response.headers['X-Content-Type-Options'] = 'nosniff'
    response.headers['X-Frame-Options'] = 'deny'
    return response
a16a0a6f8dc7b639a003fa9a49681093958b7a85
58,931
import re


def first_match(pattern, string):
    """Return the first match of <pattern>"""
    match = re.search(pattern, string)
    if not match:
        return None
    return match.group()
cd26f4358811b536383a1ec10ae678a69747a22a
58,933
def retrieval_precision(gold, predicted):
    """
    Compute retrieval precision on the given gold set and predicted set.
    Note that it doesn't take into account the order or repeating elements.

    :param gold: the set of gold retrieved elements
    :param predicted: the set of predicted elements
    :return: precision value

    >>> retrieval_precision({1,2,3}, {2})
    1.0
    >>> retrieval_precision({2}, {1,2,3})
    0.3333333333333333
    >>> retrieval_precision({2,3,4,8}, {1,6,3})
    0.3333333333333333
    """
    gold = set(gold)
    predicted = set(predicted)
    tp = len(gold & predicted)
    fp_tp = len(predicted)
    return tp / fp_tp
e901fd82899ae5bfad71ec2df352dcbca832ea6c
58,938
import hashlib


def get_fake_gid(grpname):
    """Use if we have strict_ldap_groups off, to assign GIDs to names with
    no matching Unix GID.  We would like them to be consistent, so we will
    use a hash of the group name, modulo some large-ish constant, added to
    another large-ish constant.

    There is a chance of collision, but it doesn't really matter.

    We do need to keep the no-GID groups around, though, because we might
    be using them to make options form or quota decisions (if we know we
    don't, we should turn on strict_ldap_groups).
    """
    grpbase = 3e7
    grprange = 1e7
    grphash = hashlib.sha256(grpname.encode("utf-8")).hexdigest()
    grpint = int(grphash, 16)
    igrp = int(grpbase + (grpint % grprange))
    return igrp
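A quick demonstration of the determinism and range, assuming the function above and a made-up group name:

gid = get_fake_gid("researchers")           # hypothetical group name
assert gid == get_fake_gid("researchers")   # same name, same GID
assert 30_000_000 <= gid < 40_000_000       # grpbase .. grpbase + grprange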
23e2d28c19625b8cc44eda95d7658158af3e8b35
58,941
def FindClashes(chains_fixed, re_chain_movil):
    """Searches for clashes between pairs of chains.

    Arguments:
        chains_fixed -- list of residues
        re_chain_movil -- list of chains

    Returns:
        True if it finds one clash between the residues of chains_fixed and
        any residue belonging to the chains of re_chain_movil
        False if it does not find any clash
    """
    aa = ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLU', 'GLN', 'GLY', 'HIS',
          'ILE', 'LEU', 'LYS', 'MET', 'PHE', 'PRO', 'SER', 'THR', 'TRP',
          'TYR', 'VAL']
    for chain in chains_fixed:
        re_chain_fixed = [i for i in chain]
        for r1 in re_chain_fixed:
            for r2 in re_chain_movil:
                if r1.get_resname() in aa and r2.get_resname() in aa:
                    distance = r1['CA'] - r2['CA']
                    if distance < 2:
                        return True
                else:
                    for atom1 in r1.get_atoms():
                        for atom2 in r2.get_atoms():
                            distance = atom1 - atom2
                            if distance < 1:
                                return True
    return False
9945322f9b72c22fcfbee04c8d46cd193f66256d
58,942
def sanitize_column_name(i):
    """Sanitize column name."""
    return i.replace('-', '_')
a187250c1a792fda218172c5067e636e6f8b5a49
58,947
def url(request):
    """
    Return URL specified in command line.

    * if `--url` parameter is set use it
    * raise error if url is not set
    """
    if request.config.option.url:
        return request.config.option.url
    raise ValueError(
        "Please provide URL in format https://site-domain/site-path to test"
    )
42412540d180a0a51e818a603738e96253deff23
58,950
def get_file_str(path, num_files, labelled=False, valid_split=None,
                 split_count_thre=None, subset_pct=100):
    """
    Create unique file name for processed data from the number of files,
    directory name, validation split type, and subset percentage
    """
    # grab the directory name as part of the names
    dir_name = path.split('/')[-1] if len(path.split('/')[-1]) > 0 else path.split('/')[-2]

    label_str = 'labelled' if labelled else ''
    split_thre_str = 'thre_{}'.format(split_count_thre) if split_count_thre else ''
    dir_str = 'doc_{}_{}_{}_{}'.format(label_str, dir_name, num_files, split_thre_str)

    if valid_split:
        split_str = '_split_{}'.format(valid_split * 100)
    else:
        split_str = ''

    if subset_pct != 100:
        subset_str = '_subset_{}'.format(subset_pct)
    else:
        subset_str = ''

    file_str = dir_str + split_str + subset_str
    return file_str
dd005f586fd85a7d1bf4d82243abf597f4f2c637
58,953
def bullet(text):
    """Turns raw text into a markdown bullet"""
    return '- ' + text
cdc820de52e6021873d6a1147e9cb83d10c075da
58,959
def blank_line(ascii_file):
    """Returns True if the next line contains only white space;
    False otherwise (including comments)."""
    file_pos = ascii_file.tell()
    line = ascii_file.readline()
    ascii_file.seek(file_pos)
    if len(line) == 0:
        return True  # end of file
    words = line.split()
    return len(words) == 0
b15a727ce5a551135836e3ef179dff033b1bfc75
58,966
from typing import Hashable


def set_child_key(parent_dict, parent_key, key, value):
    """Take dictionary of url components, and update 'key' with 'value'.

    args:
        parent_dict (dict): dictionary to add parent_key to
        parent_key (str|int|other hashable type): parent key to add to the parent_dict
        key (str|int|other hashable type): key to put under parent_key
        value (any): value to give that key

    returns:
        parent_dict (dict): The updated parent dictionary
    """
    if isinstance(parent_dict, dict) \
            and isinstance(parent_key, Hashable) \
            and isinstance(key, Hashable):
        if parent_key not in parent_dict:
            parent_dict[parent_key] = {}
        parent_dict[parent_key][key] = value
    return parent_dict
1e97e3b792f8b9984bde0ceee98054ab1abe79de
58,968
import torch


def approx_ndcg(scores, relevances, alpha=10., mask=None):
    """Computes differentiable estimate of NDCG of scores as following.

    Uses the approximation framework from Qin et. al., 2008

        IDCG = sum_i (exp(rel[i]) - 1) / ln(i + 1)
        DCG = sum_i (exp(rel[i]) - 1) / ln(pos(score, i) + 1)
        pos(score, i) = 1 + sum_{j != i} exp(-alpha s_{i, j}) /
                                         (1 + exp(-alpha s_{i, j}))
            (differentiable approximate position function)
        s_{i, j} = scores[i] - scores[j]
        NDCG loss = -DCG / IDCG

    Args:
        scores (torch.FloatTensor): tensor of shape (batch_size, num_elems).
        relevances (torch.FloatTensor): tensor of same shape as scores (rel).
        alpha (float): value to use in the approximate position function.
            The approximation becomes exact as alpha tends toward inf.
        mask (torch.ByteTensor | None): tensor of same shape as scores.
            Masks out elements at index [i][j] if mask[i][j] = 0.
            Defaults to no masking.

    Returns:
        ndcg (torch.FloatTensor): tensor of shape (batch_size).
    """
    def approx_positions(scores, alpha=10.):
        # s_{i, j} (batch_size, num_elems)
        diff = (scores.unsqueeze(-1).expand(-1, -1, scores.shape[1]) -
                scores.unsqueeze(1).expand(-1, scores.shape[1], -1))
        # Add 0.5 instead of 1, because s_{1, i} = 0.5 is included
        return 0.5 + torch.sigmoid(alpha * diff).sum(1)

    if mask is None:
        mask = torch.ones_like(scores)

    # +1 because indexing starts at 1 in IDCG
    idcg = torch.expm1(relevances) * mask.float() / torch.log1p(
        torch.arange(scores.shape[-1]).float() + 1)
    pos = approx_positions(scores, alpha)
    dcg = torch.expm1(relevances) * mask.float() / torch.log1p(pos)
    return -dcg.sum(-1) / (idcg.sum(-1) + 1e-8)
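A minimal smoke test, assuming the function above and that PyTorch is available; when scores already match the ideal relevance order, the loss approaches -1 (perfect NDCG):

import torch

scores = torch.tensor([[3.0, 2.0, 1.0]])      # descending scores
relevances = torch.tensor([[2.0, 1.0, 0.0]])  # matching relevance order
loss = approx_ndcg(scores, relevances, alpha=100.)
print(loss)  # close to -1.0 when the ranking is ideal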
73d4a8660577f18f60f5659037bc711566ab2635
58,969
def ternary_search(left, right, key, arr):
    """
    Find the given value (key) in an array sorted in ascending order.
    Returns the index of the value if found, and -1 otherwise.

    If the index is not in the range left..right (ie. left <= index < right)
    returns -1.
    """
    while right >= left:
        mid1 = left + (right - left) // 3
        mid2 = right - (right - left) // 3

        if key == arr[mid1]:
            return mid1
        if key == arr[mid2]:  # compare against the element, not the index
            return mid2

        if key < arr[mid1]:
            # key lies between l and mid1
            right = mid1 - 1
        elif key > arr[mid2]:
            # key lies between mid2 and r
            left = mid2 + 1
        else:
            # key lies between mid1 and mid2
            left = mid1 + 1
            right = mid2 - 1

    # key not found
    return -1
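Quick checks after the fix, assuming the function above:

data = [1, 3, 5, 7, 9, 11]
assert ternary_search(0, len(data) - 1, 7, data) == 3
assert ternary_search(0, len(data) - 1, 4, data) == -1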
93414ea0814ed226e8d7c4651fba5adf4c4030c8
58,970
def is_multi_geometry(geom):
    """
    Whether the shapely geometry is a Multi or Collection type.
    """
    return 'Multi' in geom.geom_type or 'Collection' in geom.geom_type
002d336afb3075a02ac0b259c2f5668976bf02e8
58,973
import re


def dropdot(sentence):
    """Drop the period after a sentence."""
    return re.sub("[.]$", "", sentence)
541269c494902a42522893d018879345bb71eedc
58,974
from typing import Any


def none_or_blank_string(x: Any) -> bool:
    """
    Is ``x`` either ``None`` or a string that is empty or contains nothing
    but whitespace?
    """
    if x is None:
        return True
    elif isinstance(x, str) and not x.strip():
        return True
    else:
        return False
2a3cf76ccc31514edd2aec4393a40725f103ad68
58,977
from datetime import datetime


def change_datetime_format(the_datetime):
    """Change the format of the datetime value to a true python datetime value."""
    year = int(the_datetime[:4])
    month = int(the_datetime[5:7])
    day = int(the_datetime[8:10])
    try:
        hour = int(the_datetime[11:13])
        minutes = int(the_datetime[14:16])
        seconds = int(the_datetime[17:19])
    except ValueError:
        hour = 9
        minutes = 0
        seconds = 0
    the_datetime = datetime(year, month, day, hour, minutes, seconds)
    return the_datetime
ce2fb337c012ea91ca5a336d7647a282b0911762
58,982
def move_item_to_list(list_of_lists, target_list_idx):
    """Takes a list of lists and moves one item to the list specified from
    the next list. This function works in-place upon the list of lists.

    Args:
        list_of_lists (list): A list of lists.
        target_list_idx (int): Index of the list that will have an item
            moved to it.

    Returns:
        None: The list is modified in place.
    """
    # Check to see if the next list exists
    if target_list_idx + 2 > len(list_of_lists):
        raise IndexError("No list to move an item from exists.")

    # Add an element from the next group to the group specified in the arguments
    list_of_lists[target_list_idx].append(
        list_of_lists[target_list_idx + 1].pop(0))

    # Check to see if the above operation created an empty group. If so then
    # remove it.
    if len(list_of_lists[target_list_idx + 1]) == 0:
        del list_of_lists[target_list_idx + 1]

    return None
d48378efaac77c207ace8d97a79b2917aa48886d
58,983
def peal_speed_to_blow_interval(peal_minutes: float, num_bells: int) -> float:
    """Calculate the blow interval from the peal speed, assuming a peal of 5040 changes"""
    peal_speed_seconds = peal_minutes * 60
    seconds_per_whole_pull = peal_speed_seconds / 2520  # 2520 whole pulls = 5040 rows
    return seconds_per_whole_pull / (num_bells * 2 + 1)
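A worked example, assuming the function above: a 3-hour peal on 8 bells gives 180 * 60 = 10800 s, 10800 / 2520 ≈ 4.29 s per whole pull, and 4.29 / 17 ≈ 0.25 s per blow.

interval = peal_speed_to_blow_interval(180, 8)
print(round(interval, 3))  # ~0.252 seconds between blows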
4127b57089f8f348cfeea46c763a2b7bbb089684
58,988
def validate_plurals(plurals):
    """Removes trailing new lines from the values"""
    validated = {}
    for key, value in plurals.items():
        validated[key] = [plural.rstrip("\n\r") for plural in value]
    return validated
f33fdecfb5eb8761d29b0e57f5c141ceedee0133
58,992
def _override_tlim(tlim_base, tlim_override):
    """Override entries of tlim_base with the non-None entries of tlim_override."""
    return tuple([override if override is not None else base
                  for base, override in zip(tlim_base, tlim_override)])
a8d390c0567a49b4c7b932767285800196275d78
58,993
import requests


def create_new_corridor_report(
    access_token, start_date, end_date, corridors, granularity, map_version
):
    """Create new INRIX Roadway Analytics report for specified corridor(s).

    Args:
        access_token (str): Get from separate endpoint
        start_date (str): "YYYY-MM-DD"
        end_date (str): "YYYY-MM-DD"
        corridors (list of dict): Each dict is a corridor definition with
            the format:
            {
                "name" : <corridor name (string)>,
                "direction" : <corridor direction (single char from N, S, W, E)>,
                "xdSegIds" : <xd segment ids (list of int)>
            }
        granularity (int): 1, 5, 15, or 60
        map_version (str): Version of the Inrix XD map segmentation
            "1902", "2001", etc

    Returns:
        The ID of the newly created report

    More info:
        See Step 2 of the INRIX Roadway Analytics Data Downloader API
        documentation
    """
    endpoint = "https://roadway-analytics-api.inrix.com/v1/data-downloader"
    auth_header = {"Authorization": f"Bearer {access_token}"}

    # this report definition is similar to reportContents.json found in the
    # zip file of any downloaded report
    report_def = {
        "unit": "IMPERIAL",
        "fields": [
            "LOCAL_DATE_TIME",
            "XDSEGID",
            "UTC_DATE_TIME",
            "SPEED",
            "NAS_SPEED",
            "REF_SPEED",
            "TRAVEL_TIME",
            "CVALUE",
            "SCORE",
            "CORRIDOR_REGION_NAME",
            "CLOSURE"
        ],
        "corridors": corridors,
        "timezone": "America/Los_Angeles",
        "dateRanges": [{
            "start": start_date,
            "end": end_date,
            "daysOfWeek": [1, 2, 3, 4, 5, 6, 7]
        }],
        "mapVersion": str(map_version),
        "reportType": "DATA_DOWNLOAD",
        "granularity": granularity,
        "emailAddresses": [],
        "includeClosures": True
    }

    r = requests.post(endpoint, json=report_def, headers=auth_header)
    r.raise_for_status()
    return r.json()["reportId"]
40109f659e136c65233b1dccea5f29fb0ca4ef05
59,000
def unwrap(text):
    """Unwrap a hard-wrapped paragraph of text."""
    return ' '.join(text.splitlines())
ad84e184524f1f77a6ce3b490d0471f916199cac
59,001
def _nested(submatch, match):
    """Check whether submatch is nested in match"""
    ids_submatch = set((key, tok.id) for key, tok in list(submatch.items()))
    id_match = set((key, tok.id) for key, tok in list(match.items()))
    return set.issubset(ids_submatch, id_match)
f7e557c6728b381e8904d16e5dc6ad2ed5d95b25
59,008
def strip_metacols(header, cols=['Name', 'Rank', 'Lineage']):
    """Extract ordered metadata columns from the right end of a table header.

    Parameters
    ----------
    header : list
        Table header.
    cols : list, optional
        Candidate columns in order.

    Returns
    -------
    list
        Table header with metadata columns stripped.
    list
        Extracted metadata columns.

    Notes
    -----
    Metadata columns can be whole or a subset of the provided candidates,
    but they must appear in the given order at the right end of the table
    header. Unordered and duplicated columns as well as columns mixed within
    samples will not be extracted.
    """
    res = []
    for col in reversed(header):
        try:
            cols = cols[slice(cols.index(col))]
        except ValueError:
            break
        else:
            res.append(col)
    return header[:-len(res) or None], res[::-1]
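An example of the right-end extraction, assuming the function above:

header = ['S1', 'S2', 'Name', 'Rank', 'Lineage']
print(strip_metacols(header))
# (['S1', 'S2'], ['Name', 'Rank', 'Lineage'])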
37811f544de4561e72824d719c87717986eca1c8
59,009
from typing import Dict


def exclude_none_values(dictionary: Dict) -> Dict:
    """
    Create a new dictionary, removing the keys whose value is None.

    Args:
        dictionary: Source dict

    Examples:
        >>> exclude_none_values({'a': None, 'b': 1})
        {'b': 1}
    """
    return {k: v for k, v in dictionary.items() if v is not None}
0de5d42a578bf2418b79c5813f477a4dfb7f9e02
59,011
def resolved_name(pkg):
    """
    Return the resolved name for the given package
    """
    return "%s %s" % (pkg.name, pkg.arch)
b7636d227f5f311494b1dd6de165ee0dac505f0d
59,012
from typing import Callable
from typing import Iterable
from typing import Tuple
from typing import List
import inspect
import itertools


def override_args_kwargs(f: Callable, args: Iterable, kwargs: dict,
                         new_kwargs: dict) -> Tuple[List, dict]:
    """Overrides positional and keyword arguments according to signature of
    the function using new keyword arguments.

    Args:
        f: callable, which signature is used to determine how to override
            arguments.
        args: original values of positional arguments.
        kwargs: original values of keyword arguments.
        new_kwargs: new keyword arguments, their values will override
            original arguments.

    Return:
        args: updated list of positional arguments.
        kwargs: updated dictionary of keyword arguments.
    """
    args = list(args)
    new_kwargs = new_kwargs.copy()
    p = inspect.signature(f).parameters
    for idx, (k, v) in enumerate(itertools.islice(p.items(), len(args))):
        if v.kind not in (inspect.Parameter.POSITIONAL_ONLY,
                          inspect.Parameter.POSITIONAL_OR_KEYWORD):
            break
        if k in new_kwargs:
            args[idx] = new_kwargs.pop(k)
    return args, {**kwargs, **new_kwargs}
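A usage sketch with a hypothetical target function, assuming override_args_kwargs above: the positional `b` is replaced and `scale` flows through as a keyword.

def add(a, b, scale=1):
    return (a + b) * scale

args, kwargs = override_args_kwargs(add, (1, 2), {}, {'b': 10, 'scale': 3})
print(args, kwargs)          # [1, 10] {'scale': 3}
print(add(*args, **kwargs))  # 33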
91257ba7e0f68af4c6e9aa79f00e60ce71636c2d
59,014
import pytz
from datetime import datetime


def get_time_at_timezone(timezone):
    """
    Get the current time at a time zone.

    Args:
        timezone:

    Returns:
        time at the requested timezone
    """
    tz = pytz.timezone(timezone)
    now = datetime.now(tz=tz)
    target_tz = pytz.timezone(timezone)
    return target_tz.normalize(now.astimezone(target_tz))
25181220f34dd5b3570af9e3b8b69e09aedf9311
59,015
def get_special_case_tol(dataset, tree_type, method, default_tol=1e-5):
    """
    Special cases for `leaf_inf` and `leaf_refit`.

    Input
        dataset: str, dataset.
        tree_type: str, tree-ensemble model.
        method: str, explainer.
        default_tol: float, original tolerance.

    Return
        - Tolerance (float).
    """
    tol = default_tol
    if method in ['leaf_inf', 'leaf_refit', 'leaf_infLE', 'leaf_refitLE']:
        if tree_type == 'lgb' and dataset == 'flight_delays':
            tol = 1e-1
        elif tree_type == 'cb':
            if dataset == 'bean':
                tol = 0.5
            elif dataset == 'naval':
                tol = 1e-4
    return tol
d09b3d8066486227c3b5d9650b87cfc2f31ad9ab
59,022
def double_day(bday_1, bday_2, n=2):
    """Return the day when one person is twice as old as the other, given
    their birthdays. Optionally find when one person is n times older.

    bday_1, bday_2: date (or datetime) objects. Must be the same type.
    n: number, > 1
    """
    # Double day will be when the younger person's age is the
    # same as their difference in age.
    # The n-1 factor will just be 1 and have no effect if we're
    # doubling (n=2).
    # younger = older - diff. So if older = n * younger,
    # younger = n*younger - diff ==> younger = diff/(n-1).
    # So we just need the date when younger's age is diff/(n-1).
    diff = abs(bday_2 - bday_1)
    return max(bday_1, bday_2) + diff / (n - 1)
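A worked example, assuming the function above; on the returned date the two ages are exactly 1827 and 3654 days:

from datetime import date

d = double_day(date(2000, 1, 1), date(2005, 1, 1))
print(d)  # 2010-01-02: the elder is exactly twice the younger's age in days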
95510f8edba658e9a7f532edb057fc9ffc75545e
59,031
def get_total_seconds(time_delta):
    """
    Returns the total number of seconds in a passed timedelta
    """
    return (time_delta.microseconds +
            (time_delta.seconds + time_delta.days * 24 * 3600) * 10**6) / 10**6
18d6d0cfa6cc6c623d9444b04795fb72ce75e585
59,033
def get_reqs(fname):
    """
    Get the requirements list from the text file
    JCC 03.10.2020

    :param fname: the name of the requirements text file
    :return: a list of requirements
    """
    file = open(fname)
    data = file.readlines()
    file.close()
    return [data[i].replace('\n', '') for i in range(len(data))]
947e62b7ecf50bca256224cb490c8faf47292cad
59,035
def find_matrix_size(coordinates):
    """
    This finds the maximum x and y values from the list of coordinates

    coordinates -> dict of coordinate objects

    returns max rows and columns values, plus 1 added space because of 0 indexing
    """
    max_x = 0
    max_y = 0
    for key, coordinate in coordinates.items():
        if coordinate.x > max_x:
            max_x = coordinate.x
        if coordinate.y > max_y:
            max_y = coordinate.y
    return max_y + 1, max_x + 1
94deba2d1664cfd94b6baeed6675e90d1868b37f
59,037
def register_parser(parser_dict, name=None, force=False):
    """Register a parser function.

    A record will be added to ``parser_dict``, whose key is the specified
    ``name``, and value is the function itself.
    It can be used as a decorator or a normal function.

    Example:
        >>> BACKWARD_PARSER_DICT = dict()
        >>> @register_parser(BACKWARD_PARSER_DICT, 'ThnnConv2DBackward')
        >>> def conv_backward_parser():
        >>>     pass

    Args:
        parser_dict (dict): A dict to map strings to parser functions.
        name (str | None): The function name to be registered. If not
            specified, the function name will be used.
        force (bool, optional): Whether to override an existing function
            with the same name. Default: False.
    """
    def _register(parser_func):
        parser_name = parser_func.__name__ if name is None else name
        if (parser_name not in parser_dict) or force:
            parser_dict[parser_name] = parser_func
        else:
            raise KeyError(
                f'{parser_name} is already registered in task_dict, '
                'add "force=True" if you want to override it')
        return parser_func

    return _register
dec3b6e59260f35bdb72700ffd06ace328fbf62d
59,043
def get_ir_frame_number(rgb_idx, n_ir, n_rgb):
    """Returns index of IR frame corresponding to the RGB frame idx."""
    ir_idx = round(n_ir * float(rgb_idx) / n_rgb)
    return ir_idx
95c21fde7c3e8ffac881caf46f9a4052c929ccc5
59,046
def get_disks_from_domain(domain_xml):
    """
    From the ElementTree of a domain, get the set of device names for all
    disks assigned to the domain.

    Parameters
    ----------
    domain_xml: ElementTree
        The xml representation of the domain.

    Returns
    -------
    set
        The set of device names for all disks assigned to the domain.
    """
    devices = domain_xml.find('./devices')
    if devices is None:
        return None
    disks = []
    for disk in devices.findall('./disk'):
        try:
            disks.append(disk.find('./source').attrib['dev'])
        except Exception:
            pass
    return set(disks)
89189e8129068a9f2ce613e674e8495997019867
59,049
from typing import Set
from pathlib import Path


def expand_path(path) -> Set[Path]:
    """
    If ``path`` points to a directory, return all files within the directory
    (recursively). Otherwise, return a set that contains ``path`` as its
    sole member.
    """
    if path.is_dir():
        return set(path.glob('**/*'))
    return {path}
d959e3dd4f34690b4bc1e9932648a2784d120119
59,058
def reboot(isamAppliance, check_mode=False, force=False):
    """
    Reboot the appliance
    """
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True)
    else:
        return isamAppliance.invoke_post("Rebooting appliance",
                                         "/diagnostics/restart_shutdown/reboot",
                                         {})
77e506addf07181d86a8399f6a3aa9e166c1f8f6
59,061
def _is_valid_ticker(ticker: str) -> bool:
    """Helper to drop tickers/terms with unwanted patterns"""
    restricted = ["Overnight", "W"]
    for el in restricted:
        if el in ticker:
            return False
    return True
558199124f59aabcbfb7730a26df3164e32427c3
59,062
def add_producer_function(new_producer_list, xml_producer_function_list,
                          output_xml):
    """
    Check if input list is not empty, write in xml for each element and
    return updated list if any updates have been made.

    Parameters:
        new_producer_list ([Data_name_str, Function]) : Data's name and
            producer's function list
        xml_producer_function_list ([Data_name_str, Function]) : Data's name
            and producer's function list from xml
        output_xml (GenerateXML object) : XML's file object

    Returns:
        update_list ([0/1]) : Add 1 to list if any update, otherwise 0 is added
    """
    if not new_producer_list:
        return 0

    output_xml.write_producer(new_producer_list)
    # Warn the user once added within xml
    for producer in new_producer_list:
        xml_producer_function_list.append(producer)
        print(f"{producer[1].name} produces {producer[0]}")
    return 1
7c30289d18d79fd6c2a7ee006f528bf5dd5d56a1
59,064
def response_creator(text, card):
    """
    Builds a response with speech part and Alexa app card contents

    :param text: text to be spoken
    :param card: text for the app card
    :return: JSON object to be returned
    """
    text_item = {"type": "PlainText", "text": text}
    card_item = {"type": "Simple", "title": "Stop Info", "content": card}
    reprompt = {
        "outputSpeech": {"text": "Which stop do you want to know about?",
                         "type": "PlainText"}}
    response = {"version": "1.0",
                "response": {"outputSpeech": text_item,
                             "card": card_item,
                             "reprompt": reprompt,
                             "shouldEndSession": True}}
    return response
cc1ce310616fc7de60b636698e3d288403db8af6
59,070
def unique(in_list, key=None):
    """Unique values in list ordered by first occurrence"""
    uniq = []
    if key is not None:
        keys = []
        for item in in_list:
            item_key = key(item)
            if item_key not in keys:
                uniq.append(item)
                keys.append(item_key)
    else:
        for item in in_list:
            if item not in uniq:
                uniq.append(item)
    return uniq
cda3698ed63331edde3d8a12815319a84513b6f5
59,071
def renaming_modelGW(name):
    """Renaming variables.

    This function simply translates variable names used in the code into
    Latex style variables usable in plots. These rules are valid for the
    double adder model.

    Parameters
    ----------
    name : list of strings
        list of names to translate

    Returns
    -------
    name : list of strings
        translated list of names
    """
    name = [x.replace('tau_g', '$\\alpha$') if type(x) == str else x for x in name]
    name = [x.replace('Lig2_fit', '$\\Lambda_f$') if type(x) == str else x for x in name]
    name = [x.replace('Lb_fit', '$\\Lambda_b$') if type(x) == str else x for x in name]
    name = [x.replace('Lig_fit', '$\\Lambda_i$') if type(x) == str else x for x in name]
    name = [x.replace('DeltaLgi', '$d\\Lambda_{if}$') if type(x) == str else x for x in name]
    name = [x.replace('DeltaLigb', '$d\\Lambda_{ib}$') if type(x) == str else x for x in name]
    name = [x.replace('Tbg', '$T_{ib}$') if type(x) == str else x for x in name]
    name = [x.replace('Tg', '$T_{if}$') if type(x) == str else x for x in name]
    name = [x.replace('rLig', '$R_{if}$') if type(x) == str else x for x in name]
    return name
cbfb5b995a1a36ba63d5d337ea5392b45717c997
59,074
def align_rows(rows, bbox):
    """
    For every row, align the left and right boundaries to the final table
    bounding box.
    """
    try:
        for row in rows:
            row['bbox'][0] = bbox[0]
            row['bbox'][2] = bbox[2]
    except Exception as err:
        print("Could not align rows: {}".format(err))
    return rows
756957ec1554f8eb4847a439cba45429c46b9ac4
59,077
import yaml


def get_config(config_file_path):
    """Convert config yaml file to dictionary.

    Args:
        config_file_path : Path
            Path to config directory.

    Returns:
        config : dict
            Config represented as dictionary.
    """
    with open(config_file_path) as data:
        config = yaml.load(data, Loader=yaml.FullLoader)
    return config
c569eed1a942071e5ae6404f53483ef90a35d1a3
59,078
def _str_eval_true(eval, act, ctxt, x):
    """Returns the Python 'not' of the argument."""
    return [not x]
82d1a52d661b696bb376c140bb4d976c7ab2cb59
59,079
import json


def merge_jsons_single_level(filenames):
    """
    Merge a list of toplevel-dict json files.
    Union dicts at the first level.
    Raise exception when duplicate keys encountered.
    Returns the merged dict.
    """
    merged = {}
    for filename in filenames:
        with open(filename) as f:
            d = json.load(f)
        for key, value in d.items():  # Python 3: items(), not iteritems()
            if key not in merged:
                merged[key] = value
                continue
            # Handle pre-existing keys
            if merged[key] == value:
                pass
            elif type(value) == dict and type(merged[key]) == dict:
                merged[key].update(value)
            else:
                raise ValueError(
                    "No merge strategy for key %s, value of type %s, "
                    "from %s into %s" % (str(key), str(type(value)),
                                         str(value), str(merged[key])))
    return merged
180b4fb24915b3773e83b2859ac0128a070b5e35
59,080
def mod_inverse(x, p):
    """Given 0 <= x < p, and p prime, returns y such that x * y % p == 1.

    >>> mod_inverse(2, 5)
    3
    >>> mod_inverse(3, 5)
    2
    >>> mod_inverse(3, 65537)
    21846
    """
    return pow(x, p - 2, p)
12dafbe546de99dc174143ee00e910f1c9519ef0
59,081
def _read_file(path):
    """
    Returns the contents of the file at ``path``.
    """
    with open(path) as f:
        expected_output = f.read()
    return expected_output
9d20651be4b3529b4ecb8fbbf4a87949096963f3
59,082
def format_as_index(container, indices):
    """
    Construct a single string containing indexing operations for the indices.

    For example for a container ``bar``, [1, 2, "foo"] -> bar[1][2]["foo"]

    Arguments:
        container (str): A word to use for the thing being indexed
        indices (sequence): The indices to format.
    """
    if not indices:
        return container
    return f"{container}[{']['.join(repr(index) for index in indices)}]"
33a378aeef06325f5a3c4e0c561eca86d4115270
59,092
def get_body(response):
    """Access the response body from an HTTP response.

    Args:
        response (object): The HTTP response object.

    Returns:
        bytes: The body of the ``response``.
    """
    return response.content
4f173257cda41a3f8ba19b672c72d10a206c5887
59,098
import re


def clean_field_type(field, row=None):
    """Determines the value of what to run the logic against. This might be
    a calculated value from the current row, or a supplied value, such as a
    number.

    Parameters
    ----------
    field - str, int, or float, logic field to check
    row - dict, dictionary of values of the current frame

    Returns
    -------
    str or int
    """
    if row:
        if not isinstance(row, dict):
            row = row._asdict()
    if isinstance(field, str):
        if field.isnumeric():
            return int(field)
        if re.match(r"^-?\d+(?:\.\d+)$", field):
            # if it's a string containing a float
            return float(field)
    if type(field) is bool:
        return field
    if isinstance(field, int) or isinstance(field, float):
        return field
    if row:
        return row[field]
    return row
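Examples of each branch, assuming the function above:

print(clean_field_type("42"))     # 42, parsed to int
print(clean_field_type("-3.5"))   # -3.5, parsed to float
print(clean_field_type("price", row={"price": 9.99}))  # 9.99, looked up in row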
99dd91dc598546bdd1a6c9b3dc17ce420cf6397f
59,106
def first(iter):
    """Helper function that takes an iterable and returns the first element.

    No big deal, but it makes the code readable in some cases. It is
    typically useful when `iter` is (or can be) a generator expression as
    you can use the indexing operator for ordinary lists and tuples."""
    for item in iter:
        return item
    raise ValueError("iterable is empty")
8629c3f3cb26aae03fd2123f7e752d80ea6783b2
59,108
def flipx(tile):
    """
    Return a copy of the tile, flipped horizontally
    """
    return list(reversed(tile))
1203919042cdedd49edd35942d19964d4f1acfdf
59,111
def split_haiku(haiku):
    """Split haiku, remove the period and new_line"""
    word_array = haiku.lower().replace('.', '').split()
    return word_array
6339ff5360c9852668b31affff3d61017f9c95b5
59,117
def _version_tuple_to_string(version_tuple):
    """Convert a version_tuple (major, minor, patch) to a string."""
    return '.'.join(str(n) for n in version_tuple)
2156671b1e1fd95e505869da336136447ca229af
59,125
def join_places_building_data(places_proj, buildings_proj):
    """
    Add summary building data onto city blocks.

    Requires columns to be present in the gdfs generated by other functions
    in osmuf.

    Parameters
    ----------
    places_proj : geodataframe
    buildings_proj : geodataframe

    Returns
    -------
    GeoDataFrame
    """
    building_areas_by_place = buildings_proj[['footprint_m2', 'total_GEA_m2']].groupby(
        buildings_proj['city_block_id']).sum()

    # if there are buildings not associated with a city_block they aggregate
    # under 0; if this happens remove them from the dataframe
    # (`0 in index` replaces Index.contains(), removed in pandas 1.0)
    if 0 in building_areas_by_place.index:
        building_areas_by_place = building_areas_by_place.drop([0])

    places_proj = places_proj.merge(building_areas_by_place, on='city_block_id')

    places_proj['net_GSI'] = (places_proj['footprint_m2'] / places_proj.area).round(decimals=3)
    places_proj['net_FSI'] = (places_proj['total_GEA_m2'] / places_proj.area).round(decimals=3)
    places_proj['gross_GSI'] = (places_proj['footprint_m2'] / places_proj['gross_area_m2']).round(decimals=3)
    places_proj['gross_FSI'] = (places_proj['total_GEA_m2'] / places_proj['gross_area_m2']).round(decimals=3)
    places_proj['avg_building:levels'] = (places_proj['total_GEA_m2'] / places_proj['footprint_m2']).round(decimals=1)

    return places_proj
375ee92e7913ba20ed782a5e99c5113d1226f601
59,126
def distinct(keys):
    """
    Return the distinct keys in order.
    """
    known = set()
    outlist = []
    for key in keys:
        if key not in known:
            outlist.append(key)
            known.add(key)
    return outlist
40681a54e8983472223f1872808dd4a093be32f1
59,128
def eq100(A, B, C, D, E, T):
    """Chemsep equation 100

    :param A: Equation parameter A
    :param B: Equation parameter B
    :param C: Equation parameter C
    :param D: Equation parameter D
    :param E: Equation parameter E
    :param T: Temperature in K"""
    return A + B*T + C*(T**2) + D*(T**3) + E*(T**4)
322ab7c04dca39251ba57bb247de5903412062c4
59,129
import random
import string


def gen_string(n: int) -> str:
    """Generate a new string of N chars"""
    return ''.join([random.choice(string.ascii_lowercase) for i in range(n)])
177141971d873d983bf8cadcc6383bad0bb38287
59,132
def label_language(language: str) -> str:
    """Convert scenario language to place in a resource label."""
    return {
        'c++': 'cxx',
    }.get(language, language)
487367f3f6b88c286c88ff32d6347473340bf46b
59,133
def sfo(x0, rho, optimizer, num_steps=50):
    """
    Proximal operator for an arbitrary function minimized via the
    Sum-of-Functions optimizer (SFO)

    Notes
    -----
    SFO is a function optimizer for the case where the target function
    breaks into a sum over minibatches, or a sum over contributing
    functions. It is described in more detail in [1]_.

    Parameters
    ----------
    x0 : array_like
        The starting or initial point used in the proximal update step

    rho : float
        Momentum parameter for the proximal step (larger value -> stays
        closer to x0)

    optimizer : SFO instance
        Instance of the SFO object in `SFO_admm.py`

    num_steps : int, optional
        Number of SFO steps to take

    Returns
    -------
    theta : array_like
        The parameter vector found after running `num_steps` iterations of
        the SFO optimizer

    References
    ----------
    .. [1] Jascha Sohl-Dickstein, Ben Poole, and Surya Ganguli. Fast
       large-scale optimization by unifying stochastic gradient and
       quasi-Newton methods. International Conference on Machine Learning
       (2014). `arXiv preprint arXiv:1311.2115 (2013)
       <http://arxiv.org/abs/1311.2115>`_.
    """
    # set the current parameter value of SFO to the given value
    optimizer.set_theta(x0, float(rho))

    # set the previous ADMM location as the flattened parameter array
    optimizer.theta_admm_prev = optimizer.theta_original_to_flat(x0)

    # run the optimizer for n steps
    return optimizer.optimize(num_steps=num_steps)
84a05174b39f89d33b9ab2aeca1e0b3ceecc933d
59,134
def to_camel_case(s):
    """
    Converts a given name to camel case and removes any '_'
    """
    return ''.join(list(map(lambda x: x.capitalize(), s.split('_'))))
b1e3436982f1a5768aa3701bf62bade00bb37984
59,136
def get_overtime(row):
    """
    Whether or not the game was decided in overtime
    """
    return ('OT' in row['Home Score']) or ('OT' in row['Away Score'])
c85592541c36f7db557d957b7c7babfd7666e01f
59,139
def sort_project_list(in_list):
    """
    Sort and clean up a list of projects.
    Removes duplicates and sorts alphabetically, case-insensitively.
    """
    # replace spaces with underscores
    in_list_2 = [i.replace(" ", "_") for i in in_list]

    # remove duplicate values if we ignore case
    # http://stackoverflow.com/a/27531275/4276230
    unique_projects_dict = {v.lower(): v for v in in_list_2}.values()
    unique_projects_list = list(unique_projects_dict)

    # lowercase
    lowercase_list = [i.lower() for i in unique_projects_list]

    # sort the list
    sorted_project_list = sorted(lowercase_list)
    return sorted_project_list
d80ed75c40fa67b068afb807e754d6bf49e3493a
59,141
from pathlib import Path


def file_exists(file_path: str) -> bool:
    """Checks if a file exists at `file_path`

    :param file_path: path to file whose existence is being checked.
    :type file_path: str
    :rtype: bool
    """
    return Path(file_path).is_file()
11b14ccf73ed753ccf46a7100f98d63052ea5fc5
59,143
def errorConceal(interpPackets, pBuffer, receivedIndices, lostIndices,
                 rowsPerPacket):
    """Performs packet loss concealment on the given data.

    # Arguments
        interpPackets: function object corresponding to a particular
            interpolation kind
        pBuffer: packets to be interpolated
        receivedIndices: packets that were retained
        lostIndices: packets that were lost
        rowsPerPacket: number of rows of the feature map to be considered
            as one packet

    # Returns
        Tensor whose loss has been concealed
    """
    print("Error Concealment")
    return interpPackets(pBuffer, receivedIndices, lostIndices, rowsPerPacket)
a6901f365967618923b42dfca41802ab7fe3b97c
59,144
def _consolidate_descriptive_type(descriptive_type: str) -> str:
    """
    Convert type descriptions with "or" into respective type signature.

    "x or None" or "None or x" -> "Optional[x]"
    "x or x" or "x or y[ or z [...]]" -> "Union[x, y, ...]"

    Args:
        descriptive_type: Descriptions of an item's type.

    Returns:
        Type signature for descriptive type.
    """
    types = descriptive_type.split("or")
    if len(types) == 1:
        return descriptive_type
    types = [pt.strip() for pt in types]
    if len(types) == 2:
        if types[0] == "None":
            return f"Optional[{types[1]}]"
        if types[1] == "None":
            return f"Optional[{types[0]}]"
    return f"Union[{','.join(types)}]"
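Examples, assuming the function above (note the Union output has no space after the comma, matching the join):

print(_consolidate_descriptive_type("int or None"))  # Optional[int]
print(_consolidate_descriptive_type("int or str"))   # Union[int,str]
print(_consolidate_descriptive_type("int"))          # int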
eca98d201fd2dc71ab9d485e967c6d505e4b7d36
59,150
def flat_header_val_to_dict(header_val):
    """Transform a header string of comma separated parameters into a dict."""
    val_dict = {}
    val_comps = header_val.rsplit(',')
    if len(val_comps):
        for val_comp in val_comps:
            key, sep, val = val_comp.partition("=")
            if sep != "=":
                return {}
            key = key.strip()
            val = val.strip()
            val = val.strip('"')
            if key in val_dict:
                if isinstance(val_dict[key], list):
                    val_dict[key].append(val)
                else:
                    val_dict[key] = [val_dict[key], val]
            else:
                val_dict[key] = val
    return val_dict
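An example showing how repeated keys collapse into a list, assuming the function above:

hdr = 'realm="api", qop="auth", qop="auth-int"'
print(flat_header_val_to_dict(hdr))
# {'realm': 'api', 'qop': ['auth', 'auth-int']}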
accf4ff78fa9fb5ee2bbb1321c26d7e313add11b
59,162
def unique_timestamps(data):
    """
    Identify unique timestamps in a dataframe

    :param data: dataframe. The 'Time' column is used by default
    :returns: returns a numpy array
    """
    unique_timestamps = data['Time'].unique()
    return unique_timestamps
0b55cf21bb26c703fc3c7ce310e0b4bb891786b2
59,163
def filter_listkey_args(**kwargs):
    """
    Filter pagination-related keyword arguments.

    Parameters:
        **kwargs: Arbitrary keyword arguments.

    Returns:
        dict: Keyword arguments relating to ListKey pagination.
    """
    listkey_options = ['listkey_count', 'listkey_start']
    listkey_args = {k: kwargs[k] for k in kwargs if k in listkey_options}
    return listkey_args
af65204eabc1b6442ec254bea153d07532058417
59,165