content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
from pathlib import Path
import tempfile
import pathlib


def tempfile_path() -> Path:
    """Fixture that returns the nbpreview link-file path in the temp dir.

    The name combines ``tempfile.template`` (usually ``"tmp"``) with the
    fixed suffix ``nbpreview_link_file``.
    """
    prefix = tempfile.template
    # Joining a plain string onto a Path is sufficient; wrapping the
    # string in Path() again (as the original did) was redundant.
    return Path(tempfile.gettempdir()) / f"{prefix}nbpreview_link_file"
a19519bc36ff08ada4ae951eea598f67386c2f50
24,276
def load_pizza_data(source):
    """Load a pizza-order dataset from a text file.

    Expected file layout (two lines)::

        15 4
        2 3 5 6

    Line 1: number of participants, number of pizza types.
    Line 2: slices per pizza type.

    Parameters
    ----------
    source : String
        Global path to text file to be loaded.

    Returns
    -------
    participants : int
        Number of people to be ordered for.
    n_types : int
        Number of pizza types available.
    n_type_slices : list[int]
        Number of slices per pizza type.

    Raises
    ------
    ValueError
        If the number of slice entries does not match ``n_types``.
    """
    print('Load data: {}'.format(source))
    with open(source, 'r') as file:
        content = file.read().splitlines()
    # Read first line: participant count and number of pizza types.
    line = content[0].split(' ')
    participants = int(line[0])
    n_types = int(line[1])
    # Read second line.  Index 1 (the original used -1), so a trailing
    # blank line in the file can no longer break the parse.
    line = content[1].split(' ')
    n_type_slices = [int(val) for val in line]
    # Check validity of input file
    if len(n_type_slices) != n_types:
        raise ValueError('Input file corrupted!')
    return participants, n_types, n_type_slices
e032619dc01e021f06a9c4eafe2e9f9353bbb66b
24,277
def russian(a, b):
    """Multiply two integers with the Russian Peasant Algorithm.

    Repeatedly halves ``a`` and doubles ``b``, accumulating ``b``
    whenever the current ``a`` is odd.

    Input: a, b: integers
    Returns: a*b
    """
    total = 0
    while a > 0:
        if a & 1:
            total += b
        b <<= 1
        a >>= 1
    return total
1ef601dbeced430ca7d09093ad7338319bb128d4
24,278
def create_study_params(file_path):
    """Build study-design parameters from a dagitty export file.

    :param file_path: path to the dagitty ``.txt`` file.
    :return: dict with ``exposures``, ``outcome``, ``variables`` and
        ``dependencies`` sections describing the study design.
    """
    # Context manager closes the handle; the original leaked the file.
    with open(file_path, "r") as f:
        rows = f.read().split("\n")
    # Drop the header line and the trailing "}" / blank line.
    rows = rows[1:-2]
    nodes = []
    outgoing = []
    incoming = []
    # Default distribution parameters for plain variables.
    def_params = {
        "constant": False,
        "distribution": "normal",
        "mean": 0,
        "std": 1,
        "boarders": (-1, 1)}
    exposures = []
    outcomes = []
    for row in rows:
        values = row.split(" ")
        if values[1][0] not in ["-", "<"]:
            # Node declaration line, possibly tagged exposure/outcome.
            if "outcome" in values[1]:
                outcomes.append(values[0])
            elif "exposure" in values[1]:
                exposures.append(values[0])
            nodes.append(values[0])
        else:
            # Edge line: "<src> -> <dst>".
            outgoing.append(values[0])
            incoming.append(values[2])
    rows_params = {"exposures": {}, "outcome": {},
                   "variables": {}, "dependencies": {}}
    for node in nodes:
        if node in exposures:
            rows_params["exposures"][node] = {
                "gamma": 1, "tau": 1, "treatment_effect": 1}
        elif node in outcomes:
            rows_params["outcome"] = {
                "name": node, "X_0": 0, "sigma_b": 0.1,
                "sigma_0": 0.1, "boarders": (-1, 1)}
        else:
            # Copy per node: the original shared one mutable dict between
            # every plain variable, so mutating one mutated them all.
            rows_params["variables"][node] = dict(def_params)
    rows_params["dependencies"] = {
        "{} -> {}".format(out_edge, in_edge): 1
        for out_edge, in_edge in zip(outgoing, incoming)}
    return rows_params
98d47bd67964d3da4e5a19463c76421624ad254d
24,279
def is_number(s):
    """Tell whether *s* parses as a number (integer or float).

    @param --- `s` A symbol in string
    """
    try:
        float(s)
    except ValueError:
        return False
    return True
cefa6678ac24e3b917b18f5061ad5c15886cbe2f
24,280
import uuid


def make_snonce():
    """Returns a unique 32 character string"""
    # .hex is exactly the canonical string form without the dashes.
    return uuid.uuid4().hex
23827a303caf8a1f24eed1b5fb5e6ce3deff4966
24,281
def spearman_correlation(itr1, itr2):
    """Spearman rank correlation between two orderings of the same items.

    The rank of an element is its position within the iterable; both
    iterables must contain exactly the same elements.

    :param itr1: first ordering of the elements
    :param itr2: second ordering of the same elements
    :return: Spearman correlation coefficient in [-1, 1]
    """
    rank1 = {element: index for index, element in enumerate(itr1)}
    rank2 = {element: index for index, element in enumerate(itr2)}
    n = len(rank1)
    sqr_dists_sum = sum((rank1[element] - rank2[element]) ** 2
                        for element in rank1)
    # Standard formula: 1 - 6 * sum(d^2) / (n * (n^2 - 1)).  The original
    # divided by n * (n - 1), which is not the Spearman denominator and
    # produced out-of-range values (e.g. -2 for fully reversed lists).
    return 1 - (6 * sqr_dists_sum) / (n * (n ** 2 - 1))
e491c0d82e015905f5d276348c0ea42f4b2ccfc7
24,282
def service(app):
    """Vocabularies service object."""
    extension = app.extensions['invenio-vocabularies']
    return extension.service
d440e300581fdf71a2f36c8f00c6f0e163f646f0
24,284
import time def _get_event_file_active_filter(flags): """Returns a predicate for whether an event file load timestamp is active. Returns: A predicate function accepting a single UNIX timestamp float argument, or None if multi-file loading is not enabled. """ if not flags.reload_multifile: return None inactive_secs = flags.reload_multifile_inactive_secs if inactive_secs == 0: return None if inactive_secs < 0: return lambda timestamp: True return lambda timestamp: timestamp + inactive_secs >= time.time()
6d8faeac218293643f0df9d9ffe198d24323ba52
24,285
import numpy


def equalize(levels, within_factor=3.0):
    """Make all levels within a factor of ``within_factor`` of each other.

    Each level is replaced by its absolute value (floored at 0.0001) and
    then repeatedly doubled until it reaches ``largest / within_factor``.

    Args:
        levels: sequence of numeric levels (may be negative).
        within_factor: maximum allowed ratio between largest and smallest.

    Returns:
        A new list of adjusted, non-negative levels.
    """
    new_levels = []
    largest = 0.0
    for idx in range(len(levels)):
        new_levels.append(numpy.abs(levels[idx]))
        if new_levels[idx] < 0.0001:
            new_levels[idx] = 0.0001
        largest = max(largest, new_levels[idx])
    least_allowed = largest / within_factor
    for idx in range(len(new_levels)):
        # Per-element safety cap.  The original shared one counter across
        # the whole loop, so elements late in the list could be left
        # un-equalized once earlier elements consumed the budget.
        loops = 0
        while new_levels[idx] < least_allowed and loops < 100:
            loops += 1
            new_levels[idx] *= 2.0
    return new_levels
968e252ecdfe3834a3b790e4d9b976f5ff523922
24,286
from sys import version
import requests
import platform


def data_from_petscan(psid, total):
    """Run the specified PetScan query and return a list of files.

    :param psid: PetScan saved-query id (the ``psid`` URL parameter).
    :param total: maximum number of results to request (``output_limit``).
    :return: list of file entries taken from the catscan-compatible JSON
        payload at ``r.json()["*"][0]["a"]["*"]``.
    :raises requests.HTTPError: if PetScan answers with an error status.
    """
    params = {
        "format": "json",
        "output_compatibility": "catscan",
        "sparse": "on",
        "psid": psid,
        "output_limit": total,
    }
    # Identify the bot per Wikimedia user-agent policy: tool name, contact
    # page, and library/interpreter versions.
    # NOTE(review): ``version`` here is sys.version (the full interpreter
    # string), so "{version}" is the Python version, not the bot's own
    # release -- looks unintended, confirm with the tool's maintainer.
    headers = {
        "user-agent": (
            "AntiCompositeBot {version} on Toolforge, "
            "(commons:User:AntiCompositeNumber) "
            "Requests/{requests_version} Python/{python_version}"
        ).format(
            version=version,
            requests_version=requests.__version__,
            python_version=platform.python_version(),
        )
    }
    r = requests.get("https://petscan.wmflabs.org/", params=params, headers=headers)
    r.raise_for_status()
    files = r.json()["*"][0]["a"]["*"]
    return files
ed61febc5fc4a2b3c34531d16247b2b46eab6459
24,287
def rightRow(r, c):
    """Return the (row, col) cells from (r, c) to the right edge of a 9x9 grid.

    >>> rightRow(5, 4)
    [(5, 4), (5, 5), (5, 6), (5, 7), (5, 8)]
    """
    # Build the list explicitly: under Python 3 the original bare zip()
    # returned a lazy iterator, breaking the documented list behavior.
    return [(r, col) for col in range(c, 9)]
476ad7f757162c0bd70c41f33564ee8ddf307547
24,288
def _make_controlled_gate_proto(program_proto, control_qubits, control_values): """Turn a gate proto (from above) into a controlled gate proto. inserts control_qubits and control_values into gate args map. """ t = program_proto.circuit.moments[0].operations[0] t.args['control_qubits'].arg_value.string_value = control_qubits t.args['control_values'].arg_value.string_value = control_values return program_proto
4cfb590d7fdccb669a12681d71d0ab98206d4bf6
24,292
import os


def get_path(root, nside, ipix):
    """Return the file path for the given root directory, nside and ipix.

    Pixels are bucketed into directories of 10000 (dir0, dir10000, ...).
    """
    # Integer floor division: under Python 3 the original "/" produced a
    # float and paths like "dir10000.0" (this was Python 2 code).
    dir_idx = (ipix // 10000) * 10000
    return os.path.join(root, "nside%d/dir%d/npix%d.csv" % (nside, dir_idx, ipix))
486bbbbbc634b080ef0523f72e5fe7d029dbe0eb
24,294
def get_uuids(things):
    """Collect the 'uuid' attribute value of every item in *things*.

    The things can be anything with a 'uuid' attribute.
    """
    uuids = []
    for thing in things:
        uuids.append(thing.uuid)
    return uuids
c71a659c96ea0bd7dbb22a92ccca46e29fa072b8
24,295
import math


def addEdge(start, end, edge_x, edge_y, lengthFrac=1, arrowPos=None, arrowLength=0.025, arrowAngle=30, dotSize=20):
    """Append one edge (optionally with an arrowhead) to plotly line lists.

    Parameters
    ---------
    start, end: lists defining start and end points
    edge_x, edge_y: lists used to construct the graph (mutated in place)
    lengthFrac: fraction of the segment to draw (1 = full length)
    arrowAngle: int, angle between lines in arrowhead in degrees
    arrowLength: float, length of lines in arrowhead
    arrowPos: None, 'middle'/'mid' or 'end'; defines where the arrowhead
        should be positioned
    dotSize: the plotly scatter dot size you are using (used to even out
        line spacing when you have a mix of edge lengths)

    Adds edge of arrow to list of edges and returns the mutated lists.

    NOTE(review): raises ZeroDivisionError when start == end (length 0);
    callers appear to guarantee distinct endpoints -- confirm.
    """
    # Get start and end cartesian coordinates
    x0, y0 = start
    x1, y1 = end

    # Incorporate the fraction of this segment covered
    # by a dot into total reduction
    length = math.sqrt((x1-x0)**2 + (y1-y0)**2)
    dotSizeConversion = .0565/20  # length units per dot size
    convertedDotDiameter = dotSize * dotSizeConversion
    lengthFracReduction = convertedDotDiameter / length
    lengthFrac = lengthFrac - lengthFracReduction

    # If the line segment should not cover the entire distance,
    # get actual start and end coords
    skipX = (x1-x0)*(1-lengthFrac)
    skipY = (y1-y0)*(1-lengthFrac)
    x0 = x0 + skipX/2
    x1 = x1 - skipX/2
    y0 = y0 + skipY/2
    y1 = y1 - skipY/2

    # Append line corresponding to the edge
    edge_x.append(x0)
    edge_x.append(x1)
    # Prevents a line being drawn from end of this edge to start of next edge
    edge_x.append(None)
    edge_y.append(y0)
    edge_y.append(y1)
    edge_y.append(None)

    # Draw arrow
    if arrowPos is not None:
        # Find the point of the arrow; assume is at end unless told middle
        pointx = x1
        pointy = y1
        # Angle of the segment, measured from vertical; 90 deg for a
        # horizontal segment (guards the division by y1 - y0).
        eta = math.degrees(math.atan((x1-x0) / (y1-y0))) if y1 != y0 else 90.0
        if arrowPos == 'middle' or arrowPos == 'mid':
            pointx = x0 + (x1-x0)/2
            pointy = y0 + (y1-y0)/2
        # Find the directions the arrows are pointing
        signx = (x1-x0)/abs(x1-x0) if x1 != x0 else +1  # verify this once
        signy = (y1-y0)/abs(y1-y0) if y1 != y0 else +1  # verified
        # Append first arrowhead
        dx = arrowLength * math.sin(math.radians(eta + arrowAngle))
        dy = arrowLength * math.cos(math.radians(eta + arrowAngle))
        edge_x.append(pointx)
        edge_x.append(pointx - signx**2 * signy * dx)
        edge_x.append(None)
        edge_y.append(pointy)
        edge_y.append(pointy - signx**2 * signy * dy)
        edge_y.append(None)
        # And second arrowhead
        dx = arrowLength * math.sin(math.radians(eta - arrowAngle))
        dy = arrowLength * math.cos(math.radians(eta - arrowAngle))
        edge_x.append(pointx)
        edge_x.append(pointx - signx**2 * signy * dx)
        edge_x.append(None)
        edge_y.append(pointy)
        edge_y.append(pointy - signx**2 * signy * dy)
        edge_y.append(None)

    return edge_x, edge_y
235ad7848c41568db1330691ec4ef015b53b2745
24,297
def cfg_chk(func_call, cfg_dict):
    """Function: cfg_chk

    Description: Checks the configuration of a database for replication.

    Arguments:
        (input) func_call -> Method call to return config settings.
        (input) cfg_dict -> Dictionary of configuration items to check.
        (output) cfg_flag -> True or False - Configuration passes.
    """
    expected = dict(cfg_dict)
    actual = func_call()
    cfg_flag = True
    for option, wanted in expected.items():
        if option not in actual:
            cfg_flag = False
            print("Error: Missing option in class.")
        elif actual[option] != wanted:
            # A read_only mismatch only warns; anything else fails the check.
            if option == "read_only":
                print("Warning: {0} not set for slave.".format(option))
            else:
                cfg_flag = False
                print("Error: {0} not set correctly.".format(option))
    return cfg_flag
c9e8409792bc65deadb1c7c640d35ae12abb6e22
24,299
def summary_table(counts):
    """Takes a dictionary of dbnames and counts and returns at table"""
    entries = {}
    for dbname, count in counts.items():
        # Keep only wikis with a non-zero count.
        if count != 0:
            entries[dbname] = count
    return {
        "entries": entries,
        "total_pages": sum(entries.values()),
        "total_wikis": len(entries),
    }
1c071deba2ff8574135d76a4f01b4ddcc8f40fe4
24,300
def all_black_colors(N):
    """Generate all black colors: a list of N (0, 0, 0) RGB triples."""
    return [(0, 0, 0)] * N
191a6211caa58b88c62fb327b18d8eef023b74c4
24,301
def growTracked(mesh, growSet, allSet):
    """Grow a set of verts along edges and faces.

    For every vert in ``growSet``, record its edge- and face-adjacent
    verts that are NOT in ``allSet``.  A grow vert whose neighbors are
    all already in ``allSet`` is used up and dropped from the result.

    Args:
        growSet: A set of Vertex objects to grow from.
        allSet: A set of Vertex objects to exclude from the growth.

    Returns:
        edgeDict: {edge-adjacent vert: [source verts...]}
        faceDict: {face-adjacent vert: [source verts...]}
        newGrowSet: growSet minus all used-up vertices.
    """
    edgeDict = {}
    faceDict = {}
    exhausted = []  # grown verts with no adjacent verts outside allSet
    for vert in growSet:
        produced = False
        for adj in mesh.adjacentVertsByEdge(vert):
            if adj not in allSet:
                produced = True
                edgeDict.setdefault(adj, []).append(vert)
        for adj in mesh.adjacentVertsByFace(vert):
            if adj not in allSet:
                produced = True
                faceDict.setdefault(adj, []).append(vert)
        if not produced:
            exhausted.append(vert)
    return edgeDict, faceDict, growSet - set(exhausted)
8930e0b5579ce9a009d71340aeed7366184189dc
24,302
def map_s3_path(bucket_prefix: str, request_path: str) -> str:
    """Map a request URL to an S3 bucket key."""
    # Decompose the path into project name and optional /v/<edition>.
    parts = request_path.split("/")
    project_name = parts[0].lower()
    if len(parts) >= 3 and parts[1].lower() == "v":
        edition_name = parts[2]
        edition_path = "/".join(parts[3:])
    else:
        # No explicit edition: fall back to the default one.
        edition_name = "__main"
        edition_path = "/".join(parts[1:])

    # Directory-style requests resolve to their index document.
    if request_path.endswith("/"):
        edition_path = f"{edition_path}index.html"

    path_parts = [project_name, "v", edition_name, edition_path]
    if bucket_prefix != "":
        path_parts.insert(0, bucket_prefix)
    # Strip the trailing "/" that appears when edition_path is empty.
    return "/".join(path_parts).rstrip("/")
b76ca9d3403f62d95fde679ea474fd09a0eb35d3
24,303
import logging


def setup_logger(name, log_file, formatter, level=logging.WARNING):
    """Create (or fetch) a named logger that writes to *log_file*.

    Method that is used for setting up multiple loggers side by side.
    """
    file_handler = logging.FileHandler(log_file)
    file_handler.setFormatter(formatter)
    logger = logging.getLogger(name)
    logger.setLevel(level)
    logger.addHandler(file_handler)
    return logger
70cedb1de0e4fc30613ec8fda25c5cfc6ba89336
24,306
import time


def timedcall(executable_function, *args, **kwargs):
    """!
    @brief Executes specified method or function with measuring of execution time.

    @param[in] executable_function (pointer): Pointer to a function or method that should be called.
    @param[in] *args: Arguments of the called function or method.
    @param[in] **kwargs: Arbitrary keyword arguments of the called function or method.

    @return (tuple) Execution time and result of execution of function or method (execution_time, result_execution).
    """
    started = time.perf_counter()
    outcome = executable_function(*args, **kwargs)
    elapsed = time.perf_counter() - started
    return elapsed, outcome
48bff3a1123d1cd3d18f92882baf1464a839b54c
24,308
import time


def getlocaltime():
    """Format the current local time.

    :return: current time as UTF-8 encoded bytes, "YYYY-MM-DD HH:MM:SS".
        (Kept as bytes for compatibility -- Python 2 heritage.)
    """
    now = time.localtime(time.time())
    formatted = time.strftime("%Y-%m-%d %H:%M:%S", now)
    return formatted.encode("utf-8")
88d57b1859d17ca16618feceb65325c4dfec0cde
24,309
def baraffe_model(request):
    """Fixture for baraffe model: hands back the parametrized value."""
    model = request.param
    return model
d96d471e1aeaf42a7537363e1218889302fb9395
24,310
import hashlib


def generate_aes_key(token, secret):
    """Generate a 256 bit AES key as the SHA-256 digest of token + secret.

    Both arguments must be bytes-like so they can be concatenated and
    hashed.
    """
    key_material = token + secret
    return hashlib.sha256(key_material).digest()
f058402ad893c614c9cde8e9c96a2fdb2ff9ea95
24,312
def parse_questions_answered(json):
    """Parse the statement to extract answered questions.

    :param json: A statement containing answered questions.
    :type json: dict(str, list)
    :return: mapping of answered question id -> success flag.
    :rtype: dict
    """
    answered = {}
    for edge in json['edges']:
        statement = edge['node']['statement']
        link = statement['object']['id']
        # The question id is the text after the second "id=" in the link.
        qid = link.split('id=')[2]
        answered[qid] = statement['result']['success']
    return answered
65666e4a40b6e7e0680ceed03ad55789507ebb8d
24,314
def get_execution_dates(dag, execution_date, future, past):
    """Returns dates of DAG execution.

    :param dag: DAG object providing schedule and run metadata.
    :param execution_date: reference execution date (timezone-aware).
    :param future: if True, extend the range forward to the latest run.
    :param past: if True, extend the range back to the DAG's start date.
    :return: list of execution dates covered by the requested range.
    :raises ValueError: if the DAG has no latest execution date.
    """
    latest_execution_date = dag.get_latest_execution_date()
    if latest_execution_date is None:
        # NOTE(review): this branch fires when the DAG has never run; the
        # "non-localized date" wording looks unrelated to that condition
        # -- confirm the intended message with the original author.
        raise ValueError(f"Received non-localized date {execution_date}")
    # determine date range of dag runs and tasks to consider
    end_date = latest_execution_date if future else execution_date
    # start_date preference: default_args, then the DAG itself, then the
    # reference execution_date as a last resort.
    if 'start_date' in dag.default_args:
        start_date = dag.default_args['start_date']
    elif dag.start_date:
        start_date = dag.start_date
    else:
        start_date = execution_date
    # Only reach back before execution_date when `past` was requested.
    start_date = execution_date if not past else start_date
    if dag.schedule_interval == '@once':
        dates = [start_date]
    elif not dag.schedule_interval:
        # If schedule_interval is None, need to look at existing DagRun if the user wants future or
        # past runs.
        dag_runs = dag.get_dagruns_between(start_date=start_date, end_date=end_date)
        dates = sorted({d.execution_date for d in dag_runs})
    else:
        dates = dag.date_range(start_date=start_date, end_date=end_date)
    return dates
f4757e5550bf54a45a5da7a77041c4ee1beeea3d
24,315
def extend_sequence(sequence, item):
    """Append *item* to *sequence* in place and return the sequence.

    Simple version of extend_distribution_sequence, since not using
    distributions here makes the equalization of distributions in
    sequence irrelevant.
    """
    sequence += [item]
    return sequence
da94b67c883e746e3b04629185df2965bc84f3c9
24,317
import os
import shutil
import subprocess


def clone_git(url, dir_name=None, tag=None, reclone=False):
    """Clone a git repository and return its absolute path.

    url: url of the git repository to clone
    dir_name: name of the folder to give to the repository. If not
        given, the git repository name is used.
    tag: allows to checkout a specific commit if given
    reclone: overwrite existing repo
    """
    old_dir = os.getcwd()
    if dir_name is None:
        dir_name = os.path.split(url)[1]          # use git repo name
        dir_name = os.path.splitext(dir_name)[0]  # remove ".git" if present
    if reclone and os.path.exists(dir_name):
        shutil.rmtree(dir_name)
    if not os.path.exists(dir_name):
        # Argument list instead of a shell string: immune to spaces and
        # shell metacharacters in the url or directory name.
        subprocess.run(["git", "clone", url, dir_name])
    try:
        os.chdir(dir_name)
        if tag is not None:
            subprocess.run(["git", "checkout", tag])
        git_path = os.getcwd()
    finally:
        # Always restore the caller's working directory, even when the
        # checkout fails (the original left the process inside dir_name).
        os.chdir(old_dir)
    return git_path
5e032c9ec61862dc0776d46297ba4ba2e9f46278
24,318
def find_data_path(bone_name_list, data_path):
    """Find which bone name from the list *data_path* refers to.

    Returns (True, bone_name) when data_path is that bone's location
    path, otherwise (False, "").
    """
    for bone_name in bone_name_list:
        candidate = 'pose.bones["' + bone_name + '"].location'
        if data_path == candidate:
            return True, bone_name
    return False, ""
8d5f1a8791c7efc4c0ad997af65ac640a1458808
24,319
import torch


def similarity(d_vectors):
    """Pairwise cosine-similarity matrix of a batch of d-vectors.

    input i.e (14000, 256); output is (14000, 14000).
    """
    dots = torch.mm(d_vectors, d_vectors.T)
    norms = d_vectors.norm(dim=-1).unsqueeze(0)
    scale = torch.mm(norms.T, norms)
    # Small epsilon keeps zero-norm rows from dividing by zero.
    return dots / (scale + 1e-6)
fd8fd379501a1b421959613664a342369142c0ba
24,320
import logging


def round_filters(filters, config):
    """Returns rounded number of filters based on width coefficient."""
    width_coefficient = config.width_coefficient
    if not width_coefficient:
        return filters
    divisor = 8
    min_depth = divisor  # floor equals the rounding divisor
    orig_filters = filters
    filters *= width_coefficient
    rounded = max(min_depth, int(filters + divisor / 2) // divisor * divisor)
    # Make sure that rounding down does not go down by more than 10%.
    if rounded < 0.9 * filters:
        rounded += divisor
    logging.info("round_filter input=%s output=%s", orig_filters, rounded)
    return int(rounded)
73a2b6aaf0a86c6e57344534233349d6644d1726
24,323
import json


def parseMatchMS(s):
    """Extract answers from a JSON response for a matchMS item type.

    :param s: JSON string holding a list of objects with "source" and
        "target" keys.
    :return: list of "source-target" answer strings, or None if the
        input cannot be parsed as expected.
    """
    answerlist = []
    try:
        for element in json.loads(s):
            # Read each element independently.  The original carried
            # source/target values over from earlier elements whenever a
            # key was missing; a missing key now yields "None" instead.
            source = element.get('source')
            target = element.get('target')
            answerlist.append("{}-{}".format(source, target))
    # Best-effort as before (malformed payload -> None), but no longer a
    # bare except that also swallowed KeyboardInterrupt/SystemExit.
    except Exception:
        return None
    return answerlist
26059feed0b48b6fc0f8d59488c97baef625b95d
24,325
def is_identical_typedateowner(ev1, ev2):
    """Check if two events are identical in type, date, and owner.

    Args:
        ev1: roxar event
        ev2: roxar event

    Returns:
        True if the two events are identical in type, date, and owner;
        otherwise False.
    """
    return (ev1.type == ev2.type
            and ev1.date == ev2.date
            and ev1.owner == ev2.owner)
13490e918abd38161324836dbe002cee552b86b2
24,327
def get_ele_list_from_struct(struct):
    """Get elements list from a pymatgen structure object.

    Parameters
    ----------
    struct : pymatgen objective
        The structure

    Returns
    -------
    ele_list : [str]
        The list of elements, one string per site species.
    """
    return [str(species) for species in struct.species]
4a6fa05518ff0725d85b270b8d8b0003eb139e5e
24,328
import subprocess
import sys


def install(package: str) -> bool:
    """Allows for dynamic runtime installation of python modules.

    IMPORTANT: Modules specified will be installed from source, meaning
    that `package` must be a path to some `.tar.gz` archive.

    Args:
        package (str): Path to distribution package for installation

    Returns:
        True on success, False when pip fails or cannot be launched.
    """
    try:
        subprocess.check_call([sys.executable, "-m", "pip", "install", package])
        return True
    # Catch only the failures launching pip can raise; the original bare
    # except also swallowed KeyboardInterrupt/SystemExit.
    except (subprocess.CalledProcessError, OSError):
        return False
82dea16b5ef6fc6af71f640b6227ad1874db904a
24,329
def get_word_root(arbitrary_node):
    """Get the word root corresponding to this word.

    The XML files are grouped such that every word in the same file has
    the same word root. We therefore do not need any specific node for
    this function.

    A minimal example:

    <vortaro>
    <kap><rad>salut</rad></kap>
    </vortaro>

    :raises ValueError: if no node is given.
    """
    # Raise explicitly: the original used assert, which is silently
    # stripped when Python runs with -O.
    if arbitrary_node is None:
        raise ValueError("arbitrary_node must not be None")
    tree = arbitrary_node.getroottree()
    return list(tree.iter('rad'))[0].text
d3891adedef806c61053cd15f441cbd430127259
24,332
def sample_parameters():
    """Return sample positional and keyword arguments."""
    positional = [1, "2"]
    keyword = {"k": 1, "v": "2"}
    return positional, keyword
0cd97bfd1923a7339daf9d186233821de9386fa1
24,334
def params_from_filename(filename):
    """Return the parameters inferred from the input filename (based on
    TBRIDGE formatting).

    Each "<low>-<high>" chunk between the first and last underscore of
    the basename becomes the midpoint (low + high) / 2.
    """
    basename = filename.split("/")[-1]
    chunks = basename.split("_")[1:-1]
    params = []
    for chunk in chunks:
        bounds = chunk.split("-")
        params.append((float(bounds[0]) + float(bounds[1])) / 2)
    return params
1b6ddf58e562d42dd7cfae5ed6ff811942341b8b
24,336
from typing import Dict
from typing import List


def convert_by_vocab(vocab: Dict, items: List) -> List:
    """Converts a sequence of [tokens|ids] using the vocab."""
    return [vocab[item] for item in items]
440f1936baafae06a4a38c205e877e71a7ed37e9
24,337
def get_iou_th_min_th_dila_from_name(folder, mode='str'):
    """Extract the iou_th, min_th and dila values from a folder name.

    :param folder: The folder name of interest for extraction
    :param mode: The format of extraction: 'str' returns the raw
        strings, 'num' returns (float, int, int)
    :raises ValueError: if mode is neither 'str' nor 'num'
    """
    iou_th = folder[folder.find('iou_th_') + 7:].split('_')[0]
    min_th = folder[folder.find('min_th_') + 7:].split('_')[0]
    dila = folder[folder.find('dila_') + 5:].split('_')[0]
    # Compare with ==/!= -- the original used "is"/"is not" on string
    # literals, which is identity (not equality) and a SyntaxWarning on
    # modern Python.  Also raise instead of exit(): killing the whole
    # process from a library helper is hostile to callers.
    if mode != 'num' and mode != 'str':
        raise ValueError(
            'Your mode in get_iout_min_th_dila_from_name should be either '
            'str or num!! reset the argument pls!')
    if mode == 'num':
        return float(iou_th), int(min_th), int(dila)
    return iou_th, min_th, dila
8dfa3b3b58dc15f72ccb51cd7076c4bc9bd9cf9f
24,338
def ln_adjoint_source_to_structure(py, adjoint_directoy, base_directory):
    """Build the shell command that links adjoint sources into the SEM folders."""
    return (
        f"ibrun -n 1 {py} -m seisflow.scripts.shared.ln_adjoint2sem "
        f"--adjoint_directoy {adjoint_directoy} "
        f"--base_directory {base_directory}; \n"
    )
4457fc157b2c665882e5e918358259b30888efe6
24,339
# Import from collections: typing.OrderedDict is only an annotation alias
# and raises TypeError when instantiated.
from collections import OrderedDict


def extractKeyIndexTimeValue(dic=None, frameRange=(0, 30), **kwargs):
    """Extract an animCurve's time and value pairs within a frame range.

    Parameters
    ----------
    dic : dict(in int or float)
        key = time, value = value.  Defaults to {0: 0}.  (The original
        used a mutable default argument; None is the safe equivalent.)
    frameRange : tuple(in int or float)
        (start, end) of the frame range, inclusive on both ends.

    Returns
    -------
    OrderedDict
        index -> {time: value} for every keyframe inside the range.
    """
    if dic is None:
        dic = {0: 0}
    start, end = frameRange
    range_idtv = OrderedDict()
    idx = 0
    for t, v in dic.items():
        if start <= t <= end:
            range_idtv[idx] = {t: v}
            idx += 1
    return range_idtv
27cca6ffe38026a4d4b9b5fc732e7c2aa9347e6d
24,340
def GetChromeEnvironment():
    """Returns the environment for Chrome to run in headless mode with Xvfb."""
    environment = dict(DISPLAY='localhost:99')
    return environment
65cedbc2d9019fe4d439374d52b074208c34e91d
24,341
def _is_cardinal_token(token, lang_data): """Checks if the given token is a cardinal number and returns token""" if token in lang_data.all_numbers: return token return None
1e561a6f7c886349474a25fa6bb71e0db21ccfbe
24,342
import sys


def unicode_string(x):
    """Encode *x* for safe inclusion in exception messages.

    When we want unicode strings (e.g. translated exception messages) to
    appear in an Exception, we must first encode them using a non-strict
    error handler, because the message of an Exception may not be a
    unicode string.
    """
    text = str(x)
    return text.encode(sys.getdefaultencoding(), 'backslashreplace')
1976e4930a34e463ab5be516b56fda5234bf6035
24,345
from typing import List


def get_factories_list(db_cnxn) -> List[str]:
    """Fetch the distinct factory ids from the classified widgets table."""
    cursor = db_cnxn.cursor()
    sql = "select distinct factory_id from dbo.classified_widgets"
    return [row.factory_id for row in cursor.execute(sql)]
1bfe6055b21dfd21ebd80cb804f1355f173bac84
24,346
def get_direction(direction_id):
    """Return boolean direction id"""
    directions = {'inbound': 0, 'outbound': 1}
    try:
        return directions[direction_id]
    except KeyError:
        raise ValueError("Cannot determine direction from %s" % direction_id)
ddfd06b4b97ac5f4c78b1eead69613204d15e504
24,349
import torch


def image_cdf(img):
    """Cumulative distribution function of an 8-bit-range image tensor.

    input:
        img: image tensor with values in [0, 255]
    """
    counts = torch.histc(img, bins=256, min=0, max=255)
    # Normalize to probabilities, then accumulate.
    probabilities = counts.float() / counts.sum()
    return torch.cumsum(probabilities, 0)
ca22d0517d4951a5e0f8d7d11ff98259fe675792
24,350
def float_chk(list1):
    """Convert every entry of *list1* to float, in place, and return it.

    In python 2 versions, at least one of the lists will need to be in
    floats (not ints) to ensure correct calculation.
    """
    # Slice assignment mutates the original list object, as callers expect.
    list1[:] = [float(value) for value in list1]
    return list1
74013a432d1eae96260e6de69495eb1dce3274c3
24,352
def Union(List1, List2):
    """Returns union of two lists, preserving order of first appearance."""
    # Start from a copy of List1, then add anything new from List2.
    result = List1[:]
    for element in List2:
        if element not in result:
            result.append(element)
    return result
a0a28a059eb8efc868f7339f4ddb13db2177f5d2
24,354
def print_matrix(m, rownames, colnames):
    """Pretty-print a 2d numpy array with row and column names."""
    # Column width: the widest label plus padding.
    col_width = max(len(name) for name in rownames + colnames) + 4

    def fmt_row(cells):
        return "".join(str(cell).rjust(col_width) for cell in cells)

    # Header row, then one labeled row per matrix row (rounded to 2 dp).
    print(fmt_row([''] + colnames))
    for i, rowname in enumerate(rownames):
        cells = [rowname] + [round(m[i, j], 2) for j in range(len(colnames))]
        print(fmt_row(cells))
83b9f6ca8a2683398344c57c0bbd509a776cffa3
24,355
def settingsLink(translator, settings, tag):
    """Render the URL of the settings page."""
    url = translator.linkTo(settings.storeID)
    return tag[url]
413e2ec27a27e77b1850f47f20af45ddbb8d4f6f
24,356
import tarfile from typing import Optional def _cache_filter(tar_info: tarfile.TarInfo) -> Optional[tarfile.TarInfo]: """ Filter for ``tarfile.TarFile.add`` which removes Python and pytest cache files. """ if '__pycache__' in tar_info.name: return None if tar_info.name.endswith('.pyc'): return None return tar_info
5dd54cc8c2532cc89bcf2e357803e9036ce18322
24,357
import base64


def b64_decode(s):
    """
    Decode input string with base64url safe without padding scheme.
    """
    remainder = len(s) % 4
    if remainder >= 2:
        # Restore the "=" padding that the unpadded scheme strips.
        s = s + "=" * (4 - remainder)
    return base64.urlsafe_b64decode(s)
e82a6cbffe30b9a27bdd0a58c7f7bf11cc4066f4
24,359
def _cast_types(args): """ This method performs casting to all types of inputs passed via cmd. :param args: argparse.ArgumentParser object. :return: argparse.ArgumentParser object. """ args.x_val = None if args.x_val == 'None' else int(args.x_val) args.test_size = float(args.test_size) args.max_depth = int(args.max_depth) args.learning_rate = float(args.learning_rate) args.n_estimators = int(args.n_estimators) args.verbosity = int(args.verbosity) # objective. # booster. # tree_method. args.n_jobs = int(args.n_jobs) args.gamma = float(args.gamma) args.min_child_weight = int(args.min_child_weight) args.max_delta_step = int(args.max_delta_step) args.subsample = int(args.subsample) args.colsample_bytree = float(args.colsample_bytree) args.colsample_bylevel = float(args.colsample_bylevel) args.colsample_bynode = float(args.colsample_bynode) args.reg_alpha = float(args.reg_alpha) args.reg_lambda = float(args.reg_lambda) args.scale_pos_weight = float(args.scale_pos_weight) args.base_score = float(args.base_score) args.random_state = int(args.random_state) args.missing = None if args.missing == 'None' else float(args.missing) return args
5db5f9569849a868bc180e38bd9bcec618b7cb5d
24,360
def rgb_to_hsv(rgb):
    """
    Convert an RGB color representation to an HSV color representation.

    (r, g, b) :: r -> [0, 255] g -> [0, 255] b -> [0, 255]

    :param rgb: A tuple of three numeric values corresponding to the
        red, green, and blue value.
    :return: HSV representation of the input RGB value.
    :rtype: tuple
    """
    red, green, blue = rgb[0] / 255, rgb[1] / 255, rgb[2] / 255
    low = min(red, green, blue)
    high = max(red, green, blue)
    value = high
    delta = high - low
    if high == 0:
        # Pure black: hue and saturation are both zero.
        return 0, 0, value
    saturation = delta / high
    if delta == 0:
        # Grey: avoid dividing by zero below; hue comes out as 0.
        delta = 1
    if red == high:
        hue = 60 * (((green - blue) / delta) % 6)
    elif green == high:
        hue = 60 * (((blue - red) / delta) + 2)
    else:
        hue = 60 * (((red - green) / delta) + 4)
    return round(hue, 3), round(saturation, 3), round(value, 3)
aa2c75b92c9830c7e798b0ca6ee9feac20793de4
24,361
import sys


def handle_magic_line(line, cell, parser, namespace=None):
    """Parse and dispatch a magic command line.

    Helper function for handling magic command lines given a parser with
    handlers set.  Returns the handler's result, or None when parsing
    fails or the handler raises.
    """
    args = parser.parse(line, namespace)
    if not args:
        return None
    try:
        return args.func(vars(args), cell)
    except Exception as error:
        # Report the failure on stderr but never propagate it.
        sys.stderr.write(str(error))
        sys.stderr.write('\n')
        sys.stderr.flush()
        return None
86e25bf31ed618ba598a51817b283004f162da7c
24,363
import logging
import re


def get_landsat_angles(productdir):
    """Get Landsat angle bands file path.

    Parameters:
        productdir: pathlib-style directory containing angle bands.

    Returns:
        sz_path, sa_path, vz_path, va_path: file paths to solar zenith,
        solar azimuth, view (sensor) zenith and view (sensor) azimuth.
    """
    img_list = list(productdir.glob('**/*.tif'))
    logging.info('Load Landsat Angles')

    def first_match(token):
        # First .tif whose full path contains the band token.
        pattern = re.compile('.*_{}_.*'.format(token))
        return [item for item in img_list if pattern.match(str(item))][0]

    sz_path = first_match('solar_zenith')
    sa_path = first_match('solar_azimuth')
    vz_path = first_match('sensor_zenith')
    va_path = first_match('sensor_azimuth')
    return sz_path, sa_path, vz_path, va_path
8b22cb6fc1ea8ae3f3869664aff2f165c418f20c
24,364
import json


def save_output(data, filename):
    """Write script results to *filename* as a JSON object.

    :param data: Data that is to be written to a file (stored under the
        "List" key).
    :param filename: destination file path.
    :return: 0 (void)
    """
    with open(filename, "w+") as fp:
        json.dump({"List": data}, fp)
    return 0
509d0281a54b609fad8989ce1378482ae6a52ceb
24,365
import logging


def load_log_config():
    """
    Configure the root logger at INFO level.
    @return: the root logger
    """
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
    return root_logger
d4d410dd94227441bee45b7f0c6470544aba00cc
24,367
import time


def roundtime(seconds, rounding=None):
    """
    Round the provided time and return it.

    `seconds`: time in seconds.
    `rounding`: None, 'floor' or 'ceil'.
    """
    year, month, day, hour, minute, second, weekday, yearday, dst = \
        time.localtime(seconds)
    # 'floor' snaps to the start of the local day, 'ceil' to its last
    # second; None keeps the original time-of-day. Any other value
    # raises KeyError, as before.
    replacements = {None: (hour, minute, second),
                    'floor': (0, 0, 0),
                    'ceil': (23, 59, 59)}
    hour, minute, second = replacements[rounding]
    return time.mktime(
        (year, month, day, hour, minute, second, weekday, yearday, dst))
8cbbc0c8e684e8ea6d1f58e8f9ffc2a40e185b06
24,368
def getShifts(filename):
    """
    Parse shift specifications from a file.

    Each line in the file should be of the form:
    'start0:7 start1:9 nToShift:1'

    :param filename: path of the file to read.
    :return: list of [start0, start1, nToShift] integer triples, one per
        non-blank line, in file order.
    """
    shifts = []
    # Fixed: the original read via open(filename).readlines() and never
    # closed the file handle; a context manager guarantees closure.
    with open(filename) as handle:
        for raw in handle:
            tokens = raw.strip().split()
            if tokens:
                # Each token looks like "name:value"; keep the value.
                shifts.append([int(tok.split(':')[-1]) for tok in tokens])
    return shifts
e30f2293ebcc2b52ebcd589e5d88f33573de0807
24,370
def remove_duplicates(data):
    """
    This function removes duplicates from the interactome

    :param data: Hash filtered data
    :return: data with duplicates removed
    """
    seen = dict()

    # Keep only the first occurrence of each protein pair; an interaction
    # stored as "p2\tp1" counts as a duplicate of "p1\tp2".
    for interaction, row in data.items():
        first, second = interaction.split("\t")
        reverse_key = second + "\t" + first
        if reverse_key not in seen:
            seen[interaction] = row

    # Final data is array of array like the one returned by MySQL
    return list(seen.values())
58d10452483ac0a5c4a5f5a64524c8ebb319b970
24,371
def envelope_to_geom(env):
    """convert envelope array to geojson

    The first four entries of ``env`` become a closed ring: the first
    entry is repeated at the end.
    NOTE(review): assumes each env entry is itself a coordinate pair —
    confirm against callers.
    """
    ring = [env[0], env[1], env[2], env[3], env[0]]
    return {
        "type": "Polygon",
        "coordinates": [ring],
    }
550765136207b898bac9202847fe8b239a91a20d
24,372
def is_json_object(json_data):
    """Check if the JSON data are an object.

    JSON objects decode to Python dicts, so this is a plain dict check.
    """
    return isinstance(json_data, dict)
7b72fc51752f9cfd00ba4815230401f1d40e888f
24,373
import typing


def naive_ctz2(x: int) -> typing.Tuple[int, int]:
    """Count trailing zeros, naively.

    Args:
        x: An int.

    Returns:
        A tuple (zeros, x >> zeros) where zeros is the number of trailing
        zeros in x. I.e. (x >> zeros) & 1 == 1 or x == zeros == 0.

    This is a slow reference implementation for checking correctness. Since
    it never needs to check the bit_length of x or do any multiplication,
    it may be acceptably fast for numbers that have a very small number of
    trailing zeros. As it inherently tracks the remaining significant bits
    of x, this implementation returns them as well as the number of zeros,
    hence its name "ctz2".

    >>> naive_ctz2(0)
    (0, 0)
    >>> naive_ctz2(1)
    (0, 1)
    >>> naive_ctz2(-1)
    (0, -1)
    >>> naive_ctz2(2)
    (1, 1)
    >>> naive_ctz2(-2)
    (1, -1)
    >>> naive_ctz2(40) # 0b101000 = 2**3 * 5
    (3, 5)
    >>> naive_ctz2(-40) # 0b1..1011000
    (3, -5)
    >>> naive_ctz2(37 << 100)
    (100, 37)
    """
    # Zero is the only value with no lowest set bit; special-case it.
    if x == 0:
        return 0, 0
    zeros: int = 0
    rest = x
    # Shift out low zero bits one at a time, counting as we go.
    while rest & 1 == 0:
        rest >>= 1
        zeros += 1
    return zeros, rest
863998c3c7a8b8bd812910d07a911843f43bc549
24,374
import os
import json


def load_order(note: str, group: str = 'original') -> dict:
    """
    Load in a pre-constructed order by default if None is specified. Otherwise, load the order
    from a .yaml file

    :param group: Group name to find orders (./orders/group/*.json)
    :param note: The order note (will become note.json)
    :return: the parsed JSON order as a dict
    :raises IOError: if the order file does not exist
    """
    filename = 'espa_validation/orders/{group}/{note}.json'.format(group=group, note=note)
    if not os.path.exists(filename):
        raise IOError('File does not exist: {}'.format(filename))
    try:
        # Fixed: use a context manager so the file handle is closed even
        # when json.load raises (the original leaked the open handle).
        with open(filename) as order_file:
            return json.load(order_file)
    except Exception:
        # Best-effort diagnostic, then propagate the original error.
        print("Problem loading file: {}".format(filename))
        raise
210f71fc5183a559017463737e3134044a6de8c6
24,375
import os


def get_profile_name(profile):
    """
    Return a koji profile name.

    :param str profile: profile name, like "koji" or "cbs", or None.
                        If None, we will use return the "KOJI_PROFILE"
                        environment variable. If we could find no profile
                        name, raise ValueError.
    :returns: str, the profile name
    """
    # The explicit argument wins; otherwise fall back to the environment.
    chosen = profile or os.getenv('KOJI_PROFILE')
    if not chosen:
        raise ValueError('set a profile "koji" argument for this task, or set '
                         'the KOJI_PROFILE environment variable')
    return chosen
dd1e07734cbe8145d632c2a401f53096bb988949
24,376
def safe_tile(tile) -> bool:
    """Determines if the given tile is safe to move onto."""
    kind = tile["type"]
    if kind in ("Blank", "Doodah"):
        return True
    # Only the very tip of a snake's tail (segment 0) is safe to enter.
    return kind == "SnakeTail" and tile["seg"] == 0
ffdd3ace46dfe094b49c28e56bebe10e82311158
24,378
def _field_column_enum(field, model): """Returns the column enum value for the provided field""" return '{}_{}_COL'.format(model.get_table_name().upper(), field.name.upper())
f0653eb6ccf4c0864a05715643021759cf99e191
24,379
def get_reply_message(message): """Message the `message` is replying to, or `None`.""" # This function is made for better compatibility # with other versions. return message.reply_to_message
b2d253047eb1bdcb037b979f1819089ae88fb771
24,380
from typing import Callable from typing import List import inspect def _get_fn_argnames(fn: Callable) -> List[str]: """Get argument names of a function. :param fn: get argument names for this function. :returns: list of argument names. """ arg_spec_args = inspect.getfullargspec(fn).args if inspect.ismethod(fn) and arg_spec_args[0] == "self": # don't include "self" argument arg_spec_args = arg_spec_args[1:] return arg_spec_args
a464f1fb21a454f4e6a854a64810ba48cdb6f9f8
24,381
import re


def _parse_name(seq):
    """Parse a fasta header and remove > and new lines.  If [metadata is True] then
    parse the prophage metadata from the header.

    Current metadata: phage-circular and prophage-<start>:<end>

    Returns a 3-tuple: (cleaned name, "phage-circular" if that flag was
    present else "", the "prophage-<start>:<end>" token if present else "").

    NOTE(review): when ``seq`` is falsy (empty string / None) the raw value
    is returned instead of a 3-tuple — callers that unpack the result must
    guard against this; confirm whether that is intended.
    """
    if not seq:
        return seq
    clean = seq.replace(">", "").replace("\n", "")
    # Strip the circularity flag before extracting the prophage range so it
    # does not end up in the cleaned name.
    clean = clean.replace("phage-circular", "")
    match = re.search(r"prophage-\d+:\d+", clean)
    prophage = match[0] if match else ""
    # The flag and range are reported from the original header text.
    return clean.replace(prophage, "").strip(), \
        "phage-circular" if "phage-circular" in seq else "", prophage
c87d6ed680a4d3e5200a6d555aac68df24e513b7
24,382
def sqrt(number):
    """
    Calculate the floored square root of a number

    Args:
       number(int): Number to find the floored squared root
    Returns:
       int: Floored Square Root, or an error message string when the input
       is not a non-negative integer
    """
    if type(number) != int:
        return "INVALID INPUT, ENTER AN INTEGER!"
    if number < 0:
        # Fixed: the original fell through the search loop and silently
        # returned None for negative input.
        return "INVALID INPUT, ENTER A NON-NEGATIVE INTEGER!"

    # Binary search over [0, number] for the floor of the square root.
    start, end = 0, number + 1
    while start < end:
        mid = start + (end - start)//2
        if mid*mid == number:
            return mid
        elif (end - start) == 1:
            # Interval narrowed to one candidate: that is the floor.
            return start
        elif mid*mid > number:
            end = mid
        else:
            start = mid
c62d063d5a0cdfe740777f1a45133ddc77a2d40d
24,383
from numpy import indices,sqrt,arctan2,arctan,sin,cos,square,argsort


def geo_dictionary(image,X0=1991,Y0=1973,distance=185.8,pixelsize=0.089,polarization=1):
    """Creates a dictionary containing psi, theta, polfactor, and geofactor in the same shape as image.
    Also includes X0, Y0, sort_indices, and reverse_indices.  sort_indices are used to sort any
    flattened image in increasing r; reverse_indices are used to reconstruct an image from a
    flattened, r-sorted array.

    Parameters (assumed units — TODO confirm against the experiment setup):
        image: 2D detector array; only its shape is used here.
        X0, Y0: beam-center pixel coordinates.
        distance: sample-to-detector distance, in the same length units
            as pixelsize.
        pixelsize: detector pixel size.
        polarization: 1 for horizontal, -1 for vertical polarization.
    """
    # polarization = 1 for Horiz polarization; -1 for Vert polarization.
    # Compute quantities in same shape as image
    row,col = image.shape
    y_indices,x_indices = indices((row,col))
    # Radial distance from the beam center, in the same units as pixelsize.
    r = pixelsize*sqrt((y_indices-Y0)**2+(x_indices-X0)**2)
    # Azimuthal angle around the beam center (note the negated arctan2).
    psi = -arctan2((y_indices-Y0),(x_indices-X0))
    # Half the scattering angle: 2*theta = arctan(r / distance).
    theta = arctan(r/distance)/2
    polfactor = (1+square(cos(2*theta))-polarization*cos(2*psi)*square(sin(2*theta)))/2
    # Obliquity correction for a flat detector.
    geofactor = cos(2*theta)**3

    # Generate sort_indices and reverse_indices
    sort_indices = argsort(r.flatten())
    # argsort of the sort order gives the permutation that undoes it.
    reverse_indices = argsort(sort_indices)

    # Assemble dictionary
    geo_dict = {'X0':X0,'Y0':Y0,'psi':psi,'theta':theta,\
                'polfactor':polfactor,'geofactor':geofactor,\
                'sort_indices':sort_indices,'reverse_indices':reverse_indices,\
                'rpix':r/pixelsize}
    return geo_dict
bfeba63fe34d2ffc432657bfc3abee24738c9b45
24,384
def check_registration_quality(quality_metrics_thresholds, sub_quality_metrics):
    """This function checks whether the registration was correct or not for the subject being evaluated.

    Args:
        quality_metrics_thresholds (tuple): registration quality metrics; specifically, it contains
            (p3_neigh_corr_struct_2_tof, p97_mut_inf_struct_2_tof)
        sub_quality_metrics (tuple): it contains the registration quality metrics for the subject
            being evaluated; specifically, (struct_2_tof_nc, struct_2_tof_mi)

    Returns:
        registration_accurate_enough (bool): it indicates whether the registration accuracy is high
            enough to perform the anatomically-informed sliding window
    """
    # Registration passes when correlation exceeds its floor and mutual
    # information stays below its ceiling (i.e. neither metric is an outlier).
    return (sub_quality_metrics[0] > quality_metrics_thresholds[0]
            and sub_quality_metrics[1] < quality_metrics_thresholds[1])
b726e98700acbd21a8ac1c5406492fcf65b5ac04
24,385
def drop_duplicate_fill0(result_dict):
    """
    Drop value-0 from detection result

    Keeps only the first entry for each distinct label and replaces any
    None values with 0 across all the parallel lists in ``result_dict``.
    """
    labels = result_dict['labels']
    seen_labels = set()
    keep = []
    # Record the index of the first occurrence of each label.
    for idx, label in enumerate(labels):
        if label not in seen_labels:
            seen_labels.add(label)
            keep.append(idx)

    # Rebuild every parallel list from the kept indices, mapping None -> 0.
    return {
        key: [0 if values[idx] is None else values[idx] for idx in keep]
        for key, values in result_dict.items()
    }
c0f8b7c7d44bba044cc803516e1b8bb595bbd409
24,387
import decimal


def fix_decimals(obj):
    """Removes the stupid Decimals

    See: https://github.com/boto/boto3/issues/369#issuecomment-302137290

    Recursively replaces Decimal values: whole numbers become ints and
    everything else a float. Lists and dicts are updated in place and
    returned; any other value passes through unchanged.
    """
    if isinstance(obj, decimal.Decimal):
        return int(obj) if obj % 1 == 0 else float(obj)
    if isinstance(obj, list):
        for idx, item in enumerate(obj):
            obj[idx] = fix_decimals(item)
        return obj
    if isinstance(obj, dict):
        for key in obj:
            obj[key] = fix_decimals(obj[key])
        return obj
    return obj
51e650e0b27da1a6f8822487f7b52a9c1aac9e39
24,389
def get_primary_key(s3_index_row):
    """
    returns the primary key of the s3Index dynamo table.

    Args:
        s3_index_row(dict): dynamo table row for the s3Index

    Returns:
        (dict): primary key, containing only "object-key" and "version-node"
    """
    return {attr: s3_index_row[attr]
            for attr in ("object-key", "version-node")}
b5ca8ccb82dd1410626f205f0a8ea5486a2bcb8a
24,390
def compute_iou(bbox0, bboxes0):
    """
    bbox0 is (cx, cy, scale, score, x, y, h, w)
    last 4 bit is the standard bbox. For this ignore score.

    Returns the IoU of bbox0 against each bbox in bboxes0, comparing only
    the trailing (x, y, h, w) components.

    NOTE(review): boxes are assumed to be numpy arrays so that
    ``box[:2] + box[2:]`` is element-wise addition (on plain lists it
    would concatenate) — confirm against callers.
    """
    def iou(boxA, boxB):
        boxA_area = boxA[2] * boxA[3]
        boxB_area = boxB[2] * boxB[3]
        min_x = max(boxA[0], boxB[0])
        min_y = max(boxA[1], boxB[1])
        endA = boxA[:2] + boxA[2:]
        endB = boxB[:2] + boxB[2:]
        max_x = min(endA[0], endB[0])
        # Fixed: the intersection's upper bound must be the MIN of the box
        # ends; the original used max here, inflating the overlap.
        max_y = min(endA[1], endB[1])
        # Fixed: clamp each side at 0 so disjoint boxes yield zero overlap
        # (previously negative*negative could give a spurious positive
        # area), and drop the +1 offsets, which were inconsistent with the
        # h*w areas and made identical boxes score IoU > 1.
        w = max(0, max_x - min_x)
        h = max(0, max_y - min_y)
        inter_area = float(w * h)
        if inter_area == 0:
            return 0.0
        return inter_area / (boxA_area + boxB_area - inter_area)
    return [iou(bbox0[-4:], bbox[-4:]) for bbox in bboxes0]
9d29f90e4fbf57cf2bd6372f8fd608cf77799568
24,391
import csv


def read_student_names(file):
    """If file ends in .csv, read the CSV file where each username should reside in
    the second column and the first column contains "student". If not, the file
    should contain several lines, one username per line. Return a list of students
    thus extracted."""
    with open(file, 'r') as handle:
        if file.endswith(".csv"):
            return [record[1] for record in csv.reader(handle) if record[0] == "student"]
        return handle.read().splitlines()
d52716648bba85e776ef431ce879b8ea37068e8d
24,392
def project_list(d, ks):
    """Return a list of the values of `d` at `ks`."""
    # Missing keys raise KeyError, exactly like direct subscripting.
    return list(map(d.__getitem__, ks))
62b82429e4c8e18f3fb6b1f73de72b1163f460bf
24,393
def list_users(iam_client):
    """
    List all IAM users

    :param iam_client: a boto3 IAM client.
    :return: the "Users" list from a single ``list_users`` call.
        NOTE(review): the IAM API paginates; users beyond the first page
        are not returned here — confirm whether pagination is needed.
    """
    return iam_client.list_users()['Users']
da3509377968d642469e384bd6143783528e7f76
24,394
import click


def validate_speed_limit(ctx, param, value):
    """
    The speed limit must be a positive integer greater than 10 km/h (lower values might trigger
    the car-not-moving oracle

    :param ctx: click context (unused).
    :param param: the click parameter being validated (only used in the error message).
    :param value: raw option value; must be convertible with int().
        NOTE(review): a non-numeric value raises ValueError from int() before
        the UsageError is reached, and exactly 10 is accepted even though the
        message says "greater than 10" — confirm intent.
    :return: the validated speed limit as an int.
    :raises click.UsageError: when the value is below 10.
    """
    if int(value) < 10:
        raise click.UsageError(
            'The provided value for ' + str(param) + ' is invalid. Choose a value greater than 10')
    else:
        return int(value)
6d35caba192fe2190fcf343a4475f350c1e65fe7
24,396
def snip(content):
    """
    This is a special modifier, that will look for a marker in
    ``content`` and if found, it will truncate the content at that point.
    This way the editor can decide where he wants content to be
    truncated, for use in the various list views.

    The marker we will look for in the content is {{{snip}}}

    :param content: the text to truncate.
    :return: content up to the marker followed by "...", or the content
        unchanged when the marker is absent.
    """
    marker = '{{{snip}}}'
    position = content.find(marker)
    if position == -1:
        # Fixed: without this guard, find() returning -1 silently chopped
        # the last character and appended "..." even with no marker.
        return content
    return content[:position] + "..."
6d836f610dfe1b49d4f1971cc3c6a471b789f475
24,397
def build_search_query(query, page, per_page) -> dict:
    """
    Build the multi-search query for Elasticsearch.

    :param str query: text to match against every field.
    :param int page: 1-based page number.
    :param int per_page: number of hits per page.
    :return: the Elasticsearch request body as a dict.
    """
    # Pagination is expressed as an absolute offset plus a page size.
    offset = (page - 1) * per_page
    return {
        "query": {"multi_match": {"query": query, "fields": ["*"]}},
        "from": offset,
        "size": per_page,
    }
f82dfdd11b0ad9bf2e3dfce9d723b46944ed2e53
24,399
def white_dwarfs(ds, data):
    """
    Returns slicing array to determine white dwarf particles

    :param ds: dataset handle (unused; kept for a uniform filter signature).
    :param data: field container exposing 'particle_type' and 'particle_mass'.
    :return: boolean mask selecting type-12 particles with positive mass.
    """
    is_wd_type = data['particle_type'] == 12
    has_mass = data['particle_mass'] > 0.0
    # Element-wise product of the two boolean masks is their conjunction.
    return is_wd_type * has_mass
83217024591af0e6f6918227be68251c452a0a08
24,400
import json


def key_dumps(obj):
    """Return serialized JSON keys and values without wrapping object."""
    serialized = json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
    # first and last lines will be "{" and "}" (resp.), strip these
    body_lines = serialized.split('\n')[1:-1]
    return '\n'.join(body_lines)
c1b303de6ed34d5308aa2486f2e04a1f6d932bb8
24,401
import os


def file_name(path):
    """Get the file name without directory and file type.

    Note: everything after the first '.' is treated as the extension, so
    "archive.tar.gz" yields "archive".
    """
    base = os.path.basename(path)
    stem, _, _ = base.partition('.')
    return stem
5be1f2a8bb971e3088c9ae5e91cf53342f6d10c7
24,403
def header_string(headers_dict):
    """Given a dictionary of headers, return a canonical string representation."""
    # Only these headers participate, in this fixed order; each present
    # value is terminated by a newline.
    canonical_order = ("Content-Type", "Date", "Content-MD5")
    return "".join(headers_dict[name] + "\n"
                   for name in canonical_order
                   if name in headers_dict)
7099ee7169e9fd2df574c8ce367bb7dfd353ba22
24,404
def list_indexer(lst, index):
    """:yaql:operator indexer

    Returns value of sequence by given index.

    :signature: left[right]
    :arg left: input sequence
    :argType left: sequence
    :arg right: index
    :argType right: integer
    :returnType: any (appropriate value type)

    .. code::

        yaql> ["a", "b"][0]
        "a"
    """
    # Delegates to Python sequence indexing: negative indices and
    # IndexError propagation behave exactly as in plain Python.
    return lst[index]
c13e7ce88a464ba13f6a114cb58daa0e7e051705
24,405
def append_to(element, to=None):
    """
    Append an element to a list. If no list is provided, create a new list
    containing only the element.

    :param element: the thing to append
    :param to: the list to append it to (a fresh list is created when omitted)
    :return: the list with the appended thing
    """
    # Fixed: the original used a mutable default (to=[]), so every
    # default-argument call shared — and kept growing — a single list.
    if to is None:
        to = []
    to.append(element)
    return to
736805dc9ed075ffa78f96868a3a4bb51ae6844a
24,407
def getUnique(df):
    """Calculates percentage of unique reads.

    Expects a two-column frame where column 0 holds stat names and column 1
    their counts; returns total_nodups / total_mapped.
    """
    nodups = df.loc[df[0] == "total_nodups", 1].values[0]
    mapped = df.loc[df[0] == "total_mapped", 1].values[0]
    return nodups / mapped
d5922fe59b95ec56c7310dce955ff85d97d6d9f1
24,411
import string
import math


def line_surprise(model, line, char_domain=string.printable):
    """Calculate the surprise of observing a line under the given
    character-from-prefix model. Ignores characters that aren't in char_domain.
    """
    total = 0
    for position, symbol in enumerate(line):
        if symbol not in char_domain:
            continue
        # Surprise is the negative log2 probability of each character
        # conditioned on the prefix that precedes it.
        probability = model.conditional_probability(line[:position], symbol)
        total -= math.log(probability, 2)
    return total
6eb230af72e74c1be3edba8cf909cb46e5086a32
24,412
def no_none_get(dictionary, key, alternative):
    """Gets the value for a key in a dictionary if it exists and is not None.

    dictionary is where the value is taken from.
    key is the key that is attempted to be retrieved from the dictionary.
    alternative is what returns if the key doesn't exist or maps to None."""
    # dict.get covers both the missing-key and the explicit-None case in a
    # single lookup (the original built list(dictionary.keys()) per call).
    value = dictionary.get(key)
    return value if value is not None else alternative
68b6f1d3cdd736f5f373093058e630db039be781
24,413
def similar(real, predicted):
    """
    Compare if the captcha code predicted is close to the real one

    :param real string: Real captcha string
    :param predicted string: Predicted captcha string
    :return wrong_letter_count float: fraction of letters predicted correctly
            wrong_letter_dict dict: Dict of all wrong letters as key and a
            counter of failed as value
    """
    mismatches = {}
    errors = 0
    # Compare position by position; extra characters in the longer string
    # are ignored by zip.
    for expected, guessed in zip(real, predicted):
        if expected != guessed:
            mismatches[expected] = mismatches.get(expected, 0) + 1
            errors += 1
    # Convert the error count into an accuracy fraction.
    accuracy = 1.0 - errors / len(real)
    return accuracy, mismatches
e5595e34b81e770604b383dbd48906efc5a91173
24,414
import random


def data_split(src_list, sample_size=550):
    """
    Usage: randomly spliting dataset

    :param src_list: sequence to draw indices for.
    :param sample_size: number of distinct indices to sample (default 550,
        preserving the original hard-coded behavior).
    :return: list of unique random indices into ``src_list``.
    :raises ValueError: if ``sample_size`` exceeds ``len(src_list)``.
    """
    # Generalized: the sample size was previously hard-coded to 550.
    counter_list = random.sample(range(len(src_list)), sample_size)
    return counter_list
298b24f10482cfa3bb613fe742f84ce5228187a9
24,416
from typing import List

import requests


def get_github_ips() -> List[str]:
    """Gets GitHub's Hooks IP Ranges

    Performs a live, unauthenticated HTTP GET against the public GitHub
    meta API; GitHub requires the custom User-Agent header.

    Returns:
        List of IP Ranges (the CIDR strings under the "hooks" key).

    NOTE(review): no timeout and no raise_for_status — a hung or failed
    request will block indefinitely or surface as KeyError/JSON errors
    downstream; confirm whether hardening is wanted.
    """
    resp = requests.get(
        'https://api.github.com/meta',
        headers={
            'User-Agent': 'hreeder/security-group-synchronizer'
        }
    )
    data = resp.json()
    return data['hooks']
74a5c4fb62b94dc9bf8cba41cf75da4dbb11f15c
24,417