content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import os


def get_cityscapes_path():
    """Return the path to the CityScapes folder, prompting the user.

    The previously chosen path is persisted in ``cityscapes_path.txt`` and
    offered as the default on the next call.

    Returns:
        str: The (possibly updated) CityScapes folder path.
    """
    filename = "cityscapes_path.txt"
    cs_path = "/"
    # Load the previously saved path, if any, to offer as the default.
    if os.path.exists(filename):
        with open(filename) as f:
            cs_path = f.read()
    # Ask the user for the actual path
    user_input = input("Enter path to CityScapes folder [%s]: " % cs_path)
    # Did the user enter something?
    if user_input != "":
        # BUG FIX: the file must be opened for writing; the original opened
        # it in the default read mode, so f.write() raised
        # io.UnsupportedOperation before anything was persisted.
        with open(filename, "w") as f:
            f.write(user_input)
        cs_path = user_input
    return cs_path
a478ee8f2f93b58a93c452eaa4303465f872a0ba
41,372
def center():
    """center() -> array with x, then y

    Return the center values of a group's display; the values are suitable
    to be passed to nuke.zoom as the DAG center point. Like so::

        center = nuke.center()
        zoom = nuke.zoom()
        print center[0]
        print center[1]
        ## move DAG back to center point without changing zoom.
        nuke.zoom( zoom, center )

    @return: Array of x, y.
    """
    # Stub: the real values are supplied by the Nuke runtime.
    return []
d5aa8a21ac8453be734e7fbd85432c50ca17b0f3
41,373
import ipaddress


def list_all_available_cidr(jnj_root_cidr_list, allocated_cidr_list, subnet_prefix):
    """
    Find all CIDRs of specified size from the provided top level CIDR list
    in the region.

    Args:
        jnj_root_cidr_list: top-level CIDRs allocated to region
        allocated_cidr_list: CIDRs currently in use in region
        subnet_prefix: requested CIDR size (prefix length)

    Returns:
        list of available CIDR strings of the requested size
    """
    requested_prefix = int(subnet_prefix)
    available = []
    for root in jnj_root_cidr_list:
        root_net = ipaddress.IPv4Network(root)
        # A root smaller than the requested subnet cannot contain one.
        if int(root_net.prefixlen) > requested_prefix:
            continue
        # Only allocations overlapping this root can conflict with it.
        in_use = [
            ipaddress.IPv4Network(block)
            for block in allocated_cidr_list
            if ipaddress.IPv4Network(block).overlaps(root_net)
        ]
        # Keep every theoretical subnet that clears all allocations.
        for candidate in root_net.subnets(new_prefix=requested_prefix):
            if not any(candidate.overlaps(used) for used in in_use):
                available.append(candidate.with_prefixlen)
    return available
caf84de05b7c8b6a7246062e2f34ce57329cf6b7
41,374
import pydantic


def object_is_serializable() -> bool:
    """Provides compatibility abstraction to define whether type object
    is serializable or not.

    Returns:
        bool: True when the installed pydantic is version 1.9 or newer.
    """
    # BUG FIX: the original compared version *strings* lexicographically
    # ("1.10.x"[:3] == "1.1" < "1.9"), misclassifying pydantic >= 1.10.
    # Compare numeric (major, minor) tuples instead.
    parts = pydantic.version.VERSION.split(".")
    major_minor = tuple(
        int("".join(ch for ch in part if ch.isdigit()) or 0)
        for part in parts[:2]
    )
    return major_minor >= (1, 9)
27fa7539df94a1cd931bbb1099cbe6cad12b957c
41,375
def is_unibipartite(graph):
    """Internal function that returns whether the given graph is a
    uni-directional bipartite graph.

    Parameters
    ----------
    graph : GraphIndex
        Input graph

    Returns
    -------
    bool
        True if the graph is a uni-bipartite.
    """
    src, dst, _ = graph.edges()
    # Uni-bipartite iff no node appears on both the source and target side.
    src_nodes = set(src.tonumpy())
    dst_nodes = set(dst.tonumpy())
    return src_nodes.isdisjoint(dst_nodes)
d7603408c4a99c8028a944dd699b0728cce57760
41,376
def bss_host_xname(host):
    """Retrieves the xname from the BSS host object."""
    # The xname is stored under the host record's "ID" key.
    xname = host["ID"]
    return xname
08a05819502815b30c71929ee66613ab164210f3
41,377
from typing import OrderedDict


def unique(iterable, key=None):
    """Return the unique elements of *iterable*, preserving first-seen order.

    Args:
        iterable: Source of elements.
        key: Optional function mapping an element to its uniqueness key;
            when omitted, the elements themselves are compared.

    Returns:
        list: The first occurrence of each distinct element/key.
    """
    # Plain dicts preserve insertion order (Python 3.7+), so there is no
    # need for the deprecated runtime use of typing.OrderedDict.
    if key is None:
        return list(dict.fromkeys(iterable))
    seen = {}
    for element in iterable:
        # setdefault keeps the first element observed for each key.
        seen.setdefault(key(element), element)
    return list(seen.values())
ec20ceb5de991ad8828920eeac060c8651ee0da5
41,379
def preprocess_cpydata(pcpy, gisdata, spatial=True):
    """Create the input dictionary used to initialize CanopyGrid.

    Args:
        pcpy: canopy parameter dict; its 'state' entry is replaced on the
            returned object ('loc' may also be updated).
        gisdata: dict with 'cmask' and, when spatial, 'LAI_conif',
            'LAI_decid', 'cf', 'hc' and optionally 'lat'/'lon'.
        spatial: when True, take canopy structure from the GIS rasters;
            otherwise scale every state field by the catchment mask.

    Returns:
        pcpy with its 'state' (and possibly 'loc') entries updated.
    """
    state = pcpy['state'].copy()
    if spatial:
        # Structural canopy attributes come straight from the GIS data.
        state['lai_conif'] = gisdata['LAI_conif']
        state['lai_decid_max'] = gisdata['LAI_decid']
        state['cf'] = gisdata['cf']
        state['hc'] = gisdata['hc']
        # Only the storage states are masked in the spatial case.
        for field in ('w', 'swe'):
            state[field] *= gisdata['cmask']
        if {'lat', 'lon'}.issubset(gisdata.keys()):
            pcpy['loc']['lat'] = gisdata['lat']
            pcpy['loc']['lon'] = gisdata['lon']
    else:
        for field in state.keys():
            state[field] *= gisdata['cmask']
    pcpy['state'] = state
    return pcpy
8c45a9d32249dd09ea0eda45da0baa6badf04abf
41,380
def get_longest_string(in_list):
    """
    Get the longest string(s) in a list.

    :param in_list: list of strings
    :return: single string if there's only one with the max length, a list
        of strings if there are several, or None for an empty list.
    """
    if not in_list:
        return None
    longest = max(in_list, key=len)
    winners = [s for s in in_list if len(s) == len(longest)]
    return winners[0] if len(winners) == 1 else winners
ccff7bacf938725dccbee93e52cd6fcbe9064c43
41,382
def numpy(tensor):
    """Convert a torch.tensor to a 1D numpy.ndarray."""
    # Detach from the autograd graph, move to host memory, then flatten.
    detached = tensor.detach().cpu()
    return detached.numpy().ravel()
cdea8e80a6129ba846d9f69dc4825bf574e688ac
41,383
def get_coverage_category(format_keys, indivs):
    """
    Parse a vcf entry and set it's coverage category based on the min
    depth of all samples.
    """
    # Ascending thresholds; the result is the highest threshold that every
    # sample with a known depth satisfies.
    coverages = [0, 1, 10, 15]
    dp_index = format_keys.index("DP")

    def depth_of(fields):
        # '.' means the depth is unknown for this sample; skip it.
        raw = fields[dp_index]
        return None if raw == '.' else int(raw)

    depth_cat = None
    for threshold in coverages:
        covered = True
        for sample in indivs:
            depth = depth_of(sample.split(':'))
            if depth is not None and depth < threshold:
                covered = False
        if covered:
            depth_cat = threshold
    return depth_cat
df850514a333a090a96a1abb75aa4b051819b465
41,385
import os


def isBZ2(fn):
    """
    Tests whether a file is bz2-compressed.

    :param fn: a filename
    :type fn: str
    :returns: True if fn is bz2-compressed otherwise False
    """
    assert os.path.exists(fn)
    # A bz2 stream starts with "BZh", the block-size digit '1'..'9', and a
    # six-byte pi constant. BUG FIX: the original compared against the
    # literal level-9 header only, so files compressed at levels 1-8 were
    # reported as not bz2.
    with open(fn, 'rb') as fi:
        header = fi.read(10)
    return (
        len(header) == 10
        and header[:3] == b'BZh'
        and header[3:4] in b'123456789'
        and header[4:10] == b'1AY&SY'
    )
13bd08390003565084e73e39ee0e9c8692c5662c
41,386
import glob
import os


def fetch_data_path(data_dir):
    """
    Fetch all data path
    :param data_dir: the root folder where data stored
    :return: data path (pet_path, ct_path, mask_path), dtype: tuple
    """
    data_paths = list()
    paths_list = glob.glob(os.path.join(os.path.dirname(__file__), data_dir, "*"))
    for i, subject_dir in enumerate(paths_list):
        # BUG FIX: the original switched from "STS_00x" to "STS_0xx" at
        # i < 10, producing "STS_0010" for subject 10. Zero-padding the
        # subject number to three digits handles every count uniformly.
        subject_id = "STS_%03d" % (i + 1)
        pet_path = os.path.join(subject_dir, subject_id + "_PT_COR_16.tiff")
        ct_path = os.path.join(subject_dir, subject_id + "_CT_COR_16.tiff")
        mask_path = os.path.join(subject_dir, subject_id + "_MASS_PET_COR_16.tiff")
        data_paths.append((pet_path, ct_path, mask_path))
    return data_paths
e511d173806333fac706f8730fbbd1f301915b53
41,387
def _is_dict_match(got_dict: dict, expected_subdict: dict): """Match values and keys from dict (non recursive).""" for expected_key, expected_value in expected_subdict.items(): if expected_key not in got_dict: return False if got_dict[expected_key] != expected_value: return False return True
8f4f91bd7352b3536a5be2f1a70c1bd36aecbe8a
41,389
import os


def find_ion_types_from_OUTCAR(cal_loc="."):
    """
    input arguments:
        -cal_loc (str): the location of the calculation. Default: "."
    output:
        a list of ion types which have the same order as the atomic
        coordinates. Each entry of the list is a list of length, the first
        element of which is atomic species and the second of which is the
        integer number of that species.
    """
    with open(os.path.join(cal_loc, "OUTCAR"), "r") as f:
        ions_per_type = []
        ions_types = []
        # Two pieces of information are harvested while streaming the file:
        # species names from "TITEL" lines and counts from the single
        # "ions per type" line. NOTE(review): the early-exit logic assumes
        # the TITEL lines appear before "ions per type", as in typical
        # VASP OUTCAR files — confirm for other VASP versions.
        for line in f:
            if "ions per type" in line:
                # e.g. "ions per type =  4  2" -> [4, 2]
                m = line.split("=")[1].strip().split()
                assert len(m) > 0, "Error: fail to extract ions per type from the line below:\n%s" % line
                ions_per_type = [int(item) for item in m]
                # Stop once a count exists for every species seen so far.
                if len(ions_types) == len(ions_per_type):
                    break
                else:
                    continue
            if "TITEL" in line:
                # e.g. "TITEL  = PAW_PBE Fe_pv 02Aug2007" -> "Fe_pv" -> "Fe"
                m = line.strip().split()[-2]
                if "_" in m:
                    m = m.split("_")[0]
                ions_types.append(m)
                # Never triggers before "ions per type" is seen, because
                # ions_per_type is still empty up to that point.
                if len(ions_types) == len(ions_per_type):
                    break
    # Pair each species with its count, preserving coordinate order.
    return [[ion_type, ions] for ion_type, ions in zip(ions_types, ions_per_type)]
a08a5bf9e5947f2455ba687e8b58df33fabcfa3c
41,390
def calc_node_coords(tiling_edge_list, first_node_offset=0):
    """
    For a single tiling path (tiling_edge_list is a list of edges for a
    particular contig) calculates the genomic coordinate of every node in
    the path.
    In case there are cycles in the tiling path, the existing node's
    coordinate will be overwritten.
    `first_node_offset` refers to the length of the first node. If not
    specified, the contig length should not consider the length of the
    first node.
    """
    if not tiling_edge_list:
        return {}, 0
    # Anchor the first node; every other coordinate is derived from it.
    first_edge = tiling_edge_list[0]
    coord_map = {first_edge.v: first_node_offset}
    contig_len = 0
    for edge in tiling_edge_list:
        if edge.v not in coord_map:
            raise Exception(
                'Tiling path is not in sorted order. Node "{v!r}" does not yet have an assigned coordinate.'.format(v=edge.v))
        # Advance by the aligned span of this edge.
        coord = coord_map[edge.v] + abs(int(edge.b) - int(edge.e))
        coord_map[edge.w] = coord
        contig_len = max(contig_len, coord)
    return coord_map, contig_len
f0d2e310bf68328f4edc4ba35619b7b242d9ff10
41,393
def bytes_to_int(byte_string) -> int:
    """
    :param byte_string: a byte string such as b'\\xd4\\x053K\\xd8\\xea'
    :return: integer value of the byte stream
    """
    # Big-endian: the first byte is the most significant.
    return int.from_bytes(byte_string, byteorder="big")
932b6cb3e41fa0c1afdae2aa1ca765e64ce44986
41,394
from pathlib import Path


def path_type(value) -> Path:
    """argparse type for converting string into a pathlib.Path object"""
    candidate = Path(value)
    # Reject paths that do not exist so argparse reports a clear error.
    if candidate.exists():
        return candidate
    raise ValueError(f"Given Path not found: {value}")
8b54b1c60cdb312f95c655d72ca542de7afdd826
41,396
import math


def li_times(i, epsilon, delta):
    """
    Computes li, the optimal number of times to loop while sampling the
    equivalence oracle. This li is the reason pac-basis is sooo much
    faster than the original horn1 algorithm.

    Parameters:
    -----------------------------------
    i : int
        Number of times the equivalence oracle has been called already
    epsilon : float (0, 1)
        Tolerance for error in accuracy for the pac-basis
    delta : float (0, 1)
        Tolerance for confidence in confidence for the pac-basis
    """
    # log2(delta) expressed via natural logs, as in the reference formula.
    log2_delta = math.log(delta) / math.log(2)
    return (1.0 / epsilon) * (i - log2_delta)
006afaaed14902e58d73dbb642d04fc468d684c2
41,397
import torch


def size_getter(shape):
    """
    Helper function for defining a size object.
    :param shape: The shape
    :type shape: int|tuple[int]
    :return: Size object
    :rtype: torch.Size
    """
    if shape is None:
        return torch.Size([])
    if isinstance(shape, (tuple, list)):
        return torch.Size(shape)
    # A bare int denotes a single dimension.
    return torch.Size((shape,))
d6ec148770871ef636cb18a499ef30cf82997ac9
41,399
def user_client_database(database, user_client):
    """
    Returns:
        Database: Named 'auth_test', with ``user_client`` set as the
        client in an unauthed state.
    """
    # `database` is only a fixture dependency; the lookup itself goes
    # through the unauthenticated client.
    db_name = "auth_test"
    return user_client.get_database(db_name)
c23b7f8b3f99466ce165d3f26389f8da6599568b
41,401
def create(name, vcpus, ram, disk, **kwargs):
    """Create flavor(s)."""
    # Extra keyword arguments are merged into the flavor body and may
    # override the base fields, matching the original update() semantics.
    flavor = {'name': name, 'vcpus': vcpus, 'ram': ram, 'disk': disk}
    flavor.update(kwargs)
    return '/flavors', {'json': {'flavor': flavor}}
06f046ab4934bb9fb49da0e413cc666d365795ff
41,402
def list_diff(list1, list2):
    """
    Returns a list with all the elements of the first list that are not
    contained in the second list.

    NOTE(review): the previous docstring stated the operands the other
    way around; the code has always computed ``list1`` minus ``list2``.

    :param list1: list
    :param list2: list
    :return: list
    """
    return [i for i in list1 if i not in list2]
aa7869d879d2b53fe584f74ced7983ffc9e98710
41,404
def format_percent(x, _pos=None):
    """
    usage
        plt.gca().yaxis.set_major_formatter(format_percent)
    """
    percent = 100 * x
    # Show one decimal only when the value is not (close to) whole.
    if abs(percent - round(percent)) > 0.05:
        return r"${:.1f}\%$".format(percent)
    return r"${:.0f}\%$".format(percent)
a88436ef9474675b76bef3f82e830c3e3e6c8b71
41,406
def total_weight(graph, path):
    """Sum the weights of the edges between nodes in path

    Args:
        graph (Graph): A graph containing nodes and edges between them
        path (list of str): A list of strings representing nodes in graph

    Returns:
        int: The total weight of all the implied edges in path
    """
    # Consecutive node pairs along the path form the traversed edges.
    return sum(graph.weight(a, b) for a, b in zip(path, path[1:]))
ec7c88f913a23bc5bf03fa4f6724c0f6af8fc437
41,409
import re


def encode(plain_string):
    """Return encoded string."""
    # Each match is (run, char): a maximal run of one repeated
    # letter or space.
    pieces = []
    for run, char in re.findall(r"(([a-zA-Z\ ])\2*)", plain_string):
        length = len(run)
        pieces.append(char if length == 1 else str(length) + char)
    return "".join(pieces)
1c5ce29d07ed350585ed7c1237b984013fd61651
41,410
def reduce_by_maxcc(result_list, maxcc):
    """
    Filter list image tiles by maximum cloud coverage

    :param result_list: list of dictionaries containing info provided by
        Opensearch REST service
    :type result_list: list(dict)
    :param maxcc: filter images by maximum percentage of cloud coverage
    :type maxcc: float in range [0, 1]
    :return: list of dictionaries containing info provided by Opensearch
        REST service
    :rtype: list(dict)
    """
    # maxcc is a fraction while cloudCover is a percentage, hence 100x.
    threshold = 100 * float(maxcc)
    kept = []
    for tile_info in result_list:
        if tile_info['properties']['cloudCover'] <= threshold:
            kept.append(tile_info)
    return kept
cdc9ad0bdff825a1f58f7211f1c1fd57f4611755
41,411
def prefix():
    """A message prefix."""
    message_start = 'The start of the message.'
    return message_start
652b5185b3e5aa62a3c54ae33d26e15fd4d8e74c
41,412
from typing import Callable
import functools
import asyncio


def to_thread(func: Callable):
    """Asynchronously run function `func` in a separate thread"""
    @functools.wraps(func)
    async def wrapper(*args, **kwargs):
        # Bind the arguments now; execute the call on a worker thread.
        bound = functools.partial(func, *args, **kwargs)
        return await asyncio.to_thread(bound)
    return wrapper
751f4e9d7bcfa93d36fcdab7478dd251ee2cc7ee
41,414
import re


def extract_answer_text(options_text: str, answer_tag: str):
    """Extracts correct answer's text from all options.

    Args:
        options_text: all options as text in various format.
        answer_tag: correct answers tag a, b, c, ...

    Returns:
        parsed option text corresponding to the correct answer.

    Raises:
        ValueError: if the options cannot be parsed or the tag is out of
            range.
    """
    import ast  # local import keeps the security fix self-contained

    if options_text.startswith('[') and options_text.endswith(']'):
        # SECURITY FIX: ast.literal_eval only parses Python literals,
        # unlike eval(), which executed arbitrary expressions embedded in
        # the options text.
        options = ast.literal_eval(options_text)
        options = [re.sub('[abcde] \\)', '', x).strip() for x in options]
    else:
        options = re.split('[abcde] \\)', options_text)
        if options[0]:
            raise ValueError(f'Expects first segment to be empty in {options}.')
        options = [x.strip().rstrip(',').strip() for x in options[1:]]
    correct_id = ord(answer_tag) - ord('a')
    if correct_id >= len(options):
        raise ValueError(f'Ill parsed dictionary {options} from {options_text}.')
    return options[correct_id]
63c2027087d405c99831b2e6ea922d1989f51c20
41,415
import os


def get_path_seperator():
    # type: () -> str
    """Returns character that seperates directories in the PATH env variable"""
    # os.pathsep is exactly this character (';' on Windows, ':' elsewhere),
    # so defer to the standard library instead of testing os.name by hand.
    return os.pathsep
de50170268d453c1495854a3a564e7b80d51aec2
41,416
def _serialize_bls_pubkeys(key): """ The ``pytest`` cache wants a JSON encodable value. Provide the hex-encoded ``key``. """ return key.hex()
1d3bf520311bac8417c64f774bc9e36d2c7d7384
41,417
def pass1(arg, *args, **kwargs):
    """Return the first positional argument, ignoring everything else."""
    # Extra positionals/keywords are accepted for call-site compatibility.
    del args, kwargs
    return arg
d0b666ed5a2e0a4c84166dc790845fed126dc57b
41,418
import uuid


def is_uuid(obj, version=4):
    """Check the argument is either a valid UUID object or string.

    Args:
        obj (object): The test target. Strings and UUID objects supported.
        version (int): The target UUID version, set to 0 to skip version
            check.

    >>> is_uuid('e682ccca-5a4c-4ef2-9711-73f9ad1e15ea')
    True
    >>> is_uuid('0221f0d9-d4b9-11e5-a478-10ddb1c2feb9')
    False
    >>> is_uuid('0221f0d9-d4b9-11e5-a478-10ddb1c2feb9', version=1)
    True
    """
    if isinstance(obj, uuid.UUID):
        parsed = obj
    else:
        try:
            parsed = uuid.UUID(obj)
        except (TypeError, ValueError, AttributeError):
            return False
    # A falsy version (e.g. 0) disables the version comparison.
    if not version:
        return True
    return parsed.version == int(version)
a5f974331d55f5513e8d0cf2e01ce067e91b72c4
41,419
import re


def string_is_number(target_str):
    """
    Check whether passed string can accurately be converted to a number.

    Args:
        target_str (str): string to validate if parsable to number.

    Returns:
        bool

    NOTE(review): the implementation strips every non-digit character
    before matching, so this actually returns True for any string that
    contains at least one digit (e.g. "abc1") and False only for None or
    digit-free strings. Confirm whether callers rely on this lenient
    behavior before tightening it.
    """
    if target_str is None:
        return False
    else:
        return bool(re.fullmatch('^\\d+$', re.sub('[^0-9]', '', target_str)))
ef477fd6fd7072497ee58f986fc4d73bfa25f2b8
41,420
def _test_has_nr_transcripts(apps): """ Only give this warning for existing installations that have NR transcripts """ Transcript = apps.get_model("genes", "Transcript") return Transcript.objects.filter(identifier__startswith='NR_').exists()
a642b21a62a7b30c0865830c3946d4c0c0c10ff8
41,421
import csv


def loadNeumeNames(csvFile):
    """Load neume names from a CSV file.

    :param csvFile: path to a two-column CSV of (name, code) rows
    :return: dict mapping the second column to the first
    """
    # BUG FIX: mode "rU" was removed in Python 3.11 (open() raises
    # ValueError). Per the csv module docs, readers should open the file
    # with newline="" instead.
    with open(csvFile, mode="r", newline="") as infile:
        reader = csv.reader(infile)
        return {rows[1]: rows[0] for rows in reader}
fd5d550bf6543ce2f1a8c0bab790ea3e5516f1ef
41,422
def crop_image(image, rect):
    """
    Crops an image using the rectangle passed as parameter

    Args:
        image: the image to crop
        rect: a rectangle in the form of a tuple that defines the area we
            want to crop (top left x, top left y, width, height)

    Returns:
        The cropped image
    """
    left, top, width, height = rect
    # Image arrays index rows (y) first, then columns (x).
    return image[top:top + height, left:left + width]
b062177d187501692e32ef3911f750a183d1cf4c
41,425
def _add_p_tags(raw_body): """Return raw_body surrounded by p tags""" return f"<p>{raw_body}</p>"
27012f6220ed6fb983f5ee9af63e97f2497cf793
41,426
def fapiao_tax_codes(self, offset=0, limit=20):
    """Fetch the goods-and-services tax classification code table.

    :param offset: starting position of the query, e.g. 0
    :param limit: maximum number of entries to return, capped at 20
    """
    path = ('/v3/new-tax-control-fapiao/merchant/tax-codes'
            '?offset=%s&limit=%s' % (offset, limit))
    return self._core.request(path)
130b6524f68ec73d113f0e0ec0d82e87d4240e32
41,429
def check_run(system, dx, work_root, cmd, cwd=None, env=None):
    """Runs a command |cmd|.

    Args:
      system (runtime.System): The System instance.
      dx (dockcross.Image or None): The DockCross image to use. If None,
        the command will be run on the local system.
      work_root (str): The work root directory. If |dx| is not None, this
        will be the directory mounted as "/work" in the Docker environment.
      cmd (list): The command to run. Any components that are paths
        beginning with |work_root| will be automatically made relative to
        |work_root|.
      cwd (str or None): The working directory for the command. If None,
        |work_root| will be used. Otherwise, |cwd| must be a subdirectory
        of |work_root|.
      env (dict or None): Extra environment variables (will be applied to
        current env with dict.update)
    """
    if dx is not None:
        return dx.check_run(work_root, cmd, cwd=cwd, env=env)
    # Local execution: substitute the host's Python interpreter.
    # NOTE: this intentionally mutates the caller's cmd list, exactly as
    # the original implementation did.
    if cmd[0] == 'python':
        cmd[0] = system.native_python
    return system.check_run(cmd, cwd=cwd or work_root, env=env)
1507b5b916253644bf0645b7856e2c27227e084f
41,430
import hashlib


def get_sha256(file_name):
    """
    Calculate the sha256 for the file
    :param file_name: path of the file to hash
    :return: hex digest string
    """
    digest = hashlib.sha256()
    with open(file_name, 'rb') as f:
        # Hash in 4 KiB chunks so large files never load fully into memory.
        for chunk in iter(lambda: f.read(4096), b''):
            digest.update(chunk)
    return digest.hexdigest()
057caae3bfa0d2232ed92a3f241375fba0b1b231
41,432
def determine_wager(total_money):
    """
    Determine a wager when given total money

    When placing bets, one should be careful to bet low amounts to not tip
    the betting pool too much in one direction. This being said, we should
    bet at a value high enough to make it worth our time.

    Args:
        total_money (int): The total money in our bank

    Returns:
        wager (int): The suggested wager to place
    """
    # First-pass strategy: a flat wager regardless of bankroll. A linear
    # model (wager = 0.005 * total_money; $500 at $100k, $1000 at $200k)
    # was sketched but is deliberately not enabled yet:
    #   return round(0.005 * total_money)
    flat_wager = 500
    return flat_wager
e8fa67455c9560abc0e73631f2600beec46ecb77
41,433
import pkg_resources
import os


def get_safety_words():
    """
    Extract the set of safety words from safety file and return
    """
    safety_words = set()
    internal_dir = pkg_resources.resource_filename("droidlet.documents", "internal")
    safety_words_path = "{}/{}".format(internal_dir, "safety.txt")
    if not os.path.isfile(safety_words_path):
        return safety_words
    # Read a set of safety words to prevent abuse; skip blank lines,
    # tag-like lines ("<...") and comments ("#...").
    with open(safety_words_path) as f:
        for raw_line in f.readlines():
            word = raw_line.strip("\n").lower()
            if word and word[0] != "<" and word[0] != "#":
                safety_words.add(word)
    return safety_words
3185c3ede739f6a7c256fcf1d23d8f19ce0e7b20
41,434
def extract_features(data_tensor, feature_extractor_model, type_tensor=True):
    """
    Extracte features
    """
    # NOTE(review): unsqueeze(0) followed by squeeze() round-trips the
    # batch dimension (and drops any other size-1 dims) — presumably kept
    # for parity with other call sites; confirm before simplifying.
    prepared = data_tensor.unsqueeze(0).detach().squeeze()
    return feature_extractor_model(prepared)
f15ba3bfa8a68b9ad719e0d6c0e9a8dba51db23b
41,435
def lineno(el):
    """
    Get the first line number of ast element

    :param ast el:
    :rtype: int
    """
    # A list of nodes stands for its first element.
    node = el[0] if isinstance(el, list) else el
    line = node['loc']['start']['line']
    assert type(line) == int
    return line
e57bfa22b3e16f39585621edc109f7904ef648d8
41,436
from datetime import datetime
import os


def get_tdx_date(fname: str) -> datetime:
    """Parse the date of a TDX DEM from the filename"""
    # The first 17 characters of the basename encode the acquisition
    # timestamp, e.g. "2015-01-02_123456".
    stamp = os.path.basename(fname)[:17]
    return datetime.strptime(stamp, "%Y-%m-%d_%H%M%S")
59fcc75acc8b1732fff2e8f2a58cb837eccb5d36
41,437
def _get_parent_state_from_children_state(parent_state, children_state_list): """ @summary: 根据子任务状态计算父任务状态 @param parent_state: @param children_state_list: @return: """ children_state_set = set(children_state_list) if parent_state == "BLOCKED": if "RUNNING" in children_state_set: parent_state = "RUNNING" if "FAILED" in children_state_set: parent_state = "FAILED" return parent_state
d67ddc2ffb3a1f2878b84a9d43ad168f88467792
41,438
import subprocess


def run_cmd(cmd):
    """Helpful function for running shell command in py scripts."""
    # Only the combined output is of interest; the exit code is discarded.
    _, output = subprocess.getstatusoutput(cmd)
    return output
2f76bbe7299b9d8fa466c287ec724a72fc497551
41,439
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter


def get_parser():
    """Build parser object"""
    arg_parser = ArgumentParser(
        description='OpenfMRI participants sampler, for ASLPREP\'s testing purposes',
        formatter_class=RawTextHelpFormatter,
    )

    # Positional arguments.
    arg_parser.add_argument('openfmri_dir', action='store',
                            help='the root folder of a the openfmri dataset')
    arg_parser.add_argument('output_dir', action='store',
                            help='the directory where outputs should be stored')
    arg_parser.add_argument('sample_file', action='store',
                            help='a YAML file containing the subsample schedule')

    # optional arguments
    arg_parser.add_argument('--anat-only', action='store_true', default=False,
                            help='run only anatomical workflow')
    arg_parser.add_argument('--nthreads', action='store', type=int,
                            help='number of total threads')
    arg_parser.add_argument('--omp_nthreads', action='store', type=int,
                            help='number of threads for OMP-based interfaces')
    arg_parser.add_argument('--mem-gb', action='store', type=int,
                            help='available memory in GB')
    arg_parser.add_argument('--tasks-list-file', default='tasks_list.sh',
                            action='store', help='write output file')
    arg_parser.add_argument('-t', '--tasks-filter', action='store', nargs='*',
                            help='run only specific tasks')
    arg_parser.add_argument('--cmd-call', action='store',
                            help='command to be run')
    return arg_parser
54a0a6c532cb5614ec835a3429de1b8488c2ead5
41,441
def projection():
    """
    The projection system.
    """
    projection_name = "x4i"
    return projection_name
e9842157b0990eaaf14c2b88d38f256f27b7ac56
41,442
import os


def reformat_csv(csv_fn):
    """
    Amazon SageMaker XGBoost can train on data in either a CSV or LibSVM
    format. For CSV format, It should:
      - Have the predictor variable in the first column
      - Not have a header row
    """
    new_fn = csv_fn.replace('.csv', '_xgb.csv')
    with open(csv_fn, 'r') as fin:
        rows = fin.readlines()
    converted = []
    # Skip the header; move the label (last column) to the front and drop
    # the trailing copy of it.
    for row in rows[1:]:
        fields = row.strip().split(',')
        fields[0] = fields[-1]
        converted.append(','.join(fields[:-1]))
    with open(new_fn, 'w') as fout:
        fout.write(os.linesep.join(converted))
    return new_fn
d30eb28b4d66e7dadfce252035bf46db712407e5
41,443
def load_best_model_weight(ai_agent):
    """Load the best model's config and weights into *ai_agent*.

    :param ai_agent: agent exposing ``load``/``load_stats`` and a
        ``config.resource`` holding the best-model paths.
    :return: whatever ``ai_agent.load`` returns.
    """
    resource = ai_agent.config.resource
    val = ai_agent.load(resource.model_best_config_path,
                        resource.model_best_weight_path)
    # Stats are optional; loading them remains best-effort.
    try:
        ai_agent.stats = ai_agent.load_stats(resource.model_best_stats_path)
    except Exception:
        # FIX: narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit.
        pass
    return val
9853c0d39ea7e4e10287295bdc3ef5a4d18cc8ed
41,444
def vep_id_to_colon_id(vep_id):
    """Converts a specific type of VEP compatible identifier to colon-based
    one.

    VEP supports several types of variant identifiers. This function only
    converts a single type, of the form 'CHROM POS . REF ALT', delimited
    by spaces and compatible with the first five columns of VCF."""
    parts = vep_id.split(' ')
    # Field 2 is the placeholder '.' (the VCF ID column) and is dropped.
    chrom, pos, ref, alt = parts[0], parts[1], parts[3], parts[4]
    return f'{chrom}:{pos}:{ref}:{alt}'
22e2e959a3ca62f5d8a80a807f284dffcfc970a1
41,445
import re def filterByTitle(person, titles): """Returns True if the given dictionary of person attributes includes a "Title" field in which one of the given title abbreviations exists (False otherwise). This could appear in several different forms: * "CEO" could appear part of a larger title like "CEO & Director" * "CFO" could appear as an abbreviation of sequential terms, like "Chief Financial Officer" It may be necessary/desirable to return the SPECIFIC title (or index thereof) that is successfully matched. For the time being, however, we only focus on whether the match is found. """ firsts = "".join([w[0] for w in person["Title"].split()]) for title in titles: if re.search(title, person["Title"]): return True if re.search(title, firsts): return True return False
33177c0ebd2706d6276d583e05f9d422aada6192
41,446
def ether2wei(ether: float):
    """Converts Ether to units of wei (wei = ether * 1e18).

    NOTE(review): the previous docstring described the inverse conversion
    (wei to Ether); the code multiplies, which matches the function name.
    """
    return ether * 1e18
b826daaa171d24b43b7f901b6498f24f5481ed1c
41,448
import sys
import argparse


def process_command_line(argv):
    """
    Return settings object.
    `argv` is a list of arguments, or `None` for ``sys.argv[1:]``.
    """
    if argv is None:
        argv = sys.argv[1:]

    cli = argparse.ArgumentParser(
        description='Filter a kraken DB file according to its domain predictions',
        formatter_class=argparse.HelpFormatter,
    )

    cli.add_argument(
        '--dbfile', default=":memory:",
        help='Database file. If not given the database will be written to memory. If file exists use the data in the file')
    cli.add_argument(
        '--mindomains', type=int, default=5,
        help='Minimal number of domains to consider. Less than that will pass')
    cli.add_argument(
        '--mindiff', type=float, default=1.0,
        help='Minimal log probability difference between the MAP and the second to best')
    cli.add_argument(
        '--taxonomy',
        help="path taxonomy directory of kraken2 DB, should contain names.dmp, nodes.dmp and *accession2taxid files")
    # Filtering switches, all off by default.
    cli.add_argument(
        '--filter_virus', default=False, action='store_true',
        help='By default keep all sequences originating from viral genomes, use this to filter them')
    cli.add_argument(
        '--filter_archaea', default=False, action='store_true',
        help='By default keep all sequences originating from archaea genomes, use this to filter them')
    cli.add_argument(
        '--filter_enviro', default=False, action='store_true',
        help='Set to remove environmental samples')
    cli.add_argument(
        '--trust_archaea', default=False, action='store_true',
        help='By default keep all sequences that map to archaea, use this to filter them')
    cli.add_argument(
        '--trust_viruses', default=False, action='store_true',
        help='By default keep all sequences that map to viruses, use this to filter them')
    # Positional inputs.
    cli.add_argument(
        'fasta',
        help='Input fasta file. Headers must be accession numbers or in the format: >kraken:taxid|214684|NC_006670.1')
    cli.add_argument(
        'table',
        help='The predicted domains table, output of predict_domain.py')

    return cli.parse_args(argv)
c6994ca71d7abfbde85cdb7d34eefe293b819e25
41,449
def mysql_database(run_services, mysql_database_getter, mysql_database_name):
    """Prepare new test database creation function."""
    if not run_services:
        # Services disabled: nothing to prepare.
        return None
    return mysql_database_getter(mysql_database_name)
7c986c98a22f244bdf0155873f6e070f7e56ee12
41,450
def _prepare_chain(structures, pdb_id, pdb_chain, atom_filter, mapping, model=0): """ Prepare PDB chain for distance calculation Parameters ---------- structures : dict Dictionary containing loaded PDB objects pdb_id : str ID of structure to extract chain from pdb_chain: str Chain ID to extract atom_filter : str Filter for this type of atom. Set to None if no filtering should be applied mapping : dict Seqres to Uniprot mapping that will be applied to Chain object model : int, optional (default: 0) Use this model from PDB structure Returns ------- Chain Chain prepared for distance calculation """ # get chain from structure chain = structures[pdb_id].get_chain(pdb_chain, model) # filter atoms if option selected if atom_filter is not None: chain = chain.filter_atoms(atom_filter) # remap chain to Uniprot chain = chain.remap(mapping) return chain
cdc859f9742f31f32879892a30303412001ab612
41,451
def double_middle_drop(progress):
    """
    Returns a linear value with two drops near the middle to a constant
    value for the Scheduler

    :param progress: (float) Current progress status (in [0, 1])
    :return: (float) if 0.75 <= 1 - p: 1 - p,
             if 0.25 <= 1 - p < 0.75: 0.075,
             if 1 - p < 0.25: 0.125

    NOTE(review): a widely-copied docstring for this schedule claims 0.75
    for the middle band, but the code below returns eps1 * 0.1 = 0.075.
    The docstring here documents what the code actually does.
    """
    eps1 = 0.75
    eps2 = 0.25
    if 1 - progress < eps1:
        # Remaining progress below the first threshold: constant plateaus.
        if 1 - progress < eps2:
            return eps2 * 0.5
        return eps1 * 0.1
    # Early phase: plain linear decay.
    return 1 - progress
bfdf14ac75e63b88160f6c511d26c76031f9c663
41,453
import textwrap


def clean_text(text):
    """
    Return a cleaned and formatted version of text
    """
    if not text:
        return text
    cleaned = []
    for line in text.strip().splitlines(False):
        # Collapse internal runs of whitespace, then wrap to 75 columns.
        normalized = " ".join(line.split())
        cleaned.extend(textwrap.wrap(normalized, width=75))
    return "\n".join(cleaned)
70f4d1bc79574b38bea1228fb8631198f366113a
41,454
def filter_out(self, *args, **kwargs):
    """Filter like filter but the opposite."""
    # Columns selected by the normal filter are exactly the ones to drop.
    dropped = set(self.filter(*args, **kwargs).columns)
    kept = [col for col in self.columns if col not in dropped]
    return self[kept]
e6be5636a22270c944ab32a7823246f251f344cb
41,455
def get_rightmost_pixel_constraint(rightmost_x):
    """
    Given a rightmost_x (x-coord of rightmost nonzero pixel), return a
    constraint function that remaps candidate tl/brs such that the
    right-edge = rightmost_x
    (Should reduce 2D search to 1D)
    """
    def _remap(tl, br, image, window_dim, rightmost_x_=rightmost_x):
        # Degenerate window: no room to shift the center-X anyway.
        if tl[1] == br[1]:
            return tl, br
        half_width = window_dim[1] // 2
        new_tl = tl.copy()
        new_br = br.copy()
        # Pin the window's right edge at the rightmost nonzero pixel.
        center_x = rightmost_x_ - half_width
        new_tl[1] = center_x - 1
        new_br[1] = center_x
        return new_tl, new_br
    return _remap
7b8f7863d4385cadcd3e3c1b2dd80a4a80e02b3c
41,456
def split_info_from_job_url(BASE_URL, job_rel_url):
    """
    Split the job_rel_url to get the separated info and create a full URL
    by combining the BASE_URL and the job_rel_url

    :params:
        job_rel_url str: contain the Relative Job URL

    :returns:
        job_id str: the unique id contained in the job_rel_url
        job_name str: the name of the job
        job_full_url str: full URL of the job ads

    :raises:
        ValueError: if job_rel_url does not split into exactly three
            segments ('job', id, name).
    """
    splitted_url = [i for i in job_rel_url.split("/") if i]
    # The first element of the list is 'job' as the structure of the
    # string is like this:
    # /job/BJR877/assistant-professor-associate-professor-full-professor-in-computational-environmental-sciences-and-engineering/
    if len(splitted_url) != 3:
        # BUG FIX: the original used a bare `raise` with no active
        # exception, which surfaced as a confusing RuntimeError.
        raise ValueError(
            "Unexpected job URL format (expected /job/<id>/<name>/): %r" % job_rel_url)
    job_id = splitted_url[1]
    job_name = splitted_url[2]
    job_full_url = BASE_URL + job_rel_url
    return job_id, job_name, job_full_url
276db11041c18a1675ca10b9581a898787b11321
41,457
def get_long_id(list_session_id):
    """Build a longitudinal ID from a set of session IDs.

    Session labels are sorted alphabetically before being merged, so the
    same set of sessions always yields the same identifier, e.g.
    ['ses-M18', 'ses-M00'] -> 'long-M00M18'.

    Args:
        list_session_id (list[str]): session IDs such as ["ses-M00"].

    Returns:
        str: the longitudinal ID.
    """
    # Drop the 'ses-' prefix (first 4 chars) of each sorted session ID.
    labels = (session_id[4:] for session_id in sorted(list_session_id))
    return "long-" + "".join(labels)
f5b0ffa6fe75c9059453d0a2d32456dd88da2a16
41,460
import numpy


def cov_w(mat, weights):
    """Return the weighted covariance matrix of ``mat``.

    Args:
        mat (numpy.matrix): variables-by-observations data matrix.
        weights (numpy.matrix): per-observation weights, elementwise
            broadcastable against ``mat``.

    Returns:
        numpy.matrix: weighted covariance, normalized by
        ``sum(weights) - 1``.

    Raises:
        AssertionError: if ``mat`` is not a ``numpy.matrix``.
    """
    if not isinstance(mat, numpy.matrix):
        raise AssertionError
    total_weights = numpy.sum(weights)
    # Weight each observation, then take the weighted mean per variable.
    mat_w = numpy.matrix(mat.A * weights.A)
    mean = numpy.sum(mat_w, axis=1) / total_weights
    # Center the data on the weighted mean.
    m_sub = (mat.T - mean.A[:, 0]).T
    # Weighted cross product with a Bessel-style (n - 1) correction.
    return (numpy.matrix(m_sub.A * weights.A) * m_sub.T) / (total_weights - 1)
5224d049f720afc2227b1adfab23986be1748273
41,462
def get_users_who_rated(item_id, ratings_matrix):
    """Return the users who rated the given item.

    param item_id: id of the item of interest
    param ratings_matrix: user-by-item interaction matrix (DataFrame)
    return: list of user ids with a non-null rating for item_id
    """
    transposed = ratings_matrix.copy().transpose()
    item_row = transposed.loc[[item_id]]
    # Keep only the user columns where the rating is not entirely null.
    rated_mask = ~item_row.isnull().all()
    rated = item_row[item_row.columns[rated_mask]]
    return rated.columns.values.tolist()
ab0c07ae47bdbdcca83657d88a1f2d2608031de6
41,463
def devicelist_result():
    """Device list value test setup.

    Returns a mapping of device name -> device description dict. The three
    fixture devices differ only in ARN and coupling map, so the shared
    fields are built by a local helper to avoid triplicating the literal.
    """
    def _device(arn, coupling_map):
        # All fixture devices share location, qubit count and parameters.
        return {
            'coupling_map': coupling_map,
            'deviceArn': arn,
            'location': 'us-east-1',
            'nq': 30,
            'version': '1',
            'deviceParameters': {
                'name': 'braket.device_schema.rigetti.rigetti_device_parameters',
                'version': '1',
            },
            'deviceModelParameters': {
                'name': 'braket.device_schema.gate_model_parameters',
                'version': '1',
            },
        }

    return {
        'name1': _device('arn1', {}),
        'name2': _device('arn2', {'1': ['2', '3']}),
        'name3': _device('arn3', {'1': ['2', '3']}),
    }
d508ad3e3e827ec74d43fd717ae5fb485cee71a8
41,464
import requests


def get_knowledge_graph():
    """Fetch the knowledge-graph structure from neo4j's HTTP endpoint.

    Posts a Cypher statement to the local transactional commit endpoint
    and returns the decoded JSON response as native Python data.
    """
    endpoint = 'http://localhost:7474/db/data/transaction/commit'
    headers = {
        'Content-Type': 'Application/json',
        'Authorization': 'Basic bmVvNGo6cm9vdA==',
    }
    payload = {
        "statements": [
            {"statement": "match (a)-[r]->(b) return distinct labels(a),labels(b), type(r)"}
        ]
    }
    response = requests.post(endpoint, json=payload, headers=headers)
    return response.json()
30680a634e1437631aadf67ce45925524cecb1e8
41,466
def get_attrname(name):
    """Return the mangled name of the attribute's underlying storage.

    The fixed prefix keeps the backing slot distinct from the public
    attribute name.
    """
    return '_obj_' + name
02d7983a02f7a112479d6289ab1a4ddcb8a104a7
41,468
def _distance_acc(distances, thr=0.5): """Return the percentage below the distance threshold, while ignoring distances values with -1. Note: batch_size: N Args: distances (np.ndarray[N, ]): The normalized distances. thr (float): Threshold of the distances. Returns: float: Percentage of distances below the threshold. If all target keypoints are missing, return -1. """ distance_valid = distances != -1 num_distance_valid = distance_valid.sum() if num_distance_valid > 0: return (distances[distance_valid] < thr).sum() / num_distance_valid return -1
1f75ee6a747fab1f471a771769b033060cac4830
41,469
def get_max_aminochain(bios_path):
    """Return the maximum total fragment length over all variants.

    Parameters
    ----------
    bios_path : mapping
        Variant key -> iterable of sequence fragments for one
        biosynthesis type (e.g. A, B, or C).

    Returns
    -------
    int
        The largest summed fragment length among the variants.
    """
    totals = [
        sum(len(fragment) for fragment in bios_path[variant])
        for variant in bios_path
    ]
    return max(totals)
5d9af6812a853bbaad14ca858d7e620b286a3938
41,470
import shutil def _get_exec(): """ checks if dmenu is installed on path. returns: string, path to binary """ dmenu = shutil.which("dmenu") if not dmenu: raise EnvironmentError("No dmenu installed") else: return dmenu
779ac5dbe46223b8500d1c3e474bd6c4ce6ed3a4
41,471
def get_time(timestamp, field=None):
    """Return the timestamp attribute selected by ``field``.

    timestamp: <pd.Timestamp>
    field: <str> one of Y, M, W, D, H, T (defaults to "T" / minute);
        any other value yields -1.
    """
    field = field or "T"
    # Dispatch table instead of an if/elif chain.
    extractors = {
        "T": lambda ts: ts.minute,
        "H": lambda ts: ts.hour,
        "D": lambda ts: ts.weekday(),
        "W": lambda ts: ts.week,
        "M": lambda ts: ts.month,
        "Y": lambda ts: ts.year,
    }
    extractor = extractors.get(field)
    return extractor(timestamp) if extractor else -1
8122c811839dc899710579459b5402c5a09acdcf
41,472
import argparse


def get_parser():
    """Build and return the command-line argument parser."""
    arg_parser = argparse.ArgumentParser(description='Magnets-Getter CLI Tools.')
    arg_parser.add_argument('-k', '--keyword', type=str,
                            help='magnet keyword.')
    arg_parser.add_argument('-n', '--num', type=int, default=10,
                            help='magnet number.(default 10)')
    arg_parser.add_argument('-s', '--sort-by', type=int, default=0,
                            help='0: Sort by date,1: Sort by size. 2: Sort by hot-rank.(default 0)')
    arg_parser.add_argument('-o', '--output', type=str,
                            help='output file path, supports csv and json format.')
    arg_parser.add_argument('-p', '--pretty-oneline', action='store_true',
                            help='show magnets info with one line.')
    arg_parser.add_argument('-v', '--version', action='store_true',
                            help='version information.')
    return arg_parser
d5561df8ddd2778f9e750f93ba96d4dad6e6e59b
41,473
import re
import os


def CheckTurbsimFileName(file_name):
    """Return True if the basename follows the TurbSim naming convention.

    Expected layout:
    <date>-<time>-<folder number>-<seed iteration>_<NN>mps_<NN>shear.h5
    e.g. the 2nd file of folder 006 generated at 9 m/s with shear
    exponent 0.1: 20181011-165912-006-01_09mps_10shear.h5
    """
    pattern = re.compile(r'\d\d\d\d[0-1]\d[0-3]\d'       # date
                         r'-[0-2]\d[0-5]\d[0-5]\d'       # time
                         r'-\d\d\d'                      # online folder identifier
                         r'-\d\d'                        # iteration number from start seed
                         r'_\d\dmps_\d\dshear\.h5')      # wind conditions
    return bool(pattern.match(os.path.basename(file_name)))
fc1b9bc857ccd5a4612fb6f7531748c1a7cbfd4e
41,474
def extract_valid_libs(filepath):
    """Evaluate syslibs_configure.bzl and return its VALID_LIBS as a set."""
    def repository_rule(**kwargs):  # pylint: disable=unused-variable
        # Stub only: the .bzl file calls this while populating VALID_LIBS.
        del kwargs

    with open(filepath, 'r') as bzl_file:
        exec_globals = {'repository_rule': repository_rule}
        exec_locals = {}
        exec(bzl_file.read(), exec_globals, exec_locals)  # pylint: disable=exec-used
    return set(exec_locals['VALID_LIBS'])
e8e9b60dcd86e6216b7d3d321a079da09efa19e8
41,475
import itertools


def normalise_environment(key_values):
    """Converts denormalised dict of (string -> string) pairs, where the
    first string is treated as a path into a nested list/dictionary structure

    {
        "FOO__1__BAR": "setting-1",
        "FOO__1__BAZ": "setting-2",
        "FOO__2__FOO": "setting-3",
        "FOO__2__BAR": "setting-4",
        "FIZZ": "setting-5",
    }

    to the nested structure that this represents

    {
        "FOO": [{
            "BAR": "setting-1",
            "BAZ": "setting-2",
        }, {
            "BAR": "setting-3",
            "BAZ": "setting-4",
        }],
        "FIZZ": "setting-5",
    }

    If all the keys for that level parse as integers, then it's treated as
    a list with the actual keys only used for sorting

    This function is recursive, but it would be extremely difficult to hit
    a stack limit, and this function would typically by called once at the
    start of a program, so efficiency isn't too much of a concern.
    """
    # Separator is chosen to
    # - show the structure of variables fairly easily;
    # - avoid problems, since underscores are usual in environment variables
    separator = "__"

    def get_first_component(key):
        return key.split(separator)[0]

    def get_later_components(key):
        return separator.join(key.split(separator)[1:])

    # Leaf entries at this level: keys with no separator remaining.
    without_more_components = {
        key: value
        for key, value in key_values.items()
        if not get_later_components(key)
    }

    # Entries that still need to be recursed into.
    with_more_components = {
        key: value
        for key, value in key_values.items()
        if get_later_components(key)
    }

    def grouped_by_first_component(items):
        def by_first_component(item):
            return get_first_component(item[0])

        # groupby requires the items to be sorted by the grouping key
        return itertools.groupby(sorted(items, key=by_first_component), by_first_component)

    def items_with_first_component(items, first_component):
        # Strip the leading component so recursion sees one-level-shorter keys.
        return {
            get_later_components(key): value
            for key, value in items
            if get_first_component(key) == first_component
        }

    # Merge the leaves with the recursively-normalised sub-structures.
    nested_structured_dict = {
        **without_more_components,
        **{
            first_component: normalise_environment(
                items_with_first_component(items, first_component)
            )
            for first_component, items in grouped_by_first_component(with_more_components.items())
        },
    }

    def all_keys_are_ints():
        def is_int(string_to_test):
            try:
                int(string_to_test)
                return True
            except ValueError:
                return False

        # pylint: disable=use-a-generator
        return all([is_int(key) for key, value in nested_structured_dict.items()])

    def list_sorted_by_int_key():
        # The integer keys only determine ordering; they are dropped.
        return [
            value
            for key, value in sorted(
                nested_structured_dict.items(), key=lambda key_value: int(key_value[0])
            )
        ]

    # All-integer keys mean this level represents a list, not a dict.
    return list_sorted_by_int_key() if all_keys_are_ints() else nested_structured_dict
b8670458415404d673e530b0a6e15c9a050ea4ca
41,476
def add_it_en_books():
    """Return English IT books measured in Chinese-page equivalents.

    Web tutorials: each chapter counts as the number of Chinese pages
    readable in 30 minutes. English books: each page counts double.
    """
    # Previously tracked entries: 'Java IO' (17 * 10), 'Java Network' (22 * 10).
    return {'Java Servlet': 240 * 2}
b3d24510ecd627a648e6bd30054313f1dcabac38
41,477
def create_commit(mutations):
    """Return a fake Datastore commit method recording into ``mutations``.

    Args:
        mutations: list that each committed request's mutations are
            appended to.

    Returns:
        A fake Datastore commit method.
    """
    def commit(req):
        # extend() appends each mutation in order, matching a per-item loop.
        mutations.extend(req.mutations)
    return commit
dc7cfa4c3f79076c2b67e3261058ab5d55e1f189
41,478
def iflag_unique_items(list_):
    """Lazily flag the first occurrence of each item.

    Args:
        list_ (list): items to scan.

    Returns:
        generator of bool: True the first time an item is seen,
        False for every repeat.
    """
    seen = set()

    def _first_sighting(item):
        if item in seen:
            return False
        seen.add(item)
        return True

    return (_first_sighting(item) for item in list_)
98b1a0febaa16e548e56f6f719f38eac0cf9e884
41,479
def is_paired(input_string: str):
    """Determine that any and all bracket pairs are matched and nested.

    :param input_string str - The input to check
    :return bool - True if all brackets are matched and properly nested.
    """
    closer_to_opener = {']': '[', '}': '{', ')': '('}
    pending = []
    for char in input_string:
        if char in '[{(':
            pending.append(char)
        elif char in closer_to_opener:
            # A closer must match the most recent unclosed opener.
            if not pending or pending.pop() != closer_to_opener[char]:
                return False
    # Any leftover openers mean an unterminated pair.
    return not pending
040d79e471831d4444ffded490f39ed90247e39e
41,480
def de_norm(x, low, high):
    """De-normalize a value back into the [low, high] range, clamping.

    :param x: Value to be denormalized
    :param low: Minimum value
    :param high: Maximum value
    :return: de-normalized value, clamped to the range bounds
    """
    value = x * (high - low) + low
    # Guard returns instead of an if/elif chain; same clamp semantics.
    if value > high:
        return high
    if value < low:
        return low
    return value
625f9925e99071ae69f231fcd93f02c58c936054
41,483
import random


def random_value_lookup(lookup):
    """Return the value stored under a uniformly random key of ``lookup``."""
    _, chosen_value = random.choice(list(lookup.items()))
    return chosen_value
b1d7b3859cd9232df2a41505ee4f2bdfaa2607a4
41,484
def reverse(seq):
    """Reverse a space-separated integer sequence.

    The input is treated as zero-terminated (e.g. "1 2 3 0"): recursion
    stops at the first 0 value, and a token not followed by a space is
    replaced by the terminator "0".
    """
    split_at = seq.find(' ')
    if split_at == -1:
        # No trailing separator: treat as the sequence terminator.
        return "0"
    head = int(seq[:split_at])
    if head == 0:
        return str(head)
    return reverse(seq[split_at + 1:]) + " " + str(head)
30aafdef0791920853e0921e18be1f52264fb97e
41,488
def pred(lis):
    """Move the relation element (first item) AFTER the relational term.

    Dummy semantic elements (empty lists) are dropped, then the first two
    remaining elements are swapped so the relational predicate leads.
    """
    cleaned = [element for element in lis if element != []]
    # Swap the first two entries; the rest keep their order.
    return [cleaned[1], cleaned[0]] + cleaned[2:]
33383aef506e9414540ad36aa3ceee12e3f7540f
41,490
def symbols(vma):
    """Obtain the atomic symbols for all atoms defined in the V-Matrix.

    :param vma: V-Matrix
    :type vma: automol V-Matrix data structure
    :rtype: tuple(str)
    """
    # First entry of each row is the atomic symbol; empty input -> ().
    return tuple(row[0] for row in vma) if vma else ()
3f2a547e7ec0eb17e681ec2fe5de93057fdaa22a
41,491
import tempfile
import os


def temp_db_filename():
    """Generate a temporary filename for the SQLite database.

    In-memory SQLite (':memory:') does not fit the test structure, which
    expects databases to persist between individual tests, so a real file
    is created -- preferably on the /dev/shm ramdisk when available.
    """
    try:
        handle, db_fname = tempfile.mkstemp("_BioSQL.db", dir="/dev/shm")
    except OSError:
        # No usable /dev/shm on this platform; fall back to default tmp dir.
        handle, db_fname = tempfile.mkstemp("_BioSQL.db")
    os.close(handle)
    return db_fname
668b0a97f9eb80ddf03a2704215d8653f87ed2b4
41,493
import pickle


def decode_dict(dictionary):
    """Unpickle a binary payload back into a native dictionary.

    Args:
        dictionary (bytes): pickled storage to decode

    Returns:
        dict: decoded dictionary
    """
    # NOTE: pickle.loads executes arbitrary code; only use on trusted data.
    return pickle.loads(dictionary)
158d7db725f1856c276f867bfea3482c3fbe283b
41,494
def form_fastqc_cmd_list(fastqc_fp, fastq_fp, outdir):
    """Generate the argument list for a fastqc subprocess call.

    Args:
        fastqc_fp(str): path to the fastqc program
        fastq_fp(str): path to the fastq file to be evaluated
        outdir(str): path to the output directory, or None to omit it

    Return value:
        call_args(list): the options for the fastqc subprocess call

    Raises:
        ValueError: when either the fastqc path or the fastq input path
            is empty (prevents accidentally launching interactive fastqc)
    """
    # `x is ''` compared identity, not equality, and is unreliable
    # (SyntaxWarning on modern Python); use equality instead.
    if fastqc_fp == '':
        raise ValueError('fastqc_fp name is empty')
    if fastq_fp == '':
        raise ValueError('fastq_fp file name is empty')

    # required arguments
    call_args_list = [fastqc_fp, fastq_fp]

    # direct output
    if outdir is not None:
        call_args_list.extend(["--outdir", outdir])

    return call_args_list
ce0ed8eb7d35bdd2565f910bb982da710daa23c5
41,495
from datetime import datetime, timezone


def get_timestamp_utc() -> str:
    """Return the current UTC time formatted as ``YYYY-MM-DD-HHMMSS``."""
    # datetime.utcnow() is deprecated (Python 3.12+); an aware UTC datetime
    # renders the identical string with this format.
    return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H%M%S")
05a1cfeeda438a8f5f9857698cb2e97e4bb62e96
41,496
def is_network_appbase_ready(props):
    """Checks if the network is appbase ready"""
    # NOTE(review): both branches test the exact same key, so the elif is
    # unreachable -- this function can only ever return False (key present)
    # or fall through to an implicit None (key absent). The first branch
    # was presumably meant to check a different, pre-appbase key; confirm
    # the intended key against the upstream implementation before fixing.
    if "DPAY_BLOCKCHAIN_VERSION" in props:
        return False
    elif "DPAY_BLOCKCHAIN_VERSION" in props:
        return True
366a92d0fa8de4ac826044c1ad62d2cb4a7d0539
41,497
import os


def get_usergid():
    """Return the real group id of the current process."""
    return os.getgid()
00a3badf5d275a93bff9c1e26f0d942b192d1f99
41,498
def _drop_cols_from_data_segment(): """Columns keys to possibly drop from data DataFrame""" # Other optional columns: ['E Imag', 'ADC Sync Input(V)','I Imag', 'I Real'] _drop_cols = ["E2 Imag", "E2 Real", "E2 Status", "E2(V)", "Z2 Imag", "Z2 Real"] return _drop_cols
4f9580f8fde6406f22f504ba8f30b85b262a85dd
41,499
def rtd10(raw_value):
    """Convert platinum RTD output to degrees C.

    The conversion is simply ``0.1 * raw_value``; the result is paired
    with its unit string.
    """
    degrees = float(raw_value) / 10.0
    return (degrees, "degC")
5b44908c722ff8298cf2f4985f25e00e18f05d21
41,500
def sentry_handler():
    """
    Sentry log handler config data.
    """
    return {
        "sentry_handler": {
            # The DSN must be a plain string; the original wrapped it in a
            # one-element set literal, which logging/Sentry config consumers
            # do not accept and which is not JSON-serializable.
            "dsn": "https://pub-key:secret-key@app.getsentry.com/app-id"
        }
    }
9b51fd6ed6e8f0d9e49140e5c58b6a1482461925
41,502
def reverse_list(l):
    """Return a reversed shallow copy of ``l``.

    The extended slice preserves the input's sequence type (a list in
    the documented case) and never mutates the original.
    """
    return l[::-1]
21bf60edf75a6016b01186efccdae9a8dd076343
41,503
import os


def get_current_file_suffix(path):
    """Return the file extension of ``path``.

    :param path: path to a source file
    :returns: extension including the leading dot, or '' when absent
    """
    # splitext returns (root, ext); only the extension is needed here.
    return os.path.splitext(path)[1]
7967dc08ac985424930aa900702facbfe01dd712
41,505
import requests


def query_graphql(query, variables, token):
    """Query GitHub's GraphQL API with the given query and variables.

    The response JSON always has a "data" key; its value is returned as
    a dictionary. Raises for non-2xx HTTP responses.
    """
    response = requests.post(
        "https://api.github.com/graphql",
        json={"query": query, "variables": variables},
        headers={"Authorization": f"token {token}"},
    )
    response.raise_for_status()
    return response.json()["data"]
72da627b5600973303ae4001bf3b07f738212f04
41,506
def get_passive_el(passive_coord, centroids):
    """Get indices of passive elements.

    Args:
        passive_coord (:obj:`tuple`): ((x_min, x_max), (y_min, y_max))
            region whose shape will not be changed.
        centroids (:obj:`numpy.array`): (x, y) centroid of each element.

    Returns:
        Indices of elements whose centroid lies inside the region
        (bounds inclusive).
    """
    (x_lo, x_hi), (y_lo, y_hi) = passive_coord
    inside_x = (centroids[:, 0] >= x_lo) & (centroids[:, 0] <= x_hi)
    inside_y = (centroids[:, 1] >= y_lo) & (centroids[:, 1] <= y_hi)
    mask = inside_x & inside_y
    return (mask > 0).nonzero()[0]
9983ee9d730ced9f8ce56790c07af22c9dfcdb0d
41,507