Dataset columns — content: string (35 to 416k chars) · sha1: string (40 chars) · id: int64 (0 to 710k)
import sys
import re
import os


def patch_shebang_line(fname, pad=b' ', fLOG=print):
    """
    Remove absolute path to python.exe in shebang lines.

    @param      fname       file to patch
    @param      pad         pad
    @param      fLOG        logging function
    @return                 boolean, True if patched, False otherwise
    """
    if sys.version_info[0] == 2:
        # single backslash so the pattern matches a literal dot in "python.exe"
        shebang_line = re.compile(r"(#!.+pythonw?\.exe)")  # Python 2.7
    else:
        shebang_line = re.compile(b"(#!.+pythonw?\\.exe)")  # Python 3+

    with open(fname, 'rb') as fh:
        content = fh.read()

    content = shebang_line.split(content, maxsplit=1)
    if len(content) != 3:
        return False
    exe = os.path.basename(content[1][2:])
    content[1] = b'#!' + exe + (pad * (len(content[1]) - len(exe) - 2))
    content = b''.join(content)
    try:
        with open(fname, 'wb') as fh:
            fh.write(content)
        fLOG("[pymy] patched", fname)
        return True
    except Exception:
        fLOG("[pymy] failed to patch", fname)
        return False
37eba96befb257fc2d951a6971fcee6a921668a4
695,747
def parse_zone_groups(player):
    """Creates a list of all Zones with an attribute indicating whether they
    are a group or a single player"""
    all_zones = []
    for group in player.all_groups:
        if len(group.members) > 1:
            all_zones.append({"kind": "G", "master": group.coordinator})
        else:
            all_zones.append({"kind": "P", "master": group.coordinator})
    return all_zones
c5074a3b88f661dcc0e310e2447071348ecf346f
695,748
def get_default_memory_overhead(memory):
    """
    The default memory overhead (related to both driver and executor) depends
    on how much memory is used: 0.10 * memory, with a minimum of 384 MB.

    :param memory: driver or executor memory
    :return: default memory overhead
    """
    MINIMUM_OVERHEAD = 384 << 20  # 384 MB
    return 0.1 * memory if 0.1 * memory > MINIMUM_OVERHEAD else MINIMUM_OVERHEAD
25db1cfe67651cf0b32371e2d8a056b5215e264a
695,749
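A quick sanity check of the overhead rule above, with hypothetical 4 GiB and 1 GiB memory sizes:

# For a 4 GiB executor, 10% (~409.6 MiB) exceeds the 384 MiB floor.
assert get_default_memory_overhead(4 * 1024**3) == 0.1 * 4 * 1024**3
# For a 1 GiB executor, 10% (~102.4 MiB) is below the floor, so 384 MiB applies.
assert get_default_memory_overhead(1 * 1024**3) == 384 << 20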
from typing import List
from typing import Dict
from typing import Any


def format_traffic_calming_json(
    traffic_calming_list: List[Dict[str, Any]],
    speed_cameras: List[Dict[str, Any]],
    speed_humps: List[Dict[str, Any]],
) -> str:
    """
    Purpose:
        Format the json to a text response
    Args:
        traffic_calming_list: traffic calming data
        speed_cameras: speed camera data
        speed_humps: speed hump data
    Returns:
        text: formatted text
    """
    text = ""

    if len(speed_cameras) > 0:
        for camera in speed_cameras:
            text += (
                f'There is a {camera["camera_type"]} camera at {camera["address"]} \n'
            )
        text += "\n"
    else:
        text += "No speed cameras: \n\n"

    if len(speed_humps) > 0:
        for hump in speed_humps:
            if int(hump["count"]) > 1:
                text += (
                    f'There are {hump["count"]} speed humps at {hump["location"]} \n'
                )
            else:
                text += f'There is {hump["count"]} speed hump at {hump["location"]} \n'
        text += "\n"
    else:
        text += "No speed humps: \n\n"

    text += "Other Traffic Calming Measures: \n\n"

    # Maybe just get the max
    totalbikelanes_max = 0
    totalraisedbuffers_max = 0
    totalparkinglanes_max = 0
    speedlimit_max = 0
    for traffic_calming_json in traffic_calming_list:
        if traffic_calming_json["totalbikelanes"] > totalbikelanes_max:
            totalbikelanes_max = traffic_calming_json["totalbikelanes"]
        if traffic_calming_json["totalraisedbuffers"] > totalraisedbuffers_max:
            totalraisedbuffers_max = traffic_calming_json["totalraisedbuffers"]
        if traffic_calming_json["totalparkinglanes"] > totalparkinglanes_max:
            totalparkinglanes_max = traffic_calming_json["totalparkinglanes"]
        if traffic_calming_json["speedlimit"] > speedlimit_max:
            speedlimit_max = traffic_calming_json["speedlimit"]
        # text += f'{traffic_calming_json["nbh_cluster_names"]} - {traffic_calming_json["blockkey"]} \n'

    text += f"There are {totalbikelanes_max} bike lanes \n"
    text += f"There are {totalraisedbuffers_max} raised buffers \n"
    text += f"There are {totalparkinglanes_max} parking lanes \n"
    text += f"The speed limit is {speedlimit_max} MPH \n"
    text += "\n\n"

    return text
4a997a8663c8deb4781eec07db2aeb163583ba1c
695,750
def _difference(idxs):
    """
    Returns the chained difference of the indexes given.

    Parameters
    ----------
    idxs : list
        List of pandas.Index objects.

    Returns
    -------
    idx : pandas.Index
        The result of the chained difference of the indexes given.
    """
    idx = idxs[0]
    for idx_part in idxs[1:]:
        idx = idx.difference(idx_part)
    return idx
d48e491f0c145d4286bf6c7675e0b511d39b87a4
695,752
import json


def python2js(value, check_list=(None, True, False)):
    """
    Convert values found in check_list from Python types to JS (JSON) types,
    so they render correctly on the front end.

    :param value:
    :param check_list:
    :return:
    """
    if check_list is not None and value in check_list:
        value = json.dumps(value)
    else:
        value = str(value)
    return value
c445ac97ce4b02262f908a9ba10fef5d35a7f452
695,753
def load_stopwords(inpath="text/stopwords.txt"):
    """ Load stopwords from a file into a set. """
    stopwords = set()
    with open(inpath) as f:
        lines = f.readlines()
        for l in lines:
            l = l.strip()
            if len(l) > 0:
                stopwords.add(l)
    return stopwords
5e733e97a3f56867d80edeb3db5a392362a23fe1
695,754
def parse_tweet(tweet):
    """
    Parse elements of tweet into dict
    Return dict
    """
    tweet_dict = {
        'created_at': tweet['created_at'],
        'full_text': tweet['full_text'],
        'tweet_id': tweet['id_str'],
        'source': tweet['source'],
        'retweets': tweet['retweet_count'],
        'favorites': tweet['favorite_count'],
        'geo': str(tweet['geo']),
        'coordinates': str(tweet['coordinates']),
        'place': str(tweet['place']),
        'reply_to': tweet['in_reply_to_status_id'],
        'deeplink': f'https://twitter.com/{tweet["user"]["screen_name"]}/status/{tweet["id_str"]}'
    }
    return tweet_dict
301e78016972f54c31ae05c9d84de6eda32c441d
695,755
def ppr(b2, b3):
    """
    Plant Pigment Ratio (Metternicht, 2003).

    .. math:: PPR = (b3 - b2)/(b3 + b2)

    :param b2: Blue.
    :type b2: numpy.ndarray or float
    :param b3: Green.
    :type b3: numpy.ndarray or float

    :returns PPR: Index value

    .. Tip::

        Metternicht, G. 2003. Vegetation indices derived from high-resolution \
        airborne videography for precision crop management. International \
        Journal of Remote Sensing 24(14), 2855-2877. \
        doi:10.1080/01431160210163074
    """
    PPR = (b3 - b2) / (b3 + b2)
    return PPR
77a071d3c437dc3f202f6c7a35147c7473e17749
695,756
def is_sorted(array):
    """Write a function called is_sorted that takes a list as a parameter and
    returns True if the list is sorted in ascending order and False otherwise.
    You can assume (as a precondition) that the elements of the list can be
    compared with the relational operators <, >, etc."""
    copy_array = array[:]
    copy_array.sort()
    return copy_array == array
875afc4d41c82a77bea70899c06e74c40bfe3c36
695,757
def test(r, v, source_body):
    """This function does something.

    :param name: The name to use.
    :type name: str.
    :param state: Current state to be in.
    :type state: bool.
    :returns: int -- the return code.
    :raises: AttributeError, KeyError
    """
    print('meet me')
    return None
21842482feb961ea7cc058d72e78086c15fc8880
695,758
def transform_points(points, world_to_image):
    """
    pts are nparray of shape (B, 2)
    world_to_image is nparray of shape (3, 3)
    Returns nparray of shape (B, 2)
    """
    world_to_image = world_to_image.T
    return points @ world_to_image[:2, :2] + world_to_image[2, :2]
245080a69a793c39f21654adc92871f743633eb5
695,759
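A minimal check of the row-vector convention in transform_points, assuming numpy and a hypothetical translate-by-(10, 20) homogeneous matrix:

import numpy as np

# Pure translation by (10, 20); points are row vectors, so the function
# transposes the matrix before applying the 2x2 block and the offset row.
world_to_image = np.array([[1., 0., 10.],
                           [0., 1., 20.],
                           [0., 0., 1.]])
pts = np.array([[1., 2.], [3., 4.]])
print(transform_points(pts, world_to_image))  # [[11. 22.] [13. 24.]]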
def add(nmbr1, nmbr2):
    """Add Function"""
    return nmbr1 + nmbr2
de78b1340134b759f0471a50c76ffe3e3f79b54f
695,761
import random

import torch


def non_nearest_neighbors(D, nb_neighbors, labels=None):
    """
    Like nearest neighbors, but choose set difference with equal
    (like nearest neighbors) number of non nearest neighbors.
    """
    N = []
    for d in D:
        # indices of everything beyond the nb_neighbors nearest entries
        N += [random.sample(
            list(torch.sort(d)[1][nb_neighbors + 1:]),
            nb_neighbors
        )]
    return N
bf168c14bdf0ccfcdba29ca639f15cfadd87b323
695,763
def get_file_extension(file_name):
    """e.g.: "/home/j/path/my.video.mp4" -> ".mp4"
    Throws an exception, ValueError, if there is no "." character in file_name

    :param file_name: <str> any string or path that is the name of a file
    :return: the file extension of the param
    """
    return file_name[file_name.rindex('.'):]
03789c21b6478f8cfa2707697e74f8d51995923b
695,764
from collections import defaultdict


def optimal_weight_defaultdict(maximum, weights):
    """
    Instead of a values matrix, use a defaultdict.
    TOO MUCH MEMORY!
    """
    values = defaultdict(int)
    for item in range(len(weights)):
        for subweight in range(1, maximum + 1):
            values[subweight, item] = values[subweight, item - 1]
            weight = weights[item]
            if weight <= subweight:
                value = values[subweight - weight, item - 1] + weight
                values[subweight, item] = max(value, values[subweight, item])
    return values[maximum, len(weights) - 1]
7aa6b157610cad9e1fdc7c382e2c3c85b2cb8a45
695,765
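A small usage sketch for the knapsack helper above, with hypothetical weights and capacity 10:

# The best subset of weights fitting in capacity 10 is 3 + 7 = 10.
print(optimal_weight_defaultdict(10, [1, 3, 7]))  # 10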
def expand_bboxes(bboxes,
                  image_width,
                  image_height,
                  expand_left=2.,
                  expand_up=2.,
                  expand_right=2.,
                  expand_down=2.):
    """
    Expand bboxes, expand 2 times by default.
    """
    expand_boxes = []
    for bbox in bboxes:
        xmin = bbox[0]
        ymin = bbox[1]
        xmax = bbox[2]
        ymax = bbox[3]
        w = xmax - xmin
        h = ymax - ymin
        ex_xmin = max(xmin - w / expand_left, 0.)
        ex_ymin = max(ymin - h / expand_up, 0.)
        ex_xmax = min(xmax + w / expand_right, image_width)
        ex_ymax = min(ymax + h / expand_down, image_height)
        expand_boxes.append([ex_xmin, ex_ymin, ex_xmax, ex_ymax])
    return expand_boxes
12d3f4df9f79130898db620a90f80fb200e0b23d
695,766
from typing import get_type_hints


def get_handler_original_typehints(handler):
    """
    Returns the signature of the asyncworker handler being decorated.
    The return value is equivalent to:
        typing.get_type_hints(original_handler)
    where `original_handler` is the original asyncworker handler.

    Intended for use inside a handler's decorator stack, e.g.:

    .. code-block:: python

        @deco1
        @deco2
        @deco3
        async def handler(...):
            pass

    In that case, if any of the three decorators needs the original
    signature, it should call this function on the function received
    from the previous decorator.
    """

    def _dummy():
        pass

    _dummy.__annotations__ = getattr(
        handler, "asyncworker_original_annotations", handler.__annotations__
    )
    return get_type_hints(_dummy)
cde66d18f13ce702c91808b85d3ac6f4c8599a14
695,767
import random


def sample_corpus(collections):
    """
    create proportional sizes of subcorpora across event types.
    randomize when selecting the texts for this sample.

    :param collections: collection of collections of dictionaries per event type
    :type collections: dictionary
    """
    lengths_dict = {}
    for event_type, info_dicts in collections.items():
        lengths_dict[event_type] = len(info_dicts)

    len_smallest_corpus = min(lengths_dict.values())

    sampled_collections = {}
    for event_type, info_dicts in collections.items():
        sampled_list = random.sample(info_dicts, len_smallest_corpus)
        sampled_collections[event_type] = sampled_list

    return sampled_collections
70efacbce2538ee55d454e7799275317b88f7316
695,768
def pg_utcnow(element, compiler, **kw):
    """ Postgres UTC timestamp object """
    return "TIMEZONE('utc', CURRENT_TIMESTAMP)"
3deb31b98b8c75417ff0ecf5c7b5fa9eb0b91df9
695,769
def assign_value_if_none(value, default):
    """
    Assign a value to a variable if that variable has value ``None`` on input.

    Parameters
    ----------
    value : object
        A variable with either some assigned value, or ``None``
    default : object
        The value to assign to the variable ``value`` if ``value is None``
        returns ``True``

    Returns
    -------
    new_value : object
        The new value of ``value``
    """
    return default if value is None else value
5034a65741bb763e632bb8493eb661229e75279a
695,770
def get_dict_val(dictionary, path, splitchar='/'):
    """
    Return the value of the dictionary at the given path of keys.
    ----------
    INPUT
        |---- dictionary (dict) the dict to search in.
        |---- path (str) path-like string for key access ('key1/key2/key3' for ex.).
        |---- splitchar (str) the character to use to split the path into keys.
    OUTPUT
        |---- dictionary (dict or value) the value of the dictionary at the
        |     provided key path.
    """
    for item in path.split(splitchar):
        dictionary = dictionary[item]
    return dictionary
eb58e42edc705f05d9e046e65f4ed823f42c9aac
695,771
import re


def acquireCliProgramVersion(s):
    """ Acquire the version of a cli program
    param s: The output of the program's version string.
             example: 'gcc --version'
    """
    regex = r"(\d+)(\.\d+)(\.\d+)?"
    matches = re.finditer(regex, s, re.MULTILINE)
    version = None
    for matchNum, match in enumerate(matches, start=1):
        version = match.group()
    return version
0d50e4e1a6fce17af2f61d2de175ee0ec82d8f30
695,773
def construct_order_structure(species_order_list, current_species_string_list):
    """
    Order structure for reaction order operations.

    Returns the cyclic_dictionary to be used by the order operator.
    The meta-species objects are the keys of this dictionary and lists of
    species strings currently being used in the reaction are the values -
    allowing the product to find its corresponding species-string in a
    future step.

    Parameters:
        species_order_list (list of meta-species objects): list of meta-species
            objects as they appear in the meta-reaction
        current_species_string_list (list of strings): list of strings in MobsPy
            format of the species currently in this specific reaction

    Returns:
        cyclic_dict (dict) = Dictionary where the keys are meta-species objects
            and the values are lists of species
    """
    cyclic_dict = {}
    for species_object, species_string in zip(species_order_list, current_species_string_list):
        try:
            cyclic_dict[species_object].append(species_string)
        except KeyError:
            cyclic_dict[species_object] = [species_string]
    return cyclic_dict
a69c6eccffb1d22f5a4006089f6c7f12a5144a3d
695,774
def find_non_base_case_job(jobs):
    """Return a job that is not a base case."""
    for job in jobs:
        if job.model.base_case is not None:
            assert not job.model.is_base_case
            return job
    raise Exception("Did not find a non-base-case job")
ab80ca1ad2e5293876ffa7973112bf19ae8ab308
695,775
def _get_new_steplist(reqs_to_keep, old_step_data, req_ids):
    """Returns a list similar to `old_step_data` but with unwanted requests removed.

    Uses the requests and request components in `old_step_data` and the
    entity ids in `req_ids` to determine which elements in `old_step_data`
    to keep.

    Parameters
    ----------
    reqs_to_keep : dict
        Dictionary of requests and request components to keep
    old_step_data : list
        List of all the step data in the results file
    req_ids : dict
        Dictionary of entity ids for the entire results file

    Returns
    -------
    list
        List of just the step data to keep
    """
    # Start a new list with just the time element from `old_step_data`
    new_step_data = [old_step_data[1]]

    # Loop through the desired requests and components to pull elements
    # from `old_step_data` into `new_step_data`
    for request in reqs_to_keep:
        # For each desired request
        for req_comp in reqs_to_keep[request]:
            # For each desired request component, add that component's
            # step data to `new_step_data`
            req_id = int(req_ids[request][req_comp])
            new_step_data.append(old_step_data[req_id])

    return new_step_data
61e3c88dda3fae29a10c91b4abfc02ed4762f22e
695,776
def slon_e(lon_e, precision=0):
    """East longitude string.

    Parameters
    ----------
    lon_e: float
        Input east longitude (degE).
    precision: int, optional
        Displayed float precision.

    Returns
    -------
    str
        Formatted longitude (`180°|90°W|0°|90°E|180°|`)
    """
    return (f'{abs(lon_e):.{precision}f}°'
            f'{"" if abs(lon_e % 180) <= 1.e-2 else "E" if lon_e > 0 else "W"}')
1968def96ac276e23e19991672653acdc00d65d2
695,777
import os


def is_css_file(path):
    """
    Return True if the given file path is a CSS file.
    """
    ext = os.path.splitext(path)[1].lower()
    return ext in [
        '.css',
    ]
370340c819b402903a70d4125dc0b805af21b645
695,778
from math import log


def is_power_of_two(x):
    """ Returns true if x is a power of two, false otherwise. """
    if x <= 0:
        return False
    # round() rather than int(): float imprecision in log() could otherwise
    # truncate an exact power of two down to the wrong exponent.
    log2 = round(log(x, 2))
    return x == 2 ** log2
092c13924e076c31c85ff93906c0947b61708c5a
695,779
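For integer inputs the same test can be done without floating point at all; a sketch of the standard bit trick, offered as an alternative (not part of the record above):

def is_power_of_two_bits(x: int) -> bool:
    # A power of two has exactly one bit set, so x & (x - 1) clears it to 0.
    return x > 0 and x & (x - 1) == 0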
import pathlib


def make_report_file_names(proj_full_path):
    """
    make the directory and file names for a report

    Args:
        proj_full_path (string): the path of the results directory

    Returns:
        report_dir (pathlib.Path)
        html_outfile (pathlib.Path)
        hash_file (pathlib.Path)
    """
    report_dir = pathlib.Path(proj_full_path).joinpath("report")
    html_outfile = report_dir.joinpath("report.html")
    hash_file = report_dir.joinpath("results_hash.json")
    return (report_dir, html_outfile, hash_file)
8d995bb15c2b8710ad2fb16e2476b5a96421f379
695,780
import time
import math


def __project_gdf(gdf, to_crs=None, to_latlong=False):
    """
    Project a GeoDataFrame to the UTM zone appropriate for its geometries'
    centroid.

    The simple calculation in this function works well for most latitudes, but
    won't work for some far northern locations like Svalbard and parts of far
    northern Norway.

    Parameters
    ----------
    gdf : GeoDataFrame
        the gdf to be projected
    to_crs : dict
        if not None, just project to this CRS instead of to UTM
    to_latlong : bool
        if True, projects to latlong instead of to UTM

    Returns
    -------
    GeoDataFrame
    """
    default_crs = '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'

    assert len(gdf) > 0, 'You cannot project an empty GeoDataFrame.'
    start_time = time.time()

    # if gdf has no gdf_name attribute, create one now
    if not hasattr(gdf, 'gdf_name'):
        gdf.gdf_name = 'unnamed'

    # if to_crs was passed-in, use this value to project the gdf
    if to_crs is not None:
        projected_gdf = gdf.to_crs(to_crs)

    # if to_crs was not passed-in, calculate the centroid of the geometry to
    # determine UTM zone
    else:
        if to_latlong:
            # if to_latlong is True, project the gdf to latlong
            latlong_crs = default_crs
            projected_gdf = gdf.to_crs(latlong_crs)
            # log('Projected the GeoDataFrame "{}" to default_crs in {:,.2f} seconds'.format(gdf.gdf_name, time.time()-start_time))
        else:
            # else, project the gdf to UTM
            # if GeoDataFrame is already in UTM, just return it
            if (gdf.crs is not None) and ('+proj=utm ' in gdf.crs.to_string()):
                return gdf

            # calculate the centroid of the union of all the geometries in the
            # GeoDataFrame
            avg_longitude = gdf['geometry'].unary_union.centroid.x

            # calculate the UTM zone from this avg longitude and define the UTM
            # CRS to project
            utm_zone = int(math.floor((avg_longitude + 180) / 6.) + 1)
            utm_crs = '+proj=utm +zone={} +ellps=WGS84 +datum=WGS84 +units=m +no_defs'.format(utm_zone)

            # project the GeoDataFrame to the UTM CRS
            projected_gdf = gdf.to_crs(utm_crs)
            # log('Projected the GeoDataFrame "{}" to UTM-{} in {:,.2f} seconds'.format(gdf.gdf_name, utm_zone, time.time()-start_time))

    projected_gdf.gdf_name = gdf.gdf_name
    return projected_gdf
6b3e67285ff9229fb96d609ffd222296dc1d7ae2
695,781
import re


def is_valid_rvpsa_id(rvpsa_id):
    """
    Validates a remote VPSA ID, also known as the remote VPSA "name".  A valid
    remote VPSA name should look like: rvpsa-00000001 - It should always start
    with "rvpsa-" and end with 8 hexadecimal characters in lower case.

    :type rvpsa_id: str
    :param rvpsa_id: The remote VPSA name to be validated.

    :rtype: bool
    :return: True or False depending on whether rvpsa_id passes validation.
    """
    if rvpsa_id is None:
        return False

    match = re.match(r'^rvpsa-[0-9a-f]{8}$', rvpsa_id)
    if not match:
        return False

    return True
aabe9e64dbc9003f3cc5f18842b91f4a3e34c8a8
695,782
def xyxy2xywh(bbox):
    """Transform the bbox coordinate to [x, y, w, h].

    :param bbox: the predict bounding box coordinate
    :type bbox: list
    :return: [x, y, w, h]
    :rtype: list
    """
    _bbox = bbox.tolist()
    return [
        _bbox[0],
        _bbox[1],
        _bbox[2] - _bbox[0] + 1,
        _bbox[3] - _bbox[1] + 1,
    ]
d81c9f88b8192d0ec519dd185572c9d926971cc1
695,783
import numpy


def _pixel_borders(xlim, npix, log=False, base=10.0):
    """
    Determine the borders of the pixels in a vector given the first, last and
    number of pixels

    Args:
        xlim (numpy.ndarray) : (Geometric) Centers of the first and last
            pixel in the vector.
        npix (int) : Number of pixels in the vector.
        log (bool) : (**Optional**) The input range is (to be)
            logarithmically sampled.
        base (float) : (**Optional**) The base of the logarithmic sampling.
            The default is 10.0; use numpy.exp(1.) for the natural logarithm.

    Returns:
        numpy.ndarray, float: A vector with the (npix+1) borders of the
        pixels and the sampling rate. If logarithmically binned, the
        sampling is the step in :math:`\log x`.
    """
    if log:
        logRange = numpy.log(xlim) / numpy.log(base)
        dlogx = numpy.diff(logRange) / (npix - 1.)
        borders = numpy.power(base, numpy.linspace(*(logRange / dlogx + [-0.5, 0.5]),
                                                   num=npix + 1) * dlogx)
        return borders, dlogx
    dx = numpy.diff(xlim) / (npix - 1.)
    borders = numpy.linspace(*(xlim / dx + numpy.array([-0.5, 0.5])), num=npix + 1) * dx
    return borders, dx
c5473a890266cc47ccb685599fa3e32237f04967
695,784
import argparse


def init_args():
    """

    :return:
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--image_path', type=str, help='The image path or the src image save dir')
    parser.add_argument('--weights_path', type=str, help='The model weights path')
    parser.add_argument('--is_batch', type=str, help='If test a batch of images', default='false')
    parser.add_argument('--batch_size', type=int, help='The batch size of the test images', default=8)
    parser.add_argument('--save_dir', type=str, help='Test result image save dir', default=None)
    parser.add_argument('--use_gpu', type=int, help='If use gpu set 1 or 0 instead', default=1)

    return parser.parse_args()
bba33fbb37ff4ed11779298b4056313126b01ba3
695,785
def compute_prior_probability(alpha):
    """
    Calculate the probability of the node being a LeafNode (1 - p(being SplitNode)).
    Taken from equation 19 in [Rockova2018].

    Parameters
    ----------
    alpha : float

    Returns
    -------
    list with probabilities for leaf nodes

    References
    ----------
    .. [Rockova2018] Veronika Rockova, Enakshi Saha (2018). On the theory of BART.
        arXiv, `link <https://arxiv.org/abs/1810.00787>`__
    """
    prior_leaf_prob = [0]
    depth = 1
    while prior_leaf_prob[-1] < 1:
        prior_leaf_prob.append(1 - alpha**depth)
        depth += 1
    return prior_leaf_prob
664734536a8a973bf77e6d9e723dc2954f663e21
695,786
def lat_opposite(side):
    """
    Returns the lateral opposite as defined by the keyword pair {"Right", "Left"}
    """
    if side == 'Right':
        return 'Left'
    elif side == 'Left':
        return 'Right'
    else:
        raise ValueError("Lateral side error, (%s)" % side)
2273b4e43e37fd206cac52d81591afa12ecf68ee
695,787
def ParseFiles(input, output):
    """
    Makes a bed file out of each sample that is in the header of the
    input .bed file.
    """
    input_file = open(input, 'r')
    output_file_dict = dict()
    header = input_file.readline()
    samples = header.split()[4:]
    # Iterates through the samples in the header.
    for i in range(0, len(samples), 2):
        # Remove the literal '_methylated' suffix. (str.strip('_methylated')
        # would drop any of those characters from both ends, not the suffix.)
        sample = samples[i]
        if sample.endswith('_methylated'):
            sample = sample[:-len('_methylated')]
        output_file_dict.update({sample: open(output + '/' + sample + '.bed', 'w')})
    return [input_file, output_file_dict, samples]
cc040458869edd7712acf43f4f1a72856f0d0dda
695,788
def count_col_nans(col):
    """
    Returns the number of NaNs of a specific column in the dataset.

    Parameters:
        col (pandas Series): Column in the dataset

    Returns:
        col_count_nans (float): Count of NaNs in col
    """
    col_count_nans = col.isna().sum()
    return col_count_nans
ea055c003805112dbebd11fa5b9beea8ecc4c127
695,790
def parse_headers(env):
    """Parse HTTP headers out of a WSGI environ dictionary

    Args:
        env: A WSGI environ dictionary

    Returns:
        A dict containing (name, value) pairs, one per HTTP header

    Raises:
        KeyError: The env dictionary did not contain a key that is
            required by PEP-333.
        TypeError: env is not dictionary-like. In other words, it has no
            attribute '__getitem__'.
    """
    # Parse HTTP_*
    headers = {}
    for key in env:
        if key.startswith('HTTP_'):
            headers[key[5:]] = env[key]

    # Per the WSGI spec, Content-Type is not under HTTP_*
    if 'CONTENT_TYPE' in env:
        headers['CONTENT_TYPE'] = env['CONTENT_TYPE']

    # Per the WSGI spec, Content-Length is not under HTTP_*
    if 'CONTENT_LENGTH' in env:
        headers['CONTENT_LENGTH'] = env['CONTENT_LENGTH']

    # Fallback to SERVER_* vars if the Host header isn't specified
    if 'HOST' not in headers:
        host = env['SERVER_NAME']
        port = env['SERVER_PORT']

        if port != '80':
            host = ''.join([host, ':', port])

        headers['HOST'] = host

    return headers
31c2d2eac9a888535d57ecaf57c91748173bd948
695,791
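A minimal exercise of the WSGI header parsing above, using a hypothetical environ dict:

env = {
    'HTTP_USER_AGENT': 'curl/8.0',
    'CONTENT_LENGTH': '42',
    'SERVER_NAME': 'example.com',
    'SERVER_PORT': '8080',
}
# Host falls back to SERVER_NAME:SERVER_PORT since no HTTP_HOST was given.
print(parse_headers(env))
# {'USER_AGENT': 'curl/8.0', 'CONTENT_LENGTH': '42', 'HOST': 'example.com:8080'}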
import os
import unicodedata


def scan_target(path, files, directories):
    """
    Processes given path. Adds files to files list. If path is a directory,
    all subfiles and directories are added to the files and directories lists
    as appropriate.

    Returns list of files and list of directories.
    """
    path = os.path.abspath(path)
    if not os.path.isdir(path):
        files.append(path)
        return files, directories

    directory_list = [
        unicodedata.normalize('NFC', f) for f in os.listdir(path)]
    for entry in directory_list:
        entry_path = os.path.join(path, entry)
        if os.path.isdir(entry_path):
            directories.append(entry_path)
        else:
            files.append(entry_path)
    return files, directories
f5e17abb6d73ef3d243c801b2a962793fdc24839
695,792
import random


def generate_secret_key():
    """
    Generate a random secret key, suitable to be used as a SECRET_KEY setting.
    """
    return ''.join(
        [random.SystemRandom().choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)')
         for i in range(50)]
    )
a5d2220c95338d8afdf287354b1f8f538707639a
695,793
import json


def load_json_file(json_path):
    """Function to load a JSON file from the filename path"""
    with open(json_path) as fp:
        data = json.load(fp)
        print('JSON file successfully loaded')
    return data
e11ad820ce211582e646ba545be7d860718e14d9
695,794
from datetime import datetime
import os


def file_modified_iso8601(filepath):
    """ Provide a file's ctime in ISO8601

    :param filepath: path to file
    :returns: string of ISO8601
    """
    return datetime.fromtimestamp(
        os.path.getctime(filepath)).strftime('%Y-%m-%dT%H:%M:%SZ')
ca06314f64243a56aed2700a4af43bac829719fa
695,795
import requests
import json


def build_link_list(client_id, num_of_images):
    """ builds a list of image links. """
    i = 1
    cnt = 0
    url_list = []
    url_list_len = []
    try:
        while cnt < num_of_images:
            # get request
            response = requests.get(
                f'https://api.imgur.com/3/gallery/random/random/{i}',
                headers={'Authorization': f'Client-ID {client_id}'},
                stream=True
            )
            # control
            if response.status_code == 200:
                data_list = json.loads(response.content)['data']
                url_list.extend([
                    i['link'] for i in data_list
                    if 'type' in i
                    and i['type'] in ('image/png', 'image/jpeg')
                    and i['link'] not in url_list
                ])
                cnt = len(url_list)
                url_list_len.append(cnt)
                i += 1
                # control if api doesn't return anything new: stop once the
                # last 10 counts are identical (the original compared a set
                # to the int 1, which is never true)
                if len(url_list_len) >= 10 and len(set(url_list_len[-10:])) == 1:
                    break
            elif response.status_code == 429:
                print('too many requests, enough, or you can choose to put time.sleep() in here...')
                break
            else:
                break
    except Exception:
        print('api limit reached!')
    return url_list
f83be601467649179992c7de621ffd777d76a4a9
695,796
import os


def remove_file(filename: str) -> bool:
    """
    removes the file "filename" from disk

    >>> remove_file('foobar')
    True
    """
    if os.path.isfile(filename):
        try:
            os.remove(filename)
        except (IOError, OSError):
            print('Can not delete {}'.format(filename))
            return False
    return True
c97b1d5418d086766c86f5ad6ebd07f91efb57e6
695,797
def get_node_proto(graph_proto, node_name):
    """Get a `NodeProto` from `GraphProto` by node name.

    Args:
        graph_proto: A `GraphProto`.
        node_name: Name of the node.

    Returns:
        A `NodeProto` or None.
    """
    for node_proto in graph_proto.nodes:
        if node_proto.name == node_name:
            return node_proto
    return None
ddc1aebeb3450de8dd6dfa85b988997a163601b4
695,798
import warnings


def warn(action):
    """Set warnings filter"""
    warnings.simplefilter(action)
    return action
143fb081685769b9d189c1600ca6a51c4a084d70
695,799
def fetch_project(api, project_name=None, project_id=None):
    """
    fetch a project from the sb api.

    :param api: API object generated by sevenbridges.Api()
    :type api: Sevenbridges API Object
    :param project_name: name of a project to return e.g. 'forsure'
    :type project_name: string
    :param project_id: username/project name pair - e.g. 'doesnotexist/forsure'
    :type project_id: string
    :return: Project object from the sevenbridges api
    :rtype: sevenbridges.models.project.Project
    """
    if project_id:
        project = api.projects.get(id=project_id)
        if not project.id:
            print(
                f"""Project {project_id} not found. Check spelling (especially trailing spaces)"""
            )
            raise KeyboardInterrupt
        else:
            return project
    elif project_name:
        project_list = api.projects.query(name=project_name)
        if not project_list:
            print(
                f"""Project {project_name} not found. Check spelling (especially trailing spaces)"""
            )
            raise KeyboardInterrupt
        else:
            return project_list[0]
    else:
        print("No project passed.")
        raise KeyboardInterrupt
f33d70367bd4a52ae0099a83110182afdb8862d5
695,800
def num_generator(num):
    """ We must ensure that we always send 2 bytes to control the frames."""
    num = str(num)
    if len(num) == 1:
        return '0' + num
    elif len(num) == 2:
        return num
    else:
        print('There was a problem with the number generator')
74bd94124e149ad803cecdedb2e826dc64ad0ecb
695,801
import itertools


def get_global_pbm_set(notes):
    """Return whether the pitch shape is the same across all notes.
    """
    # Pull the pitch-shape (PBM) entries out of each note into a 2-D list.
    all_pbm_2d = [note.pbm for note in notes if 'PBM' in note]
    # Flatten to 1-D.
    all_pbm_1d = itertools.chain.from_iterable(all_pbm_2d)
    # Collect the distinct pitch shapes in the selection.
    pbm_types = set(all_pbm_1d)
    # Return the kinds found; if no PBM is present, this should be empty (0 kinds).
    return pbm_types
fd5e4dbf797131ddb5cf93f5ae67e30e8e978fa6
695,802
def calculate_heartbeats(shb, chb):
    """
    Given a heartbeat string from the server, and a heartbeat tuple from
    the client, calculate what the actual heartbeat settings should be.

    :param shb: server heartbeat numbers
    :param chb: client heartbeat numbers
    """
    (sx, sy) = shb
    (cx, cy) = chb
    x = 0
    y = 0
    if cx != 0 and sy != '0':
        x = max(cx, int(sy))
    if cy != 0 and sx != '0':
        y = max(cy, int(sx))
    return x, y
e97569838c90c4f204204b3c23ce7871aa093c8a
695,803
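A quick check of the heartbeat negotiation above, STOMP-style, where the server values arrive as strings; the numbers are hypothetical:

# Client wants to send every 1000 ms and receive every 2000 ms;
# the server advertises ('3000', '500').
print(calculate_heartbeats(('3000', '500'), (1000, 2000)))  # (1000, 3000)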
def filter_any_answer(group):
    """Filter questions answered by anyone in group."""
    answers = set()
    for person in group:
        for question in person:
            answers.add(question)
    return answers
5dca4f80e069bb3e9d9541145cbbc18eb22daf3f
695,804
def jokbo(realcards):
    """
    return cards' priority written with integer
    :param realcards: dealer or player's cards
    :return: priority (written with integer)
    """
    cards = realcards[:]
    for i in range(len(cards)):
        if cards[i][1] == "J":
            cards[i][1] = 11
        if cards[i][1] == "Q":
            cards[i][1] = 12
        if cards[i][1] == "K":
            cards[i][1] = 13
        if cards[i][1] == "A":
            cards[i][1] = 14
    if cards[0][1] == cards[1][1] != cards[2][1] == cards[3][1] == cards[4][1] or cards[0][1] == cards[1][1] == \
            cards[2][1] != cards[3][1] == cards[4][1]:
        return 9  # full house
    if cards[0][1] != cards[1][1] == cards[2][1] == cards[3][1] == cards[4][1] or cards[0][1] == cards[1][1] == \
            cards[2][1] == cards[3][1] != cards[4][1]:
        return 30  # four of a kind
    if cards[0][1] == 2 and cards[1][1] == 3 and cards[2][1] == 4 and cards[3][1] == 5 and cards[4][1] == 14:
        if cards[0][0] == cards[1][0] == cards[2][0] == cards[3][0] == cards[4][0]:
            return 50  # straight flush
        return 4  # A 2 3 4 5 (straight)
    if cards[0][0] == cards[1][0] == cards[2][0] == cards[3][0] == cards[4][0]:
        return 5  # flush (same suit)
    if int(cards[1][1]) - int(cards[0][1]) == int(cards[2][1]) - int(cards[1][1]) == int(cards[3][1]) - int(
            cards[2][1]) == int(cards[4][1]) - int(cards[3][1]) == 1:
        if cards[0][1] == 10 and cards[1][1] == 11 and cards[2][1] == 12 and cards[3][1] == 13 and cards[4][1] == 14:
            return 100  # royal straight flush
        if cards[1][1] - cards[0][1] == cards[2][1] - cards[1][1] == cards[3][1] - cards[2][1] == cards[4][1] - \
                cards[3][1] == 1:
            return 50  # straight flush
        return 4  # straight
    for i in range(0, 3):
        if cards[i][1] == cards[i + 1][1]:
            if cards[i + 1][1] == cards[i + 2][1]:
                return 3  # three of a kind
            for j in range(i + 1, 4):
                if cards[j][1] == cards[j + 1][1]:
                    return 2  # two pair
            return 1  # one pair
    else:
        return 0
ab81e38b3c2620e74ee0154df445df2088111d0a
695,805
import operator


def sort(word_freq, func):
    """
    Takes a dictionary of words and their frequencies
    and returns a list of pairs where the entries are
    sorted by frequency
    """
    # Python 2 code: on Python 3, use word_freq.items() instead of iteritems().
    return func(sorted(word_freq.iteritems(), key=operator.itemgetter(1), reverse=True), None)
829c9bf83cb1058b4af3301444793924f919d0f9
695,806
from typing import Iterable


def clean_key(k: Iterable[str]) -> str:
    """
    Utility function for formatting keys. This is a no-op if the input is a
    string, otherwise expects an iterable of strings, which it joins with a
    period.
    """
    return k if isinstance(k, str) else '.'.join(k)
78f241056141a2549ae981a7ab18268e23fd2b0a
695,807
def _get_scaffold(captured_scaffold_fn):
    """Retrieves the Scaffold from `captured_scaffold_fn`."""
    scaffold_fn = captured_scaffold_fn.get()

    if not scaffold_fn:
        return None

    scaffold = scaffold_fn()
    if scaffold is None:
        raise ValueError(
            'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')

    return scaffold
dbbb3184a765d785169f66fa3385f15552aa34d4
695,808
def getFieldShortName(field_name):
    """ Simplifies `field_name` in the exported dataframe by removing Watson Assistant prefixes """
    return (field_name.replace('request.', '')
                      .replace('response.', '')
                      .replace('context.system.', '')
                      .replace('context.', ''))
42c05ebef5d6ec0fe23ffa789f67a8aa37f364bd
695,809
def hyperlink_title(body, docpath, docname):
    """
    Hyperlink titles by embedding appropriate a tag inside
    h1 tags (which should only be post titles).
    """
    body = body.replace("<h1>", '<h1><a href="%s.html">' % (docpath + docname), 1)
    body = body.replace("</h1>", "</a></h1>", 1)
    return body
ae316226ef64a45c97cd6d094617edc2624d1cc8
695,810
def calc_host_nums(netmask: str) -> int:
    """
    Calculates the number of possible IP addresses / hosts for a netmask
    and returns it as an int.
    """
    # Counts literal '0' characters, so the mask is evidently expected as a
    # binary string of bits rather than dotted-decimal.
    return 2 ** sum([i.count("0") for i in netmask])
7429efa501999d1c59a38ac7ecad0a6830dedfbe
695,811
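As noted in the comment above, the function counts literal '0' characters; a quick check assuming a /24 mask written out in bits:

# A /24 mask has 8 host bits -> 2**8 = 256 addresses.
print(calc_host_nums("11111111111111111111111100000000"))  # 256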
async def run_query(query_runner, sample, semaphore, pool):
    """Run query with limit on concurrent connections."""
    async with semaphore:
        return await query_runner.run(sample, pool)
d9005ee36ab308d6d7611af4644198ba7ccf8b5b
695,812
def Home():
    """List all available api routes."""
    return (
        f"Available Routes:<br/><br/>"
        f"-Date and Precipitation information <br/>"
        f"/api/v1.0/precipitation<br/><br/>"
        f"-Station information<br/>"
        f"/api/v1.0/stations<br/><br/>"
        f"Temperature Observation for previous year for most active station<br/>"
        f"/api/v1.0/tobs<br/><br/>"
        f"-min, average, max temperature for a starting date e.g. /api/v1.0/2016-01-01<br/>"
        f"/api/v1.0/&ltstart_date&gt<br/><br/>"
        f"-min, average, max temperature for a date range e.g. /api/v1.0/2016-01-01/2016-07-21<br/>"
        f"/api/v1.0/&ltstart_date&gt/&ltend_date&gt"
    )
c41f3409c1b90db5345fd1f5824dffaaee156911
695,813
import numpy


def gaussian_smooth(read_list, degree=5):
    """Smoothing function for raw bamliquidator output."""
    window = degree * 2 - 1
    weight = numpy.array([1.0] * window)
    weight_gauss = []
    for i in range(window):
        i = i - degree + 1
        frac = i / float(window)
        gauss = 1 / (numpy.exp((4 * frac) ** 2))
        weight_gauss.append(gauss)
    weight = numpy.array(weight_gauss) * weight
    smoothed = [0.0] * (len(read_list) - window)
    for i in range(len(smoothed)):
        smoothed[i] = sum(numpy.array(read_list[i:i + window]) * weight) / sum(weight)
    # pad to return an array of the same length
    smoothed = [0, 0, 0, 0, 0] + smoothed + [0, 0, 0, 0]
    return smoothed
9c1e21480c2faea403e512325bd748dcc8f70405
695,814
import sys


def is_tty_supported() -> bool:
    """
    Taken with appreciation from the Django codebase
    https://github.com/django/django/blob/0d67481a6664a1e66d875eef59b96ed489060601/django/core/management/color.py
    Permissible use under the Django BSD License
    """
    # isatty is not always implemented, https://code.djangoproject.com/ticket/6223.
    return hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
4d62694a919b2bbf3b293ee633bf1d94ef440643
695,815
import toml


def get_version(poetry="pyproject.toml") -> str:
    """Get the version of the package from the pyproject file"""
    # Use the path argument; the original hard-coded "pyproject.toml" and
    # silently ignored the parameter.
    with open(poetry, "r", encoding="utf-8") as f:
        data = toml.loads(f.read())
    return data["tool"]["poetry"]["version"].strip()
56c4d658dbca656bd5964080036c9ca3df079f0d
695,817
def cp_lt_stage_aware(cp0, cp1):
    """ Less-than comparison of two CriticalPath objects in stage-aware mode """
    if cp0.stage is None:
        # CriticalPath with None stage is always shorter than any other
        # critical path
        return True
    elif cp1.stage is None:
        return False
    return (cp0.stage < cp1.stage) or (cp0.stage == cp1.stage and
                                       cp0.comb_latency < cp1.comb_latency)
783ee58a893bb52cd515605728ab4df743661052
695,818
import requests


def load_dynamic_nba_2022_html():
    """Load NBA 2022 team summary html from Basketball Reference"""
    html_url = 'https://www.basketball-reference.com/leagues/NBA_2022.html'
    result = requests.get(html_url).content
    return result
0b856dc170337fabf13fa65cd19966b043edc39d
695,819
def copyfile(infile, outfile, chunksize=8192):
    """Read all data from infile and write them to outfile."""
    size = 0
    while True:
        chunk = infile.read(chunksize)
        if not chunk:
            break
        outfile.write(chunk)
        size += len(chunk)
    return size
62d9d475ac7d9d93f92c2631ec405a8c4981d65e
695,820
def nextGreaterElement_optimal(nums1, nums2):
    """
    Sizes of both arrays are small enough, so we just can do brute-force
    solution in O(m * n), where n is size of nums2 and m is size of nums1.

    If we want to solve this problem in O(n) time, it is not so simple. The
    idea is to traverse nums2 and keep stack with decreasing order of
    elements. When we try to add element, if it is less than last element of
    stack, we just add it. If it is more than the last element, we extract it
    from stack and also put it inside dic: correspondence between numbers and
    its next greater element: we need it, because we have also nums1, which we
    need to traverse after. Next, when we traverse nums1 we can use function
    .get(num, -1), which will return answer for num if it is inside dictionary
    and -1 if it was not found.

    Complexity: Time and space complexity is O(n + m).
    """
    hashmap, stack = {}, []

    # Variant 1 - From RHS to LHS
    for num in nums2[::-1]:
        while stack and num > stack[-1]:
            stack.pop()
        if stack:
            hashmap[num] = stack[-1]
        stack.append(num)

    # Variant 2 - From LHS to RHS. Reset state first: each variant is a
    # complete solution on its own, and running both on shared state
    # would interfere.
    hashmap, stack = {}, []
    for num in nums2:
        while stack and num > stack[-1]:
            hashmap[stack.pop()] = num
        stack.append(num)

    return [hashmap.get(num, -1) for num in nums1]
6c0bff5f6e320c53134c13bddd74b8ad60e794c5
695,821
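A quick run of the monotonic-stack routine above on a classic example:

# Next greater of 4 is absent (-1), of 1 is 3, of 2 is absent (-1).
print(nextGreaterElement_optimal([4, 1, 2], [1, 3, 4, 2]))  # [-1, 3, -1]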
def mul(value, arg):
    """Multiplication

    >>> mul(2, 2)
    4

    """
    return value * arg
50e8f39e52c754c8448f5ce32c040e3e5106af75
695,822
from typing import Optional
from typing import Dict


def get_first_api_gateway(api_gateway_client, api_gateway_name: str) -> Optional[Dict]:
    """
    Get the first API Gateway with the given name. Note, that API Gateways
    can have the same name. They are identified by AWS-generated ID, which
    is unique. Therefore this method lists all API Gateways and returns the
    first one with matching name. If no matching name is found, None is
    returned.

    Args:
        api_gateway_client: API Gateway V2 Client.
        api_gateway_name: Name of the API Gateway function.

    Returns:
        Either a dictionary containing the get_api response, or None if it
        doesn't exist
    """
    response = api_gateway_client.get_apis()
    apis = response.get("Items", [])

    # Limit the number of times we page through the API.
    for _ in range(10):
        # Try finding the match before getting the next batch of api gateways from AWS
        for api in apis:
            if api.get("Name") == api_gateway_name:
                return api

        # Break out of the loop if there's no next batch of api gateways
        next_token = response.get("NextToken")
        if not next_token:
            break

        # Get the next batch of api gateways using next_token
        response = api_gateway_client.get_apis(NextToken=next_token)
        apis = response.get("Items", [])

    # Return None if API Gateway with such name was not found
    return None
fee768f319f2670ecaf9f8c6c66fffc62bcd66f3
695,823
def _arg_ulen1(dvi, delta):
    """Unsigned length *delta*+1

    Read *delta*+1 bytes, returning the bytes interpreted as unsigned."""
    return dvi._arg(delta + 1, False)
11c34f27864b75f3b56f1a023a5226598b3845ac
695,824
def is_sorted(items):
    """Return a boolean indicating whether given items are in sorted order.
    Running time: O(n) because at most loop through the entire array
    Memory usage: O(1) because not creating any new space and everything is
    done in place"""
    for i in range(len(items) - 1):
        # if next item is smaller than current, then list not sorted
        if items[i + 1] < items[i]:
            return False
    return True
59094cd421c104509e54d7c8a40e27b4fcb97d63
695,825
def remove_empty(data):
    """Removes empty items from list"""
    out = []
    for item in data:
        if item == '':
            continue
        out.append(item)
    return out
9ef46381bb76846c92375f47eb481a26359b1d92
695,827
def api_settings(settings):
    """
    Pytest fixture that sets a few default Django settings for the API tests
    in this module. Returns the `settings` object. Doing setup like this here
    via a fixture seems slightly better than putting this in the `test`
    settings module--the relevant settings are closer to the tests that use
    them. Just have to make sure to include this fixture in all of the tests
    that need them.
    """
    settings.REST_FRAMEWORK['PAGINATE_BY_PARAM'] = 'limit'
    settings.REST_FRAMEWORK['PAGINATE_PARAM'] = 'offset'
    settings.REST_FRAMEWORK['SEARCH_PARAM'] = 'search'
    settings.REST_FRAMEWORK['SEARCHTYPE_PARAM'] = 'searchtype'
    settings.REST_FRAMEWORK['MAX_PAGINATE_BY'] = 500
    settings.REST_FRAMEWORK['PAGINATE_BY'] = 500
    return settings
3a0ef2592400b45205279a6039a7d56750893ddf
695,828
import os
import sys


def resource_path(relative_path: str) -> str:
    """
    Get the absolute path to a resource in a manner friendly to PyInstaller.
    PyInstaller creates a temp folder and stores the path in _MEIPASS, which
    this function swaps into a resource path so it is available both when
    building binaries and running natively.
    """
    deposit_cli_path = os.getenv('DEPOSIT_CLI_PATH')
    if deposit_cli_path is not None:
        return os.path.join(deposit_cli_path, 'lib/python3.8/site-packages', relative_path)
    try:
        base_path = sys._MEIPASS  # type: ignore
    except Exception:
        base_path = os.path.abspath(".")
    return os.path.join(base_path, relative_path)
5abb43eca1c241590867542693a71461074e068c
695,829
import re


def MatchPattern(file_path, pattern):
    """Looks for matching specified pattern in the file line by line.

    Args:
        file_path: file path.
        pattern: pattern for matching.

    Returns:
        the match or None.
    """
    try:
        with open(file_path, "r") as f:
            prog = re.compile(pattern)
            for line in f:
                match = prog.match(line)
                if match:
                    return match
    except IOError:
        pass
    except Exception:
        pass
    return None
1ab4f7cf675c3be72bd6b01fb6f6c7fee2668275
695,830
import numpy


def fractional_anisotropy_from_eigenvalues(evals):
    """
    Taken from dipy/reconst/dti.py; see there for documentation.

    :return:
    """
    ev1, ev2, ev3 = evals
    denom = (evals * evals).sum(0)
    if denom > 1e-9:
        fa = numpy.sqrt(
            0.5 * ((ev1 - ev2) ** 2 + (ev2 - ev3) ** 2 + (ev3 - ev1) ** 2) / denom
        )
    else:
        fa = 0.0
    return fa
e5f06b3cad9f6fb74f8f2407faf2851b87e3301a
695,831
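A small check of the FA formula above, assuming numpy and hypothetical eigenvalue triples:

import numpy as np

# Isotropic diffusion (equal eigenvalues) has FA = 0.
print(fractional_anisotropy_from_eigenvalues(np.array([1.0, 1.0, 1.0])))  # 0.0
# Fully anisotropic diffusion (one nonzero eigenvalue) gives FA = 1.
print(fractional_anisotropy_from_eigenvalues(np.array([1.0, 0.0, 0.0])))  # 1.0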
def makeAssessList(df):
    """
    makeAssessList produces a list of assessment weightings for the file
    pointed to by df (a pandas datafile)

    Output: list of integer values
    """
    nAssess = int(df.iloc[4, 2])  # number of assessments
    # make list of assessment values
    assList = []
    for index in range(4, 4 + nAssess):
        cwkTxt = df.iloc[9, index]
        cwkVal = float(cwkTxt.strip("%"))
        assList.extend([cwkVal])
    return assList
0b50cfb4f5d19649eb55080b7e3c6f2fd246163c
695,832
def rem_item_from_list(item, string):
    """ Removes all occurrences of `item` from `string`. If `string` contains
        no occurrences of `item`, nothing is removed."""
    return string.replace(item, "")
5b0406c57aed3b786c4f20501be80e18f945928f
695,833
def post_message_commands(channel, text, thread_ts):
    """ Check slack message """
    assert text == "Tracking for following tests:\n*test* (2100-01-01 00:00:00)\n"
    return {"ok": "ok"}
20e1a74b8e71c77c79fff60488d5401300ea7ab2
695,834
def call_behavior_action_compute_output(self, inputs, parameter):
    """
    Get parameter value from call behavior action

    :param self:
    :param inputs: token values for object pins
    :param parameter: output parameter to define value
    :return: value
    """
    primitive = self.behavior.lookup()
    inputs = self.input_parameter_values(inputs=inputs)
    value = primitive.compute_output(inputs, parameter)
    return value
48d3167657afaa89c86754e59380b63ab5874607
695,835
from bs4 import BeautifulSoup


def strip_html(string: str):
    """ Use BeautifulSoup to strip out any HTML tags from strings. """
    return BeautifulSoup(string, "html.parser").get_text()
796fc52ddd303906c7fd217275cb2a897e76767c
695,836
def email_sort(email):
    """ Split the given email address into a reverse order tuple, for sorting i.e (domain, name) """
    return tuple(reversed(email[0].split('@')))
d841ea1f468d11e89df5d493ac74c28705bd6c27
695,837
from typing import List
import math


def euclidian_distance(a: List[float], b: List[float]) -> float:
    """ Returns the Euclidean distance between two N-dimensional points """
    return math.sqrt(sum((x - y) ** 2 for (x, y) in zip(a, b)))
57aac940e12c64978c7ecc4aea567653d6ee780a
695,838
def canWin2(s):
    """
    :type s: str
    :rtype: bool
    """
    if not s or len(s) < 2:
        return False
    for i in range(len(s) - 1):
        if s[i] == '+' and s[i + 1] == '+':
            temp = s
            s = s[:i] + '--' + s[i + 2:]
            if not canWin2(s):
                return True
            s = temp
    return False
471f21459e0aaf930ae2dd2bef79200523c9f29a
695,839
def cu_gene(obj):
    """Extracts the gene name from a codon usage object in std. format"""
    return str(obj.Gene).lower()
5df9facb6de7b954efe8e273f83c4f9cc9b2725a
695,840
import json


def load_config(config_filename):
    """ Load the population config for this simulation.

    Args:
        config_filename (str): Filename for the simulation's config.

    Returns:
        Dict containing data from the config file.
    """
    if not config_filename:
        return {}

    config_data = []
    with open(config_filename) as config_file:
        config_data = json.loads(config_file.read())

    if not config_data:
        raise RuntimeError("No Config Loaded: {}".format(config_filename))

    return config_data
fffa9ceade1f83ff142da2b8de6484647e165dd8
695,841
def prune_rare_cats(df):
    """Remove any categories that have less than 5 restaurants"""
    new_df = df.copy()
    categories = [item for item in list(df.columns) if 'category_' in item]
    for category in categories:
        if df[category].sum() < 5:
            new_df.drop(columns=category, inplace=True)
    return new_df
475d3de9b5b3937ce856bb303c6582d29d070d2b
695,842
import torch


def extract_slice_from_mri(image, index_slice, view):
    """
    This is a function to grab one slice in each view and create an rgb image
    for transfer learning: duplicate the slice into the R, G, B channels.

    :param image: (tensor)
    :param index_slice: (int) index of the wanted slice
    :param view:
    :return:

    To note, for each view:
        Axial_view    = "[:, :, slice_i]"
        Coronal_view  = "[:, slice_i, :]"
        Sagittal_view = "[slice_i, :, :]"
    """
    # reshape the tensor, delete the first dimension for slice-level
    image_tensor = image.squeeze(0)
    # sagittal
    if view == 0:
        slice_select = image_tensor[index_slice, :, :].clone()
    # coronal
    elif view == 1:
        slice_select = image_tensor[:, index_slice, :].clone()
    # axial
    elif view == 2:
        slice_select = image_tensor[:, :, index_slice].clone()
    else:
        raise ValueError("This view does not exist, please choose view in [0, 1, 2]")

    extracted_slice = torch.stack((slice_select, slice_select, slice_select))
    return extracted_slice
84b6120aab497f03347f5a76ba7a42abe8bb4741
695,843
def get_buildings_in_buffer(buf, buildings, ids, idx):
    """
    Input the buffer polygon and building geometries to check if the building
    intersects with the buffer. Return all buildings within the buffer (based
    on ID). An R-tree is used to speed things up.
    """
    bld_in_buffer = {}
    for i in idx.intersection(buf.bounds):
        if buf.intersects(buildings[i]):
            bld_in_buffer[ids[i]] = buildings[i]
    return bld_in_buffer
ffb55879125997f824965998999225311c221c33
695,844
def istag(arg, symbol='-'):
    """Return true if the argument starts with a dash ('-') and is not a number

    Parameters
    ----------
    arg : str

    Returns
    -------
    bool
    """
    return arg.startswith(symbol) and len(arg) > 1 and arg[1] not in '0123456789'
fd8c1edcf4289177883e3fe9efef626128bc583e
695,845
def _jupyter_nbextension_paths():
    """Called by Jupyter Notebook Server to detect if it is a valid nbextension and
    to install the widget

    Returns
    =======
    section: The section of the Jupyter Notebook Server to change.
        Must be 'notebook' for widget extensions
    src: Source directory name to copy files from. Webpack outputs generated files
        into this directory and Jupyter Notebook copies from this directory during
        widget installation
    dest: Destination directory name to install widget files to. Jupyter Notebook
        copies from `src` directory into <jupyter path>/nbextensions/<dest> directory
        during widget installation
    require: Path to importable AMD Javascript module inside the
        <jupyter path>/nbextensions/<dest> directory
    """
    return [{
        'section': 'notebook',
        'src': 'nbextension/static',
        'dest': 'graph_notebook_widgets',
        'require': 'graph_notebook_widgets/extension'
    }]
84850ab88ec4f43cd035258ad99070133ec07077
695,846
def sparse_max(A, B):
    """Max of two sparse matrices
    ======

    Computes the elementwise max of two sparse matrices.
    Matrices should both be nonnegative and square.

    Parameters
    ----------
    A : (n,n) scipy sparse matrix
        First matrix.
    B : (n,n) scipy sparse matrix
        Second matrix.

    Returns
    -------
    C : (n,n) scipy sparse matrix
        Sparse max of A and B
    """
    I = (A + B) > 0
    IB = B > A
    IA = I - IB
    return A.multiply(IA) + B.multiply(IB)
93a18d367067f7d2f99212f078e4013525414adf
695,847
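A short usage sketch for sparse_max, assuming scipy and toy 2x2 matrices:

import numpy as np
from scipy import sparse

A = sparse.csr_matrix(np.array([[1., 0.], [0., 3.]]))
B = sparse.csr_matrix(np.array([[0., 2.], [0., 1.]]))
# Elementwise max: A wins on the diagonal, B wins at (0, 1).
print(sparse_max(A, B).toarray())
# [[1. 2.]
#  [0. 3.]]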
def refine_positions(position, wfn, x_grid, y_grid):
    """ Perform a least squares fitting to correct the vortex positions."""
    x_pos, y_pos = position

    # If at edge of grid, skip the correction:
    if x_pos == len(x_grid) - 1:
        return x_pos, y_pos
    if y_pos == len(y_grid) - 1:
        return x_pos, y_pos

    x_update = (-wfn[x_pos, y_pos] - wfn[x_pos, y_pos + 1]
                + wfn[x_pos + 1, y_pos] + wfn[x_pos + 1, y_pos + 1]) / 2
    y_update = (-wfn[x_pos, y_pos] + wfn[x_pos, y_pos + 1]
                - wfn[x_pos + 1, y_pos] + wfn[x_pos + 1, y_pos + 1]) / 2
    c_update = (3 * wfn[x_pos, y_pos] + wfn[x_pos, y_pos + 1]
                + wfn[x_pos + 1, y_pos] - wfn[x_pos + 1, y_pos + 1]) / 4

    Rx, Ry = x_update.real, y_update.real
    Ix, Iy = x_update.imag, y_update.imag
    Rc, Ic = c_update.real, c_update.imag

    det = 1 / (Rx * Iy - Ry * Ix)
    delta_x = det * (Iy * Rc - Ry * Ic)
    delta_y = det * (-Ix * Rc + Rx * Ic)

    x_v = x_pos - delta_x
    y_v = y_pos - delta_y

    # Return x and y positions:
    return ((y_v - len(y_grid) // 2) * (y_grid[1] - y_grid[0]),
            (x_v - len(x_grid) // 2) * (x_grid[1] - x_grid[0]))
49e7437eaca961d1576887829caf16d90567000d
695,848
def list_():
    """Lists the existing entries."""
    return "Sorry I forgot it all :("
8fa581b896ecc0118265563d1ca2f7ad3d57601b
695,849
import requests


def PostcodeGeocode(postcodes):
    """
    This is to return coordinates for a series of postcodes. Each request can
    only handle 100 postcodes, so this will paginate through until complete.

    :param postcodes: An array of postcodes to be geocoded
    :return: A zipped array of lat/long coordinates
    """
    lat = []
    long = []
    endpoint = 'http://api.postcodes.io/postcodes/'

    # Send the request
    if len(postcodes) > 100:
        start = 0
        end = 100
        while start < len(postcodes):
            batch = postcodes[start:end]
            results = requests.post(endpoint, {"postcodes": batch}).json()
            # Parse results
            for i in results['result']:
                lat.append(i['result']['latitude'])
                long.append(i['result']['longitude'])
            start += 100
            end += 100
            if len(postcodes) - start < 100:
                end = len(postcodes)
        return lat, long
    else:
        results = requests.post(endpoint, {"postcodes": postcodes}).json()
        # Parse results
        for i in results['result']:
            lat.append(i['result']['latitude'])
            long.append(i['result']['longitude'])
        return lat, long
d9274025d1b834fa6d33b6341fde31c408bcd825
695,850
def get_traces_from_log(log):
    """
    Desc:
        read the traces from the log
    Used:
        perform_pattern_abstraction(), abstraction_suppot_functions.py
    Input:
        log, object of log imported by PM4py
    Output:
        list of traces
    BY SAUD
    """
    traces = []
    for trace in log:
        t = [l['Activity'] for l in trace]
        traces.append(t)
    return traces
ea0a17dba6eb06668dab4e99b191f1c3e19c058f
695,851
def custom_feature_set():
    """
    Return a custom set of feature operators.

    Use this function to expose any custom ruleset logic you want to add that
    doesn't rely on the Operator model system.

    This function will be called by all agents created by the system, so you
    can use it for custom logic, but we should probably turn it off if we
    ever have a production server.
    """
    return []
195f53ec296138d0018fa0886d97f3ad7fc031c7
695,852
def weeks_elapsed(day1: int, day2: int) -> int:
    """
    def weeks_elapsed(day1, day2):
    (int, int) -> int

    day1 and day2 are days in the same year. Return the number of full weeks
    that have elapsed between the two days.

    >>> weeks_elapsed(3, 20)
    2
    >>> weeks_elapsed(20, 3)
    2
    >>> weeks_elapsed(8, 5)
    0
    >>> weeks_elapsed(40, 61)
    3
    """
    max_day = max(day1, day2)
    min_day = min(day1, day2)
    days_in_between = max_day - min_day
    return days_in_between // 7
856fd5439dbdb50581e9a41f30c66b6fce3a7c00
695,853