content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
from typing import Sequence


def chg_modelqa(models: Sequence, ordinal: int, **kwargs) -> int:
    """Return the curve-fit quality of the model whose date range covers
    the given ordinal date.

    Args:
        models: sorted sequence of CCDC namedtuples that represent the
            pixel history
        ordinal: standard python ordinal starting on day 1 of year 1

    Returns:
        curve_qa of the matching model, or 0 when nothing matches
    """
    if not models or ordinal <= 0:
        return 0

    match = next(
        (m for m in models if m.start_day <= ordinal <= m.end_day), None)
    return match.curve_qa if match is not None else 0
9d370dcecccd67204d2c88edf5d8d426f72ef2dd
30,265
def masktodecimal(upper_limit, lower_limit, num, k):
    """Convert a binary mask value to its decimal counterpart.

    Implements the scaling term of m = a + ((b - a) / (2^k - 1)) * m*
    from "Genetic Algorithm using Theory of Chaos"
    (https://www.researchgate.net/publication/277604645).

    Attributes
    ----------
    lower_limit : int
        lower limit for binary bits
    upper_limit : int
        upper limit for binary bits
    num : int
        defined as m* in the equation
    k : int
        defined as k in the equation

    Returns
    -------
    int:
        decimal number
    """
    numerator = (num - lower_limit) * (2 ** k - 1)
    denominator = upper_limit - lower_limit
    return numerator / denominator
2836733796a2137576fe52f4e505ed64d894303a
30,266
import ipaddress def _ipv6_mac(network): """ Get the MAC address from an auto generated IPv6 address """ # Example address used in comments: fe80::a021:27ff:fe00:d8 address = ipaddress.ip_interface(network).ip # Fully written out fe80::a021:27ff:fe00:d8 → fe80:0:0:0:a021:27ff:fe00:d8 # Remove the first 64 bits binary_digits = bin(int(address))[2:].zfill(128) removed_prefix = binary_digits[64:] # We now have a021:27ff:fe00:d8 # Verify it is auto generated, bits 24-40 are 'fffe': if hex(int(removed_prefix[24:40], 2)).lower() != "0xfffe": return None # Flip bit 6 using a mask flipped_bit = "0" if removed_prefix[6] == "1" else "1" bit_flipped = removed_prefix[0:6] + flipped_bit + removed_prefix[7:] # Remove the inserted ff:fe in the middle mac_in_binary = bit_flipped[0:24] + bit_flipped[40:] mac_in_hex = hex(int(mac_in_binary, 2)) mac_in_hex_zero_padded = mac_in_hex[2:].zfill(12) mac_parts = list() for i in range(0, len(mac_in_hex_zero_padded), 4): mac_parts.append(mac_in_hex_zero_padded[i : i + 4]) return ".".join(mac_parts)
b72a21a1d8513fc1766f9efb43a2b33718575c7a
30,267
def eco():
    """EcoSim output options"""
    options = {1: ("ECO_1", "EcoSim outputs")}
    return options
718317f2366f0938107558355e202fb8a1f1f5bb
30,269
def add_python_data(tuple):
    """This function returns a Python data structure as well."""
    # NOTE(review): parameter shadows the builtin `tuple`; kept for
    # interface compatibility with existing callers.
    nested = ['first', {'key': 'value'}]
    return [tuple.get(0), tuple.get(1), nested]
d7a5fcf17205f31affdd56e258b9ca0379390f5b
30,270
import time


def update_delay(alarm_time: str) -> int:
    """Return the number of seconds from the current UTC time until the
    alarm time.

    Keyword arguments:
    alarm_time (string) : Time of alarm in hh:mm as fetched from
        parameters in url

    Returns:
    delay (int) : time in seconds between current time and time of alarm
    """
    alarm_seconds = int(alarm_time[0:2]) * 60 * 60 + int(alarm_time[3:5]) * 60

    now = time.gmtime()
    current_seconds = now.tm_hour * 60 * 60 + now.tm_min * 60 + now.tm_sec

    # If the alarm time already passed today, schedule it for tomorrow.
    if current_seconds > alarm_seconds:
        return 24 * 60 * 60 - current_seconds + alarm_seconds
    return alarm_seconds - current_seconds
6d938e4bef38fe53950613a487dd17d02f066235
30,275
def get(path):
    """ Read & return the contents of the provided `path` with the given `content`."""
    with open(path, encoding="utf-8") as handle:
        text = handle.read()
    return text
41bf610c2251e2b3bc31969a9a96a0de14a5c123
30,276
import yaml


def yaml_read(path):
    """Reads yaml from disk"""
    with open(path, "r") as handle:
        document = yaml.load(handle, Loader=yaml.FullLoader)
    return document
838b286792dfa0a38a385fe38aafdef92945e263
30,278
import functools


def handle_exception(function):
    """Decorator that converts any exception raised by *function* into a
    dict describing the failure.

    :param function: callable to wrap
    :return: wrapped callable; on error it returns
        ``{"function": name, "message": exception}``
    """
    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        try:
            return function(*args, **kwargs)
        except Exception as err:
            return {"function": function.__name__, "message": err}
    return wrapper
94e15674622409b873af142b8f72dca72639ec90
30,279
def expand_gray(x):
    """Reverse the effect of function compress_gray()

    Given a compressed 'gray' Golay code or cocode word, the function
    returns the original 'gray' word in code or cocode representation.
    """
    low_nibble = x & 0x0f
    high_bits = (x & 0x30) << 6
    return low_nibble + high_bits
0c7078733eff4f57f2463d49f755de131817a463
30,281
def chunks(obj, size, start=0):
    """Convert `obj` container to list of chunks of `size`."""
    pieces = []
    for offset in range(start, len(obj), size):
        pieces.append(obj[offset:offset + size])
    return pieces
c523a346906b85c121bf67a56a806d51f639eeb3
30,282
def http_verb(dirty: str) -> str:
    """
    Given a 'dirty' HTTP verb (uppercased, with trailing whitespaces),
    strips it and returns it lowercased.
    """
    stripped = dirty.strip()
    return stripped.lower()
74601bb0d5e22f632612fbf63924e27777ce8bf3
30,283
def color_index(image, color):
    """Find the color index"""
    # Palette is a flat [r, g, b, r, g, b, ...] list; regroup as triples.
    flat = image.getpalette()
    triples = list(zip(flat[0::3], flat[1::3], flat[2::3]))
    return triples.index(color)
57668438bc6569d4892b9a5d2cf7230b9b72f555
30,284
def isEdgeLocalizedWithPostProcessingAndLaundering(edge_id, edge, operation):
    """
    APPLIES: SELECTION
    A 'inclusion rule' that includes AntiForensic, PostProcessing and Laundering Masks.
    Excludes Output, TimeAlteration, Transforms, Donors, and DeleteAudioSample.
    'Blue' links override.
    :param edge_id:
    :param edge:
    :param operation:
    :return:
    @type Operation
    """
    # 'Blue' link: an explicit composite mask always wins.
    if edge['recordMaskInComposite'] == 'yes':
        return True
    excluded_ops = ['TransformSeamCarving', 'TransformCrop', 'Donor',
                    'TransformDownSample', 'TransformReverse',
                    'DeleteAudioSample']
    if edge['op'] in excluded_ops:
        return False
    # An edge with a non-'no' empty-mask marker is excluded.
    if 'empty mask' in edge and edge['empty mask'] != 'no':
        return False
    return operation.category not in ['Output', 'TimeAlteration']
4bf6513e6702618cf71c167483aa259622ee462a
30,286
import copy


def trim_audio(A, start, stop):
    """Trim copy of MultiSignal audio to start and stop in seconds."""
    trimmed = copy.deepcopy(A)
    assert start < len(trimmed) / trimmed.fs, 'Trim start exceeds signal.'
    lo = int(start * trimmed.fs)
    hi = int(stop * trimmed.fs)
    for idx in range(trimmed.channel_count):
        trimmed.channel[idx].signal = trimmed.channel[idx].signal[lo:hi]
    return trimmed
722a6b40bed4b6e441cddc5e0a0c5ca75207f701
30,288
import math


def fermats_composite(n):
    """Use Fermat's sum-of-squares algorithm to quickly derive a
    composite factor of n, assuming n is neither even nor prime.

    For the even case it may be necessary to run trial divisions first
    and ensure n is odd before running a round of Fermat.

    Arguments:
        n (:int) - the number to pull a composite from

    Returns:
        (:int) - one composite that evenly divides n, or None for even n

    Examples:
        >>> fermats_composite(932179818119999999999999999999)
        965510746754507

    NOTES: Domain error for 40 or more digits, also very slow for 30
    digits or more. To fix, use a JIT decorator so that this function
    bypasses the byte-code compiler and runs directly as a C extension.

    Author: p4r4xor
    """
    if n % 2 == 0:
        # Even input is out of scope; caller must strip factors of two.
        return None
    # Walk x up from ceil(sqrt(n)) until x^2 - n is a perfect square.
    x = math.ceil(math.sqrt(n))
    while True:
        residual = x * x - n
        root = math.sqrt(residual)
        if root.is_integer():
            return x + root
        x += 1
1d24df09100f5aebe106ee016fc4edf46bfad199
30,289
def StatsAlerter(values, maxThreshold, email_led_alert):
    """
    This function checks if max is above threshold and raises the alerts
    :param values: Max values which need to be checked with the threshold value
    :param maxThreshold: Threshold value when crossed raises the flags
    :param email_led_alert: list of email and led alert (mutated in place
        when a value falls back below the threshold)
    :return: The dictionary which has the relevant alert flag for respective max value
    """
    result = {}
    for value in values:
        if value <= maxThreshold:
            # Below threshold: reset the shared alert flags in place.
            email_led_alert[0] = False
            email_led_alert[1] = False
        result[value] = {
            "emailAlert": email_led_alert[0],
            "ledAlert": email_led_alert[1],
        }
    return result
11971ac990f1d966dbe6a7cc298a25b668a52d08
30,290
from sys import version


def get_release_number():
    """ Gets the current release version. """
    release = version
    return release
dc4dcf9a9e3b714c34af159e409e593f733984f9
30,294
def all_values_unique(d):
    """Return whether no value appears more than once across all value
    sets of multi-valued mapping `d`."""
    seen = set()
    for multivalue in d.values():
        # Compare against previously seen value sets only (duplicates
        # inside a single value set are tolerated), then record.
        if not seen.isdisjoint(multivalue):
            return False
        seen.update(multivalue)
    return True
a4663130a72e88b77dc47878165149fc35b50cec
30,295
def calculate_future_value(present_value, interest_rate, compounding_periods, years):
    """
    Calculates the future value of money given the present_value,
    interest rate, compounding period, and number of years.

    Args:
        present_value (float): The present value
        interest_rate (float): The interest rate
        compounding_periods (int): The compounding period
        years (int): The number of years

    Returns:
        The future value of money, rounded to 2 decimal places.
    """
    growth = (1 + interest_rate / compounding_periods) ** (compounding_periods * years)
    return round(present_value * growth, 2)
80578a52a7339e647846b342ff44e0593351a204
30,296
def wayburnSeader2Function(x):
    """Wayburn-Seader 2 benchmark function.

    Subject to -500 <= x <= 500. The global minimum is located at
    x* = {(0.2, 1), (0.425, 1)}, f(x*) = 0.
    """
    a = x['IndPosition0']
    b = x['IndPosition1']
    inner = 1.613 - 4 * (a - 0.3125) ** 2 - 4 * (b - 1.625) ** 2
    return inner ** 2 + (b - 1) ** 2
98e75f819e54edaede607b4c7776e2a81851cd26
30,297
import argparse


def get_parameters():
    """ Parse script arguments """
    parser = argparse.ArgumentParser(prog='fs_seqs_executer.py')
    parser.add_argument('--exec_dir', type=str, default="build_test")
    # Assume we are on exec dir
    parser.add_argument('--path_to_json', type=str, default="seqs")
    # This folder must exist
    parser.add_argument('--log_folder_path', type=str, default="logs")
    parser.add_argument('--protocol', type=str, default="tcp")
    parser.add_argument('--save_log', type=bool, default=True)
    return parser.parse_args()
d42875100e0dc237946eb4eae6b2db0231921733
30,298
import os


def get_reference_from_pysam_alignmentFile(
        pysam_AlignmentFile, ignore_missing=False):
    """Extract path to reference from pysam handle

    Args:
        pysam_AlignmentFile (pysam.AlignmentFile)
        ignore_missing(bool) : Check if the file exists, if not return None

    Returns:
        path : path to bam file (if exists or ignore_missing is supplied) or None
    """
    fasta_suffixes = ('.fa', '.fasta', '.fasta.gz', '.fa.gz')
    try:
        # Scan the bwa program records for a fasta-looking CL argument.
        for program in pysam_AlignmentFile.header.as_dict()['PG']:
            if program.get('ID') != 'bwa':
                continue
            for token in program.get('CL').split():
                if token.endswith(fasta_suffixes) and (
                        ignore_missing or os.path.exists(token)):
                    return token
    except Exception:
        # Header may be missing or malformed; treat as "no reference".
        pass
e9bcb799bdc34f0d2f1266bf5f7c57a14494047f
30,299
def align(value, m):
    """Increase ``value`` to the next multiple of ``m``.

    Returns ``value`` unchanged when it is already a multiple of ``m``.
    Computed arithmetically in O(1) instead of the old one-step-at-a-time
    increment loop (which was O(m)).

    :param value: integer to round up
    :param m: positive alignment step
    :return: smallest multiple of ``m`` that is >= ``value``
    """
    # (-value) % m is the distance to the next multiple for positive m.
    return value + (-value % m)
9171b62c71ae21b51b2f6cffe9e9e2a6d4778446
30,300
def get_ip(request):
    """
    Attempts to extract the IP number from the HTTP request headers.

    Looks up REMOTE_ADDR directly, then falls back to a
    case-insensitive lookup, and finally to "0.0.0.0".
    """
    key = "REMOTE_ADDR"
    meta = request.META

    # Lowercase keys for the case-insensitive fallback lookup.
    simple_meta = {k.lower(): v for k, v in meta.items()}

    # BUG FIX: the fallback used to look up the UPPERCASE key in the
    # lowercased dict, so it could never match; use key.lower().
    ip = meta.get(key, simple_meta.get(key.lower(), "0.0.0.0"))
    return ip
2ccb542312257a955b0e2a34c2f63693c818955d
30,303
def format_scraped_json(data: str) -> str:
    """Formatting hacks to be able to parse the JSON from the script."""
    # Strip each line and normalise single quotes to double quotes,
    # then remove trailing commas before closing braces.
    cleaned = [line.strip().replace('\'', '"') for line in data.splitlines()]
    joined = '\n'.join(cleaned)
    return joined.replace(',\n}', '\n}').replace(', }', '}')
e5c81bc0527f7514ef5baae61734c5a0cea69019
30,304
import re


def sanitize_title(title):
    """Generate a usable anchor from a title string.

    Lowercases the title and replaces each run of non-word characters
    with a single dash.
    """
    # BUG FIX: the old pattern "[\W+]" put '+' INSIDE the character
    # class (a literal plus), so each separator character produced its
    # own dash; r"\W+" collapses every run into one dash.
    return re.sub(r"\W+", "-", title.lower())
bb6b1cf9a5e0e9b9e896d35e6c7b77beb631ac64
30,305
def get_tag_wrapper(save_attrs, tag_class):
    """Creates and returns wrapper for tags

    Args:
        save_attrs: If parameter is true, attributes of tag will be
            saved, default False.
        tag_class: Tag class.

    Returns:
        Wrapper for tags.
    """
    class _TagWrapper:
        """Closure-backed adapter around *tag_class*."""

        def __init__(self):
            # Remember the tag factory and the attribute policy.
            self._tag = tag_class
            self._save_attrs = save_attrs

        def starttag_string(self, tag):
            return tag.starttag_string(save_attrs=self._save_attrs)

        def endtag_string(self, tag):
            return tag.endtag_string()

        def create(self, name, attrs):
            return self._tag(name, attrs)

    return _TagWrapper()
bb8a8a12e205e86e3cb0d4c3a600d87ed0bafe8d
30,306
def sort_variables(var):
    """Returns sorted variables in an alphabetical order"""
    variables = list(var)
    width = max(set(len(v.name) for v in variables))
    # Zero-pad every name to the same width so that names with numeric
    # suffixes compare in a natural-ish order, then sort.
    for v in variables:
        v.name = v.name.zfill(width)
    ordered = sorted(variables, key=lambda v: v.name)
    # Undo the padding (note: this also strips any original leading '0').
    for v in ordered:
        v.name = v.name.lstrip('0')
    return ordered
41ac1606d3d2385fd3d55f1f0f9d0be49eb94313
30,307
from os.path import dirname, join


def read_key(file_name):
    """Retrieves a GitHub API key from a file.

    Reads the first line of a file, located next to this module, that
    contains the GitHub API key.
    Usage: key = read_key('gh_key')

    Parameters
    ----------
    file_name : str

    Returns
    -------
    key : str
    """
    file_path = join(dirname(__file__), "./" + file_name)
    with open(file_path, 'r') as handle:
        # remove newline & trailing whitespace
        return handle.readline().rstrip()
1df7773c5304d4170d8f60641ac99928f7390c68
30,308
from typing import Any
from typing import Dict
from typing import Tuple


def set_process(node_id: str, process_def: Dict[str, Any], process_ref: str,
                process_name: str) -> Tuple[str, Dict[str, Any], str]:
    """Set the PLACEHOLDER to the given process name."""
    # process_def is updated in place; the ref string is rebuilt.
    process_def["process_id"] = process_name
    updated_ref = process_ref.replace("PLACEHOLDER", process_name)
    return node_id, process_def, updated_ref
811f54692df5d32307143b4a27c4337a89d0dbee
30,309
def dct2channels_last(image):
    """ Rearrange DCT image from [H,W] to [H//8, W//8, 64] """
    assert len(image.shape) == 2, f"{image.shape}"
    assert image.shape[0] % 8 == 0, f"{image.shape}"
    assert image.shape[1] % 8 == 0, f"{image.shape}"
    rows, cols = image.shape[0] // 8, image.shape[1] // 8
    # View as (rows, 8, cols, 8), bring the two 8x8 block axes together,
    # then flatten each 8x8 block into a 64-long channel axis.
    blocked = image.reshape(rows, 8, cols, 8)
    reordered = blocked.transpose(0, 2, 1, 3)
    return reordered.reshape(rows, cols, 64)
7752d50f08da1d5b63044138f50c52cc4187b49f
30,310
def IsGoodTag(prefixes, tag):
    """Decide if a string is a tag

    @param prefixes: set of prefixes that would indicate the tag being suitable
    @param tag: the tag in question
    """
    return any(tag.startswith(prefix) for prefix in prefixes)
7c772b24c0b17257654fca0fb1e5d6c41ffbf9a9
30,311
import requests


def get_submissions(course_id, quiz_id, headers):
    """Creates a small list of jsons that can be made into a file very easily
    then converted to a dataframe for exploration

    This particular design is just for grabbing pagniated information from
    the api and returning it in a useful way.

    Usage:
    This function requires that you have an API predefined. Most users won't
    notice this in the front end as we'll build the key into the program.
    If you're servicing in the future headers should look like:
    headers = {"Authorization":f"Bearer {api_key}"}

    Example ids used, not real
    jsons = get_submissions(400, 1000)
    with open("some_json_title.json", "w") as outfile:
        json.dump(data_set, outfile)
    """
    # First page of quiz submissions for this course/quiz.
    r = requests.get(f"https://lambdaschool.instructure.com/api/v1/courses/{course_id}/quizzes/{quiz_id}/submissions", headers=headers)
    data_set = []
    raw = r.json()
    # Tag each submission with the ids it was fetched under so the rows
    # remain identifiable once flattened into a dataframe.
    for submission in raw["quiz_submissions"]:
        submission["course_id"] = course_id
        submission["quiz_id"] = quiz_id
        data_set.append(submission)
    # Canvas-style pagination: follow the 'next' relation in the Link
    # headers until it disappears.
    if "next" in r.links.keys():
        while "next" in r.links.keys():
            r = requests.get(r.links["next"]["url"], headers=headers)
            raw = r.json()
            for submission in raw["quiz_submissions"]:
                submission["course_id"] = course_id
                submission["quiz_id"] = quiz_id
                data_set.append(submission)
            # Progress message once the final page has been consumed.
            if "last" in r.links.keys() and r.links['current']['url'] == r.links['last']['url']:
                print('Done!')
    else:
        print("Just one page!")
    return data_set
ed7d813f80a5adce0d6450b565ecad0fb48fa200
30,312
import random


def unique_order_id_generator(instance, range_from=100000000000, range_until=1000000000000):
    """
    This is for a Django project with order_id field.

    Draws a random candidate in [range_from, range_until) and retries
    recursively until it does not collide with an existing row.
    """
    new_order_id = random.randrange(range_from, range_until)
    Klass = instance.__class__
    qs_exists = Klass.objects.filter(unique_order_id=new_order_id).exists()
    if qs_exists:
        # BUG FIX: the retry used to drop the caller's custom range and
        # silently fall back to the defaults; forward both bounds.
        return unique_order_id_generator(instance, range_from, range_until)
    return new_order_id
76a999defa43a664698bf747d1272331e6c756ed
30,314
import random


def integer(start, end, steps=1):
    """
    Function integer

    Return a random integer

    Inputs:
    - start: minimum allowed value
    - end: maximum allowed value (inclusive)
    - steps: the interval from which to select the random integers

    Output: a string containing a random integer
    """
    # Guard against fractional or non-positive step sizes.
    if type(steps) == float or steps < 1:
        steps = 1
    value = random.randrange(start, end + 1, steps)
    return str(int(value))
99333decb006a547229eaf7b950a803cc8a450b0
30,315
def bit_board_split(board):
    """Split a given board in a bit board for each player

    # Arguments
        board: matrix (7x7), required

    # Return
        Return a bit board of 56 of length, one for each player, which
        represents (in binary) the positions of the player on the board.
    """
    p1_bits, p2_bits = ['0'] * 56, ['0'] * 56
    pos = 0
    for row in board:
        for cell in row:
            if cell == 1:
                p1_bits[pos] = '1'
            elif cell == -1:
                p2_bits[pos] = '1'
            pos += 1
        # One padding bit per row: 7 cells + 1 pad = 8 bits per row.
        pos += 1
    return int(''.join(p1_bits), 2), int(''.join(p2_bits), 2)
776f334e5f122dd4df50bd6614a02a6de14675e5
30,316
import re


def cleanlines(lines):
    """Remove comments and blank lines from splitlines output."""
    comment_re = re.compile('(.*?)(//|%|#)')
    # Strip everything from the first comment marker onward, in place
    # (the input list is deliberately mutated, as before).
    for i, line in enumerate(lines):
        m = comment_re.match(line)
        if m is not None:
            lines[i] = m.group(1)
    # Drop lines that are empty after stripping whitespace.
    return [s for s in (l.strip() for l in lines) if s]
2971936b5b7098983aad3af40c82d337f998f5a1
30,317
import torch


def apply_trans(x, trans):
    """Apply spatial transformations to input

    Attributes:
        x (torch.Tensor): Input Tensor
        trans (torch.nn.Module): Spatial Transformer module

    Returns:
        torch.Tensor: Output Tensor
    """
    # Swap channel/point axes, apply the batched matrix product, swap back.
    swapped = x.transpose(2, 1)
    transformed = torch.bmm(swapped, trans)
    return transformed.transpose(2, 1)
152210b5f544d524aff94039ca1cb33dfdf1f9f5
30,318
import zlib def _adler32(fname): """Compute the adler32 checksum on a file. :param fname: File path to the file to checksum :type fname: str """ with open(fname, 'rb') as f: checksum = 1 while True: buf = f.read(1024*1024*8) if not buf: break checksum = zlib.adler32(buf, checksum) return checksum
7e7b37d39cdd7dbd1795aa25b48460350e121dae
30,319
def event_is_virtual(event):
    """ Determine if event is virtual. Produces a boolean."""
    if 'location' not in event:
        return False
    location = event['location']
    if '@type' not in location:
        return False
    return location['@type'] == "VirtualLocation"
6fb246f65ff87e38fc19a7f619b7f1a521f49060
30,320
def time_str_from_datetime_str(date_string: str) -> str:
    """
    Extracts the time parts of a datetime.
    Example: 2019-12-03T09:00:00.12345 will be converted to: 09:00:00.12345
    :param date_string:
    :return:
    """
    parts = date_string.split('T')
    return parts[1]
cd08fc6cb55854cacf1620aea4b02692b7925cd7
30,322
def create_regcontrol_settings_commands(properties_list, regcontrols_df, creation_action='New'):
    """This function creates a list of regcontrol commands, based on the
    properties list and regcontrol dataframe passed

    Parameters
    ----------
    properties_list
    regcontrols_df
    creation_action

    Returns
    -------
    list
    """
    if properties_list is None:
        properties_list = ["vreg", "band"]
    commands = []
    for _, row in regcontrols_df.iterrows():
        parts = [f"{creation_action} RegControl.{row['name']}"]
        parts.extend(f" {name}={row[name]}" for name in properties_list)
        commands.append("".join(parts))
    return commands
5095ad7a3aa5ee1e8ada5f7e9333f5a7008511ca
30,323
def scope_guard(target):
    """Scope guard that forwards the target by the first argument.

    Returns a decorator; the decorated function runs inside ``with
    target`` and receives ``target`` as its first positional argument.
    """
    def _decorator(wrapped_fn):
        def _wrapper(*args, **kwargs):
            with target:
                # BUG FIX: propagate the wrapped function's return value
                # (it used to be silently discarded).
                return wrapped_fn(target, *args, **kwargs)
        return _wrapper
    return _decorator
6cbc9a9187b5af7982d935ff642956353e15e57c
30,324
def _getProfile(APIConn, screenName=None, userID=None): """ Get data of one profile from the Twitter API, for a specified user. Either screenName string or userID integer must be specified, but not both. :param APIConn: authenticated API connection object. :param screenName: The name of Twitter user to fetch, as a string. :param userID: The ID of the Twitter user to fetch, as an integer. Cannot be set if screenName is also set. :return tweepy.User: instance for requested Twitter user. """ assert ( screenName or userID ), "Expected either screenName (str) or userID (int) to be set." assert not ( screenName and userID ), "Cannot set both screenName ({screenName}) and userID ({userID}).".format( screenName=screenName, userID=userID ) if screenName: print("Fetching user: @{screenName}".format(screenName=screenName)) params = {"screen_name": screenName} else: print("Fetching user ID: {userID}".format(userID=userID)) params = {"user_id": userID} return APIConn.get_user(**params)
d41f48197c741a4a4f69dad47fc50c2f28fc30ee
30,325
def slicer(shp, idxs, var=None):
    """Obtain a list of slicers to slice the data array according to the
    selected data

    Parameters
    ----------
    shp : tuple
        Data shape
    idxs : iterable
        Indexes of the selected data (each index list is sorted in place)
    var : int, optional
        Data to be selected, in case of multidimensional sample, by default None

    Returns
    -------
    slice
        Slices of the data
    """
    # It is assumed that the first dimension is the samples
    slc = []
    # Iterate over the datasets
    for idx in idxs:
        idx.sort()
        # BUG FIX: test `var is None` instead of truthiness, so that
        # var=0 (the first variable) selects correctly.
        if var is None:
            slc.append(tuple([idx] + [slice(None)] * (len(shp) - 1)))
        else:
            slc.append(tuple([idx] + [slice(None)] * (len(shp) - 2) + [var]))
    return tuple(slc)
cbe0d3ab7d376a97206dfe899550fd5345a30df4
30,328
def finding_to_json(finding, scan_id, finding_id, detailed=False):
    """
    :param finding: The vulnerability
    :param scan_id: The scan ID
    :param finding_id: The vulnerability ID
    :param detailed: Show extra info
    :return: A dict with the finding information
    """
    summary = {'id': finding_id,
               'href': '/scans/%s/kb/%s' % (scan_id, finding_id)}
    if detailed:
        # Get all the data from w3af
        # NOTE(review): assumes finding.to_json() always provides a
        # 'response_ids' key — confirm against the finding type.
        summary.update(finding.to_json())
        # Add the hrefs to the traffic
        traffic_hrefs = []
        for response_id in summary['response_ids']:
            args = (scan_id, response_id)
            traffic_href = '/scans/%s/traffic/%s' % args
            traffic_hrefs.append(traffic_href)
        summary['traffic_hrefs'] = traffic_hrefs
    else:
        # Support findings without a URL
        url = finding.get_url().url_string if finding.get_url() else None
        summary.update({'name': finding.get_name(), 'url': url})
    return summary
46f34786510937c815e25a282606baf3e1328b99
30,329
def extract_features_in_order(feature_dict, model_features):
    """
    Returns the model features in the order the model requires them.
    """
    ordered = []
    for feature in model_features:
        ordered.append(feature_dict[feature])
    return ordered
adc067d35ae61cdd4b5bf0a4199662a6d94ce6f9
30,330
def fix_trailing_doubles(liz):
    """
    Workaround for the "problem" of a duplicated trailing comment line:
    scanning backwards and skipping empty entries, if the last two
    non-empty entries are equal, drop the earlier one in place.

    Returns 1 when a duplicate was removed, 0 otherwise.
    """
    last_nonempty = None
    for i in range(len(liz) - 1, -1, -1):
        entry = liz[i]
        if last_nonempty is None:
            # Still looking for the final non-empty entry.
            if entry:
                last_nonempty = entry
        elif entry:
            # Found the previous non-empty entry; compare once and stop.
            if entry == last_nonempty:
                del liz[i]
                return 1
            break
    return 0
aab0a289fda13fc691728d960e0ed87bb310b3c0
30,331
def _IndentString(source_string, indentation): """Indent string some number of characters.""" lines = [(indentation * ' ') + line for line in source_string.splitlines(True)] return ''.join(lines)
f85c2e18448497edcd764068ae9205ec4bbaec5d
30,335
def create_success_response(return_data):
    """ Creates a standard success response used by all REST apis. """
    count = len(return_data) if isinstance(return_data, list) else 1
    return {
        'meta': {
            'error': False,
            'count': count
        },
        'data': return_data
    }
0e1252317ebd838b03d680125b22ce1bf67c2c66
30,338
def get_wage(x):
    """extracts the hourly wage from the returned HTML; verbose because
    John sucks at regular expressions
    """
    # Text between the first '>' and the next '<', minus '$' and '/hr'.
    inner = x.split(">")[1].split("<")[0]
    cleaned = inner.replace("$", "").replace("/hr", "")
    return float(cleaned)
f47bd5a6c30a6334a04e78c29e1ce54878943ceb
30,339
def add_feature(df_features, df_addition, columnName):
    """ Add a new feature to the df_feature DataFrame (output of
    assemble_timeseries_input()).

    df_addition must be a dataframe with the same shape as the nest
    counts. The index of df_features will be used as indexer into the
    df_addition DataFrame.
    """
    # http://stackoverflow.com/a/41759638/1922650
    values = [df_addition.loc[(site_id, species), year]
              for (site_id, species, year) in df_features.index]
    return df_features.assign(**{columnName: values})
6c497e2de09871fcec0f58827cc2d21739ca8e9e
30,341
def GetMissingOwnerErrors(metrics):
    """Check that all of the metrics have owners.

    Args:
        metrics: A list of rappor metric description objects.

    Returns:
        A list of errors about metrics missing owners.
    """
    return ['Rappor metric "%s" is missing an owner.' % m['name']
            for m in metrics if not m['owners']]
97deb68f1412b371ca1055e8a71a53408d8915d1
30,342
import torch


def generate_batch(batch):
    """Collate function producing EmbeddingBag-compatible batches.

    The text entries have different lengths, so instead of padding they
    are concatenated into one flat tensor plus an offsets tensor. Passed
    as 'collate_fn' to torch.utils.data.DataLoader; it must stay a top
    level def so that it is picklable in each worker.

    Output:
        text: the text entries packed into a single concatenated tensor
            for the input of nn.EmbeddingBag.
        offsets: tensor of delimiters giving the beginning index of each
            individual sequence in the text tensor.
        label: tensor saving the labels of individual text entries.
    """
    labels = torch.tensor([label for label, _ in batch])
    sequences = [tokens for _, tokens in batch]
    lengths = [0] + [len(seq) for seq in sequences]
    offsets = torch.tensor(lengths[:-1]).cumsum(dim=0)
    return torch.cat(sequences), offsets, labels
c33bd4c78f4715ef08fc18c1eb9f78540301138b
30,343
def is_bigwig(fn):
    """checks magic number to see if we're working with a bigWig file

    Returns True for either byte order's magic; None otherwise (as the
    original implicitly did).
    """
    # BUG FIX: the file is opened in binary mode, so the magic number
    # must be compared as bytes — the old str literals could never match
    # under Python 3. Also close the handle deterministically.
    with open(fn, 'rb') as fh:
        magic = fh.read(4)
    if magic in (b'&\xfc\x8f\x88', b'\x88\x8f\xfc&'):
        return True
170bb0230a2f166c74bf04a8802177c31c25eb77
30,347
def get_converter_schema(converter: str, *args, **kwargs):
    """
    Get conveter method from converter map

    https://werkzeug.palletsprojects.com/en/0.15.x/routing/#builtin-converter

    :param converter: str: converter type
    :param args:
    :param kwargs:
    :return: return schema dict
    """
    if converter == "any":
        return {"type": "array", "items": {"type": "string", "enum": args}}
    if converter == "int":
        schema = {"type": "integer", "format": "int32"}
        for prop in ("min", "max"):
            if prop in kwargs:
                schema[f"{prop}imum"] = kwargs[prop]
        return schema
    if converter == "float":
        return {"type": "number", "format": "float"}
    if converter == "uuid":
        return {"type": "string", "format": "uuid"}
    if converter == "path":
        return {"type": "string", "format": "path"}
    if converter == "string":
        schema = {"type": "string"}
        for prop in ("length", "maxLength", "minLength"):
            if prop in kwargs:
                schema[prop] = kwargs[prop]
        return schema
    # Unknown converters fall back to a plain string schema.
    return {"type": "string"}
17106f8d8059ece332137f7b1db997dcde76bf2b
30,348
def kl_loss(mu, covariance, prior_mu=0):
    """
    KL-divergence loss using the shorthand notation in this paper:
    https://arxiv.org/pdf/1807.06358.pdf
    """
    # Equivalent closed form of the original in-place chain:
    # 0.5 * sum((mu - prior)^2 + exp(cov) - 1 - cov) over the last dim.
    centered_sq = (mu - prior_mu).pow(2)
    per_dim = centered_sq + covariance.exp() - 1 - covariance
    return 0.5 * per_dim.sum(dim=-1)
88d93235ec37df7097ae192ae03111a89ca1b435
30,349
def make_name(image):
    """Format output filename of the image."""
    # Drop the trailing 4 characters (a dot plus 3-char extension).
    full_name = image['name']
    return full_name[:-4]
ab51f714052528c7dc16cd68b80c1c9b889adaea
30,351
def to_words(content, words):
    """Convert id-encoded content back into text using vocab `words`."""
    decoded = [words[idx] for idx in content]
    return ''.join(decoded)
657264e24a2fccf49555ecbfe93205ac650ee1a0
30,352
def is_quoted(str):
    """ whether or not str is quoted """
    # Requires at least one character between matching quote marks.
    if len(str) <= 2:
        return False
    first, last = str[0], str[-1]
    return first == last and first in ("'", '"')
b079bd4a7f3ac8814250faf522d9e38718fce986
30,353
import hashlib


def get_hash_from_file(file_path, byte_output=False):
    """Taking in a path to a file as an argument, it returns the SHA-256
    hash of the file via a string (or as raw bytes when byte_output is
    True)."""
    digest = hashlib.sha256()
    with open(file_path, 'rb') as handle:
        # Stream the file in 100 kB chunks to bound memory usage.
        for chunk in iter(lambda: handle.read(100000), b''):
            digest.update(chunk)
    return digest.digest() if byte_output else digest.hexdigest()
3ebda4cc0266ea04abfd16349c83608c13e6160f
30,355
def isPub(low):
    """returns true if column is for video game publishers"""
    return "publish" in low
c23fbf5887a43c805c91b6ec83bf7a3ace59d7c6
30,356
from typing import List def _make_maze(size: int) -> List[List[int]]: """Creates a square maze of a given size where each room is connected to the four adjacent ones.""" maze: List[List[int]] = [] for row in range(0, size): thisrow = [] for col in range(0, size): adjacent = [] if row != 0: adjacent.append((size * (row-1)) + col) if row != (size - 1): adjacent.append((size * (row+1)) + col) if col != 0: adjacent.append((row * size) + col - 1) if col != size - 1: adjacent.append((row * size) + col + 1) thisrow.append(adjacent) maze += thisrow return maze
790ba95cda680ed67492d6be2ad737dfc6f1b6c4
30,357
from typing import Dict
from typing import List


def track_path_to_header(
    book_id: int, book_url: str, book_title: str, gpx_name: str
) -> List[Dict]:
    """
    Format the header breadcrumbs - refer to templates/layout.html.

    Args:
        book_id (int): Book ID based on the 'shelf' database table.
        book_url (str): Book URL based on the 'shelf' database table.
        book_title (str): Book title based on the 'shelf' database table.
        gpx_name (str): Name of the GPX file in the /tracks directory
            WITHOUT file extension.

    Returns:
        List of dicts.
    """
    book_crumb = {
        "title": book_title,
        "url": "/stories/" + str(book_id) + "/" + book_url,
    }
    # Final crumb has no URL because it is the current page.
    track_crumb = {"title": gpx_name.replace("_", " ")}
    return [{"title": "Stories", "url": "/stories"}, book_crumb, track_crumb]
28c478da73d73fdf089c169e3d8bd570cabcf58d
30,358
import math


def is_prime(n: int) -> bool:
    """
    Function to check for a prime number

    Return True if 'n' is a prime number, False otherwise. Rejects
    multiples of 2 and 3 quickly, then trial-divides by odd candidates
    up to sqrt(n).

    BUG FIX: early returns mean math.sqrt is never called with a
    negative n (the old version raised ValueError for n < 0), and the
    loop no longer runs after the answer is already known.

    :param n: integer to test
    :return: True when n is prime
    """
    if n == 2:
        return True
    if n <= 1 or n % 2 == 0 or (n % 3 == 0 and n > 3):
        return False
    for candidate in range(3, int(math.sqrt(n)) + 1, 2):
        if n % candidate == 0:
            return False
    return True
9f777729b2f0e60e47b19dfec2b49e4905c280fb
30,359
def byte_to_str(value):
    """Convert bytes to str.

    :param value: a bytes value (anything else is passed through as-is)
    :return: the decoded str
    """
    if isinstance(value, bytes):
        return bytes.decode(value)
    return value
8dd44045359bdaed0c9fd58c9db33f06b11f37ed
30,360
def decode_one_uni(text, i):
    """ decode_one implementation for unicode strings """
    code_point = ord(text[i])
    return code_point, i + 1
7a355f4a937c4999756a846b02dd77d58d5a0ec1
30,361
import os
import subprocess


def get_keyframes(video_path):
    """
    Uses an FFmpeg command to extract key frames from the specified video file.
    Saves key frame times to a file.
    ...
    Args:
        video_path: the path to the video file to extract key frames from

    Returns:
        key_frame_dir: the directory storing the extracted key frames
        key_frame_times: a list of the times associated with the key frames,
            each formatted as "minutes:seconds"
    """
    # FFmpeg scene-change score; frames whose score exceeds this are
    # selected as key frames by the "select=gt(scene,...)" filter below.
    scene_change_threshold = '0.4'
    # The metadata of the key frames including their times in seconds will be stored in this text file.
    metadata_file = "key_frame_metadata.txt"
    print("Video path before splitting:",video_path)
    # Store the actual video name without the path and the file extension.
    # NOTE(review): splitting on "\\" assumes a Windows-style path; with
    # forward-slash paths the whole path survives as the "name". Confirm callers.
    video_name = video_path.split("\\")[-1]
    video_name = video_name.split(".")[0]
    print("Video name:",video_name)
    # If the directory for storing the key frames does not exist, create it outside the video_library directory.
    key_frame_dir = video_name + '_key_frames'
    if not os.path.exists(key_frame_dir):
        os.makedirs(key_frame_dir)
    output_file = key_frame_dir + '/img%03d.jpg'
    # Run the FFmpeg command to extract video key frames and store their metadata including their times in the text file.
    # NOTE(review): assumes `ffmpeg` is on PATH; the return code is not checked.
    command = ['ffmpeg', '-i', video_path, '-filter:v', "select='gt(scene," + str(scene_change_threshold) + ")',metadata=print:file=" + metadata_file + "",'-vsync', '0', output_file]
    subprocess.run(command)
    # This array will store the time in seconds of each key frame.
    key_frame_times = []
    # Open the text file and read every line.
    with open(metadata_file, encoding='utf-8') as f:
        lines = f.readlines()
    # Get every second line as only those contain the times of key frames.
    for i in range(0, len(lines), 2):
        # Each line contains three elements: frame, pts, and pts_time.
        # Split around spaces and get pts_time which is the last element.
        split_line = lines[i].split(' ')
        pts_time = split_line[len(split_line) - 1]
        # Remove the string "pts_time:" and strip new line characters from pts_time,
        # so only the actual time in seconds will remain.
        pts_time = pts_time[len('pts_time:') :].rstrip('\n')
        # Add 0.4s to pts_time so the markers are not placed on the progress bar early.
        pts_time = float(pts_time) + 0.4
        pts_time = str(pts_time)
        print(pts_time)
        # Make a copy of pts_time
        original_pts_time = pts_time
        # Convert pts_time to this format: minutes : seconds. For example: 75 becomes 1:15
        # The video player uses this time format.
        # NOTE(review): the seconds part keeps its fractional digits and is not
        # zero-padded (e.g. "1:5.4"); confirm the player accepts this format.
        minutes = int(float(pts_time) / 60)
        seconds = float(original_pts_time) - (minutes * 60)
        converted_time = str(minutes) + ":" + str(seconds)
        key_frame_times.append(converted_time)
    print(key_frame_times)
    return key_frame_dir, key_frame_times
ec6a427e1334b54d7ec06633169de1cf1435e335
30,362
def calcFallVelocity(rho_s, D_s):
    """
    Calculates fall velocity of sediment.

    Paramters
    ---------
    rho_s : sediment density [kg/m^3]
    D_s : sediment diameter [m]

    Returns
    -------
    w - fall velocity of sediment [m/s]

    Notes
    -----
    Equation used is from Ferguson and Church [2004]. C1 and C2 values
    correspond to fits for natural grains.
    """
    C1 = 18.
    C2 = 1.0
    # Submerged specific gravity relative to water density.
    Sd = (rho_s - 999.97)/999.7
    numerator = Sd*9.81*D_s**2
    viscous_term = C1*1e-6
    turbulent_term = (0.75*C2*Sd*9.81*D_s**3)**0.5
    return numerator / (viscous_term + turbulent_term)
74f07f6e3448d36f9adf0feb196605f86d70f1bd
30,363
import torch


def get_surface_normalv2(xyz, patch_size=5):
    """
    Estimate a per-pixel surface-normal map from a point map by crossing
    finite-difference tangent vectors at two baselines and averaging.

    xyz: xyz coordinates, shape [b, h, w, 3]
    patch: [p1, p2, p3, p4, p5, p6, p7, p8, p9]
    surface_normal = [(p9-p1) x (p3-p7)] + [(p6-p4) - (p8-p2)]
    return: normal [h, w, 3, b]
    """
    b, h, w, c = xyz.shape
    half_patch = patch_size // 2
    # Zero-pad so every pixel has a full neighbourhood; border pixels
    # therefore difference against zero points and are less reliable.
    xyz_pad = torch.zeros((b, h + patch_size - 1, w + patch_size - 1, c), dtype=xyz.dtype, device=xyz.device)
    xyz_pad[:, half_patch:-half_patch, half_patch:-half_patch, :] = xyz

    # Diagonal neighbours (currently unused -- kept for reference).
    # xyz_left_top = xyz_pad[:, :h, :w, :]  # p1
    # xyz_right_bottom = xyz_pad[:, -h:, -w:, :]# p9
    # xyz_left_bottom = xyz_pad[:, -h:, :w, :]   # p7
    # xyz_right_top = xyz_pad[:, :h, -w:, :]  # p3
    # xyz_cross1 = xyz_left_top - xyz_right_bottom  # p1p9
    # xyz_cross2 = xyz_left_bottom - xyz_right_top  # p7p3

    # Wide-baseline tangents: neighbours half_patch pixels from the centre.
    xyz_left = xyz_pad[:, half_patch:half_patch + h, :w, :]  # p4
    xyz_right = xyz_pad[:, half_patch:half_patch + h, -w:, :]  # p6
    xyz_top = xyz_pad[:, :h, half_patch:half_patch + w, :]  # p2
    xyz_bottom = xyz_pad[:, -h:, half_patch:half_patch + w, :]  # p8
    xyz_horizon = xyz_left - xyz_right  # p4p6
    xyz_vertical = xyz_top - xyz_bottom  # p2p8

    # "Inner" tangents: the left/top slices sit 1 px closer to the centre.
    # NOTE(review): for the default patch_size=5 the right/bottom starts
    # (patch_size-1) coincide with the -w:/-h: slices above, so only the
    # left/top baselines actually shrink -- confirm this is intended.
    xyz_left_in = xyz_pad[:, half_patch:half_patch + h, 1:w+1, :]  # p4
    xyz_right_in = xyz_pad[:, half_patch:half_patch + h, patch_size-1:patch_size-1+w, :]  # p6
    xyz_top_in = xyz_pad[:, 1:h+1, half_patch:half_patch + w, :]  # p2
    xyz_bottom_in = xyz_pad[:, patch_size-1:patch_size-1+h, half_patch:half_patch + w, :]  # p8
    xyz_horizon_in = xyz_left_in - xyz_right_in  # p4p6
    xyz_vertical_in = xyz_top_in - xyz_bottom_in  # p2p8

    # Normals at both baselines via cross products of the tangent pairs.
    n_img_1 = torch.cross(xyz_horizon_in, xyz_vertical_in, dim=3)
    n_img_2 = torch.cross(xyz_horizon, xyz_vertical, dim=3)

    # re-orient normals consistently: flip any normal whose dot product with
    # the point itself is positive (assumes a camera-centred frame -- TODO confirm).
    orient_mask = torch.sum(n_img_1 * xyz, dim=3) > 0
    n_img_1[orient_mask] *= -1
    orient_mask = torch.sum(n_img_2 * xyz, dim=3) > 0
    n_img_2[orient_mask] *= -1

    # Normalise each baseline's normals; the epsilon guards zero-length vectors.
    n_img1_L2 = torch.sqrt(torch.sum(n_img_1 ** 2, dim=3, keepdim=True))
    n_img1_norm = n_img_1 / (n_img1_L2 + 1e-8)

    n_img2_L2 = torch.sqrt(torch.sum(n_img_2 ** 2, dim=3, keepdim=True))
    n_img2_norm = n_img_2 / (n_img2_L2 + 1e-8)

    # average 2 norms
    n_img_aver = n_img1_norm + n_img2_norm
    n_img_aver_L2 = torch.sqrt(torch.sum(n_img_aver ** 2, dim=3, keepdim=True))
    n_img_aver_norm = n_img_aver / (n_img_aver_L2 + 1e-8)

    # re-orient normals consistently (averaging can flip the sign again)
    orient_mask = torch.sum(n_img_aver_norm * xyz, dim=3) > 0
    n_img_aver_norm[orient_mask] *= -1
    n_img_aver_norm_out = n_img_aver_norm.permute((1, 2, 3, 0))  # [h, w, c, b]

    # a = torch.sum(n_img1_norm_out*n_img2_norm_out, dim=2).cpu().numpy().squeeze()
    # plt.imshow(np.abs(a), cmap='rainbow')
    # plt.show()
    return n_img_aver_norm_out
14f141579c79ba2c1876ec24d49db2f85a74ccce
30,364
def total_duration_parser(line):
    """
    Parses lines of the following form:

    Total duration: 5248.89s

    :param line: string
    :return: float containing total duration in seconds, or 0.0 when the
        line cannot be parsed
    """
    try:
        # Strip the trailing 's' unit, drop the "Total duration:" prefix,
        # and parse the remainder as a float.
        return float(line.rstrip('s')[len('Total duration:'):].strip())
    except (AttributeError, TypeError, ValueError):
        # AttributeError/TypeError: non-string input; ValueError: the
        # remainder is not a number. The original bare `except` also hid
        # unrelated bugs (e.g. KeyboardInterrupt); narrowed here.
        return 0.
30cc8da46293654b8d4001dbd708160ba208bacb
30,367
import argparse


def get_args():
    """
    Get command line arguments.

    Builds an argparse parser with four boolean flags for the data
    exploration workflow and returns it (unparsed).
    """
    parser = argparse.ArgumentParser(description="Arguments for data exploration")
    # (flag, destination attribute, help text) for each store_true option.
    flag_specs = [
        ("-m", "medinfo",
         "Process the medinfo spreadsheet and save the data to json"),
        ("-c", "chiqa",
         "Process the chiqa Data. Use with options -y or -p"),
        ("-y", "copy_files",
         "Copy files from the chiqa collection relevant for medinfo"),
        ("-p", "parse_files",
         "Parse the sections from the relevant chiqa files"),
    ]
    for flag, dest, help_text in flag_specs:
        parser.add_argument(flag, dest=dest, action="store_true", help=help_text)
    return parser
03e75df827d9bcda5be490864b92fe5e112692fd
30,368
from typing import Callable


def newton_step(f: Callable[[float], float],
                f_prime: Callable[[float], float],
                x_0: float) -> float:
    """
    Performs a single iteration of Newton's Method.

    Parameters
    ----------
    f
        The function to determine the root of.
    f_prime
        The derivative of the function to determine the root of.
    x_0
        The starting point for determining the root from.
    """
    correction = f(x_0) / f_prime(x_0)
    return x_0 - correction
b877d0a70ba8557b79f1ae99ef4e1b815d0778ff
30,369
def ratelimit(bucket_name):
    """Declare a ratelimited REST route.

    Returns a decorator wrapping an async request handler. The original
    code returned the wrong object from the inner ``decorator`` (never
    ``wrapped``), so decorated routes did not call the handler; fixed to
    return ``wrapped`` from ``decorator`` and ``decorator`` from here.
    """
    # NOTE: ratelimits here are the same as discord's
    # per-user, per-route, etc.
    # Needs more thinking.
    def decorator(handler):
        async def wrapped(endpoint, request):
            # Bucket-based throttling is not implemented yet; the wrapper
            # currently just forwards to the handler.
            return await handler(endpoint, request)
        return wrapped
    return decorator
f8730c7b2c4aca25733d55e9a87ac732eb509b39
30,370
from functools import reduce
import numpy


def combine_dempsters_rule(masses):
    """
    Implements Dempster's rule of combination.

    Works in commonality ("q") space via the Moebius transformation, where
    Dempster's rule reduces to an element-wise product. For more information
    see Kennes, R., Computational aspects of the Mobius transformation of
    graphs, Systems, Man and Cybernetics (Volume:22 , Issue: 2 ),
    http://dx.doi.org/10.1109/21.148425

    @param masses: A list of numpy arrays that all have the same length.
        Each array contains the mass distribution that a source assigns to
        the available options.
    @return: The combined masses, the conflict between the sources and the
        theta (masses that can not be assigned to any option).
    """
    # Theta of each source is whatever mass it did not commit to options.
    thetas = [1.0 - m.sum() for m in masses]
    # Commonality form: q = m + theta.
    commonalities = [m + t for m, t in zip(masses, thetas)]

    # Dempster's rule is an element-wise product in commonality space, and
    # the thetas multiply as well.
    combined_q = reduce(numpy.multiply, commonalities)
    combined_theta = reduce(numpy.multiply, thetas)

    # Convert back from commonality form to mass form.
    combined_masses = combined_q - combined_theta

    # Any remaining mass not assigned to a target or to theta is conflict;
    # clamp tiny negative values caused by rounding (e.g. -1e-10).
    combined_conflict = max(1.0 - combined_masses.sum() - combined_theta, 0.0)

    return combined_masses, combined_conflict, combined_theta
bb5fba0e186a9af1427cac5be8c8c7530353567d
30,371
import argparse


def parse_args(args=None, namespace=None):
    """Constructs an argument parser and parses the arguments. The default
    behavior is to parse the arguments from `sys.argv`. A dictionary is
    returned rather than the typical namespace produced by `argparse`.

    Help-text fixes: the --chroot text contained a literal tab ("won\\t",
    a lost apostrophe) and the --output text had a doubled space from
    adjacent string literals; both corrected.
    """
    formatter = argparse.ArgumentDefaultsHelpFormatter
    parser = argparse.ArgumentParser(formatter_class=formatter, description=(
        'Bundle ELF binary executables with all of their runtime dependencies '
        'so that they can be relocated to other systems with incompatible system '
        'libraries.'
    ))

    parser.add_argument('executables', metavar='EXECUTABLE', nargs='+', help=(
        'One or more ELF executables to include in the exodus bundle.'
    ))

    parser.add_argument('-c', '--chroot', metavar='CHROOT_PATH', default=None, help=(
        'A directory that will be treated as the root during linking. Useful for testing and '
        'bundling extracted packages that won\'t run without a chroot.'
    ))

    parser.add_argument('-a', '--add', '--additional-file', metavar='DEPENDENCY',
        action='append', default=[], help=(
        'Specifies an additional file to include in the bundle, useful for adding '
        'programatically loaded libraries and other non-library dependencies. '
        'The argument can be used more than once to include multiple files, and '
        'directories will be included recursively.'
    ))

    parser.add_argument('-d', '--detect', action='store_true', help=(
        'Attempt to autodetect direct dependencies using the system package manager. '
        'Operating system support is limited.'
    ))

    parser.add_argument('--no-symlink', metavar='FILE', action='append', default=[], help=(
        'Signifies that a file must not be symlinked to the deduplicated data directory. This '
        'is useful if a file looks for other resources based on paths relative its own '
        'location. This is enabled by default for executables.'
    ))

    parser.add_argument('-o', '--output', metavar='OUTPUT_FILE', default=None, help=(
        'The file where the bundle will be written out to. The extension depends on the '
        'output type. The "{{executables}}" and "{{extension}}" template strings can be '
        'used in the provided filename. If omitted, the output will go to stdout when '
        'it is being piped, or to "./exodus-{{executables}}-bundle.{{extension}}" otherwise.'
    ))

    parser.add_argument('-q', '--quiet', action='store_true', help=(
        'Suppress warning messages.'
    ))

    parser.add_argument('-r', '--rename', metavar='NEW_NAME', nargs='?', action='append',
        default=[], help=(
        'Renames the binary executable(s) before packaging. The order of rename tags must '
        'match the order of positional executable arguments.'
    ))

    parser.add_argument('--shell-launchers', action='store_true', help=(
        'Force the use of shell launchers instead of attempting to compile statically linked ones.'
    ))

    parser.add_argument('-t', '--tarball', action='store_true', help=(
        'Creates a tarball for manual extraction instead of an installation script. '
        'Note that this will change the output extension from ".sh" to ".tgz".'
    ))

    parser.add_argument('-v', '--verbose', action='store_true', help=(
        'Output additional informational messages.'
    ))

    return vars(parser.parse_args(args, namespace))
6c421ac37c74ed0384cc3bc84b3b9db3f0a1027c
30,373
from sys import prefix
import os


def make_filename(title, ext, dir_prefix=None):
    """Spaces/colons/slahses to underscores; adds extension given.

    Colons break filenames on Windows and slashes would create child
    folders (breaking automatic links), so both are replaced; the desired
    URL is recovered via a permalink entry in the YAML header.

    :param title: document title to convert into a file name
    :param ext: file extension without the leading dot
    :param dir_prefix: target directory; defaults to ``sys.prefix``
    """
    directory = dir_prefix or prefix
    safe_title = title.replace(" ", "_").replace(":", "_").replace("/", "_")
    return os.path.join(directory, safe_title + os.path.extsep + ext)
56d8bbea84199c0349fc91b2d70d9e88f3a3832d
30,374
def _prefix_confound_filter(prefix, all_compcor_name): """Get confound columns by prefix and acompcor mask.""" compcor_cols_filt = [] for nn in range(len(all_compcor_name)): nn_str = str(nn).zfill(2) compcor_col = f"{prefix}_comp_cor_{nn_str}" compcor_cols_filt.append(compcor_col) return compcor_cols_filt
feae338e2f0e70dd3c0311acb45e633772d9e100
30,375
def get_name(tc_code):
    """
    Get the name of a testcase.

    :param tc_code: one test case's parsed .xml (an element of the output
        of parse_test_cases, e.g. a BeautifulSoup tag)
    :return: the unique test case name
    """
    first_header = tc_code.find_all("header")[0]
    return first_header.find("tcnameunique").text
5da709fe251eafb50f03db813bc4dbfff88c2c91
30,376
import random


def random_word(histogram):
    """Returns a random word from a histogram, which represents word
    frequency for a source text.

    Uses UNIFORM DISTRIBUTION over the distinct words -- the stored
    frequencies are ignored (not a stochastic/weighted sample).

    Param: histogram(dict)
    Return: word(str)
    """
    words = list(histogram)
    # randint is inclusive on both ends, hence len - 1.
    return words[random.randint(0, len(words) - 1)]
ee77b123a6e98299ecadf0cbb8a6f12258c83b56
30,377
def utmify_email_url(url: str, campaign: str) -> str:
    """
    Append PostHog email UTM parameters to a URL.

    Example:
        {% utmify_email_url 'http://app.posthog.com' 'weekly_report' %}
        => "http://app.posthog.com?utm_source=posthog&utm_medium=email&utm_campaign=weekly_report"
    """
    # Start the query string, or extend an existing one.
    separator = "&" if "?" in url else "?"
    return f"{url}{separator}utm_source=posthog&utm_medium=email&utm_campaign={campaign}"
9ae543d38a68de5481d487b37acafe148e944c2f
30,378
def timestamp_representer(dumper, timestamp):
    """Custom represented for using Timestamp type in data_regression testing"""
    # Serialize to microsecond precision so regression files stay stable.
    formatted = timestamp.strftime("%Y-%m-%d %H:%M:%S.%f")
    return dumper.represent_data(formatted)
1aa891bc2e0b25fc5a7cf8b2f529a7195130080b
30,379
def _unpoly_target(self) -> str: """ Comma-separated string of Target selector names of the element(s) Unpoly is swapping #content_panel,#breadcrumb_bar,.item_list """ return self.META.get('HTTP_X_UP_TARGET', '')
3dfe451ee5c67426ff481b69643dc8da8e35e192
30,380
def lucas(n):
    """Return the nth term of the Lucas series (1-indexed: 2, 1, 3, 4, 7, ...)."""
    current, following = 2, 1
    for _ in range(n - 1):
        current, following = following, current + following
    return current
6d40e67ce8dd682bb26f9bd16d2dd34d3d1bf541
30,381
import sys import shutil import os def _get_drive_usage(path): """ Use Python libraries to get drive space/usage statistics. Prior to v3.3, use `os.statvfs`; on v3.3+, use the more accurate `shutil.disk_usage`. """ if sys.version_info >= (3, 3): usage = shutil.disk_usage(path) return { "total": usage.total, "used": usage.used, "free": usage.free, } else: # with os.statvfs, we need to multiple block sizes by block counts to get bytes stats = os.statvfs(path) total = stats.f_frsize * stats.f_blocks free = stats.f_frsize * stats.f_bavail return { "total": total, "free": free, "used": total - free, }
abb244eeba536fb8a76e5de02b7aed70de77f060
30,382
def gas_constant(R, mu):
    """
    Gas constant of natural gas, J/(kg*K)

    :param R: (float) Universal gas constant, J/(kmole*K)
    :param mu: (float) Molar mass of natural gas, kg/kmole
    :return: (float) Gas constant of natural gas, J/(kg*K)
    """
    specific_gas_constant = R / mu
    return specific_gas_constant
05587e637635ea6262541347966ec18e05bf6fba
30,383
def is_even(num):
    """Test whether a given number is even or not.

    Tests whether a given number is even or not using the modulo operator.

    Args:
        num (int): The number to test whether it is even

    Returns:
        bool: True if num is even, otherwise False
    """
    # The comparison already yields a bool; the former
    # `True if ... else False` wrapper was redundant.
    return num % 2 == 0
5dc2ef2c629aa642f92bd04c5e0094d10eaa7d79
30,384
def map_link_vector_components_to_node_raster(grid, data_at_link):
    """Map (x,y) vector components of data_at_link onto nodes.

    The x component at a node is the mean of the horizontal links around
    it; the y component is the mean of the vertical links.

    Parameters
    ----------
    grid : RasterModelGrid
        Grid whose link data is being mapped.
    data_at_link : array-like
        Values defined at the grid's links.

    Returns
    -------
    tuple
        ``(x, y)`` component fields at nodes.

    Examples
    --------
    >>> import numpy as np
    >>> from landlab import RasterModelGrid
    >>> rmg = RasterModelGrid((3, 4))
    >>> link_data = np.arange(rmg.number_of_links)
    >>> x, y = map_link_vector_components_to_node_raster(rmg, link_data)
    >>> x[5:7]
    array([ 7.5, 8.5])
    >>> y[5:7]
    array([ 7.5, 8.5])
    """
    x = grid.map_mean_of_horizontal_links_to_node(data_at_link)
    y = grid.map_mean_of_vertical_links_to_node(data_at_link)
    return x, y
c836159450d15d7c05a137fcdca856e1dcd35b41
30,386
def engine_options_from_config(config):
    """Return engine options derived from config object."""
    # Engine option name -> config key; keys whose config value is None
    # (or absent) are left out of the result entirely.
    option_keys = {
        'echo': 'SQLALCHEMY_ECHO',
        'pool_size': 'SQLALCHEMY_POOL_SIZE',
        'pool_timeout': 'SQLALCHEMY_POOL_TIMEOUT',
        'pool_recycle': 'SQLALCHEMY_POOL_RECYCLE',
        'max_overflow': 'SQLALCHEMY_MAX_OVERFLOW',
    }
    return {
        option: config[config_key]
        for option, config_key in option_keys.items()
        if config.get(config_key) is not None
    }
9ece915beba58b080ad330f75197c466246fdfe2
30,387
def get_path_dicts(key):
    """gets the right training data and model directory given the demo"""
    # Per-demo paths: (training data, lookup, ngrams, both, model dir).
    paths = {
        'company': (
            'data/company/company_full.json',
            'data/company/company_full_lookup.json',
            'data/company/company_full_ngrams.json',
            'data/company/company_full_both.json',
            'data/models',
        ),
    }
    td, td_lookup, td_ngrams, td_both, model_dir = paths[key]
    return td, td_lookup, td_ngrams, td_both, model_dir
1b00e3e7649c31f9d6b19bf7de93be0666ff79bb
30,388
def shift_axis(a11, a12, a22, a1, a2, a0):
    """
    Complete the square to eliminate the linear terms of a conic.

    a11*x^2 + a22*y^2 + 2a1 * x + 2a2 * y + a0 = 0
    (x, y)' = (x2, y2)' + B
    b11*x2^2 + b22*y2^2 + 2b1 * x2 + 2b2 * y2 + b0 = 0
    return [B, [b11, 0, b22, b1, b2, b0]]

    Note: a12 is accepted but never read; the output's cross-term slot is
    always 0 (the input is assumed to have no cross term).
    """
    if a11 != 0 and a22 != 0:
        # Shift both axes; both linear terms vanish.
        return [[a1 / a11, a2 / a22],
                [a11, 0, a22, 0, 0, a0 - a1 ** 2 / a11 - a2 ** 2 / a22]]
    if a11 != 0:
        if a2 == 0:
            # x-only quadratic: shift x, nothing to do for y.
            return [[a1 / a11, 0], [a11, 0, 0, 0, 0, a0 - a1 ** 2 / a11]]
        # Parabola opening along y: absorb the constant into the y shift.
        return [[a1 / a11, (a0 - a1 ** 2 / a11) / (2 * a2)],
                [a11, 0, 0, 0, a2, 0]]
    if a22 != 0:
        if a1 == 0:
            # y-only quadratic: shift y.
            return [[0, a2 / a22], [0, 0, a22, 0, 0, a0 - a2 ** 2 / a22]]
        # Parabola opening along x: absorb the constant into the x shift.
        return [[(a0 - a2 ** 2 / a22) / (2 * a1), a2 / a22],
                [0, 0, a22, a1, 0, 0]]
    # No quadratic terms at all: equation is purely linear; nothing to shift.
    return [[0, 0], [0, 0, 0, a1, a2, a0]]
858c4547553f23fddcc23268d08790b98ebbdc24
30,391
def df_variables_info(df):
    """
    This function gives information about the df: number of observations,
    variables, type and number of variables

    params: df is a dataframe from which we want the information
    return: NONE
    """
    n_observations, n_variables = df.shape
    # Number of variables
    print('Number of variables:', n_variables)
    # Number of observations
    print('Number of observations:', n_observations)
    # Summary of the type of variables in the dataframe
    print('Type of variables in the dataframe: \n', df.dtypes.value_counts(),'\n')
    return None
b766299931398b4973f4bed2a8280b55397203aa
30,392
def movingstats(series, interval):
    """Calculating stats of a moving range.

    Slides a window of length ``interval`` over ``series`` and collects
    each window's mean and standard deviation. The scan starts at index 1
    (the first element is skipped) and the last window ends one element
    before the end -- preserved from the original implementation.
    """
    means = []
    deviations = []
    for start in range(1, len(series) - interval):
        window = series[start:start + interval]
        means.append(window.mean())
        deviations.append(window.std())
    return (means, deviations)
746000a011934b6052c57ca4ba63daa148ce8ffe
30,393
import os
import stat


def write_file(path, blob):
    """Writes a blob into a file and returns the number of bytes written.

    Files originally installed from CIPD packages are read-only, so the
    target is made user-writable before overwriting. Any OSError during
    that step (typically: the file doesn't exist yet) is ignored.
    """
    try:
        current_mode = os.stat(path).st_mode
        os.chmod(path, current_mode | stat.S_IWUSR)
    except OSError:
        pass  # doesn't exist yet probably
    with open(path, 'wb') as f:
        return f.write(blob)
92e7ec04b6bdbf2e1ccae1c7c6b4ac3f46195df7
30,394
def _delete_duplicates(l, keep_last): """Delete duplicates from a sequence, keeping the first or last.""" seen=set() result=[] if keep_last: # reverse in & out, then keep first l.reverse() for i in l: try: if i not in seen: result.append(i) seen.add(i) except TypeError: # probably unhashable. Just keep it. result.append(i) if keep_last: result.reverse() return result
fa3d8bc3ef32899bcfedcff1376e3ecb75b5600d
30,395
def calculate_final_position_of_sliding_window(num_seeds, maximum_rank=500):
    """
    Calculate the final position where the sliding window will start its
    calculations.

    The right edge of the last interval satisfies
    ``i + num_seeds / 2 = last_rank``, so the final position is
    ``last_rank - num_seeds / 2`` (truncated).

    maximum_rank = The maximum number of positions that we want to include
    in the calculations
    """
    half_window = int(float(num_seeds) / float(2))
    return maximum_rank - half_window
6b394429536ce6f08b59ee475015b440d573241b
30,396
def disable(pihole, time=None) -> dict:
    """Disable Pi-hole protection, optionally for a limited time.

    The previous version issued the same request from two duplicated
    return statements; the parameters are now built once and a single
    request is sent.

    :param pihole: client exposing ``url``, ``token`` and a requests
        ``session``
    :param time: seconds to stay disabled; falsy/None disables permanently
        (the API's "disable": 0)
    :return: decoded JSON response from the Pi-hole API
    """
    _disable_url = pihole.url + "api.php"
    _params = {"disable": time if time else 0, "token": pihole.token}
    return pihole.session.get(_disable_url, params=_params).json()
a605d2350d308633a20a9e4160c93cd4859fc588
30,397
def replace(val, lst):
    """ Replace patterns within `val`.

    Each (pattern, replacement) pair in ``lst`` is applied in order, so
    later pairs see the result of earlier substitutions.
    """
    result = val
    for pattern, replacement in lst:
        result = result.replace(pattern, replacement)
    return result
7025918aab2d5a3cc6a588d1d564b6a1de3aadf1
30,398
def rindex(list, value):
    """Find the last-occurring index of `value` in `list`.

    Returns -1 when the value is absent. The previous scan used
    ``range(len(list) - 1, 0, -1)``, which stops before index 0, so a
    value appearing only as the first element was reported missing; the
    stop is now -1 so index 0 is checked too.
    """
    for i in range(len(list) - 1, -1, -1):
        if list[i] == value:
            return i
    return -1
f4b91f3d8ae531f610806c68054b1075c73b0dcf
30,399