import random


def add_residential_storage(glmCaseDict, config_file, use_flags, last_key=0):
    """
    This function appends residential battery storage to a feeder.

    Inputs
        glmCaseDict - dictionary containing the full feeder
        config_file - dictionary that contains the configurations of the feeder
        use_flags - dictionary that contains the use flags
        last_key - last object key

    Outputs
        glmCaseDict - dictionary containing the full feeder
    """
    if use_flags['use_residential_storage'] != 0 and use_flags['use_homes'] != 0:
        # Check if last_key is already in the glm dictionary
        def unused_key(key):
            if key in glmCaseDict:
                while key in glmCaseDict:
                    key += 1
            return key

        # let's determine the next available key
        last_key = unused_key(last_key)

        # determine the total number of homes in the feeder
        control_dict = []
        for x in glmCaseDict:
            if 'object' in glmCaseDict[x] and glmCaseDict[x]['object'] == 'house' and glmCaseDict[x]['groupid'] == 'Residential':
                control_dict.append([glmCaseDict[x]['name'], glmCaseDict[x]['parent']])

        # determine how many EBs to implement
        total_num_EBs = round(float(config_file["perc_EB"]) * len(control_dict))

        # adjust the house list with the appropriate number to be implemented at random
        control_dict = random.sample(control_dict, int(total_num_EBs))

        for controlObject in control_dict:
            # random variables for each EB
            batterySOC = 0.7 + 0.2 * random.random()

            # adding the external controller
            glmCaseDict[last_key] = {
                'object': 'inverter',
                'parent': '{:s}'.format(controlObject[1]),
                'name': '{:s}_eb_inverter'.format(controlObject[0]),
                'inverter_type': 'FOUR_QUADRANT',  # must be FOUR_QUADRANT to use the load-following control scheme
                'generator_status': 'ONLINE',  # set the status of the inverter to online
                'charge_lockout_time': '30',  # lockout time for charging
                'discharge_lockout_time': '30',  # lockout time for discharging
                'four_quadrant_control_mode': 'LOAD_FOLLOWING',  # the only mode that works with the battery object
                'sense_object': '{:s}'.format(controlObject[1]),  # must be a meter, triplex_meter, or transformer
                'rated_power': '3000.0',  # the per-phase power output rating of the inverter in VA
                'inverter_efficiency': '0.95',
                'charge_on_threshold': '1.3 kW',  # when the load at the sense_object drops below this value the inverter starts to charge the battery
                'charge_off_threshold': '2.7 kW',  # when the battery is charging and the load at the sense_object rises above this value the inverter stops charging the battery
                'discharge_off_threshold': '3.0 kW',  # when the battery is discharging and the load at the sense_object drops below this value the inverter stops discharging the battery
                'discharge_on_threshold': '4.5 kW',  # when the load at the sense_object rises above this value the inverter starts to discharge the battery
                'max_discharge_rate': '1 kW',  # the maximum power output to demand from the battery when discharging
                'max_charge_rate': '1 kW'}  # the maximum power input to the battery when charging
            last_key = unused_key(last_key)

            glmCaseDict[last_key] = {
                'object': 'battery',
                'groupid': 'residential_storage',
                'parent': '{:s}_eb_inverter'.format(controlObject[0]),
                'name': '{:s}_eb_battery'.format(controlObject[0]),
                'use_internal_battery_model': 'true',
                'battery_type': 'LI_ION',
                'state_of_charge': '{:.2f}'.format(batterySOC),
                'generator_mode': 'SUPPLY_DRIVEN',
                'rfb_size': 'HOUSEHOLD'}
            last_key = unused_key(last_key)
    else:
        if use_flags['use_residential_storage'] != 0:
            print("You asked for residential battery storage, but you did not implement residential houses, so this setting was ignored")
    return glmCaseDict
a5d2f15975e225533327bad3d00fe61f76b33cd2
16,770
def matrix_wrapper(input_tuple):
    """ Parallel wrapper for matrix formation.

    This wrapper is used whenever a pmap/map-type function is used to make
    matrices for each cell in parallel.

    Parameters
    ----------
    input_tuple : Tuple
        Index 0 is the chain (depletion_chain.DepletionChain), index 1 is the
        reaction rate array (reaction_rates.ReactionRates), index 2 is the
        cell_id.

    Returns
    -------
    scipy.sparse.csr_matrix
        The matrix for this reaction rate.
    """
    return input_tuple[0].form_matrix(input_tuple[1], input_tuple[2])
4a594e6fda9b4916f644a422d1b51969b86fb44e
16,771
def recode(operand, mode):
    """Transform a boolean text element to the relevant type for working
    with these types or for printing them.

    Arguments:
        operand(string): the operand (text) to transform to the requested form
        mode(int): flag for the type of recoding of the operand

    Return:
        int/bool: recoded form of the operand
    """
    # Note: the original used `in ('false')`, which tests substring membership
    # in the string "false"; one-element tuples are used here instead.
    if mode == 0:
        if operand.lower() in ('false',):
            return 0
        elif operand.lower() in ('true',):
            return 1
    elif mode == 1:
        if operand.lower() in ('false',):
            return False
        elif operand.lower() in ('true',):
            return True
1c07921c8a54a3b070a375b6f3d6afb15b9fb1d2
16,772
def short_msg(msg, chars=75):
    """ Truncates the message to {chars} characters and appends '..' at the end """
    return (str(msg)[:chars] + '..') if len(str(msg)) > chars else str(msg)
f807c4e2a032bb05ba5736e955af7c03653bcf80
16,773
def _is_misindexed(T):
    """
    :param T: tree
    :return: True if indices of T are not in the right order and not in
        [0, len(T)), False otherwise
    """
    for x in range(1, len(T) + 1):
        i, _, _ = T[x - 1]
        if not i == x:
            return True
    return False
49ee5696c0558a1fb7f94c24710543aee03d20c5
16,775
import torch


def accumarray(I, V, size=None, default_value=0):
    """
    Returns a Tensor by accumulating elements of tensor V using the subscripts I

    The output tensor number of dimensions is/should be equal to the number of
    subscripts rows plus the values tensor number of dimensions minus one.

    Parameters
    ----------
    I : LongTensor
        the (N,) subscripts tensor
    V : Tensor
        the (M,F,) values tensor
    size : tuple (optional)
        the size of the output tensor. If None it will be automatically
        inferred (default is None)
    default_value : float (optional)
        the default value of the output tensor (default is 0)

    Returns
    -------
    Tensor
        the accumulated tensor
    """
    if size is None:
        size = list(V.size())
        size[0] = torch.max(I).item() + 1
    return default_value + torch.zeros(size, dtype=V.dtype, device=V.device).scatter_add_(0, I.view(-1, 1).expand_as(V), V)
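A quick sanity check for accumarray (illustrative values): rows of V are summed into the bins named by I.

import torch
I = torch.tensor([0, 0, 1])
V = torch.tensor([[1., 2.], [3., 4.], [5., 6.]])
print(accumarray(I, V))  # tensor([[4., 6.], [5., 6.]])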
5e5acb0490f305a4498825260d5852a2fd15ea90
16,776
def parse(html, css):
    """
    inp: SMD string to parse
    returns: tuple of parsed html string, parsed css string
    """
    return html, css
826a3966613869533b49e5e64bdd011eb74742c9
16,777
def _AsInt(x):
    """Converts text to int.

    Args:
        x: input value.

    Returns:
        Integer value of x, or None.
    """
    try:
        i = int(x)
        return i
    except (ValueError, TypeError):
        return None
393b88a0cc317b34fee1c0748e508ddac9ce5534
16,778
def coerce_entity_dict(v):
    """Coerce entity ID strings to a dictionary with key "entity"."""
    if isinstance(v, str):
        return {"entity": v}
    return v
6f8265d08bde871b9379fb693cfb385bff0783ce
16,779
import sys


def frame(back=2):
    """Returns a frame"""
    return sys._getframe(back)
e28b32058292b515afba636a229a3f4b00d29e58
16,780
def make_rect(width, height):
    """ Makes a rectangle on the x-y plane to be drawn with GL_TRIANGLE_FAN """
    return [[0, 0, 0], [0, height, 0], [width, height, 0], [width, 0, 0]]
b09c9d7ffa20b2aea4bc58f05eb6a44970567831
16,783
def parse_content(content):
    """
    Removes the starting text and ¶.
    It removes the starting text from the content because it contains the
    title of that content, which is redundant here.
    """
    content = content.replace('¶', '').strip()
    # removing the starting text of each
    content = content.split('\n')
    if len(content) > 1:  # there were \n
        content = content[1:]
    # converting newlines to ". "
    content = '. '.join([text.strip().rstrip('.') for text in content])
    return content
da1237e5becaceccb872b870be70e19ab4b06c7d
16,784
def _digits_of_n(n, b):
    """
    Return the list of the digits in the base 'b' representation of n,
    from LSB to MSB.

    This helper function is used by modexp_lr_k_ary and was implemented by
    Eli Bendersky.
    http://eli.thegreenplace.net/2009/03/28/efficient-modular-exponentiation-algorithms/

    :param n: integer
    :param b: base
    :return: list of digits of n in base b
    """
    digits = []
    while n:
        digits.append(int(n % b))
        n //= b  # floor division; plain '/' would produce floats in Python 3 and break the loop
    return digits
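A quick check: 10 in base 2 is 1010, returned least-significant digit first.

print(_digits_of_n(10, 2))  # [0, 1, 0, 1]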
1f1fc93a11cf62f9f8c0e497b344941726df7f23
16,788
from typing import List


def cut_on_stop(text: str, stop: List[str]) -> str:
    """Cuts a text at the first occurrence of any stop sequence.

    :param text: Text to cut.
    :type text: str
    :param stop: List of stop sequences.
    :type stop: List[str]
    :return: Cut text.
    :rtype: str
    """
    items = [text]
    for _stop in stop:
        _items = []
        for item in items:
            _items.extend(item.split(_stop))
        items = _items
    return items[0]
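A usage sketch: everything from the earliest stop sequence onward is dropped.

print(cut_on_stop("Hello world! END More text", ["END", "!"]))  # 'Hello world'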
b25d4c4172b171ea126dfaa77203691c935001ac
16,789
def datetime_to_str(time):
    """convert python datetime object to a
    {hour}:{min}:{second}:{millisecond} string format
    """
    return '{hour}:{min}:{second}:{millisecond}'.format(
        hour=time.hour,
        min=time.minute,
        second=time.second,
        millisecond=str(int(round(time.microsecond / 1000.0))),
    )
8d7c5a7b08c32718cb5e284b30ee9cd57b3c2e2e
16,790
def extract_header_spki_hash(cert):
    """
    Extract the sha256 hash of the public key in the header, for
    cross-checking.
    """
    line = [ll for ll in cert.splitlines() if ll.startswith('# SHA256 Fingerprint: ')][0]
    return line.replace('# SHA256 Fingerprint: ', '').replace(':', '').lower()
7cd9f38855be4877e14761ab87994df79d251e4e
16,792
def backend_listener_url(testconfig):
    """
    Returns the url of the backend listener
    """
    return f'{testconfig["threescale"]["backend_internal_api"]["route"]["spec"]["port"]["targetPort"]}' \
           f'://{testconfig["threescale"]["backend_internal_api"]["route"]["spec"]["host"]}'
98da2a6ea0d421d3a904858573d5d3d4656db9f9
16,793
import imp
import sys


def pasta_is_frozen():
    """Will return True if PASTA is frozen.
    """
    return (
        hasattr(sys, "frozen")        # new py2exe
        or hasattr(sys, "importers")  # old py2exe
        or imp.is_frozen("__main__")  # tools/freeze
    )
defa8abd76fab9cff00da9b9b8d26ce8964c69eb
16,794
import random


def birthday(n):
    """Returns true if a random list of ints from 1 to 365 has a duplicate"""
    birthdays = []
    for i in range(n):
        birthdays.append(random.randint(1, 365))
    birthdays.sort()
    for i in range(n - 1):
        if birthdays[i] == birthdays[i + 1]:
            return True
    return False
aeb882fc0c329196393b03e14345ec0af49ed8f0
16,795
import json
import ast


def flatten_json_1_level(event, field_name, field_name_underscore, dump_to_string):
    """
    Flattens a JSON field 1 level. This function is used in flatten JSON

    :param event: A dictionary
    :param field_name: The field name to flatten
    :param field_name_underscore: The field name with an underscore appended
    :param dump_to_string: If true any remaining dictionaries will be converted to a string with json.dumps
    :return: An event with the field flattened

    Examples:
        .. code-block:: python

            # Example #1
            event = {'my_field': "{\"a\": None, \"b\": 2}"}
            event = flatten_json_1_level(event=event,
                                         field_name='my_field',
                                         field_name_underscore='my_field_',
                                         dump_to_string=True)
            # event is now {'my_field_a': None, 'my_field_b': 2}
    """
    # Load strings to JSON when possible, otherwise return the event
    if type(event[field_name]) is not dict:
        try:
            event[field_name] = json.loads(event[field_name])
        except:
            try:
                event[field_name] = ast.literal_eval(event[field_name])
            except:
                return event
    # iterate through the dictionary and flatten a single level
    try:
        for k, v in event[field_name].items():
            if type(v) is dict and dump_to_string:
                event[field_name_underscore + k.lower()] = json.dumps(v)
            else:
                event[field_name_underscore + k.lower()] = v
        del event[field_name]
    except:
        return event
    return event
348d8b9b8fbd34577b0e1aa2537fd6887f48bd47
16,796
import re


def scrub_team(team):
    """Return a valid team name based on the user input.
    If there is no valid team name, return nothing."""
    string = team.lower()
    string = string.strip()
    # if it has more than one ship symbol we are not touching this belunkus
    # i don't think this is necessary for saso but
    symbolcount = 0
    for x in ['<3<', '<3[^<]', '<>', 'c3<', 'o8<']:
        if re.search(x, string):
            symbolcount = symbolcount + 1
    if symbolcount > 1:
        return string
    if string == '':
        return 0
    elif re.search('/', string):
        namelist = string.split('/')
        shipsymbol = '/'
    elif re.search('<3<', string):
        namelist = string.split('<3<')
        shipsymbol = '<3<'
    elif re.search('<3', string):
        namelist = string.split('<3')
        shipsymbol = '<3'
    elif re.search('<>', string):
        namelist = string.split('<>')
        shipsymbol = '<>'
    elif re.search('c3<', string):
        namelist = string.split('c3<')
        shipsymbol = 'c3<'
    elif re.search('o8<', string):
        namelist = string.split('o8<')
        shipsymbol = 'o8<'  # was 'c3<' in the original, an apparent copy-paste slip
    elif re.search('sports', string):
        return 'sports'
    elif re.search('grandstand', string):
        # grandstand won't show up, because there would be a ship symbol
        # unless you ship just... grandstands
        # no ship, just grandstands
        # in which case THE CODE CAN'T EVEN HANDLE YOU RIGHT NOW
        return 'grandstand'
    else:
        # then you have some kind of theme team or something, whatever
        return string
    newlist = []
    newstring = ''
    for name in namelist:
        name = name.strip()
        newlist.append(name)
    newlist.sort()
    for x in range(0, (len(newlist) - 1)):
        newstring = newstring + newlist[x] + shipsymbol
    newstring = newstring + newlist[-1]
    return newstring
4525ad7667f1482e9e3c117e4de4edc56ad64f63
16,797
def _create_imports():
    """
    Create the imports required by the module

    :return: list containing strings for module imports
    """
    detect_imports = ['import unittest']
    detect_imports.append('from ThenWhatTree import ThenWhatTreeNode')
    return detect_imports
bc14545df36abd11b50f0c8e3f53bb92bbf04ce4
16,798
def size_GB(xr_object):
    """
    How many GB (or GiB) is your xarray object?

    // Requires an xarray object

    // Returns:
    * equivalent GB (GBytes) - 10^9 conversion
    * equivalent GiB (GiBytes) - 2^30 conversion

    < Thomas Moore - thomas.moore@csiro.au - 10102018 >
    """
    bytes = xr_object.nbytes
    Ten2the9 = 10**9
    Two2the30 = 2**30
    GBytes = bytes / Ten2the9
    GiBytes = bytes / Two2the30
    # print out results
    print(xr_object.name, "is", GBytes, "GB", 'which is', GiBytes, "GiB")
    return GBytes, GiBytes
98c07935e02ecd47a4e960f942fca30eb049b9ce
16,799
def clean_intent_labels(labels):
    """Get rid of `None` intents. sklearn metrics do not support them."""
    return [l if l is not None else "" for l in labels]
d1681ac88f3454c33887511aa100bc50a48c8ca2
16,800
import shlex


def shlex_quote(string: str) -> str:
    """Simple wrapper for shlex.quote"""
    return shlex.quote(string)
d067d5aaaa351a4345d2fb0f63503f0b6ec46860
16,801
def should_keep_road(road, road_shp, record_buffers_index):
    """Returns true if road should be considered for segmentation

    :param road: Dictionary representation of the road (with properties)
    :param road_shp: Shapely representation of the road
    :param record_buffers_index: RTree index of the record_buffers
    """
    # If the road has no nearby records, then we can discard it early on.
    # This provides a major optimization since the majority of roads don't
    # have recorded accidents.
    if not len(list(record_buffers_index.intersection(road_shp.bounds))):
        return False

    if ('highway' in road['properties']
            and road['properties']['highway'] is not None
            and road['properties']['highway'] != 'path'
            and road['properties']['highway'] != 'footway'):
        return True

    # We're only interested in non-bridge, non-tunnel highways.
    # 'class' is optional, so only consider it when it's available.
    if ('class' not in road['properties']
            or (road['properties']['class'] == 'highway'
                and road['properties']['bridge'] == 0
                and road['properties']['tunnel'] == 0)):
        return True

    return False
3deffa4c4f52759fbe38afa6597faf75a8a7284e
16,802
def _is_call(call, func):
    """
    Return whether the first argument is a function call of the second.
    """
    return call.startswith(func + "(") and call.endswith(")")
9c60b3f5ba29e41c1ea91e2d35a08c49a76444ea
16,803
def entity_dict(data):
    """
    YNAB structures things as arrays rather than dicts. Convert to a dict
    to make looking things up by entityId easier.
    """
    r = {}
    for d in data:
        r[d['entityId']] = d
    return r
f3ad3e6a27c0368f7b0142648c259047dc159968
16,804
def discount_factor(base_price):
    """Returns the discount factor based on price"""
    if base_price > 1000:
        return 0.95
    else:
        return 0.98
2ae66ec71003bf7773d245b8f36979cd80a728f6
16,805
import requests

# The original source did `import token`, which pulls in Python's stdlib
# `token` module rather than an API key; its repr would be interpolated into
# the Authorization header. A secret key string is assumed here instead.
token = "FLUTTERWAVE_SECRET_KEY"  # placeholder, supply a real secret key


def verify_card_details(card_bin: str) -> dict:
    """
    Returns a python dict with the info from querying the results.

    :param card_bin: str
    :return: dict
    """
    url = f"https://api.flutterwave.com/v3/card-bins/{card_bin}"
    payload = {}
    headers = {
        'Authorization': f'Bearer {token}'
    }
    response = requests.request("GET", url, headers=headers, data=payload)
    return dict(response.json())
5e16846ef162c8fac5dea53f8c28c226f5ca2a9c
16,806
def EnsureEnabledFalseIsShown(cv_config):
    """Ensures that "enabled" is shown when printing ContinuousValidationConfig.

    Explicitly sets ContinuousValidationConfig.enforcementPolicyConfig.enabled
    to False when it's unset, so the field is printed as "enabled: false",
    instead of omitting the "enabled" key when CV is not enabled.

    Args:
        cv_config: A ContinuousValidationConfig.

    Returns:
        The modified cv_config.
    """
    if (not cv_config.enforcementPolicyConfig
            or not cv_config.enforcementPolicyConfig.enabled):
        cv_config.enforcementPolicyConfig.enabled = False
    return cv_config
dc6d535b074621e06cb8d36a9a6a03cb81765a16
16,807
def transposition_num(num):
    """
    transposition axis(y) number. 0 => 8, 1 => 7, ..., 8 => 0
    """
    return (4 - num) + 4
fa7118f655026d4773cea1d5f387789019a72f75
16,809
import numpy


def treat_prolines_in_eigenvectors(master_seq, old_eigenvectors):
    """
    Whenever a proline is encountered in the sequence, only the psi value of
    the eigenvector is used, as in our model prolines do not have phi
    movement. We have to take into account that the number of angles is
    (r*2)-1 (the first residue only has a psi angle).
    """
    old_eigenvectors = old_eigenvectors.T
    new_eigenvectors = []
    for i, res in enumerate(master_seq):
        if i == 0:
            # only psi if in first position
            new_eigenvectors.append(old_eigenvectors[0])
        else:
            offset = i * 2
            if res == "PRO":
                # only psi
                new_eigenvectors.append(old_eigenvectors[offset])
            else:
                # phi
                new_eigenvectors.append(old_eigenvectors[offset - 1])
                # psi
                new_eigenvectors.append(old_eigenvectors[offset])
    return numpy.array(new_eigenvectors).T
6cf014620c37b96b04832e73a4228b5c029ed45d
16,810
from typing import Mapping


def env_validator(passed_kwargs, merged_kwargs):
    """
    A validator to check that env is a dictionary and that all environment
    variable keys and values are strings. Otherwise, we would exit with a
    confusing exit code 255.
    """
    invalid = []
    env = passed_kwargs.get("env", None)
    if env is None:
        return invalid
    if not isinstance(env, Mapping):
        invalid.append(("env", "env must be dict-like. Got {!r}".format(env)))
        return invalid
    for k, v in passed_kwargs["env"].items():
        if not isinstance(k, str):
            invalid.append(("env", "env key {!r} must be a str".format(k)))
        if not isinstance(v, str):
            invalid.append(("env", "value {!r} of env key {!r} must be a str".format(v, k)))
    return invalid
555c01d4c91a9750f102ca4d3ea17551a4516100
16,814
import itertools


def xor(one: bytes, two: bytes) -> bytes:
    """XOR, re-cycling two if len(one) > len(two)."""
    assert len(one) >= len(two)
    return bytes([
        a ^ b
        for a, b in zip(one, itertools.cycle(two))
    ])
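A round-trip sketch: because XOR is its own inverse, applying the same repeating key twice recovers the input.

ct = xor(b"attack", b"key")
assert xor(ct, b"key") == b"attack"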
c308fe0fea62def18fcce7a00b082cd2a959bf38
16,815
def parse_endometrium_mutations(csv_path):
    """Parses mutation rates from the endometrium raw data."""
    data = {}
    candidates = []
    with csv_path.open() as csv_file:
        header = next(csv_file)  # skip the header line
        for line in csv_file:
            fields = line.split(";")
            tumor_id = fields[0]
            candidate = fields[1]
            print(tumor_id, fields[32], fields[33], fields[34])
            m2 = float(fields[32])
            m1 = float(fields[33])
            wt = float(fields[34])
            if not (tumor_id in data):
                data[tumor_id] = {}
            data[tumor_id][candidate] = {
                "wt": wt,
                "m1": m1,
                "m2": m2
            }
            if not (candidate in candidates):
                candidates.append(candidate)
    m1 = {}
    m2 = {}
    for key in data.keys():
        for candidate in candidates:
            if not (candidate + "_m1" in m1):
                m1[candidate + "_m1"] = 0.0
                m2[candidate + "_m2"] = 0.0
            if candidate in data[key]:
                m1[candidate + "_m1"] += data[key][candidate]["m1"]
                m2[candidate + "_m2"] += data[key][candidate]["m2"]
    for candidate in candidates:
        m1[candidate + "_m1"] /= len(data.keys())
        m2[candidate + "_m2"] /= len(data.keys())
    return {**m1, **m2}
ac2e5ab1be1447bb5f4c7bcd4562d1d559675229
16,816
def find_node_name(model, name):
    """
    Finds a node by its name.

    :param model: onnx graph
    :param name: node name
    :return: node pointer
    """
    if not hasattr(model, "graph"):
        raise TypeError(  # pragma: no cover
            "Parameter model is not an ONNX model but "
            "{}".format(type(model)))
    for node in model.graph.node:
        if node.name == name:
            return node
    return None
9dc3a308f5134236b12bf79bc76b0d09fc41458d
16,817
def get_fx_filenames(config, fx_var):
    """Extract fx file names."""
    areacello_fxdataset = {}
    for _, value in config['input_data'].items():
        if value['short_name'] == fx_var:
            print(value['filename'])
            areacello_fxdataset[value['dataset']] = value['filename']
    return areacello_fxdataset
45de1c708d3a62e30ae100a334d4edd72bb32a11
16,819
import difflib


def text_compare(text1, text2, output_file):
    """
    Compares two strings and if they match returns True,
    else writes the difference to the output_file.
    """
    if text1 == text2:
        return True
    diff = list(difflib.Differ().compare(text1.split(), text2.split()))
    te = open(output_file, 'w')
    for line in diff:
        te.write(line + "\n")
    te.close()
    return False
8cad72b9fcf7f213cdd9c337d86c89c664fdc606
16,821
import jinja2
import yaml


def parse(filename):
    """Parse a configuration file.

    Parameters
    ----------
    filename : str
        The config file to parse. Should be YAML formatted.

    Returns
    -------
    config : dict
        The raw config file as a dictionary.
    """
    with open(filename, "r") as fp:
        config_str = jinja2.Template(fp.read()).render()
    config = yaml.load(config_str, Loader=yaml.Loader)
    return config
055bcca59e2c9d3adad2ca9bdc527fc849ef2290
16,822
def _clean_input(id):
    """Cleans the char id"""
    if type(id) is int:
        id = abs(id)
    return id
586d6b09b98e69b13ec4ae1c3b23ec096afc5e11
16,825
def tag_sibling_ordinal(tag):
    """
    Given a BeautifulSoup tag, count the same tags in its siblings to get a
    sibling "local" ordinal value. This is useful in counting child figures
    within a fig-group, for example.
    """
    return len(tag.find_previous_siblings(tag.name)) + 1
ca3f764c7046ac65e99f6a074145b6afc11d2b2d
16,826
def findStr(string, subStr, findCnt):
    """Find the position of the findCnt-th occurrence of subStr in string."""
    listStr = string.split(subStr, findCnt)
    if len(listStr) <= findCnt:
        return -1
    return len(string) - len(listStr[-1]) - len(subStr)
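A quick check: the second 'ab' in 'ab-ab-ab' starts at index 3.

print(findStr('ab-ab-ab', 'ab', 2))  # 3
print(findStr('ab-ab-ab', 'ab', 4))  # -1, fewer than 4 occurrences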
3869ab02def11edfafa2df8bf06a4d2c48433882
16,827
import os
import subprocess
import random


def get_dump_path(main_dump_path, exp_name):
    """
    Create a directory to store the experiment.
    """
    assert len(exp_name) > 0

    # create the sweep path if it does not exist
    if not os.path.isdir(main_dump_path):
        subprocess.Popen("mkdir %s" % main_dump_path, shell=True).wait()
    sweep_path = os.path.join(main_dump_path, exp_name)
    if not os.path.exists(sweep_path):
        subprocess.Popen("mkdir %s" % sweep_path, shell=True).wait()

    # randomly generate an experiment ID
    chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
    while True:
        folder_name = ''.join(random.choice(chars) for _ in range(10))
        dump_path = os.path.join(sweep_path, folder_name)
        if not os.path.isdir(dump_path):
            break

    # create the dump folder
    if not os.path.isdir(dump_path):
        subprocess.Popen("mkdir %s" % dump_path, shell=True).wait()
    return dump_path
48f6c30b111df6a7bd28c84e30ec3929474d6f54
16,828
import copy


def n_max_elements(list1, N):
    """ Function to compute the N highest numbers of a list """
    n_list1 = copy.deepcopy(list1)
    final_list = []
    for i in range(0, N):
        max1 = 0
        for j in range(len(n_list1)):
            if n_list1[j] > max1:
                max1 = n_list1[j]
        n_list1.remove(max1)
        final_list.append(max1)
    return final_list
8361994794efe5a3f34723f667c3b83fe75a388e
16,829
def _find_file(searched_file_name, rfiles):
    """Search for a filename in an array of {fname: fcontent} dicts"""
    for rfile in rfiles:
        if searched_file_name in rfile:  # dict.has_key() was removed in Python 3
            return rfile
    return None
a708960ef0222f4fd964b0078297c330a9ffc32a
16,830
from typing import Any


def make_safe(value: Any) -> str:
    """
    Transform an arbitrary value into a string

    Parameters
    ----------
    value: Any
        Value to make safe

    Returns
    -------
    str
        Safe value
    """
    if isinstance(value, bool):
        return str(value).lower()
    return str(value)
4b342105d26458ddffd20712c777c5bc8e221c81
16,831
def smooth_vectors(vectors, strength, iterations):
    """
    Smooths the vectors iteratively, with the given number of iterations and
    strength per iteration.

    Parameters
    ----------
    vectors: list, :class: 'compas.geometry.Vector'
    strength: float
    iterations: int

    Returns
    ----------
    list, :class: 'compas.geometry.Vector3d'
        The smoothed vectors
    """
    for _ in range(iterations):
        for i, n in enumerate(vectors):
            if 0 < i < len(vectors) - 1:
                neighbors_average = (vectors[i - 1] + vectors[i + 1]) * 0.5
            else:
                neighbors_average = n
            vectors[i] = n * (1 - strength) + neighbors_average * strength
    return vectors
2a8dd922e5f10d67bcc1285eddbb6e53f43177c9
16,832
import time


def secs2iso(t, fmt='%Y-%m-%dT%H:%M:%SZ'):
    """Converts time as seconds-from-epoch to ISO (RFC3339_) format

    .. _RFC3339: http://www.ietf.org/rfc/rfc3339.txt
    """
    # The trailing 'Z' in the default format denotes UTC, so convert with
    # gmtime rather than localtime.
    return time.strftime(fmt, time.gmtime(t))
7c6f1f838d297f0b89e66d65c0cf4a0d854e5412
16,833
def sigma_func(d, freq_i):
    """
    Compute the sigma_fwhm based on the limits of resolution:
    the Rayleigh criterion.
    """
    lambda_ = (3.0 * 10**8) / freq_i  # frequency in Hertz
    sigma = 0.44 * (lambda_ / d)
    return sigma
2e9a6ebc8c5cdf1d33780df1e9065dd6bfc5e63d
16,834
import platform


def autodetect():
    """
    Returns
    -------
    bool
        True if current platform matches, otherwise False
    """
    n = platform.node()
    # Login node
    if n == "vmschulz8":
        return True
    if len(n) == 5:
        if n[:3] in ['odr', 'rpi']:
            return True
    return False
09eccebf4871db4040050b7c4e0f262627e73d5e
16,836
from pathlib import Path


def scrape_names(path: Path) -> list[str]:
    """Scrape names into a list and sort them."""
    with path.open() as h:
        return sorted(eval(next(h)))
d1b001a911abf7b81602b61b4b8a185ad257fe78
16,837
import numpy


def superpixel_map(pixel_img: numpy.ndarray) -> numpy.ndarray:
    """
    Map a superpixel (patch) image to an array with (1D) coordinates.

    Parameters
    ----------
    pixel_img : 2d numpy.ndarray (order='C' !)
        Image with superpixel index in each pixel

    Returns
    -------
    superpixel_map : 2d numpy.ndarray
        Array which maps from superpixel index (0-based) to 1D coordinates in
        the original (flattened) image space, such that
        superpixel_map[superpixel_idx, 0:superpixel_map[superpixel_idx,-1]]
        is the list of (flattened) pixels in the image space belonging to
        superpixel_idx.
    """
    pixel_flat = pixel_img.flatten()
    spcounts = numpy.bincount(pixel_flat)
    spcountmax = numpy.amax(spcounts).item() + 1
    sp_to_p = numpy.zeros(len(spcounts) * spcountmax,
                          dtype=numpy.int32).reshape(len(spcounts), spcountmax)
    spcounts = numpy.zeros(len(spcounts), dtype=numpy.int32)
    for idx in range(pixel_flat.size):
        pixel_val = pixel_flat[idx]
        sp_to_p[pixel_val, spcounts[pixel_val]] = idx
        spcounts[pixel_val] += 1
    for idx in range(len(spcounts)):
        sp_to_p[idx, -1] = spcounts[idx]
    return sp_to_p
3181cb69797e5456b0fc9a679b13d22f807156f0
16,838
def twiddle(objFunction, args, init=0.5, tolerance=0.00001,
            domain=(float("-inf"), float("inf"))):
    """
    Optimize a single parameter given an objective function.

    This is a local hill-climbing algorithm. Here is a simple description of
    it: https://www.youtube.com/watch?v=2uQ2BSzDvXs

    @param args (tuple) Arguments necessary for the objective function.

    @param tolerance (float) Number used to determine when optimization has
        converged to a sufficiently good score.

    @param objFunction (function) Objective function used to quantify how
        good a particular parameter choice is.

    @param init (float) Initial value of the parameter.

    @param domain (tuple) Domain of parameter values, as (min, max).

    @return (dict) Contains:
        "parameter" (float) Threshold that returns the largest score from the
            objective function.
        "score" (float) The score from the objective function given the
            threshold.
    """
    pastCalls = {}
    x = init
    delta = 0.1
    bestScore = objFunction(x, args)
    pastCalls[x] = bestScore

    while delta > tolerance:
        # Keep x within bounds
        if x + delta > domain[1]:
            delta = abs(domain[1] - x) / 2
        x += delta

        if x not in pastCalls:
            score = objFunction(x, args)
            pastCalls[x] = score

        score = pastCalls[x]

        if score > bestScore:
            bestScore = score
            delta *= 2
        else:
            # Keep x within bounds
            if x - delta < domain[0]:
                delta = abs(domain[0] - x) / 2
            x -= 2 * delta

            if x not in pastCalls:
                score = objFunction(x, args)
                pastCalls[x] = score

            score = pastCalls[x]

            if score > bestScore:
                bestScore = score
                delta *= 2
            else:
                x += delta
                delta *= 0.5

        print("Parameter:", x)
        print("Best score:", bestScore)
        print("Step size:", delta)
        print()

    return {"parameter": x, "score": bestScore}
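A usage sketch with a toy objective (twiddle prints its progress while it runs): maximizing -(x - 2)^2 should converge near x = 2.

result = twiddle(lambda x, args: -(x - 2.0) ** 2, args=None)
print(result["parameter"])  # approximately 2.0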
8991c328b2fd45b77bb6fbea5bc2b0f8a5d705ce
16,839
import argparse


def arg_parser():
    """Parses command line arguments"""
    parser = argparse.ArgumentParser(description='Train a model for image classification.')
    parser.add_argument('--data-dir', type=str, default='~/.mxnet/datasets/imagenet',
                        help='training and validation pictures to use.')
    parser.add_argument('--rec-val', type=str, default='/media/ramdisk/data/val-passthrough.rec',
                        help='the validation data')
    parser.add_argument('--rec-val-idx', type=str, default='/media/ramdisk/data/val-passthrough.idx',
                        help='the index of validation data')
    parser.add_argument('--use-rec', action='store_true',
                        help='use image record iter for data input. default is false.')
    parser.add_argument('--batch-size', type=int, default=32,
                        help='training batch size per device (CPU/GPU).')
    parser.add_argument('--dataset-size', type=int, default=32,
                        help='size of the dataset.')
    parser.add_argument('--dtype', type=str, default='float32',
                        help='data type for training. default is float32')
    parser.add_argument('--num_gpus', type=str, default=0,
                        help='number of gpus to use.')
    parser.add_argument('-j', '--num-data-workers', dest='num_workers', default=4, type=int,
                        help='number of preprocessing workers')
    parser.add_argument('--num-epochs', type=int, default=3,
                        help='number of training epochs.')
    parser.add_argument('--lr', type=float, default=0.1,
                        help='learning rate. default is 0.1.')
    parser.add_argument('--momentum', type=float, default=0.9,
                        help='momentum value for optimizer, default is 0.9.')
    parser.add_argument('--wd', type=float, default=0.0001,
                        help='weight decay rate. default is 0.0001.')
    parser.add_argument('--lr-mode', type=str, default='step',
                        help='learning rate scheduler mode. options are step, poly.')
    parser.add_argument('--lr-poly-power', type=int, default=2,
                        help='if learning rate scheduler mode is poly, then power is used')
    parser.add_argument('--lr-decay', type=float, default=0.1,
                        help='decay rate of learning rate. default is 0.1.')
    parser.add_argument('--lr-decay-epoch', type=str, default='40,60',
                        help='epochs at which learning rate decays. default is 40,60.')
    parser.add_argument('--warmup-lr', type=float, default=0.0,
                        help='starting warmup learning rate. default is 0.0.')
    parser.add_argument('--warmup-epochs', type=int, default=0,
                        help='number of warmup epochs.')
    parser.add_argument('--last-gamma', action='store_true',
                        help='whether to initialize the gamma of the last BN layer in each bottleneck to zero')
    parser.add_argument('--mode', type=str,
                        help='mode in which to train the model. options are symbolic, imperative, hybrid')
    parser.add_argument('--model', type=str, required=True,
                        help='type of model to use. see vision_model for options.')
    parser.add_argument('--use-pretrained', action='store_true',
                        help='enable using pretrained model from gluon.')
    parser.add_argument('--use_se', action='store_true',
                        help='use SE layers or not in resnext. default is false.')
    parser.add_argument('--batch-norm', action='store_true',
                        help='enable batch normalization or not in vgg. default is false.')
    parser.add_argument('--log-interval', type=int, default=50,
                        help='Number of batches to wait before logging.')
    parser.add_argument('--save-frequency', type=int, default=0,
                        help='frequency of model saving.')
    parser.add_argument('--save-dir', type=str, default='params',
                        help='directory of saved models')
    parser.add_argument('--logging-dir', type=str, default='logs',
                        help='directory of training logs')
    parser.add_argument('--kvstore', type=str, default='nccl')
    parser.add_argument('--top-k', type=int, default=0,
                        help='give 5 for top5 accuracy, if 0 only prints top1 accuracy')
    return parser.parse_args()
3aee34b1aeb9bd320bd248665c795d67adaf47cf
16,842
def get_field_vocabularies(config, field_definitions, field_name):
    """Gets IDs of vocabularies linked from the current field
    (could be more than one).
    """
    if 'vocabularies' in field_definitions[field_name]:
        vocabularies = field_definitions[field_name]['vocabularies']
        return vocabularies
    else:
        return False
f84ee591dbcad5e81478e95de67b2e3450bbdb93
16,843
def pypi_link(pkg_filename):
    """
    Given the filename, including md5 fragment, construct the
    dependency link for PyPI.
    """
    root = 'https://files.pythonhosted.org/packages/source'
    name, sep, rest = pkg_filename.partition('-')
    parts = root, name[0], name, pkg_filename
    return '/'.join(parts)
1f71b2c6c34b52a60c2ead14b40e98ef0c89a8cf
16,844
def create_vector_dictionary(vector_file, multiword=False):
    """
    This function creates a dictionary with vector values from affixoids

    Args:
        vector_file (file): File with vector values from FastText
        multiword (bool): Set to True if the word in vector file has multiple parts

    Returns:
        Dictionary with vector values as list

    Example:
        >>> create_vector_dictionary('doctests/vectors.txt')
        {'Bilderbuchabsturz': [-0.25007, -0.16484, -0.34915, 0.44351, 0.17918, 0.17356, 0.32336, 0.19306, 0.40586, 0.58886, -0.55027, 0.15365, -0.28948, -0.096226, 0.91019, 0.24468, -0.20271, 0.5475, 0.36233, 0.20612, -0.17727, 0.054958, 0.16082, -0.1237, -0.057176, 0.18833, 0.11853, 0.19447, -0.13197, -0.18862, -0.17965, -0.13153, 0.27431, -0.68191, -0.35592, -0.13321, 0.16669, -0.42519, 0.11905, 0.15686, 0.26408, -0.35616, -0.26065, -0.0021858, 0.34352, -0.39887, 0.59549, -0.35665, -0.60043, -0.16136, -0.19603, -0.57132, 0.11918, -0.22356, 0.1499, -0.22458, -0.081269, 0.0058904, 0.16639, 0.36866, -0.3294, -0.21821, 0.87304, -0.042374, -0.42687, -0.41224, -0.73979, 0.37495, 0.34696, 0.6927, -0.24696, 0.23713, 0.0004817, -0.67652, 0.36679, 0.52095, -0.059838, 0.3779, -0.15106, -0.31892, -0.084559, -0.067978, 0.45779, 0.45037, -0.19661, -0.14229, 0.097991, 0.26219, 0.41556, 0.43363, 0.060991, 0.15759, 0.055367, -0.10719, -0.38255, -0.3, -0.032207, -0.50483, 0.18746, -0.6391]}
    """
    dictionary = {}
    with open(vector_file, 'r', encoding='utf-8') as f:
        for line in f:
            if multiword:
                word = line.rstrip().split('\t')
            else:
                word = line.strip().split()
            dict_key = word[0]
            dict_value = list(word[1:])
            dict_value_float = [float(x) for x in dict_value]
            dictionary.update({dict_key: dict_value_float})
    return dictionary
2da775668193b0cd545137f90f416dca4fb166be
16,845
def add_linebreaks(text, max_len=80):
    """
    Add linebreaks on whitespace such that no line is longer than `max_len`,
    unless it contains a single word that's longer.

    There are probably way faster methods, but this is simple and works.
    """
    br_text = ''
    len_cnt = 0
    for word in text.split(' '):
        len_cnt += len(word) + 1
        if len_cnt > max_len:
            len_cnt = len(word)
            br_text += '\n' + word
        else:
            br_text += ' ' + word
    return br_text[1:]
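A quick check wrapping at 10 characters:

print(add_linebreaks("the quick brown fox jumps", max_len=10))
# the quick
# brown fox
# jumps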
3d09572d34b67da9b639466478ce2d62d6d54116
16,846
def preprocess_x_y(x, y):
    """Preprocess the input x, y data. Returns list-of-list style."""

    def is_iterable_slicable(a):
        if hasattr(a, "__iter__") and hasattr(a, "__getitem__"):
            return True
        else:
            return False

    if is_iterable_slicable(x):
        if is_iterable_slicable(x[0]):
            return x, y
        else:
            return (x,), (y,)
    else:
        raise ValueError("invalid input!")
957fbd745f7a06ff8f47bf53e237f97eb5b19b05
16,847
def strftime(date, fmt):
    """
    Apply strftime to `date` object with `fmt` parameter.
    Returns '' if either is non-truthy.
    """
    try:
        if not date or not fmt:
            return ''
        return date.strftime(fmt)
    except:
        return ''
702688a4c7b4c5bef1ee64ba1804335b617baa2f
16,848
import numpy


def calc_pr4(index_hkl, centrosymmetry_position=None):
    """Calculate PR4.

    For more details see documentation module "Structure factor".
    """
    h, k, l = index_hkl[0], index_hkl[1], index_hkl[2]
    if centrosymmetry_position is None:
        res = numpy.zeros_like(h)
    else:
        p_1 = centrosymmetry_position[0] / centrosymmetry_position[3]
        p_2 = centrosymmetry_position[1] / centrosymmetry_position[3]
        p_3 = centrosymmetry_position[2] / centrosymmetry_position[3]
        res = numpy.exp(-4. * numpy.pi * 1j * (h * p_1 + k * p_2 + l * p_3))
    return res
18ea00b4ff945773d06c3102454d98ecc22185ef
16,850
import re


def normalize(string):
    """Normalize whitespace."""
    string = string.strip()
    string = re.sub(r'\s+', ' ', string)
    return string
a677092aa0deaed5a87958f35e63fbe5538e04f3
16,851
def run_all_analysis_for_a_clustering(clustering_id, clustering, analysis):
    """
    Is the function to be run in parallel.

    @param clustering_id: Is the id of the clustering we are working with.
    @param clustering: A Clustering instance.
    @param analysis: A list of all the analysis we want to perform.
    """
    analysis_results = {}
    for a in analysis:
        analysis_results[a.name] = a.run(clustering)
    return (clustering_id, analysis_results)
476fa6684867b74fc5520241aecf5efceb92dc0c
16,853
def createFilterForFeature(feature):
    """
    Create weights and a bias value for a 3x3 convolution for the given
    feature, as defined by feature.

    Arguments:
        feature (torch.tensor): 3D tensor of CxHxW coordinates of the shape.
            Coordinates are relative to an arbitrary starting point and the
            height and width dimensions should be less than 3. Use
            chunkifyShapes to chunk larger shapes to the appropriate sizes.

    Returns:
        torch.tensor: Weights
        float       : Bias
    """
    # Create a filter that rejects anything that is not this shape. Assume
    # that all values in the image are either 0 or 1. Any location with a 0 in
    # the chunk will have a value of -filter_sum so the output of the filter
    # will be 0 or less if there is any 1 present where it is unexpected for
    # this chunk.
    filter_sum = feature.sum().item()
    filter_weight = feature.clone() + ((feature - 1.) * filter_sum)
    # If the filter encounters an exact match then the output will be
    # filter_sum. The bias should be set to -filter_sum + 1 so that output is
    # suppressed unless all of the expected 1 values are encountered.
    filter_bias = -filter_sum + 1
    return filter_weight, filter_bias
366bf29c50da21546fa9d32d8bfe9d80537c672b
16,854
def filter_queryset_by_m2m(request, queryset, exclude):
    """
    request = e.g. self.request.GET
    queryset = a Django queryset object
    exclude = e.g. 'linguisticfield' if want to exclude filter on LinguisticField

    Returns an appropriately filtered Django queryset object
    """
    # request and queryset are mandatory
    if request is not None and queryset is not None:
        # Each many-to-many field is filtered the same way, so loop over
        # them, skipping the excluded field.
        for field in ('author', 'linguisticfield', 'linguisticnotion',
                      'linguistictradition', 'reference', 'sanskritword',
                      'text', 'textpassage'):
            if exclude != field:
                value = request.get('advanced_filter_' + field, '')
                if value != '':
                    queryset = queryset.filter(**{field + '__in': [value]})
    # Only show results that admin approves as published
    return queryset.filter(admin_published=True)
654bd6f6278c01dd1fdb89a20666601a337d9b55
16,856
def kb(units):
    """Boltzmann constant

    Parameters
    ----------
    units : str
        Units for kb. Supported units

        ====== ========================= ==============
        Unit   Description               Value
        ====== ========================= ==============
        J/K    Joule per kelvin          1.38064852e-23
        kJ/K   Kilojoule per kelvin      1.38064852e-26
        eV/K   Electron volt per kelvin  8.6173303e-5
        cal/K  Calorie per kelvin        3.2976230e-24
        kcal/K Kilocalorie per kelvin    3.2976230e-27
        Eh/K   Hartree per kelvin        3.1668105e-6
        Ha/K   Hartree per kelvin        3.1668105e-6
        ====== ========================= ==============

    Returns
    -------
    kb : float
        Boltzmann constant in appropriate units

    Raises
    ------
    KeyError
        If units is not supported.
    """
    kb_dict = {
        'J/K': 1.38064852e-23,
        'kJ/K': 1.38064852e-26,
        'eV/K': 8.6173303e-5,
        'cal/K': 3.2976230e-24,
        'kcal/K': 3.2976230e-27,
        'Eh/K': 3.1668105e-06,
        'Ha/K': 3.1668105e-06,
    }
    try:
        return kb_dict[units]
    except KeyError:
        err_msg = ('Invalid unit for kb: {}. Use help(pmutt.constants.kb) for '
                   'accepted units.'.format(units))
        raise KeyError(err_msg)
4f0d4cfa10f617e1a9a0257b1a509606af2d7f18
16,857
def get_one_batch(dataloader):
    """Returns one batch from a dataloader"""
    iter_dl = iter(dataloader)  # a DataLoader must be wrapped in iter() before next() can be called on it
    batch = next(iter_dl)
    return batch
a05bde960649791f6ea8a8a432c5f39af6137275
16,858
def univariate_appended_data_x():
    """
    A fixture for univariate data.
    """
    return {"x": [98, 102, 94]}
3be55e2c1afdd77a69e0e5fcac124d30a8a23a17
16,859
def discrete_colorscale(markers, colors):
    """
    :param markers: boundary values; one more than the number of colors
    :param colors: color for each interval between consecutive markers
    :return: color scale
    """
    markers = sorted(markers)
    norm_mark = [
        round((v - markers[0]) / (markers[-1] - markers[0]), 3) for v in markers
    ]
    dcolorscale = []
    for k in enumerate(colors):
        dcolorscale.extend(
            [[norm_mark[k[0]], colors[k[0]]], [norm_mark[k[0] + 1], colors[k[0]]]]
        )
    return dcolorscale
2a4f6b47779f1225bff1fd425da5e0385ef65927
16,860
def alert_get(alert_id=None):  # noqa: E501
    """obtain alert list

    get method to obtain all the alerts  # noqa: E501

    :param alert_id: identifier for the alert
    :type alert_id: str

    :rtype: AlertArray
    """
    return 'do some magic!'
bf46a857842d657df50255273d124e864ef59873
16,861
import os


def get_html_path(path, use_directory_urls):
    """ Return the HTML file path for a given markdown file """
    parent, filename = os.path.split(path)
    name_orig, ext = os.path.splitext(filename)

    # Directory URLs require some different logic. This mirrors mkdocs' internal logic.
    if use_directory_urls:
        # Both `index.md` and `README.md` files are normalized to `index.html` during build
        name = 'index' if name_orig.lower() in ('index', 'readme') else name_orig

        # If its name is `index`, then that means it's the "homepage" of a
        # directory, so it should get placed in that dir
        if name == 'index':
            return os.path.join(parent, 'index.html')
        # Otherwise, it's a file within that folder, so it should go in its
        # own directory to resolve properly
        else:
            return os.path.join(parent, name, 'index.html')
    # Just use the original name if Directory URLs aren't used
    else:
        return os.path.join(parent, (name_orig + '.html'))
47825861f652a4e4563362ac270e72a06aa3a87c
16,863
import os
import json


def get_json(file, path=''):
    """
    Reads and parses a JSON file.

    Parameters:
        file: The name of the JSON file to read.
        path: The path to the JSON file, if it is not in the working directory.

    Returns:
        A dict containing the JSON file's content.
    """
    with open(os.path.join(path, file), 'r', encoding='UTF-8') as j:
        return json.load(j)
470a8146c7ef30995eea7dc0dd099ca800db62d8
16,865
def make_header(myinfo, myfile=None):
    """
    Formats experiment metadata for (initial) display and to save in the
    .dat file

    ARGS:
        MYINFO (dict)
        MYFILE (file, *optional)

    returns info_str (str)
    """
    info_str = ''
    for k, v in myinfo.items():
        info_str += k + ':' + str(v) + '\n'
    if myfile is not None:
        myfile.write(info_str)
    return info_str
67662c877f3ad54727143a24309582c606d117d5
16,866
import math


def calc_stats(base_stats, level):
    """Calculate a Pokemon's stats based on its base stats and level"""
    stats = []
    # HP (index 0) uses a different formula from the other five stats.
    stats.append(math.floor((31 + 2 * base_stats[0] + 21) * level / 100 + 10 + level))
    for i in range(1, 6):
        stats.append(math.floor((31 + 2 * base_stats[i] + 21) * level / 100 + 5))
    return stats
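A worked example (hypothetical base stats): at level 50 with all base stats 100, the common term is (31 + 200 + 21) * 50/100 = 126, so HP is 126 + 10 + 50 = 186 and each other stat is 126 + 5 = 131.

print(calc_stats([100] * 6, 50))  # [186, 131, 131, 131, 131, 131]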
a2b94a830d6d6622e8a58ce3831112af31899776
16,867
import requests


def _fetch(url: str):
    """Function to fetch data from an API in an asynchronous way."""
    with requests.get(url) as response:
        return response.json()
368cf893b6f42a305b983cc5ab90f188a1db587e
16,868
def build_edges(src_profile, dst_profiles):
    """Create set of edges, compatible with NX graph format."""
    edges = set()
    for dst_profile in dst_profiles:
        edges.add((src_profile['uid'], dst_profile['uid']))
    return edges
3af864e0b847b530c0c524b28a406cf8a99b71e0
16,872
def nodes_to_int(head, reverse=True):
    """Converts a linked-list number structure to a number string for
    testing purposes.

    :returns Number string representing node structure
    """
    if head is None:
        return None
    curr = head
    num_str = str(curr.data)
    while curr.next is not None:
        curr = curr.next
        num_str += str(curr.data)
    if reverse:
        num_str = num_str[::-1]
    return num_str
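A usage sketch with a minimal node class (hypothetical; any object with `.data` and `.next` works): digits stored 3 -> 2 -> 1 read in reverse as "123".

class Node:
    def __init__(self, data, next=None):
        self.data = data
        self.next = next

head = Node(3, Node(2, Node(1)))
print(nodes_to_int(head))  # '123'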
bb21051accfa62f431f8c300d15b689e199347ab
16,873
import os


def get_ingest_state():
    """Get the state code of the active ingest

    :return: (STARTING, PREPROCESSING, PROCESSING, POSTPROCESSING, COMPLETE)
    :rtype: str
    """
    return os.getenv('JETA_INGEST_STATE')
1b201693c166ad0c32ca134eec53c43a75ddd8e6
16,874
def check_nulls(df):
    """
    Checks null values within a dataframe.

    Param: dataframe
    Return: number of null values per column
    """
    return df.isnull().sum()
b552d8a5391e230cb47afc70046c794290661c02
16,875
import pathlib
import os


def first_path_components(p, n):
    """Extract the first 'n' components of path 'p'"""
    p = pathlib.Path(p)
    return pathlib.Path(os.path.join(*p.parts[:n]))
1ea47c7824841fa752d6db6983b87fa5f9c9476b
16,876
import re


def get_filename_parts(filename, default_suffix=''):
    """
    Parses a string representing a filename and returns it as a 2-element
    string tuple with the filename stem and its suffix (the shortest string
    after a dot from the end of the string). If there's no suffix found, a
    default is used instead.

    Args:
        filename(string): The input file name expected to contain a GraphViz
            DOT digraph.
        default_suffix(string, optional): The suffix to use if one cannot be
            found from the filename. Defaults to an empty string.

    Returns (string, string):
        A 2-element tuple of the filename stem and its suffix.
    """
    m = re.match(r'(.*)(\..*$)', filename)
    if not m:
        return (filename, default_suffix)
    return m.group(1, 2)
a9f7451ab0da7c6dd661959000b8e9d89911d8c1
16,877
def getWalkTag(node):
    """Get Controller tag

    Arguments:
        node (dagNode): Controller object with tag

    Returns:
        tag: Controller tag
    """
    tag = node.listConnections(t="controller", et=True)
    if tag:
        return tag[0]
87ffee1216d29a23331e0b7411eccf376326ec5a
16,878
import math


def area_triangle_sss(side1, side2, side3):
    """
    Returns the area of a triangle, given the lengths of its three sides.
    """
    # Use Heron's formula
    semiperim = (side1 + side2 + side3) / 2.0
    return math.sqrt(semiperim *
                     (semiperim - side1) *
                     (semiperim - side2) *
                     (semiperim - side3))
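A quick check with the classic 3-4-5 right triangle, whose area is (3 * 4) / 2 = 6: Heron's formula gives sqrt(6 * 3 * 2 * 1) = 6.

print(area_triangle_sss(3, 4, 5))  # 6.0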
3c6276d7b4e9f8f0282eec187964112c7b745a7d
16,879
def _FindOrAddSolution(solutions, name):
    """Find a solution of the specified name from the given list of solutions.

    If no solution with the specified name is found, a solution with the
    specified name is appended to the given list of solutions. This function
    thus always returns a solution.

    Args:
        solutions: The list of solutions to search from.
        name: The solution name to search for.

    Returns:
        The solution with the specified name.
    """
    for solution in solutions:
        if solution['name'] == name:
            return solution
    solution = {'name': name}
    solutions.append(solution)
    return solution
50d7d93a0a43062ceba2abd8677e6ca77596911e
16,880
def classify(df, w):
    """
    Classify result of linear discriminant analysis for different classifiers.

    @param df: pandas dataframe;
    @param w: dict[classifier: (list of weights, weight threshold)];
    @return: df with appended result.
    """
    # get input
    if 'state' in df.columns:
        x = df.drop('state', axis=1).to_numpy(dtype='float64')
    else:
        x = df.to_numpy(dtype='float64')
    # initialize result
    new = df.copy()
    for classifier, wi in w.items():
        # evaluate output
        y = x @ wi[0]
        # append output
        new[f'lda_{classifier}'] = y - wi[1]
        # get states
        states = classifier.split('_')
        # append output
        new[f'lda_{classifier}_class'] = [
            states[0] if i > 0 else states[1] for i in y
        ]
    return new
184d2aa61ef8cb942b8ba69f15148d2b99c22091
16,881
def gen_h_file(file_list):
    """ generate the c header file for audio tone """
    h_file = ''
    h_file += '#ifndef __AUDIO_TONEURI_H__\r\n#define __AUDIO_TONEURI_H__\r\n\r\n'
    h_file += 'extern const char* tone_uri[];\r\n\r\n'
    h_file += 'typedef enum {\r\n'
    for line in ['    TONE_TYPE_' + name.split(".")[0].upper() + ',\r\n' for name in file_list]:
        h_file += line
    h_file += '    TONE_TYPE_MAX,\r\n} tone_type_t;\r\n\r\nint get_tone_uri_num();\r\n\r\n#endif\r\n'
    return h_file
1c1626b3b1b33fcf00bccdcfb9ac3218c7050889
16,882
def merge_names(names):
    """Merge names of environments by keeping the last non-blank one"""
    actual_names = [name for name in names if name]
    if actual_names:
        return actual_names[-1]
7d58bbbfb7bd4d9851f7475e24903681374f7777
16,883
from pathlib import Path
from typing import Dict
import csv


def read_max_tile_counts_from_csv(csv_path: Path) -> Dict[str, int]:
    """
    Reads the max tile counts.
    """
    max_tile_counts = {}
    row_count = 0
    with open(csv_path.as_posix()) as file:
        for row in csv.reader(file, delimiter=',', skipinitialspace=True):
            row_count += 1
            if row_count == 1:
                continue  # skip the header row
            tile = row[0]
            max_count = int(row[1])
            max_tile_counts[tile] = max_count
    return max_tile_counts
e4740c430ab42064d8255aca5e8d327b39a31b65
16,885
def id2num(s):
    """
    spreadsheet column name to number
    http://stackoverflow.com/questions/7261936

    :param s: str -- spreadsheet column alpha ID (i.e. A, B, ... AA, AB, ...)
    :returns: int -- spreadsheet column number (zero-based index)

    >>> id2num('A')
    0
    >>> id2num('B')
    1
    >>> id2num('XFD')
    16383
    """
    n = 0
    for ch in s.upper():
        n = n * 26 + (ord(ch) - 65) + 1
    return n - 1
a1966821557324a0e95568bf0f63207d8cd3f350
16,886
def tot(n, ndc):
    """The modeled CIS availability"""
    assert (n > 0 and ndc <= 1)
    t = 0
    coverage = []
    for i in range(n):
        # t = (1 - t) * (np.random.randn() * (ndc/4.) + ndc) + t
        t = t + (1 - t) * ndc
        if i == n - 1:
            # the coverage at a certain number of nodes
            # print(t)
            coverage.append(t)
    return coverage
a641fe291ebabd1242be63db643237b1bfbb8325
16,888
def create_bbox(boundries):
    """ The BBox sets the plot figure size """
    BBox = ((boundries[3], boundries[2], boundries[1], boundries[0]))
    return BBox
28acec93dd6aab6af3192fbf1a82a2ffb96788d3
16,890
def build_conditional(column, row):
    """Build string for conditional formatting formula for exporting to Excel."""
    substring = ''
    if isinstance(row, list):
        for country in row:
            substring = substring + f'$A4="{country}",'
        # remove last comma, add closing parenthesis
        substring = 'OR(' + substring[:-1] + ')'
        formula_string = f'=AND(B$3="{column}",' + substring + ')'
    else:
        formula_string = f'=AND(B$3="{column}",$A4="{row}")'
    return formula_string
3dbd7d572a749769e7e9cb4726eb099e937ee79e
16,891
import numpy as np


def normalize(array):
    """ Normalizes an array by its average and sd """
    return (np.array(array) - np.average(array)) / np.std(array)
e7d508515340727c179cb8dc3f6bc2063ebbff76
16,892
def _get_redis_specs(nodes, nb_param_servers):
    """ Returns the list of ip:port for all redis nodes """
    redis_specs = []
    # Takes the last of the first nb_param_servers + 1 hostnames.
    redis_nodes = [hostname for hostname in nodes][:nb_param_servers + 1][-1]
    for node_host, node_ip in nodes.items():
        if node_host in redis_nodes:
            redis_specs += ['%s:%d' % (node_ip, 6379)]
    return redis_specs
e93d037e7a1d281550c10da313b8083d8ac57949
16,893
def _missing_strs(vba, pcode_strs, verbose=False):
    """
    See if there are any string literals that appear in the p-code but do
    not appear in the decompressed VBA source code.

    vba - (str) The decompressed VBA source code.
    pcode_strs - (set) The string literals defined in the p-code.
    verbose - (bool) If True, print each missing string.

    return - (float) % missing items.
    """
    # Check each string.
    num_missing = 0.0
    for curr_str in pcode_strs:
        if (('"' + curr_str + '"') not in vba) and (("'" + curr_str + "'") not in vba):
            if verbose:
                print("P-code string '" + str(curr_str) + "' is missing.")
            num_missing += 1
    if len(pcode_strs) == 0:
        return 0.0
    return num_missing / len(pcode_strs)
7dc45fa2b1bc883bf68929ebcb46277ecdeb6cf9
16,894
def validate_template(cls):
    """
    Checks whether a template class meets the basic expectations for working
    with ModelManager, to aid in development and testing.

    Looks for 'to_dict', 'from_dict', and 'run' methods, and 'name', 'tags',
    'template', and 'template_version' attributes. Checks that an object can
    be instantiated without arguments, plus some additional behaviors. See
    documentation for a full description of ModelManager specs and guidelines.

    There are many behaviors this does NOT check, because we don't know what
    particular parameters are expected and valid for a given template. For
    example, saving a configured model step and reloading it should produce
    an equivalent object, but this needs to be checked in template-specific
    unit tests.

    Parameters
    ----------
    cls : class
        Template class.

    Returns
    -------
    bool
    """
    try:
        m = cls()
    except:
        print("Error instantiating object without arguments")
        raise

    methods = ['to_dict', 'from_dict', 'run']
    for item in methods:
        if item not in dir(cls):
            print("Expecting a '{}' method".format(item))
            return False

    try:
        d = m.to_dict()
    except:
        print("Error running 'to_dict()'")
        raise

    params = ['name', 'tags', 'template', 'template_version']
    for item in params:
        if item not in d:
            print("Expecting a '{}' key in dict representation".format(item))
            return False

    if (d['template'] != m.__class__.__name__):
        print("Expecting 'template' value in dict to match the class name")
        return False

    try:
        cls.from_dict(m.to_dict())
    except:
        print("Error instantiating object with 'from_dict()' method")
        raise

    # TO DO - check supplemental objects? (but nothing there with unconfigured steps)

    return True
2b4af449cef72ca16843d916d99219b851a5dfc1
16,896
def calculate_left_exterior(root, is_exterior):
    """ Build left exterior """
    if is_exterior:
        ls = [root.val]
        if root.left is not None:
            ls.extend(calculate_left_exterior(root.left, True))
            ls.extend(calculate_left_exterior(root.right, False))
        elif root.right is not None:
            ls.extend(calculate_left_exterior(root.right, True))
        return ls
    elif root.left is None and root.right is None:
        return [root.val]
    return calculate_left_exterior(root.left, False) \
        + calculate_left_exterior(root.right, False)
edcf6e6684ca72fcc3afa8e65abcc76c12e80e12
16,898
def sortMoviesBy(movies_names_wl, args):
    """
    This module is used to sort the movies by the key given in args.sortBy.

    :param list movies_names_wl: a list of movie_names_with_links
        movie : [Rank, Link, Title, Year, Rating, Number of Ratings, Runtime, Director]
        Rank : int
        Link : str
        Title : str
        Year : int
        NoR : int
        Runtime : str
        Director : str
    :param Namespace args: [top, csv, sortBy, setup, console_print]
        top : int
        csv : bool
        sortBy : string
        setup : bool
        console_print : bool
    """
    try:
        movies_names_wl = movies_names_wl[:args.top]
    except:
        print('**Error** : cannot slice top size')
    keydictionary = {'Rank': 0, 'Title': 2, 'Year': 3, 'Rating': 4,
                     'NoR': 5, 'Runtime': 6, 'Director': 7}
    try:
        movies_names_wl.sort(
            key=lambda movie: movie[keydictionary[args.sortBy]])
    except:
        if args.sortBy is not None:
            print('**Error** : cannot sortBy **')
    return movies_names_wl
5b2d20f29e7846799db3e1622e30214065d34252
16,899
def match_prefixes(text, prefixtree):
    """Return a list of all matching prefixes, with longest sorted first"""
    longest_prefix = ''
    current = prefixtree
    for char in text:
        if char in current.children:
            longest_prefix += char
            current = current.children[char]
        else:
            break
    prefixes = []
    for i in reversed(range(len(longest_prefix))):
        if longest_prefix[:i + 1] in prefixtree:
            prefixes.append(longest_prefix[:i + 1])
    return prefixes
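A usage sketch with a minimal prefix tree (hypothetical; the real class just needs a `.children` dict per node and word-membership via `in`):

class PrefixTree:
    def __init__(self):
        self.children = {}
        self.is_word = False

    def add(self, word):
        node = self
        for ch in word:
            node = node.children.setdefault(ch, PrefixTree())
        node.is_word = True

    def __contains__(self, word):
        node = self
        for ch in word:
            if ch not in node.children:
                return False
            node = node.children[ch]
        return node.is_word

tree = PrefixTree()
for w in ('a', 'an', 'ant'):
    tree.add(w)
print(match_prefixes('anthem', tree))  # ['ant', 'an', 'a']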
d470dc71e0214c45510db48547dcd10921c43aee
16,900