content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import optparse
import os


def process_options():
    """Parse command-line options for the buildstep downgrade tool.

    Returns the optparse options object with ``filename`` and
    ``mastername`` attributes filled in (and ``builderpath`` when a path
    argument was supplied).
    """
    prog_desc = 'Downgrade buildsteps in a directory of pickled builds.'
    usage = '%prog [options] [master name or filename]'
    parser = optparse.OptionParser(usage='%s\n\n%s' % (usage, prog_desc))
    parser.add_option('--list-masters', action='store_true',
                      help='list masters in search path')
    parser.add_option('--master-dir',
                      help='specify a master directory '
                           'instead of a mastername')
    parser.add_option('-t', '--target-version', default=None, type='int',
                      help='downgrade to specified version')
    parser.add_option('--commit', action='store_true',
                      help='save downgraded results on top of old files. leaving '
                           'out this option is equivalent to a dry-run')
    parser.add_option('--builder-name', default='builder',
                      help='filename for builder')
    options, args = parser.parse_args()

    if len(args) > 1:
        parser.error('too many arguments specified!')

    options.filename = None
    options.mastername = None
    if args:
        # A real path means a build file; anything else is a master name.
        if os.path.exists(args[0]):
            options.filename = args[0]
            options.builderpath = os.path.join(
                os.path.dirname(args[0]), options.builder_name)
        else:
            options.mastername = args[0]
    return options
4d6d1f0b23d029d09a6e6d4c3b04bad8c623a64b
31,241
def pandas_mode_(self):
    """
    Boolean attribute: was this model trained with pandas data?

    True when either ``feature_names_`` or ``target_names_`` exists on
    the estimator.
    """
    markers = ('feature_names_', 'target_names_')
    return any(hasattr(self, name) for name in markers)
4d46d11cabc13c3cb088e9046cd22b7568cdbf70
31,243
def get_rios_coefficient_fieldnames():
    """Return the mapping of RIOS factor names to coefficient-table fields."""
    return {
        "landuse": "lucode",
        "sedimentexport": "sed_exp",
        "sedimentretention": "sed_ret",
        "nitrateexport": "N_exp",
        "nitrateretention": "N_ret",
        "phosphateexport": "P_exp",
        "phosphateretention": "P_ret",
        "roughness": "rough_rank",
        "cover": "cover_rank",
    }
fb2d5b5fdc6ae80d5be52a5119a857eb9b0d4017
31,244
def get_var_names(component, intent='input'):
    """
    List the input or output variable names exposed by *component*
    (a BMI-like object).

    *intent* must be ``'input'`` or ``'output'``.
    """
    assert intent in ('input', 'output')
    getter = getattr(component, 'get_' + intent + '_var_names')
    try:
        return getter()
    except TypeError:
        # Some components expose the names as a plain attribute rather
        # than a zero-argument method.
        return getattr(component, '_' + intent + '_var_names')
3dcb6f1a7f181a42813755f582e1a2ca03de10e0
31,245
import json


def _make_defined_data(data, defined_keys):
    """Split incoming tweet data into model fields and a JSON leftovers blob.

    Keys listed in *defined_keys* are copied straight into the returned
    dict; every other key is collected and stored JSON-encoded under
    ``other_data``.  Dict values are JSON-encoded in either case.

    :param data: mapping of incoming tweet attributes
    :param defined_keys: keys that map directly onto model fields
    :return: dict ready to populate the model
    """
    defined_data = {}
    extra = {}
    for key, value in data.items():
        # Nested objects cannot live in flat DB columns; serialize them.
        if isinstance(value, dict):
            value = json.dumps(value)
        if key in defined_keys:
            defined_data[key] = value
        else:
            extra[key] = value
    # Everything without a dedicated column goes into one JSON field.
    defined_data['other_data'] = json.dumps(extra)
    return defined_data
de64349d85b5bf31849038523e91794d34388cf6
31,246
import os


def _convert_path_to_function(clipped_path):
    """Turn a clipped api-route path into a function name.

    Joins the non-empty path components with underscores and drops the
    final three characters (the ``.py`` suffix of the last component).

    Parameters
    ----------
    clipped_path : str
        api route with absolute path clipped

    Returns
    -------
    fn : str -- function name
    """
    parts = filter(None, clipped_path.split(os.sep))
    return "_".join(parts)[:-3]
77eb308f1f3e87464a5b19d963546a55c5fd83ba
31,247
def _merge_cipher(clist):
    """Flatten 'clist' [List<List<int>>] into the corresponding bytes.

    :param clist: list of lists of byte values (0-255)
    :return: concatenated cipher text as ``bytes``
    """
    flat = bytearray()
    for chunk in clist:
        flat.extend(chunk)
    return bytes(flat)
060af5ad69b11592029e4aaf10fdfff8354378a9
31,249
def ABCDFrequencyList_to_HFrequencyList(ABCD_frequency_list):
    """Convert ABCD two-port parameters into h-parameters.

    ABCD-parameters should be in the form ``[[f, A, B, C, D], ...]``;
    the result is ``[[f, h11, h12, h21, h22], ...]``.
    """
    converted = []
    for frequency, A, B, C, D in ABCD_frequency_list:
        converted.append([
            frequency,
            B / D,                    # h11
            (A * D - B * C) / D,      # h12 (determinant over D)
            -1 / D,                   # h21
            C / D,                    # h22
        ])
    return converted
d4f54e6864a34b8b24b1afe599a9338be52f29fd
31,250
def trimCompressionSuffix(fileName):
    """Strip a trailing compression suffix from *fileName*.

    Handles ``.gz``, ``.bz2``, ``.tar.gz`` and ``.tar.bz2`` (the
    ``.tar`` part is removed as well).  Other names pass through
    unchanged.
    """
    # The original try/except IndexError was dead code: slicing a string
    # never raises IndexError, so the checks run unconditionally.
    if fileName.endswith(".gz"):
        fileName = fileName[:-3]
    if fileName.endswith(".bz2"):
        fileName = fileName[:-4]
    if fileName.endswith(".tar"):
        fileName = fileName[:-4]
    return fileName
21732f395429a2a45e5a95fa0c6653de09d4c2c4
31,254
def rgb_int2pct(rgbArr):
    """Scale RGB triples from the 0-255 range to a 0-1 scale.

    (255, 255, 255) --> (1.0, 1.0, 1.0)
    """
    # Index explicitly so inputs with extra components (e.g. RGBA) keep
    # working; only the first three channels are used.
    return [(c[0] / 255.0, c[1] / 255.0, c[2] / 255.0) for c in rgbArr]
cd008e7b65085154d685cfba125912f3f908dfa8
31,255
def confidence_for_uniform_audit(n, u, b):
    """Chance of catching at least one of *b* bad precincts.

    Probability that a uniform random sample of size *u*, drawn without
    replacement from *n* precincts, contains one of the *b* bad ones.
    """
    miss_prob = 1.0
    draw = 0
    limit = int(u)
    while draw < limit:
        # Chance that this particular draw also misses every bad precinct.
        miss_prob *= float(n - b - draw) / (n - draw)
        draw += 1
    return 1.0 - miss_prob
16fc15a6a2bd32f9202ff545ca77c9a65abb0ffc
31,256
import operator


def evaluate(paeseTree):
    """Recursively evaluate an expression parse tree.

    Interior nodes hold one of the operator tokens ``+ - * /``; leaf
    nodes hold operand values.

    :param paeseTree: binary parse-tree node
    :return: numeric value of the subtree
    """
    operations = {
        '+': operator.add,
        '-': operator.sub,
        '*': operator.mul,
        '/': operator.truediv,
    }
    left = paeseTree.getLeftNode()
    right = paeseTree.getRightNode()
    if not (left and right):
        # Leaf node: the stored value is the operand itself.
        return paeseTree.getRootValue()
    apply_op = operations[paeseTree.getRootValue()]
    return apply_op(evaluate(left), evaluate(right))
3e22ed9716bd05245779b82fde96636b840a9ea8
31,257
def get_token(request):
    """Get the JWT token from the request cookie, else the auth header."""
    # Cookie takes precedence; the Authorization header is the fallback.
    cookie_token = request.COOKIES.get('jwt-token')
    if cookie_token is not None:
        return cookie_token
    return request.META.get('HTTP_AUTHORIZATION')
591d6f282969b3994a0d4b86ccf6eda19fe93530
31,258
def tet_f(note, tuning=440):
    """Return the 12-TET frequency of a note in scientific notation.

    :param note: pitch name plus octave digit, e.g. ``"A4"``
    :param tuning: frequency of A4 (piano key 49) in Hz
    """
    # NOTE(review): assumes a single-digit octave and sharp-only
    # spellings (no flats) — confirm against callers.
    names = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]
    semitone = names.index(note[:-1]) + 1
    key_number = int(note[-1]) * 12 + semitone - 9
    # Equal temperament: each key is a factor of 2^(1/12) from key 49.
    return tuning * ((2 ** (1 / 12)) ** (key_number - 49))
17dd86af181c9ed0420f6dcb9f0af07e46607c91
31,259
def StripSuffix(s: str, suffix: str):
    """Strips the provided suffix from s, if present at its end.

    An empty suffix leaves *s* unchanged.  (The original ``s[:-len(suffix)]``
    evaluated to ``s[:-0]`` == ``s[:0]`` for an empty suffix and wrongly
    returned the empty string.)
    """
    if suffix and s.endswith(suffix):
        return s[:-len(suffix)]
    return s
bb4457b754cefa8c96df2e668a0a92e8a6713517
31,260
def something():
    """Always return ``False``.

    >>> something()
    False
    """
    return False
3ba6d16e419e7afa79d96b0ebb2ae38a10df73ab
31,261
from typing import Dict
from typing import List


def iterate_parent(ontology_label: str, ontology: Dict[str, List[str]],
                   family_tree: List[str],
                   mappings: Dict[str, Dict[str, str]]):
    """Walk up the ontology from *ontology_label* collecting hub mappings.

    Parameters
    ----------
    ontology_label
        label to query mappings.
    ontology
        name to parents mappings.
    family_tree
        list of labels in the family tree so far.
    mappings
        ontology label to hub term/dimension mappings.

    Returns
    -------
    list of dicts with ``label``, ``dimension`` and ``hierarchy`` keys,
    one per mapped ancestor found, e.g.::

        [{"label": "Image Segmentation",
          "dimension": "Operation",
          "hierarchy": ["Image segmentation", "Manual segmentation"]}]
    """
    # The current label becomes the newest root of the lineage.
    family_tree.insert(0, ontology_label)
    if ontology_label in mappings:
        mapped = mappings[ontology_label]
        return [{
            "label": mapped["label"],
            "dimension": mapped["dimension"],
            "hierarchy": family_tree,
        }]
    if ontology_label not in ontology:
        return []
    results = []
    # Each parent gets its own copy of the lineage to extend.
    for parent in ontology[ontology_label]:
        results.extend(
            iterate_parent(parent, ontology, list(family_tree), mappings))
    return results
da641d14d978c9a6010707dd7bbdfb95bbdd1ea4
31,262
def extract_red_channel(input_im, bayer_pattern='grbg'):
    """Extract and return the red channel from a Bayer image.

    :param input_im: The input Bayer image.
    :param bayer_pattern: One of 'rggb', 'bggr', 'grbg' or 'gbrg'.
        ('girg' is accepted as a legacy alias of 'gbrg' — the original
        table misspelled the pattern name.)
    :return: The extracted channel, of the same type as the image.
    """
    offsets = {
        'rggb': (0, 0),
        'bggr': (1, 1),
        'grbg': (0, 1),
        'gbrg': (1, 0),  # fixed: was misspelled 'girg'
        'girg': (1, 0),  # legacy alias kept for backward compatibility
    }
    assert bayer_pattern in offsets, \
        'Invalid Bayer pattern \'{}\''.format(bayer_pattern)
    row0, col0 = offsets[bayer_pattern]
    # Red occupies every second sample starting at its pattern offset.
    return input_im[row0::2, col0::2, ...]
f879f26b42333a82d8f5d8653a6b1bf8479d4732
31,263
def count_bits_set_kernighan(n: int) -> int:
    """Count set bits by repeatedly clearing the lowest one (Kernighan).

    https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetKernighan

    >>> count_bits_set_kernighan(0b101010101)
    5
    >>> count_bits_set_kernighan(2 << 63)
    1
    >>> count_bits_set_kernighan((2 << 63) - 1)
    64
    """
    total = 0
    while n:
        n &= n - 1  # clears exactly one set bit per iteration
        total += 1
    return total
d676e182733f12bf90dfbc74a5c96f60cc6f5eb0
31,267
import hashlib


def generate_mutex_name(target_name, prefix=""):
    """Derive a filesystem-safe mutex name from *target_name*.

    A mutex name must be a valid filesystem path, so the original name
    is hashed in case it would contain conflicting characters.
    """
    if not isinstance(target_name, bytes):
        target_name = target_name.encode("utf-8")
    digest = hashlib.sha224(target_name).hexdigest()
    return prefix + digest[:16]
e36f03fc03ff29535bc95adef3c4425efcf7bd3e
31,268
import os
import subprocess


def run_cmd(cmd, file_out, file_err):
    """Run *cmd*, redirecting stdout/stderr to files; return its exit code.

    Parent directories for both output files are created on demand.

    :param cmd: command passed straight to ``subprocess.Popen``
    :param file_out: path receiving the command's stdout
    :param file_err: path receiving the command's stderr
    :return: process exit code
    """
    for path in (file_out, file_err):
        parent = os.path.abspath(os.path.join(path, os.pardir))
        os.makedirs(parent, exist_ok=True)
    with open(file_out, "w") as fout, open(file_err, "w") as ferr:
        proc = subprocess.Popen(
            cmd, universal_newlines=True, stdout=fout, stderr=ferr)
        return proc.wait()
da3c88ed7d385a88f4e012e77ea5c073b5eedd26
31,269
import requests


def http_get(domain: str, path: str):
    """Fetch the resource at ``domain + path`` and return its body text.

    :raises ValueError: if *domain* does not start with ``http://`` or
        ``https://``
    """
    if not domain.startswith(("http://", "https://")):
        raise ValueError("Domain protocol not recognized (eg http://)")
    print("Getting url %s%s" % (domain, path))
    response = requests.get(domain + path)
    return response.text
4d762eb192ef6721d9c930b2df76c13364c18de9
31,270
import operator


def get_out_operands(instruction_form):
    """Return a tuple flagging, per operand, whether this form writes it."""
    is_output = operator.attrgetter("is_output")
    return tuple(is_output(operand) for operand in instruction_form.operands)
df85c3021268d820f1c7ad0b820d343ae4041a82
31,271
def partition(arr, first, last):
    """Partition step for quicksort.

    Rearranges ``arr[first:last + 1]`` in place around the pivot
    ``arr[first]`` and returns the pivot's final index.
    """
    pivot = arr[first]
    lo = first + 1
    hi = last
    while True:
        # Advance past elements already on the correct side.
        while lo <= hi and arr[lo] <= pivot:
            lo += 1
        while arr[hi] >= pivot and hi >= lo:
            hi -= 1
        if hi < lo:
            break
        arr[lo], arr[hi] = arr[hi], arr[lo]
    # Drop the pivot into its final slot.
    arr[first], arr[hi] = arr[hi], arr[first]
    return hi
2e52e347c7b1ce41f9aa0823f492a0e7637ba67e
31,273
import re


def split_list(separator, mylist, n=0):
    """Split every matching item of *mylist* on the *separator* regex.

    Fragments are stripped of surrounding whitespace and empty fragments
    are discarded.  Items that do not contain *separator* at all are
    dropped (original behavior, preserved).

    :param separator: regex pattern to split on
    :param mylist: iterable of strings
    :param n: maximum number of splits per item (0 means unlimited)
    :return: flat list of cleaned fragments
    """
    result = []  # renamed from `list`, which shadowed the builtin
    for item in mylist:
        if re.search(separator, item) is None:
            continue
        maxsplit = int(n) if n > 0 else 0
        for fragment in re.split(separator, item, maxsplit):
            fragment = fragment.strip()
            if fragment:
                result.append(fragment)
    return result
50ecd0ac7993097314655fab36ce0625a4fa0920
31,274
import pickle


def load_nodes(path):
    """Load the node dict from a pickle file, resetting volatile state.

    Each node's ``online``, ``index`` and ``clientcount`` fields are
    cleared so stale values from the previous run do not leak through.

    :param path: pickle storage file written by a previous run
    :return: mapping of node id to node object
    """
    with open(path, 'rb') as storage:
        nodes = pickle.load(storage)
    for node in nodes.values():
        node.online = False
        node.index = None
        node.clientcount = 0
    return nodes
9f444f6f1a010d2822045ee359fa8bc109576bc9
31,275
def generate_places():
    """Return the fixed map of place names to (x, y) coordinates.

    All places live on a positive x/y axis, so the distance from a child
    at (x, y) to a place at (x1, y1) is directly computable.

    :return: dict of place name -> [x, y]
    """
    return {
        'park_b': [0.8, 0.7],
        'park_a': [0.6, 1.0],
        'leisure_facility': [0.7, 0.3],
        'museum': [0.3, 0.3],
        'library': [0.2, 0.2],
        'zoo': [0.4, 0.7],
    }
b6e8fde4679099aa0e60e5c6c27d4446250a3c78
31,276
def get_all_countries(cache):
    """Collect the sorted union of all countries present in *cache*.

    :param cache: mapping of tile -> attribute dict with a 'countries' key
    :return: alphabetically sorted list of country names
    """
    countries = set()
    # .values() replaces .iteritems(), which does not exist on Python 3;
    # the tile key was never used anyway.
    for attribs in cache.values():
        countries.update(attribs['countries'])
    return sorted(countries)
2ab6448ab4c75bfd47b0dc6500a7e5134997995e
31,277
from datetime import datetime
from datetime import timezone


def format_absolute_datetime(date_time: datetime) -> str:
    """Generate a human-readable absolute time string.

    The date part appears only when *date_time* falls on a different day
    than now (UTC); seconds appear only when non-zero.
    """
    # stdlib timezone.utc replaces the former pytz.utc dependency —
    # both denote the same fixed UTC offset.
    now = datetime.now(tz=timezone.utc)
    time_part = date_time.time()
    if date_time.date() != now.date():
        format_str = "%Y-%m-%d"
        # Append the time only when it is not exactly midnight.
        if time_part.hour or time_part.minute or time_part.second:
            format_str += " %H:%M:%S" if time_part.second else " %H:%M"
    else:
        format_str = "%H:%M:%S" if time_part.second else "%H:%M"
    return date_time.strftime(format_str)
8d9ead27c415225d211fe45195a6c2df5e3e17d1
31,278
def sum_elements(elems):
    """Sum numbers stored as a sequence of overlapping two-digit pairs.

    Adjacent elements (a, b) form the number ``a * 100 + b``.  The sum
    is returned only when all formed numbers are distinct; otherwise 0.
    """
    nums = [a * 100 + b for a, b in zip(elems, elems[1:])]
    return sum(nums) if len(set(nums)) == len(nums) else 0
7116b16c94e2145794a0ea092404bd938349b1eb
31,279
def insertion_sort(l):
    """Sort *l* in place with insertion sort.

    @param {list} l - the list to sort
    @return {tuple(list, number)} - Tuple(sorted list, number of iterations)
    """
    sweeps = 0
    for i in range(1, len(l)):
        sweeps += 1
        current_val = l[i]
        j = i - 1
        # Shift larger elements right until current_val's slot opens up.
        while j >= 0 and l[j] > current_val:
            l[j + 1] = l[j]
            j -= 1
        l[j + 1] = current_val
    # Bug fix: previously returned a hard-coded 0 instead of the
    # iteration count promised by the docstring.
    return (l, sweeps)
c8c0bf258c012561096e06d7a1366dd8556b870b
31,281
import gzip


def gzip_file(file, file_name):
    """Compress *file*'s contents into ``file_name + '.gz'``.

    :param file file: open file object whose lines are written out
    :param str file_name: base name for the gzipped output
    :return: the gzip file object (already closed by the ``with`` block)
    :rtype: gzip file
    """
    target = file_name + '.gz'
    with gzip.open(target, 'wb') as gzipped_file:
        gzipped_file.writelines(file)
    return gzipped_file
d1569b3ef8ebed46eb7a3fd128479d3999fb228c
31,282
import csv


def computeCoverage(filename):
    """Parses a jacoco.csv file and computes code coverage percentages.

    Classes are split into "tested" (at least one covered instruction /
    branch / line, per metric) and "untested" groups.

    Returns a 6-tuple: instruction, branch and line coverage over all
    classes, then instruction, branch and line coverage over tested
    classes only.  Coverage is the covered fraction in [0, 1].

    Keyword arguments:
    filename - The name, including path, of the jacoco.csv
    """

    def _ratio(covered, missed):
        # Raises ZeroDivisionError on an empty group, as before.
        return covered / (covered + missed)

    # One tally per metric: [missed_tested, missed_untested,
    #                        covered_tested, covered_untested].
    # Column pairs are (missed, covered) for instructions, branches, lines.
    column_pairs = ((3, 4), (5, 6), (7, 8))
    tallies = [[0, 0, 0, 0] for _ in column_pairs]
    with open(filename, newline='') as csvfile:
        for i, row in enumerate(csv.reader(csvfile)):
            if i == 0:
                continue  # skip the header row
            for tally, (missed_col, covered_col) in zip(tallies, column_pairs):
                missed = int(row[missed_col])
                covered = int(row[covered_col])
                if covered > 0:  # some coverage -> "tested" group
                    tally[0] += missed
                    tally[2] += covered
                else:
                    tally[1] += missed
                    tally[3] += covered
    overall = []
    tested_only = []
    for missed_t, missed_u, covered_t, covered_u in tallies:
        overall.append(_ratio(covered_t + covered_u, missed_t + missed_u))
        tested_only.append(_ratio(covered_t, missed_t))
    return (*overall, *tested_only)
6942732b9017e98e35440b6bd6843ae6ab48c9df
31,286
import json


def to_review_json(s):
    """Wrap a review comment string as a JSON object.

    :param s: A string
    :return: JSON of ``{"reviewComment": s}``, or None for empty input
    """
    if not s:
        return None
    return json.dumps({"reviewComment": s})
384f8e4f8d625081fa858e184ae7a559f590a420
31,288
def invertMove(move):
    """--> inverted move. (0 becomes 1; anything else becomes 0)."""
    return 1 if move == 0 else 0
5a382f17a4be441287ffc8f899ab57acd0c1ce27
31,289
def get_my_profile(username):
    """Return the person-card data for *username* according to role.

    Not implemented yet; always returns None.
    """
    return None
52666ea4dc0d89f8c200eaa3b023f53f522309cc
31,290
from typing import Callable


def identity(func: Callable) -> Callable:
    """Return *func* unchanged.

    Used in place of the CSRF dispatch decorators when
    ``settings.CHECK_CSRF`` is ``False``: decorating with this function
    leaves the wrapped view method exactly as it was.

    Arguments
    ---------
    func: Callable
        Function to return directly.  Usually a `post` method defined
        in a view.

    Returns
    -------
    func: Callable
        The same callable, untouched.
    """
    return func
7a7966879a82575512f14662a8c3fa22ffd8c3bc
31,291
def INSPUR_HTTP_CONTENT_CHECK(content):
    """Check an Inspur BMC login response body.

    Returns 0 when the session-cookie marker is present and the combined
    login-failure marker is absent; otherwise -1.
    """
    has_cookie = "SESSIN_COOKIE" in content
    has_failure = (
        "Failure_Login_IPMI_Then_LDAP_then_Active_Directory_Radius" in content
    )
    return 0 if has_cookie and not has_failure else -1
85e56bfcc0226306d1666ad4fc6fd69ae1713501
31,292
def _build_index_vcf_command_str(bgzipped_vcf):
    """Generate the shell command string that tabix-indexes a bgzipped VCF."""
    return "tabix -p vcf " + bgzipped_vcf
dbc1a370b2d97ffa5726d625135a4b484f41b5c6
31,293
def is_from(category, symbol):
    """Checks if the symbol is from the category given.

    Args:
        category (dict): The dictionary of the category to check.
        symbol (str): The symbol or word given to analyze.

    Returns:
        bool: Whether the symbol is part of the category given.
    """
    # Narrowed from a bare `except:`: KeyError covers a missing symbol,
    # TypeError covers unhashable symbols — the two failure modes the
    # lookup can actually produce.
    try:
        category[symbol]
        return True
    except (KeyError, TypeError):
        return False
69a2e9905a86d149aac6aff215e5f07542b3f8ab
31,295
def format_list_of_seq(list_of_seq):
    """Normalize sequences in place: strip whitespace/newlines, uppercase.

    :param list_of_seq: list of sequence strings (mutated in place)
    :return: the same list, normalized
    """
    list_of_seq[:] = [seq.strip().upper() for seq in list_of_seq]
    return list_of_seq
9c4252ea1234ca9dc2a1eb98b690202ddb7f4485
31,296
import re


def check_money(msg):
    """Return a regex match if *msg* mentions money-related keywords.

    Checks, in order, for 经济 (economy), 钱 (money), 利润 (profit);
    returns None when none are present.
    """
    match = None
    for keyword in (r'经济', r'钱', r'利润'):
        match = re.search(keyword, msg)
        if match:
            break
    return match
d022df389de57cd2ca9ab89f4a29f790fe7101a5
31,297
import os


def _get_size(filename):
    """Return the file size in bytes."""
    # os.path.getsize is a thin wrapper over exactly this stat field.
    return os.stat(filename).st_size
4baedbe5a412727d19d2a7e61e8c55675959452b
31,301
def varassign(v, X, E, rho, argument):
    """! Substitute *v* for one member of the gate set.

    Parameters
    -------
    v : numpy array
        New set of variables
    X : numpy array
        Current gate estimate
    E : numpy array
        Current POVM estimate
    rho : numpy array
        Current initial state estimate
    argument : {"X", "K", "E", "rho"}
        Which part of the gate set is updated ("K" is an alias for "X")

    Returns
    -------
    [.,.,.]: 3 element list
        [X, E, rho] with the selected member replaced by v
    """
    replacements = {
        "X": [v, E, rho],
        "K": [v, E, rho],
        "E": [X, v, rho],
        "rho": [X, E, v],
    }
    # Unrecognized arguments yield None, as the original fall-through did.
    return replacements.get(argument)
ee71ca488883e12fb036acc31a1ec96f184bc4f0
31,302
def newman_conway(num):
    """Return the first *num* Newman-Conway numbers, space-separated.

    P(1) = P(2) = 1 and P(n) = P(P(n - 1)) + P(n - P(n - 1)).

    Raises:
        ValueError: if num is less than 1.

    Time Complexity: O(n)
    Space Complexity: O(n)
    """
    if num < 1:
        # Previously only num == 0 was rejected; negatives fell through.
        raise ValueError("num must be a positive integer")
    seq = [0, 1, 1]  # seq[0] is a placeholder so seq[n] == P(n)
    for i in range(3, num + 1):
        seq.append(seq[seq[i - 1]] + seq[i - seq[i - 1]])
    # Bug fix: slice to exactly `num` terms — the old code always emitted
    # both seed values, so newman_conway(1) wrongly returned "1 1".
    return ' '.join(str(p) for p in seq[1:num + 1])
77c2f26103a7b2d6ff340e46a8e999fa6d7191a7
31,303
def P_to_a(P, Mstar):
    """Convert orbital period(s) to semimajor axis via Kepler's third law.

    Parameters
    ----------
    P : array-like
        orbital periods [days]
    Mstar : float
        stellar mass [solar masses]

    Returns
    -------
    a : array-like
        semi-major axis [stellar radii]
    """
    EARTH_PERIOD = 365.24  # [days]
    EARTH_SMA = 215.05     # [solar radii]
    return EARTH_SMA * ((P / EARTH_PERIOD) ** 2 * (1 / Mstar)) ** (1 / 3)
0596930a7f84679db0e050974a44ae8dfa437e10
31,304
def html_spam_guard(addr, entities_only=False):
    """Return a spam-protected version of email ADDR that renders the same
    in HTML as the original address.  If ENTITIES_ONLY, use a less
    thorough mangling scheme involving entities only, avoiding the use
    of tags."""
    if entities_only:
        template = "&#%d;"
    else:
        template = "<span>&#%d;</span>"
    return "".join(template % ord(ch) for ch in addr)
239115425ab9a39fdea1a70ba07e648a63e91d2b
31,305
def person_image_file(person):
    """Finds primary person image file name.

    Scans INDI's OBJE records and finds "best" FILE record from those.

    Parameters
    ----------
    person : `ged4py.model.Individual`
        INDI record representation.

    Returns
    -------
    file_name : `str` or ``None``
        String with file name or ``None``.

    Notes
    -----
    OBJE record contains one (in 5.5) or few (in 5.5.1) related
    multimedia files. In 5.5 file contents can be embedded as BLOB record
    though we do not support this. In 5.5.1 file name is stored in a
    record.

    In 5.5.1 OBJE record is supposed to have structure::

        OBJE
          +1 FILE <MULTIMEDIA_FILE_REFN> {1:M}
            +2 FORM <MULTIMEDIA_FORMAT> {1:1}
              +3 MEDI <SOURCE_MEDIA_TYPE> {0:1}
          +1 TITL <DESCRIPTIVE_TITLE> {0:1}
          +1 _PRIM {Y|N} {0:1}

    Some applications which claim to be 5.5.1 version still store OBJE
    record in 5.5-like format::

        OBJE
          +1 FILE <MULTIMEDIA_FILE_REFN> {1:1}
          +1 FORM <MULTIMEDIA_FORMAT> {1:1}
          +1 TITL <DESCRIPTIVE_TITLE> {0:1}
          +1 _PRIM {Y|N} {0:1}

    This method returns the name of the FILE corresponding to _PRIM=Y,
    or if there is no _PRIM record then the first FILE record.
    Potentially we also need to look at MEDI record to only chose image
    type, but I have not seen examples of MEDI use yet, so for now I
    only select FORM which correspond to images.
    """
    # Remember the first acceptable image seen, in case no OBJE is
    # marked primary.
    first = None
    for obje in person.sub_tags('OBJE'):
        # assume by default it is some image format
        objform = obje.sub_tag("FORM")
        objform = objform.value if objform else 'jpg'
        # _PRIM=Y marks the preferred image; absence means not primary.
        primary = obje.sub_tag("_PRIM")
        primary = primary.value == 'Y' if primary is not None else False
        files = obje.sub_tags("FILE")
        for file in files:
            # A per-FILE FORM (5.5.1 layout) overrides the OBJE-level one.
            form = file.sub_tag("FORM")
            form = form.value if form is not None else objform
            if form.lower() in ('jpg', 'gif', 'tif', 'bmp'):
                if primary:
                    # Primary image wins immediately.
                    return file.value
                elif not first:
                    first = file.value
    return first
fb8682f66a938ae14690486880d62c6ec4e6995f
31,307
from typing import Union

import torch


def torch_advantage_estimate(
    signal,
    non_terminal,
    value_estimate,
    *,
    discount_factor: float = 0.95,
    tau: float = 0.97,
    device: Union[str, torch.device] = "cpu",
    normalise: bool = True,
    divide_by_zero_safety: float = 1e-10,
):
    """Compute advantages (and discounted returns) over a rollout.

    A positive advantage means the action yielded a more positive signal
    than expected, so expectations can be adjusted to make it likelier.

    :param signal: per-step reward signal, shape (horizon, workers, ...)
    :param non_terminal: 1.0 while the episode continues, 0.0 at ends
    :param value_estimate: per-step value predictions
    :param discount_factor: reward discount gamma
    :param tau: GAE smoothing factor
    :param device: torch device for intermediate tensors
    :param normalise: standardize the result to zero mean / unit std
    :param divide_by_zero_safety: epsilon added to the std divisor
    :return: advantage tensor shaped like *signal*
    """
    steps, workers, *_ = signal.size()
    advantages = torch.zeros_like(signal, device=device)
    running = torch.zeros(workers, device=device)
    # NOTE(review): the final step (steps - 1) is never written and stays
    # zero — preserved from the original implementation.
    for step in reversed(range(steps - 1)):
        mask = non_terminal[step]
        td_error = (
            signal[step]
            + value_estimate[step + 1] * discount_factor * mask
            - value_estimate[step]
        )
        running = running * discount_factor * tau * mask + td_error
        advantages[step] = running
    if normalise:
        advantages = (advantages - advantages.mean()) / (
            advantages.std() + divide_by_zero_safety
        )
    return advantages
ca538453364afa73f132c3e0b196782dd9747717
31,308
def update_sheet(service, sheet_id, range_string, body):
    """Fires a spreadsheet update request through the Google API client."""
    values_api = service.spreadsheets().values()
    request = values_api.update(
        spreadsheetId=sheet_id,
        range=range_string,
        body=body,
        valueInputOption='RAW',
    )
    return request.execute()
2203b01e430afdf48e1bbac5a33f00ba23ff58a9
31,309
import pandas


def combine_wq(wq, external, external_site_col):
    """
    Combine CVC water quality data with the `tidy` attribute of a
    `bmpdb` or `nsqd` object.

    wq : pandas.DataFrame
        A dataframe of CVC water quality data
    external : nsqd or bmpdb object
    external_site_col : str
        The column in `external.datacollection.tidy` analogous to "site"
        in the CVC data (e.g., ED-1, LV-2, ...)

    Returns
    -------
    tidy : pandas.DataFrame
    """
    final_cols = ['parameter', 'units', 'site', 'concentration']
    renames = {external_site_col: 'site', 'ros_res': 'concentration'}
    exttidy = external.datacollection.tidy.rename(columns=renames)[final_cols]
    return pandas.concat([wq[final_cols], exttidy])
3fdcd754e257d61ec40a4cd15b55f52a5ad1a2be
31,310
def entity(reference):
    """Return a numeric (&#reference;) or symbolic: (&reference;) entity,
    depending on the reference's type
    """
    try:
        # Integer code points take the numeric form; the :d format code
        # raises ValueError for anything that is not an integer.
        return '&#{0:d};'.format(reference)
    except ValueError:
        return '&{0};'.format(reference)
1b547a9506badd9fc3ddd575c3e67acebe6af539
31,312
import torch


def aggregate(d_p, crit_buf, func, kappa=1.0):
    """
    Combine the current gradient with the critical-gradient buffer.

    :param d_p: Current-iteration gradient
    :param crit_buf: Buffer of Critical Gradients
    :param func: aggregation name: "sum", "mid", "mean", "min", "median",
        "max", "min-mean", "median-mean" or "max-mean"
    :param kappa: Multiplicative factor for CG buffer
    :return: Aggregated total gradient
    :raises ValueError: for an unknown aggregation name
    """
    # Which buffer accessor each aggregation uses.
    getter_names = {
        "sum": "gradMean",
        "mid": "gradMean",
        "mean": "gradSum",
        "min": "getMin",
        "median": "getMedian",
        "max": "getMax",
        "min-mean": "getMin",
        "median-mean": "getMedian",
        "max-mean": "getMax",
    }
    if func not in getter_names:
        raise ValueError("Invalid aggregation function")
    buffered = getattr(crit_buf, getter_names[func])()
    buffered.mul_(kappa)  # scale in place, as the original did
    combined = torch.add(d_p, buffered)
    if func == "mean":
        # Average over all buffered gradients plus the current one.
        return torch.div(combined, crit_buf.size() + 1)
    if func in ("mid", "min-mean", "median-mean", "max-mean"):
        return torch.mul(combined, 0.5)
    return combined
96d3a22724cf866079c90d94bc668910456abe1e
31,313
import time


def retry(function, exception, max_attempts=5, interval_seconds=5):
    """Call *function* until it succeeds, up to *max_attempts* times.

    Sleeps *interval_seconds* between attempts.  After the final failed
    attempt an AssertionError describing the failure is raised (original
    behavior, preserved).
    """
    last_attempt = max_attempts - 1
    for attempt in range(max_attempts):
        try:
            return function()
        except exception as err:
            assert attempt < last_attempt, (
                "%s failed after %d attempts!\n%s"
                % (function, max_attempts, str(err))
            )
            time.sleep(interval_seconds)
5d33e2d2909a8b2a377e2b5127f52f5f2c23eabd
31,314
def obstacle_generator(obstacle_map):
    """Generates a grid map with obstacles for testing.

    Eight 21x21x21 cubic obstacles are placed at the corners of an inner
    cube, spanning indices 20-40 and 60-80 on each axis.

    Args:
        obstacle_map: Numpy array of zeros of shape atleast 100x100x100

    Returns:
        Numpy array with obstacles marked as 1
    """
    # Bug fix: the cells were previously assigned 0 inside an all-zero
    # map — a no-op.  Obstacles are now marked with 1 as documented.
    spans = (slice(20, 41), slice(60, 81))
    for xs in spans:
        for ys in spans:
            for zs in spans:
                obstacle_map[xs, ys, zs] = 1
    return obstacle_map
8d9a8cbf0a61fe3918d18ee575a9fad9e464e1a8
31,315
import pandas


def getDataFrame(arr):
    """Wrap a wide 2D matrix in a DataFrame for readable printing.

    :param arr: 2D array
    """
    # Widen pandas' display so wide matrices are not truncated.
    pandas.set_option('display.width', 500)
    return pandas.DataFrame(arr)
9d9015de07330044da4f37234a6044f1051a0e5a
31,316
def remove_added_loadgen(net_t, loadorgen):
    """Drop the temporary load or sgen named 'Cap test' from the net.

    INPUT
        net_t (PP net) - Pandapower net
        loadorgen (str) - 'sgen' or 'load', selecting which table holds
            the additional capacity element
    OUTPUT
        net_t (PP net) - Updated Pandapower net
    """
    if loadorgen in ("load", "sgen"):
        table = getattr(net_t, loadorgen)
        cleaned = table.drop(table[table.name == 'Cap test'].index)
        setattr(net_t, loadorgen, cleaned)
    return net_t
73ccf198c096454fef4676a18355c6359a076b2c
31,317
def Echovaluate(operation):
    """
    Evaluate an expression
    """
    # SECURITY WARNING: eval() executes arbitrary Python code. Never call
    # this with untrusted input; prefer ast.literal_eval for plain
    # literal expressions.
    return eval(operation)
52cc43c31997f410f409c4d5f0ca40ce7c90d492
31,319
def get_b(pvalue, siglevel):
    """Bonferroni correction.

    Sorts the records by raw p-value (last element), multiplies each by
    the number of tests, and appends the corrected p-value plus a
    'yes'/'no' significance verdict at *siglevel* to every record.
    """
    pvalue = sorted(pvalue, key=lambda rec: rec[-1])
    n_tests = len(pvalue)
    for record in pvalue:
        corrected = float(record[-1]) * n_tests
        record.append(corrected)
        record.append('yes' if corrected <= siglevel else 'no')
    return pvalue
a8ad34eeb0a93cc1165aa87c160738cdbf04ed39
31,321
from typing import Optional


def _term_converter(term: Optional[int]) -> Optional[int]:
    """
    Converter function for ``term`` in :class:`~.BaseProduct`.

    :param term: The number of months that a product lasts for if it is
        fixed length; ``None`` passes through unchanged.
    """
    return None if term is None else int(term)
65277a31fe4666d410c9aa46a8b01fe59dbcb4fc
31,322
def millerORweber(ITN):
    """Determine which kind of coordinates to use.

    Returns "m" (rest) when ITN falls outside 143-194, otherwise "w"
    (hP, hR).
    """
    if 143 <= ITN <= 194:
        return "w"  # hP, hR
    return "m"  # rest
cf8001cd61f98dddff790bc2e628d3ced267590a
31,323
def generate_block_data(data, index, blocktype):
    """Generate the largest possible block starting at *index* in *data*.

    Block layout: 0x3c marker, blocktype byte, payload length (max 255),
    payload bytes, then a checksum over everything after the marker.

    Returns (output, new_index) where output is the generated block and
    new_index points to where the next block in data would be extracted.
    """
    payload_len = min(len(data) - index, 0xff)
    block = bytearray(b'\x3c')
    block.append(blocktype)
    block.append(payload_len)
    block += data[index:index + payload_len]
    # Checksum covers every byte after the 0x3c marker.
    block.append(sum(block[1:]) & 0xff)
    return (block, index + payload_len)
ddf6af222b22be7e1c73ee98c3ebd982ea23fe5b
31,325
def sort_tasks(tasks):
    """Order tasks by their ``_task_priority`` attribute, ascending.

    Args:
        tasks (List[function]): tasks carrying a ``_task_priority``

    Returns:
        List[function]: new list sorted by priority
    """
    def priority_of(task):
        return task._task_priority

    return sorted(tasks, key=priority_of)
487d60c6508df3346852d93b424cad7a50589c7b
31,327
import json


def to_json_fragment(Entries, stripdepth=1):
    """Return a json fragment representing the provided objects.

    Serializes with indent=4 and strips *stripdepth* characters from
    each end (by default the outermost braces/brackets).
    """
    rendered = json.dumps(Entries, indent=4)
    # Two-step trim preserves the original stripdepth=0 quirk
    # ([:-0] == empty string).
    trimmed = rendered[stripdepth:]
    return trimmed[:-stripdepth]
0b5cfe4ce9cec7899b0b14612e7898c29d76f862
31,328
from warnings import warn


def pc2in(picas):
    """
    Converts picas to inches

    Parameters
    ----------
    picas : int or float
        dimension in picas

    Returns
    -------
    inches : float
        dimensions in inches
    """
    STANDARD_AMS_WIDTHS = (19, 27, 33, 39, 68)
    if picas not in STANDARD_AMS_WIDTHS:
        warn("Not a standard AMS width")
    # 6 picas per inch.
    return picas / 6.0
4fee36743e00b263f0f46fca90bf07edca083002
31,329
def validate_reading(data):
    """Return True unless the sensor reported the literal "Disabled"."""
    return data != 'Disabled'
4ce007332c000d679d2c067009e901f02d38a407
31,331
def support_count(itemset, transactions):
    """
    Count support count for itemset

    :param itemset: items to measure support count for
    :param transactions: list of sets (all transactions)

    >>> simple_transactions = ['ABC', 'BC', 'BD', 'D']
    >>> [support_count(item, simple_transactions) for item in 'ABCDE']
    [1, 3, 2, 2, 0]

    >>> some_transactions = [set(['beer', 'bread', 'milk']), set(['beer']), set(['milk'])]
    >>> support_count(set(['beer']), some_transactions)
    2
    """
    wanted = set(itemset)
    return sum(1 for row in transactions if wanted <= set(row))
2d533a81a646b1973980386c9b85998d8fb65be0
31,332
from typing import Counter


def total_useful_clusters(model):
    """A useful cluster here is defined as being a cluster with > 1 member."""
    member_counts = Counter(model.labels_).values()
    return sum(1 for count in member_counts if count > 1)
93df3354c37f5f252f2071b50b7937c61ce81946
31,333
def hours(time_str):
    """
    Get hours from time.

    time_str: str (hh:mm:ss)
    """
    h, m, s = map(int, time_str.split(':'))
    return h + m / 60.0 + s / 3600.0
c5f95ac4bed1198eba616f959595d6ce7a9c79fd
31,335
def update_params(paramDict, keys, indices, values):
    """paramDict: gets modified and returned.
    keys: indicate which items in paramDict to be modified.
    indices: parallel to keys; indicate which element in paramDict[key] to be modified.
    values: parallel to above; indicate new value to substitute."""
    assert len(keys) == len(indices) and len(keys) == len(values)
    for key, idx, val in zip(keys, indices, values):
        paramDict[key][idx] = val
        # A 'split' key also shifts the matching bottleneck start and the
        # final Ne epoch to one step before the new split time.
        if 'split' in key:
            paramDict[('bn_times', key[1])][0] = val - 1
            paramDict[('Ne_times', key[1])][-1] = val - 1
    return paramDict
d988f090dcaad9232c6a2af334fbdacc8a0cd412
31,336
from typing import Mapping
from typing import Dict
from typing import List


def convert_mime_db(mime_db: Mapping) -> Dict[str, List[str]]:
    """Convert mime-db value 'extensions' to become a key and origin mimetype
    key to dict value as item of list.
    """
    converted_db: Dict[str, List[str]] = {}
    for mimetype, opts in mime_db.items():
        extensions = opts.get('extensions')
        if extensions is None:
            # Entries without an extension list contribute nothing.
            continue
        for extension in extensions:
            converted_db.setdefault(extension.lower(), []).append(mimetype.lower())
    return converted_db
65a6332e0463f32be1b27b39b44f95917995118d
31,338
def get_vector(tensor):
    """
    - input: torch.tensor(n_time_steps, n_samples, n_features)
    - output: np.ndarray(n_time_steps * n_samples, n_features)
      where: output[0:n_samples, :] = input[0, 0:n_samples, :],
             output[n_samples:2*n_samples, :] = input[1, 0:n_samples, :], ...
    """
    # Tensors with fewer than three dims are flattened into a single
    # feature column.
    if len(tensor.shape) > 2:
        n_features = tensor.size(2)
    else:
        n_features = 1
    return tensor.reshape((-1, n_features)).numpy()
89da4391bbd8e99212f7bebb0f93a511e1712ff5
31,339
import redis # lazy import so that redis need not be available import sys import subprocess import os import time def redis_connect(host="localhost", port=6379, maxmemory=4.0, **kwargs): """ Open a redis connection. If host is localhost, then try starting the redis server. If redis is unavailable, then return a simple dict cache. """ # ensure redis is running, at least if we are not on a windows box try: cache = redis.Redis(host=host, port=port, **kwargs) # first, check to see if it is already running: cache.ping() except redis.exceptions.ConnectionError: # if it's not running, and this is a platform on which we can start it: if host == "localhost" and not sys.platform == 'win32': subprocess.Popen(["redis-server"], stdout=open(os.devnull, "w"), stderr=subprocess.STDOUT) time.sleep(10) cache = redis.Redis(host=host, port=port, **kwargs) cache.ping() else: raise # set the memory settings for already-running Redis: cache.config_set("maxmemory", "%d" % (int(maxmemory*2**30),)) cache.config_set("maxmemory-policy", "allkeys-lru") return cache
25a7aa5585fe1698391c1ecd7735825d622f4191
31,340
import re


def get_nom_val(atrv):
    """Given a string containing a nominal type, returns a tuple of the
    possible values.

    A nominal type is defined as something framed between braces ({}).

    Parameters
    ----------
    atrv : str
        Nominal type definition

    Returns
    -------
    poss_vals : tuple
        possible values

    Examples
    --------
    >>> get_nom_val("{floup, bouga, fl, ratata}")
    ('floup', 'bouga', 'fl', 'ratata')
    """
    match = re.compile('{(.+)}').match(atrv)
    if match is None:
        raise ValueError("This does not look like a nominal string")
    # Values are comma-separated inside the braces; surrounding spaces are
    # not significant.
    return tuple(value.strip() for value in match.group(1).split(','))
4b059ff48779ce631ed95c4bc9471eafdf617ecb
31,341
def to_array(an_object):
    """Convert an iterator (e.g. a csv reader) to a list of its rows.

    :param an_object: any iterable
    :return: list containing the items of ``an_object`` in order
    """
    # list() materializes any iterable directly; no need for a manual
    # append loop.
    return list(an_object)
6bd242eb4d33f8aacb50df7434a0452af8d8c33f
31,342
import re
import string
from collections import Counter


def count_words(phrase):
    """
    Returns a dict with count of each word in a phrase
    keys are the words and values the count of occurrence.
    """
    # Words are runs of digits/letters/apostrophes; any punctuation the
    # apostrophe class let through is stripped before counting.
    words = re.findall(r"[0-9a-zA-Z\']+", phrase.lower())
    return Counter(word.strip(string.punctuation) for word in words)
b8b7abaa7330906335ed38c59fdc58a6b47dc48b
31,344
def IsTrivialAttrSpec(attr):
    """Determines whether a given attr only has its name field set.

    Args:
      attr: an AttrSpec instance.
    Returns:
      true iff the only field that is set is the name field.
    """
    if attr.DESCRIPTOR.full_name != 'amp.validator.AttrSpec':
        return False
    # Exactly one populated field, and that field must be 'name'.
    return attr.HasField('name') and len(attr.ListFields()) == 1
1ff22c6f7cb5d6457430a8d4220fb6caa798cc1f
31,349
def _generate_mark_list(data_list): """Generate a mark list to filter a specific operation :param data_list: :return: """ mark_list = [] name_set = {'q3007', 'q2994', 'q3006', 'q2972', 'q3003', 'q2963', 'q2989', 'q2901', 'q2994', 'q3001'} for d in data_list: if d.name[:5] in name_set: mark_list.append(True) else: mark_list.append(False) return mark_list
fba3335cdfc882845414adbd91617e10942bd4f1
31,350
import traceback


def format_traceback_the_way_python_does(type, exc, tb):
    """
    Returns a traceback that looks like the one python gives you in the shell, e.g.

    Traceback (most recent call last):
      File "<stdin>", line 2, in <module>
    NameError: name 'name' is not defined
    """
    frames = ''.join(traceback.format_tb(tb))
    header = 'Traceback (most recent call last):\n'
    return f'{header}{frames}{type.__name__}: {exc}'
e11783b2215bcd22cd08f7e8657d6ff7a5fc7f35
31,352
def _get_xml_declaration(version='1.0', encoding='UTF-8'): """Gets XML declaration (for the specified version and encoding). :param version: XML version :param encoding: encoding :return: XML declaration :rtype: str """ return '<?xml version="' + version + '" encoding="' + encoding + '"?>'
2f9125ca02624cd9c74a80fe9668a6801220f898
31,353
def get_study_collections(studies, irods_backend):
    """Return a list of all study collection names."""
    collections = []
    for study in studies:
        collections.append(irods_backend.get_path(study))
    return collections
53ff6851d12612467d045604c676ea7ed6b0081c
31,354
def _set_stats(context, key): """ Users select stats to run from config field in manifest.json. All string options are handled by the "function_options" field. Other types (with input values) are handled by this method. Args: key (str): Statistic requested, which is not handled by the "function_options" field Returns: list of values (option and value) to be appended to the fslstats command """ custom_tag_dict = { "Mean intensity": "-m", "Mean intensity (nonzero)": '-M', "Stdev": "-s", "Stdev (nonzero)": "-S", "Upper threshold": "-u", "Lower threshold": "-l", "Percentile": "-p", "Percentile (nonzero)": "-P", "nbins for histogram": "-h", "Windowed nbins for histogram": "-H", "Robust min/max": "-r", "Min/max" : "-R", "Entropy": "-e", "Entropy (nonzero)": "-E", "Volume": '-v', "Volume (nonzero)": '-V', "ROI stats": "-w", "Max voxel coords": "-x", "Min voxel coords": "-X", "Center of gravity (mm)": "-c", "Center of gravity (voxels)": "-C", "Absolute values?": "-a", "NaN/Inf as zero?": "-n" } return [custom_tag_dict[key]]
c4f7ba90172f5ad85e41ac15d8b948bc1e5170d9
31,356
def discrete(items):
    """Sweeps over discrete variable.

    :param items: the candidate values for the variable; each element is
        one sweep point
    :return: ``items`` unchanged (identity sweep over an explicit list)
    """
    return items
7fc0a6393bc404ee0dfa3e72953a6a13af6a15d6
31,357
import logging


def get_logger(logger_name: str) -> logging.Logger:
    """Return the logger with the name specified by logger_name arg.

    Args:
        logger_name: The name of logger.

    Returns:
        Logger reformatted for this package.
    """
    logger = logging.getLogger(logger_name)
    logger.propagate = False
    # logging.getLogger returns a shared instance per name, so guard
    # against attaching a duplicate StreamHandler (and thus duplicated
    # output) when this function is called more than once.
    if not logger.handlers:
        ch = logging.StreamHandler()
        ch.setFormatter(logging.Formatter("[%(name)s] [%(levelname)s] %(message)s"))
        logger.addHandler(ch)
    return logger
505f6c89c46dd95c86a5b9193389b64142c31d81
31,358
import math


def wien(x):
    """
    Wien's displacement constant is defined by b = h * c / (k_B * x),
    where x is the root of this nonlinear equation.
    """
    # Same operation order as the canonical form 5*exp(-x) + x - 5, so the
    # floating-point result is bit-identical.
    decay_term = 5 * math.exp(-x)
    return decay_term + x - 5
6db7749651dd72fbaee1971bc940b59944a52db3
31,362
def asfolder(folder):
    """
    Add "/" at the end of the folder if not inserted

    :param folder: the folder name
    :type folder: str
    :return: folder name with / at the end
    :rtype: str
    """
    # str.endswith also handles the empty string, where the previous
    # folder[-1] check raised IndexError.
    if folder.endswith("/"):
        return folder
    return folder + "/"
07340a8c9b21bcc1d7be210bc7f9fa80378cb3a8
31,364
def tieBreak(sets, l, i, j, distM):
    """
    Tie-breaking operation between two competing sets for inclusion of new image

    Input: {sets, l, i, j, distM}: {sets = collection of all different sets
        (clusters) as a list, l = current images to be assigned clusters,
        i,j = index of clusters competing for the new image, distM = see
        getDistM}
    Output: {sets}: Return sets by filling in tie-breaks in place
    """
    # First rule: whichever cluster already overlaps l more wins ALL of l;
    # the loser gives up any members of l it held.
    if len(sets[i].intersection(l)) > len(sets[j].intersection(l)):
        sets[i] = sets[i].union(l)
        sets[j] = sets[j].difference(l)
    elif len(sets[i].intersection(l)) < len(sets[j].intersection(l)):
        sets[j] = sets[j].union(l)
        sets[i] = sets[i].difference(l)
    elif len(sets[i].intersection(l)) == len(sets[j].intersection(l)):
        # Tie break based on average distance
        temp_l = list(l)
        # Compare against the clusters' members EXCLUDING the contested l.
        temp_i = list(sets[i].difference(l))
        temp_j = list(sets[j].difference(l))
        for a in temp_l:
            # Mean distM value from element a to each competing cluster.
            d = 0
            for b in temp_i:
                d += distM[a][b]
            x = d/len(temp_i)
            d = 0
            for c in temp_j:
                d += distM[a][c]
            y = d/len(temp_j)
            # l is deliberately rebound to the singleton {a} so the set
            # arithmetic below moves just this one element.
            l = set()
            l.add(a)
            # NOTE(review): the element goes to the cluster with the LARGER
            # mean distM value (x > y sends it to i) — this reads inverted
            # for a dissimilarity matrix; confirm whether distM from
            # getDistM is a similarity measure.
            if x > y:
                sets[i] = sets[i].union(l)
                sets[j] = sets[j].difference(l)
            else:
                sets[j] = sets[j].union(l)
                sets[i] = sets[i].difference(l)
    return sets
2957858a97201ca12029e9a4c69b9300d441f2a6
31,365
import torch


def split(value, num_or_size_splits, axis=0):
    """
    Splits a tensor into sub tensors.

    Parameters
    ----------
    value : tensor
        The Tensor to split.
    num_or_size_splits : int or list
        Either an integer giving the chunk size along ``axis``, or a list
        of sizes for each output tensor along ``axis``.
    axis : int
        The dimension along which to split. Must be in the range
        [-rank(value), rank(value)). Defaults to 0.

    Returns
    -------
    Tuple of tensor objects resulting from splitting ``value``.
    """
    # Thin adapter: translate the TF-style 'axis' keyword to torch's 'dim'.
    chunks = torch.split(value, num_or_size_splits, dim=axis)
    return chunks
b997285da46db1e20ca92916f46ebc51b8840786
31,366
def get_indicators():
    """Get recommendations from db.

    Returns:
        Nested dict of indicator categories -> indicators, each with a
        display ``name`` and a ``values`` map of score -> label.
    """
    return {
        "appearance": {
            "grammar": {
                "name": "Grammar",
                "values": {
                    "0": {"label": "Poor"},
                    "1": {"label": "Decent"},
                    "2": {"label": "Proper"},
                },
            },
            "link_domain": {
                "name": "Link/Domain",
                "values": {
                    "0": {"label": "Unrelated"},
                    "1": {"label": "Related/Hidden/Spoofed"},
                },
            },
            "logo_graphics": {
                "name": "Logo/Graphics",
                "values": {
                    "0": {"label": "Plain Text"},
                    "1": {"label": "Visual Appeal"},
                },
            },
        },
        "behavior": {
            "curiosity": {"name": "Curiosity", "values": {"1": {"label": "Yes"}}},
            "duty_obligation": {
                "name": "Duty or Obligation",
                "values": {"1": {"label": "Yes"}},
            },
            "fear": {"name": "Fear", "values": {"1": {"label": "Yes"}}},
            "greed": {"name": "Greed", "values": {"1": {"label": "Yes"}}},
        },
        "relevancy": {
            "organization": {
                "name": "Organization",
                "values": {"0": {"label": "No"}, "1": {"label": "Yes"}},
            },
            "public_news": {
                "name": "Public News",
                "values": {"0": {"label": "No"}, "1": {"label": "Yes"}},
            },
        },
        "sender": {
            "authoritative": {
                "name": "Authoritative",
                "values": {
                    "0": {"label": "None"},
                    "1": {"label": "Peer"},
                    "2": {"label": "Superior"},
                },
            },
            "external": {
                "name": "External",
                "values": {
                    # Fixed user-facing typo: "Unpsecified" -> "Unspecified".
                    "0": {"label": "Not External/Unspecified"},
                    "1": {"label": "Specified"},
                },
            },
            "internal": {
                "name": "Internal",
                "values": {
                    "0": {"label": "Not Internal/Unspecified"},
                    "1": {"label": "Generic/Close"},
                    "2": {"label": "Spoofed"},
                },
            },
        },
    }
f2396886191b55c8062b5ecaef42ff7d6d601bca
31,367
import requests


def parse_configdb(configdb_address='http://configdb.lco.gtn/sites/'):
    """
    Parse the contents of the configdb.

    Parameters
    ----------
    configdb_address : str
        URL of the configdb, must be inside LCOGT VPN

    Returns
    -------
    sites : list of dicts
        each site dictionary contains a timezone.
    cameras : list of dicts
        each camera dictionary contains a site, instrument code, and camera type.
    """
    sites, cameras = [], []
    for site in requests.get(configdb_address).json()['results']:
        sites.append({'code': site['code'], 'timezone': site['timezone']})
        # Walk enclosure -> telescope -> instrument to collect the science
        # cameras; instruments without one are skipped.
        for enclosure in site['enclosure_set']:
            for telescope in enclosure['telescope_set']:
                for instrument in telescope['instrument_set']:
                    camera = instrument.get('science_camera')
                    if camera is None:
                        continue
                    cameras.append({'site': site['code'],
                                    'instrument': camera['code'],
                                    'camera_type': camera['camera_type']['code'],
                                    'schedulable': instrument['state'] == 'SCHEDULABLE'})
    return sites, cameras
501f9217901f0db30e2bddfebb0a85fbb266a766
31,371
def fleur_local_code(create_or_fake_local_code, pytestconfig):
    """
    Create or load Fleur code
    """
    # Build (or reuse) a code node wrapping the local fleur binary.
    fleur_code = create_or_fake_local_code(
        'fleur',        # name of the fleur executable
        'local_exe/',   # location where it is found
        'fleur.fleur',  # calculation entry point
    )
    if pytestconfig.getoption('--local-exe-hdf5'):
        fleur_code.description = 'Local executable with HDF5'
    return fleur_code
61c87db29678082746ff85ff3f6d91f1d68ccb85
31,373
def reverseStringv1(a_string):
    """assumes a_string is a string
    returns a string, the reverse of a_string"""
    return ''.join(reversed(a_string))
e4c8ff9a496e54170ba1fdc988f4be9dd2437f34
31,374
from functools import reduce


def chain_maps(*args):
    """Similar to collections.ChainMap but returned map is a separate copy
    (ie. changes to original dicts don't change the dict returned from this
    function)."""
    combined = {}
    # Apply maps last-to-first so that earlier arguments win on key
    # collisions, mirroring ChainMap's lookup order.
    for mapping in reversed(args):
        combined.update(mapping)
    return combined
a91584647e9d3f83f99680d28960db1b64ef923b
31,376
def test_function_arbitrary_arguments(): """Arbitrary Argument Lists""" # When a final formal parameter of the form **name is present, it receives a dictionary # containing all keyword arguments except for those corresponding to a formal parameter. # This may be combined with a formal parameter of the form *name which receives a tuple # containing the positional arguments beyond the formal parameter list. # (*name must occur before **name.) For example, if we define a function like this: def test_function(first_param, *arguments): """This function accepts its arguments through "arguments" tuple amd keywords dictionary.""" assert first_param == 'first param' assert arguments == ('second param', 'third param') test_function('first param', 'second param', 'third param') # Normally, these variadic arguments will be last in the list of formal parameters, because # they scoop up all remaining input arguments that are passed to the function. Any formal # parameters which occur after the *args parameter are ‘keyword-only’ arguments, meaning that # they can only be used as keywords rather than positional arguments. def concat(*args, sep='/'): return sep.join(args) assert concat('earth', 'mars', 'venus') == 'earth/mars/venus' assert concat('earth', 'mars', 'venus', sep='.') == 'earth.mars.venus'
a3729f1d7846a41e45db65382ada9cbad8b877f0
31,377
import numpy


def k_max_index(array, k):
    """Return the indices of the ``k`` largest values, ordered by
    descending value.

    Example: k_max_index([2, 4, 5, 1, 8], 2) -> [4, 2]
    """
    values = numpy.array(array)
    # Never request more elements than exist.
    k = min(k, len(values))
    # argpartition finds the top-k indices in O(n); a final argsort of just
    # those k values puts them in descending-value order.
    top = numpy.argpartition(values, -k)[-k:]
    ordered = top[numpy.argsort(-values[top])]
    return ordered.tolist()
baf4db247a566193a39440fdc0049bd3de1a98bf
31,379
def tatextcleaner(string):
    """Strip IBAN/BIC (and, when present, UCI) details from a transaction
    description, replacing them with ' || '.

    :param string: raw transaction description
    :return: cleaned description; returned unchanged when no IBAN/BIC pair
        is present
    """
    # The original also carried an always-false branch referencing the
    # undefined names `spacer` and `tatext`; that dead code is removed.
    temp = string
    if "IBAN" in string and "BIC" in string:
        # Keep the text before the IBAN, then splice in whatever follows
        # the BIC value (up to the UCI, when one is present).
        temp = string[:string.index("IBAN")] + " || "
        after_bic = string.find("BIC: ") + len("BIC: ")
        if "UCI" in string:
            temp += string[string.index(" ", after_bic):string.find("UCI: ")]
        else:
            try:
                temp += string[string.index(" ", after_bic):]
            except ValueError:
                # BIC value runs to the end of the string: nothing to append.
                pass
    return temp
9f34f698c28e6105f46568c7eae933693c35d892
31,380
import random


def sample_with_replacement(population, k):
    """Sample <k> items from iterable <population> with replacement.

    :param population: iterable to draw from (materialized once)
    :param k: number of independent uniform draws
    :return: list of k items
    """
    # random.choices is the stdlib primitive for uniform sampling with
    # replacement; the previous int(n * random.random()) index arithmetic
    # could, for very large n, round up to n and index out of range.
    return random.choices(list(population), k=k)
4114a49faae9981dc2f850db4465ec1ccdc7101c
31,381
def sin_series(x):
    """Returns sin(x) for x in range -pi/2 .. pi/2"""
    # Taylor series evaluated Horner-style; coeffs[j] == 1/((2j)*(2j+1)).
    # https://en.wikipedia.org/wiki/Sine#Series_definition
    # The operation order matches the canonical loop exactly, so results
    # are bit-identical.
    coeffs = [1., 0.16666666666666666, 0.05, 0.023809523809523808,
              0.013888888888888888, 0.00909090909090909, 0.00641025641025641,
              0.004761904761904762, 0.003676470588235294, 0.0029239766081871343,
              0.002380952380952381, 0.001976284584980237, 0.0016666666666666668,
              0.0014245014245014246, 0.0012315270935960591, 0.001075268817204301,
              0.000946969696969697, 0.0008403361344537816, 0.0007507507507507507,
              0.0006747638326585695]
    x_sq = x ** 2
    acc = 1.
    for idx in range(len(coeffs) - 1, 0, -1):
        acc = 1. - coeffs[idx] * (x_sq * acc)
    return acc * x
d0a2f7aaa0b86494eae8562cc9bf3d0105462606
31,383
def NDVI(spectra):
    """Normalized Difference Vegetation Index"""
    # Rows 550 and 330 are the two bands differenced; presumably these are
    # wavelength-indexed rows of the spectra — confirm against the input
    # format.
    band_high = spectra.iloc[550, ]
    band_low = spectra.iloc[330, ]
    return (band_high - band_low) / (band_high + band_low)
424e1f0a583907743d35c2c5a0426adadfd6895b
31,385
import argparse


def cmdLineParse():
    """
    Command line parser
    """
    arg_parser = argparse.ArgumentParser(
        description="""
        Stage and verify DEM for processing.
        """,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Input product and output location.
    arg_parser.add_argument('-p', '--product', type=str, action='store',
                            help='Input reference RSLC HDF5 product')
    arg_parser.add_argument('-o', '--output', type=str, action='store',
                            default='dem.vrt', dest='outfile',
                            help='Output DEM filepath (VRT format).')
    arg_parser.add_argument('-f', '--path', type=str, action='store',
                            dest='filepath', default='file',
                            help='Filepath to user DEM.')
    # Region-of-interest controls.
    arg_parser.add_argument('-m', '--margin', type=int, action='store',
                            default=5,
                            help='Margin for DEM bounding box (km)')
    arg_parser.add_argument('-b', '--bbox', type=float, action='store',
                            dest='bbox', default=None, nargs='+',
                            help='Spatial bounding box in latitude/longitude (WSEN, decimal degrees)')
    return arg_parser.parse_args()
243881de15733e7432c218c362e7cef33ac61f0e
31,386