content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def string_empty(string):
    """Return True if *string* is None, empty, or whitespace-only.

    :param string: The string to test (may be None).
    :return: bool
    """
    # `string and string.strip()` is already falsy for None, "" and
    # whitespace-only strings, so the original explicit `is None` check
    # was redundant.
    return not (string and string.strip())
fddb1e639a57b29674ce6d0a101e875272731d77
698,628
def memoize(user_function):
    """Memoization decorator for user function of 1+ parameters"""

    class _Cache(dict):
        """Dict subclass that computes missing entries via the wrapped function."""

        def __getitem__(self, *key):
            # All positional arguments are packed into a single tuple key.
            return dict.__getitem__(self, key)

        def __missing__(self, key):
            # Cache miss: compute, store, and return the result.
            value = user_function(*key)
            self[key] = value
            return value

    return _Cache().__getitem__
e8f59f5e4460e332ff88bb235a6d8beb31065de2
698,629
def Dic_Test_Empty_Dic(indic):
    """Check if *indic* is a (possibly nested) empty dictionary.

    Returns True when every value, at every depth, is itself an empty (or
    nested-empty) dict; False as soon as any non-dict leaf is found.

    Fixes two defects in the original:
    * ``indic.keys() != []`` is always True in Python 3 (dict_keys never
      equals a list), so the branch was dead.
    * The loop returned the recursion result of the *first* non-empty
      nested dict, silently ignoring every later key.

    :param indic: value to test; non-dict inputs return False.
    :return: bool
    """
    if not isinstance(indic, dict):
        return False
    # {} vacuously satisfies all(); deeper levels are checked recursively.
    return all(Dic_Test_Empty_Dic(value) for value in indic.values())
360e9190fd6ab23fb1d8b4d948647baed494f417
698,630
import imghdr


def detect_image_type(filename):
    """Detect the format of an image file.

    :param filename: Path to the image file to inspect.
    :return: The detected format name such as ``jpeg``, ``png``, ``gif`` or
        ``tiff``; ``None`` if the type cannot be determined.
    """
    # NOTE(review): imghdr is deprecated since Python 3.11 and removed in
    # 3.13 -- consider migrating to a third-party detector.
    return imghdr.what(filename)
a4701aa60927e730899ed8036d6f2d875aa0524a
698,631
import json


def to_formatted_json(output):
    """Serialize a single record to pretty-printed JSON followed by a newline."""
    formatted = json.dumps(output, indent=4)
    return formatted + "\n"
e437c8b34e510f6a72cc27ccb7b2a14c3f60e09d
698,632
def _permutation_worker(k: int, items: list) -> list: """The kth Johnson-Trotter permutation of all items.""" n = len(items) if n <= 1: return items else: group = k // n item = k % n position = n - item - 1 if group % 2 == 0 else item dummy = _permutation_worker(group, items[0 : (n - 1)]) dummy.insert(position, items[n - 1]) return dummy
c997721d2682a9bf54b42596b093956bc17cf94b
698,633
from typing import Dict def _results_data_row(row: Dict[str, str]): """ Transform the keys of each CSV row. If the CSV headers change, this will make a single place to fix it. """ return { "k": row["Key"], "v": row["Content"], }
97dc7f9842d7e6a564cbe66fd93e3dbaeca08517
698,635
import os


def dpath(path):
    """
    Get the path to a data file, relative to the directory this test lives in.
    """
    here = os.path.dirname(__file__)
    return os.path.join(here, path)
000bb169050820243181b88f221b522c24267361
698,636
def is_odd(self, allow_rescaling_flag=True):
    """
    Returns true iff after rescaling by some appropriate factor, the form
    represents some odd integers.

    For more details, see parity().

    Requires that Q is defined over `ZZ`.

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 2, [1, 0, 1])
        sage: Q.is_odd()
        True
        sage: Q = QuadraticForm(ZZ, 2, [1, 1, 1])
        sage: Q.is_odd()
        False
    """
    # Delegate to parity() and compare against the "odd" label.
    parity_label = self.parity(allow_rescaling_flag)
    return parity_label == "odd"
3e9465a19c2ad91a024b8358d27693573c620d8d
698,637
def _construct_url(resource, action, params=None): """ Constructs rest of API url based on resource, action and parameters. Multiple parameters must be a list. Single parameters must be a single string Internal usage """ final_url = '{}/{}'.format(resource, action) if type(params) == list: final_url += '?' for i in range(len(params)): if i < len(params) - 1: final_url += params[i] + '&' else: final_url += params[i] elif type(params) == str: final_url += '?{}'.format(params) else: pass return final_url
38b85a4400e362887f965c0035b3a2b10e7ecce4
698,638
from typing import List def _split_inputs(inputs: str, named_groups: dict) -> List[str]: """ Identify and split inputs for torch operations. This assumes that no quotes, apostrophes, or braces show up in the operation string. Currently, items in lists that are not within functions will be broken up into separate inputs. :param inputs: String of inputs to a torch operation. :param named_groups: Dictionary of names of grouped inputs. If the name of a group shows up in the inputs list, Map it to the group in this dictionary and treat each member of the group as a separate input. :return: List of strings representing each input to the operation. """ replaced_str = inputs parentheses_depth = 0 bracket_depth = 0 # Step through each character in the line and see if it is a comma which separates inputs. # Keep counters for parentheses, brackets, and braces to only identify commas outside of any such structures. for idx, inp in enumerate(inputs): curr_char = inp # Currently, individual elements in lists are each treated as a separate parameter. See if we need to keep the # lists intact as well (check bracket depth as well) if curr_char == ',' and parentheses_depth == 0: # Replace comma with '$' for easy splitting later. Assumes '$' does not normally show up in the code. replaced_str = replaced_str[:idx] + '$' + replaced_str[idx+1:] elif curr_char == '(': parentheses_depth += 1 elif curr_char == '[': bracket_depth += 1 elif curr_char == ')': parentheses_depth -= 1 elif curr_char == ']': bracket_depth -= 1 # We should never have seen more right parentheses, brackets, or braces than left ones. assert parentheses_depth >= 0 and bracket_depth >= 0 # Split the string using the '$' characters replaced earlier. # Also remove leading and trailing list brackets split = replaced_str.split('$') split = [s.strip('[] ') for s in split] # Items in split may be a named group. If so, replace the item with each item in the group. 
# Call split inputs recursively on the group in case it contains named groups within as well. split_inputs = [] for inp in split: if inp in named_groups.keys(): group_items = _split_inputs(named_groups[inp], named_groups) for item in group_items: split_inputs.append(item) else: split_inputs.append(inp) return split_inputs
5edc3c53fb9b7a19557504335eb04eb15dd3c932
698,639
def get_analysis_file(filename: str) -> str:
    """
    :param filename: The name of the file containing the source code.
    :return: The expected name of the file containing the analysis.
    """
    # Strip the last extension (if any) and append the analysis suffix.
    stem, sep, _ext = filename.rpartition('.')
    if not sep:
        stem = filename
    return stem + '.ll.cm'
6a147b5bf69078dde81fdc72585907ece6c2f461
698,640
from typing import Tuple
from pathlib import Path
from typing import Optional
import argparse


def parse_arguments() -> Tuple[str, Path, Optional[int]]:
    """Parse command line arguments.

    Returns:
        Tuple[str, Path, Optional[int]]: (identifier, path to save log,
        battery poll interval in seconds or None for notifications).
    """
    parser = argparse.ArgumentParser(
        description="Connect to the GoPro via BLE only and continuously read the battery (either by polling or notifications)."
    )
    # Camera selection: None means "use the first GoPro discovered".
    parser.add_argument(
        "-i",
        "--identifier",
        type=str,
        help="Last 4 digits of GoPro serial number, which is the last 4 digits of the default camera SSID. \
            If not used, first discovered GoPro will be connected to",
        default=None,
    )
    # argparse applies type=Path to the string default as well.
    parser.add_argument(
        "-l",
        "--log",
        type=Path,
        help="Location to store detailed log",
        default="log_battery.log",
    )
    # None selects notification mode instead of polling.
    parser.add_argument(
        "-p",
        "--poll",
        type=int,
        help="Set to poll the battery at a given interval. If not set, battery level will be notified instead. Defaults to notifications.",
        default=None,
    )
    args = parser.parse_args()
    return args.identifier, args.log, args.poll
cf3e7528177510fbfdcdd3b615010f4ffc2e59b9
698,642
def _viewer_urls(spectra, zoom=13, layer='ls-dr9'): """Return legacysurvey.org viewer URLs for all spectra. Note: `layer` does not apply to the JPEG cutout service. """ u = "https://www.legacysurvey.org/viewer/jpeg-cutout?ra={0:f}&dec={1:f}&zoom={2:d}" v = "https://www.legacysurvey.org/viewer/?ra={0:f}&dec={1:f}&zoom={2:d}&layer={3}&mark={0:f},{1:f}" if hasattr(spectra, 'fibermap'): try: ra = spectra.fibermap['RA_TARGET'] dec = spectra.fibermap['DEC_TARGET'] except KeyError: ra = spectra.fibermap['TARGET_RA'] dec = spectra.fibermap['TARGET_DEC'] else: ra = spectra.meta['plugmap']['RA'] dec = spectra.meta['plugmap']['DEC'] return [(u.format(ra[i], dec[i], zoom, layer), v.format(ra[i], dec[i], zoom, layer), 'RA, Dec = {0:.4f}, {1:+.4f}'.format(ra[i], dec[i])) for i in range(len(ra))]
6fe781b92f929f9265d464935929490b67a6e9a7
698,643
def jsdate(d):
    """formats a python date into a js Date() constructor.
    """
    try:
        year, month, day = d.year, d.month, d.day
    except AttributeError:
        # Not a date-like object.
        return 'undefined'
    # JavaScript months are zero-based.
    return "new Date({0},{1},{2})".format(year, month - 1, day)
0c8331f928cf26e4cac7663dd6619c62cfff0fae
698,644
import typing
import asyncio
import functools


def run_sync(
    target: typing.Callable[..., typing.Any],
    *args,
    timeout: int = 10,
    new_loop: bool = False,
    **keywords,
) -> typing.Any:
    """Run async tasks synchronously with a timeout

    :param target: async callable to run.
    :param args: positional arguments forwarded to *target*.
    :param timeout: seconds before the task is cancelled (prints "Timeout").
    :param new_loop: create (and afterwards close) a fresh event loop.
    :param keywords: keyword arguments forwarded to *target*.
    :return: result of *target*, or None on timeout.
    """
    if new_loop:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()

    async def _bounded(factory):
        # `factory` is a zero-argument callable producing the coroutine.
        try:
            return await asyncio.wait_for(factory(), timeout=timeout)
        except asyncio.TimeoutError:
            print("Timeout")
            return None

    bound = functools.partial(target, *args, **keywords)
    try:
        return loop.run_until_complete(_bounded(bound))
    finally:
        if new_loop:
            loop.close()
6c3af5a29e79d2e5fcd2b4422895a5a761e7295a
698,645
import random


def mutate(chromosome):
    """
    Pick a random bit to mutate by XOR and shifting the selected bit.
    This ensures diversity in the population.
    """
    bit_index = random.randint(0, 31)
    flip_mask = 1 << bit_index
    return chromosome ^ flip_mask
ff93fb19d7d83c558172d005fbd140284e2b16c2
698,646
def mileage_format(value):
    """
    Custom formatting for mileage.
    """
    # Thousands separator, then the unit suffix.
    formatted = format(value, ",")
    return formatted + " miles"
056f5fbf5d291ac892bd8f2c16cda16937f758da
698,648
def _list(key: str, vals: dict) -> list: """Get a key from a dictionary of values and ensure it is a list.""" result = vals.get(key, []) if not isinstance(result, list): result = [result] return result
cf8a5da580dc4d95f83f18a92f82f646be5c4cb3
698,649
def structureSvInstance(stackedLine, tabSpace, alignCol, alignCom):
    """
    This function restructures an input "stacked line" module declaration
    from a .sv file.

    Expecting a module declaration on one line in the form of:

        blockName#(param1,param2,...)(port1//ann1,port2//ann2,...)

    or:

        blockName(port1,port2,port3,...)

    It will return a multi-line instantiation string of the form:

        blockName blockName_0 #(
            .param1 (param1),
            ...
        )(
            .port1 (port1), // annotation
            ...
        );

    or, without parameters:

        blockName blockName_0 (
            .port1 (port1),
            ...
        );

    :param stackedLine: single-line module declaration to restructure.
    :param tabSpace: number of spaces used to indent each port/param line.
    :param alignCol: column at which the "(name)" part is aligned.
    :param alignCom: column at which trailing // annotations are aligned.
    :return: the restructured multi-line instantiation string.
    """
    newStackedPorts = ""
    # There are parameters in this module
    if("#" in stackedLine):
        modName,remainder = stackedLine.split("#(")
        paramList,remainder = remainder.split(")(")
        paramList = paramList.split(",")
        newParams = ""
        for param in paramList:
            if(newParams == ""):
                # First parameter: just indent and format, no separator.
                newParams = (" "*tabSpace)
                newParams = newParams+"."+param
                newParams = newParams+(" "*(alignCol-len(param)))
                newParams = newParams+"("+param+")"
            else:
                # Subsequent parameters: close the previous line with a comma.
                newParams = newParams+",\n"
                newParams = newParams+(" "*tabSpace)
                newParams = newParams+"."+param
                newParams = newParams+(" "*(alignCol-len(param)))
                newParams = newParams+"("+param+")"
        paramList = newParams
        portList,remainder = remainder.split(")")
        portList = portList.split(",")
        newPorts = ""
        nextAnnotate = ""
        afterPortLen = 0
        for ports in portList:
            # Rip Out the annotation
            ports,annotate = ports.split("//")
            annotate = "//"+annotate
            if(newPorts == ""):
                newPorts = (" "*tabSpace)
                newPorts = newPorts+"."+ports
                newPorts = newPorts+(" "*(alignCol-len(ports)))
                newPorts = newPorts+"("+ports+")"
                # afterPortLen tracks "(name)" width for annotation alignment.
                afterPortLen = len(ports)+2
            else:
                # The previous port's annotation is emitted after the comma,
                # padded out to column alignCom.
                newPorts = newPorts+(",")
                newPorts = newPorts+(" "*(alignCom-afterPortLen))
                newPorts = newPorts+("%s\n"%nextAnnotate)
                newPorts = newPorts+(" "*tabSpace)
                newPorts = newPorts+"."+ports
                newPorts = newPorts+(" "*(alignCol-len(ports)))
                newPorts = newPorts+"("+ports+")"
                afterPortLen = len(ports)+2
            nextAnnotate = annotate
        # Append the final port's annotation (one extra pad space because the
        # last port has no trailing comma).
        portList = newPorts+(" "*(alignCom-afterPortLen+1))
        portList = portList+("%s"%nextAnnotate)
        newStackedPorts = modName+" #(\n"+paramList+"\n) "+modName+"_0 (\n"+portList+"\n);"
        stackedLine = newStackedPorts
    else:
        modName,remainder = stackedLine.split("(")
        modName = modName+" "+modName+"_0 ("
        portList,remainder = remainder.split(")")
        portList = portList.split(",")
        newPorts = ""
        for ports in portList:
            if(newPorts == ""):
                newPorts = (" "*tabSpace)
                newPorts = newPorts+"."+ports
                newPorts = newPorts+(" "*(alignCol-len(ports)))
                newPorts = newPorts+"("+ports+")"
            else:
                newPorts = newPorts+",\n"
                newPorts = newPorts+(" "*tabSpace)
                newPorts = newPorts+"."+ports
                newPorts = newPorts+(" "*(alignCol-len(ports)))
                newPorts = newPorts+"("+ports+")"
        portList = newPorts
        newStackedPorts = modName+"\n"+portList+"\n);"
    return newStackedPorts
8939cb6cbd2115b8b6bae3866bd4be78d32e1954
698,650
def contrast_color(hex_color):
    """
    Util function to know if it's best to use a white or a black color
    on the foreground given in parameter

    :param str hex_color: the foreground color to analyse
    :return: A black or a white color
    """
    red = int(hex_color[1:3], 16)
    green = int(hex_color[3:5], 16)
    blue = int(hex_color[5:7], 16)

    # Reference black, decoded the same way as the input colour.
    black_color = "#000000"
    black_red = int(black_color[1:3], 16)
    black_green = int(black_color[3:5], 16)
    black_blue = int(black_color[5:7], 16)

    def _luminance(r, g, b):
        # Approximate relative luminance with a 2.2 gamma.
        return (0.2126 * pow(r / 255, 2.2)
                + 0.7152 * pow(g / 255, 2.2)
                + 0.0722 * pow(b / 255, 2.2))

    l1 = _luminance(red, green, blue)
    l2 = _luminance(black_red, black_green, black_blue)

    if l1 > l2:
        contrast_ratio = int((l1 + 0.05) / (l2 + 0.05))
    else:
        contrast_ratio = int((l2 + 0.05) / (l1 + 0.05))

    # If contrast against black exceeds 5, black text is readable.
    if contrast_ratio > 5:
        return '#000000'
    # if not, return white color
    return '#FFFFFF'
470f713af48673d7b9fb8aa3bb30f7e1498da9b5
698,651
def get_ips(filename: str) -> list:
    """
    Reads ips from a list of files
    """
    with open(filename, 'r') as handle:
        return handle.read().splitlines()
6dfd326dc57df8a8297cd27a0d3d01fbe11db3f4
698,652
import sys


def check_source_target(db_source_atoms, res_target_atoms, dbatoms):
    """
    several checks if the atoms in the dsr command line are consistent

    :param db_source_atoms: ['C1', 'O1', 'C2', ...]
    :param res_target_atoms: ['C1', 'Q2', 'C3_2', ...]
    :param dbatoms: [['C1', 1, '-0.00146', '0.26814', '0.06351'],
                     ['C2', 1, '-1.13341', '-0.23247', '-0.90730'], ...]]
    """
    known_atoms = [entry[0].upper() for entry in dbatoms]
    # check if source and target are of same length:
    num_source = len(db_source_atoms)
    num_target = len(res_target_atoms)
    if num_source != num_target:
        print('*** Number of source and target atoms/peaks is different!! '
              '({} and {} atoms/peaks) ***'.format(num_source, num_target))
        sys.exit()
    # do the source atoms exist at all?:
    for atom in db_source_atoms:
        atom = atom.upper()
        if atom not in known_atoms:
            print('\n*** Atom {} not found in database entry! ***'.format(atom))
            sys.exit()
    return True
db592ad544a20f536b425409ef1a0d8ddbfb1c42
698,653
def all_packages(request, verbose=False):
    """List all packages the requester may read.

    :param request: Request object providing ``db`` (package store) and
        ``access`` (permission checker).
    :param verbose: If True, return full package summaries instead of names.
    :return: Dict with a "packages" key holding the readable packages.
    """
    if verbose:
        packages = request.db.summary()
    else:
        packages = request.db.distinct()
    # Filter with a comprehension instead of the original while/del scan,
    # which deleted from the list while indexing it (O(n^2)).
    readable = [
        package for package in packages
        if request.access.has_permission(
            package if isinstance(package, str) else package["name"], "read")
    ]
    return {"packages": readable}
76103ea29f787b7cd28ff8bc9aca37daea603432
698,654
import itertools


def get_common_base(files):
    """Find the common parent base path for a list of files.

    For example, ``["/usr/src/app", "/usr/src/tests", "/usr/src/app2"]``
    would return ``"/usr/src"``.

    :param files: files to scan.
    :type files: ``iterable``
    :return: Common parent path.
    :rtype: str
    """
    def _all_equal(level):
        """Return True when every name at this path level matches the first."""
        return all(name == level[0] for name in level[1:])

    # zip(*) groups the path components level by level.
    levels = zip(*(path.split("/") for path in files))
    common = itertools.takewhile(_all_equal, levels)
    return "/".join(level[0] for level in common)
bb6c7fde6a9e2f9c620febf52047b547cf24e602
698,655
def magic_dir(tmp_path_factory):
    """Provides a temporary directory for testing copy/working directory behavior."""
    return tmp_path_factory.mktemp('build_magic')
c891b0e7e8dbf2d23d380ce316fb80b6d78d1ec7
698,656
def comment(src_line: str) -> list:
    """Comment lines contribute nothing to the output; always return an empty list."""
    return []
c7e584811b899ae4a17ba4c9fd0debbc97e0a5cc
698,657
import numpy as np


def mask_ccf_near_RV(rv, ccf, RVp, hw):
    """Mask the region of each CCF row near the velocity RVp.

    Typically used to mask velocities close to the expected planet velocity.
    ``RVp`` must have one entry per row of ``ccf``; ``ccf`` is a 2D
    array-like whose x-axis matches ``rv``. The masked band is twice ``hw``
    (half-width) wide. The returned mask is 1.0 everywhere except the masked
    band, which is NaN, so it is normally applied multiplicatively.
    """
    n_exposures = np.shape(ccf)[0]
    mask = ccf * 0.0 + 1.0
    for row in range(n_exposures):
        inside = (rv >= RVp[row] - hw) & (rv <= RVp[row] + hw)
        mask[row, inside] = np.nan
    return mask
d074183960dc6d665f905b592cb820a3eabf349c
698,658
def is_regex_url(url, regexp):
    """
    Wrapper method to search URL for different properties based on regex

    :param url: URL
    :type url: `str`
    :param regexp: Compiled regular expression for the property
    :type regexp: `re.Pattern`
    :return: True/False
    :rtype: `bool`
    """
    # search() stops at the first match instead of collecting every match
    # with findall() only to count them.
    return regexp.search(url) is not None
8e5de88e8201805b95d91001067a7756749be048
698,659
def hidden_loc(obj, name):
    """
    Generate the location of a hidden attribute.

    Importantly deals with attributes beginning with an underscore.
    """
    # Name-mangle like Python does, collapsing a leading underscore on the
    # attribute name into the class separator.
    mangled = "_{}__{}".format(obj.__class__.__name__, name)
    return mangled.replace("___", "__")
5a8c3d066cb96cf282c1b8814ac1302bd68111c0
698,660
import json def _load_cluster_config(input_file_path): """ Load queues_info and add information used to render templates. :return: queues_info containing id for first queue, head_node_hostname and queue_name """ with open(input_file_path) as input_file: return json.load(input_file)
b86f163da96be1f831da7997872825e852439f84
698,661
def find_neighbors_hexagonal_grid(map_coordinates: list, current_position: tuple) -> list:
    """Finds the set of adjacent positions of coordinates 'current_position' in a hexagonal grid.

    Args:
        map_coordinates (list): List of map coordinates.
        current_position (tuple): Current position of the hexagonal grid whose neighbors we want to find.

    Returns:
        neighbors (list): List of neighbors from the current position in the hexagonal grid map.
    """
    x, y = current_position[0], current_position[1]
    # The six hexagonal offsets, in the same order as the original candidates.
    offsets = [(-2, 0), (-1, 1), (1, 1), (2, 0), (1, -1), (-1, -1)]
    neighbors = []
    for dx, dy in offsets:
        candidate = (x + dx, y + dy)
        if candidate[0] >= 0 and candidate[1] >= 0 and candidate in map_coordinates:
            neighbors.append(candidate)
    return neighbors
2a6071d59a69b828eb252504508fa3f706969e1b
698,662
from pathlib import Path


def openW2wFile(path, mode):
    """
    Helper function to read/write all files with same encoding and line endings

    :param str|Path path: full path to file
    :param str mode: open mode: 'r' - read, 'w' - write, 'a' - append
    :return TextIO:
    """
    filename = str(path) if isinstance(path, Path) else path
    # Windows line endings so that less advanced people can edit files, created on Unix in Windows Notepad
    return open(filename, mode, encoding='utf-8', newline='\r\n')
6e42a26d2262ed10d15e19292e7b32fffd14aeb8
698,663
def ugly_numbers(n: int) -> int:
    """
    Returns the nth ugly number.

    >>> ugly_numbers(100)
    1536
    >>> ugly_numbers(0)
    1
    >>> ugly_numbers(20)
    36
    >>> ugly_numbers(-5)
    1
    >>> ugly_numbers(-5.5)
    Traceback (most recent call last):
        ...
    TypeError: 'float' object cannot be interpreted as an integer
    """
    sequence = [1]
    idx2 = idx3 = idx5 = 0
    # Next candidate multiple for each prime factor.
    cand2, cand3, cand5 = 2, 3, 5
    for _ in range(1, n):
        smallest = min(cand2, cand3, cand5)
        sequence.append(smallest)
        # Advance every pointer that produced the chosen value (handles
        # duplicates like 6 = 2*3 = 3*2 exactly once).
        if smallest == cand2:
            idx2 += 1
            cand2 = sequence[idx2] * 2
        if smallest == cand3:
            idx3 += 1
            cand3 = sequence[idx3] * 3
        if smallest == cand5:
            idx5 += 1
            cand5 = sequence[idx5] * 5
    return sequence[-1]
69c92c8eea98b6d8d869f0725c49d6b164d24480
698,664
from typing import Iterable


def make_conda_description(summary: str, conda_channels: Iterable[str] = ()) -> str:
    """
    Create a description for the Conda package from its summary and a list of
    channels required to install it.

    The description will look like::

        This is my fancy Conda package. Hope you like it 😉.

        Before installing please ensure you have added the following channels: conda-forge, bioconda

    .. versionadded:: 0.8.0

    :param summary:
    :param conda_channels:
    """
    channels = tuple(conda_channels)
    if not channels:
        return summary
    return (
        summary
        + "\n\n\n"
        + "Before installing please ensure you have added the following channels: "
        + ", ".join(channels)
        + '\n'
    )
b275e3bfcb67da33a4c7a34d7801a0cb4144527a
698,665
import hashlib


def sha1(text):
    """
    Calculate the SHA1 fingerprint of text.

    :param text: The text to fingerprint (a string).
    :returns: The fingerprint of the text (a string).
    """
    return hashlib.sha1(text.encode('utf-8')).hexdigest()
43473e9cc22b6cbe072170244b56b831f0cc88ee
698,666
def ifirst_is_not(l, v):
    """
    Return index of first item in list which is not the specified value.
    If the list is empty or if all items are the specified value, raise
    a ValueError exception.

    Parameters
    ----------
    l : sequence
        The list of elements to be inspected.
    v : object
        The value not to be matched.

    Example:
    --------
    >>> ifirst_is_not(['a', 'b', 'c'], 'a')
    1
    """
    # Identity comparison, matching the original `is not` semantics.
    for index, item in enumerate(l):
        if item is not v:
            return index
    raise ValueError('There is no matching item in the list.')
fef5ae1a512772cf4df75a294057330280047f88
698,668
def get_calls(mock_observer_func):
    """Given a mock IPluginObserver method, returns the plugins that caused
    its methods to be called, so basically a list of plugins that
    loaded/unloaded"""
    plugins = []
    # Each call tuple is (positional_args, kwargs); the plugin is the
    # first positional argument.
    for call_tuple in mock_observer_func.calls:
        plugins.append(call_tuple[0][0].name)
    return plugins
41360429df5e71da47946f75f8207f985959528f
698,669
def format_date_ihme(date_in):
    """
    Formats "m/d/yyy" to "yyyymmdd".

    Inputs not in slash form (e.g. already "yyyy-mm-dd") fall through to the
    dash-strip fallback.
    """
    try:
        month, day, year = date_in.split('/')
        return '%s%02i%02i' % (year, int(month), int(day))
    except (ValueError, AttributeError):
        # ValueError: wrong number of '/' fields or non-numeric month/day.
        # AttributeError: input has no .split (not a string).
        # The original bare `except:` also swallowed KeyboardInterrupt
        # and genuine bugs.
        return date_in.replace('-', '')
166739b2245833c6dddf35fcde87433648697277
698,670
def reverse_map(dict_of_sets):
    """Reverse a map of one-to-many.

    So the map::

        {
            'A': {'B', 'C'},
            'B': {'C'},
        }

    becomes

        {
            'B': {'A'},
            'C': {'A', 'B'},
        }

    Args:
        dict_of_sets (dict[set]): A dictionary of sets, mapping one value
            to many.

    Returns:
        dict[set]: The reversed map.
    """
    reversed_map = {}
    for source, targets in dict_of_sets.items():
        for target in targets:
            if target not in reversed_map:
                reversed_map[target] = set()
            reversed_map[target].add(source)
    return reversed_map
87b0679a0ebd00c3dbdcafc89fd2644b21ecbc85
698,671
from datetime import datetime
import sys


def write_xml(xml, agent_name, data_dir="/var/spool/pandora/data_in/"):
    """Creates a agent .data file in the specified data_dir folder

    Args:
    - xml (str): XML string to be written in the file.
    - agent_name (str): agent name for the xml and file name.
    - data_dir (str): folder in which the file will be created.

    Returns:
    - str: path of the file that was written.
    """
    # int(timestamp()) replaces strftime('%s'), which is a non-portable
    # glibc extension (unsupported on Windows); the value is the same
    # Unix epoch-seconds string.
    Utime = str(int(datetime.now().timestamp()))
    data_file = "%s/%s.%s.data" % (str(data_dir), agent_name, Utime)
    try:
        # 'x' mode fails if the file already exists, preventing clobbering.
        with open(data_file, 'x') as data:
            data.write(xml)
    except OSError as o:
        sys.exit(f"ERROR - Could not write file: {o}, please check directory permissions")
    except Exception as e:
        sys.exit(f"{type(e).__name__}: {e}")
    return (data_file)
dbe5714bfdd97f94673c3a09b02671e3ee3991dc
698,672
def get_iemocap_emotion(session_file_name, turn_name):
    """gets the assigned evaluated iemocap emotion for a given turn name in a given session dialogue

    :param session_file_name: path to the session evaluation file; lines are
        tab-separated, with the turn name in column 1 and the consensus
        emotion label in column 2.
    :param turn_name: name of the turn to look up.
    :return: the emotion label string, or None if the turn is not found.
    """
    # NOTE(review): the file handle is never closed and the e+1/e+2/e+3
    # lookups assume at least three annotator lines follow the turn line --
    # confirm against the IEMOCAP file format.
    r = open(session_file_name, 'r')
    lines = r.readlines()
    for e, l in enumerate(lines):
        emo_info = l.split('\t')
        # A consensus line has exactly 4 tab-separated fields.
        if len(emo_info) == 4 and emo_info[1] == turn_name:
            if emo_info[2] in ['xxx', 'oth']:
                # No consensus ('xxx') or 'other': fall back to the first
                # individual annotator label that is not 'Other'.
                emo = lines[e+1].split('\t')[1].strip(';')
                if emo.split(';')[0] != 'Other':
                    return emo.split(';')[0]
                else:
                    emo = lines[e + 2].split('\t')[1].strip(';')
                    if emo.split(';')[0] != 'Other':
                        return emo.split(';')[0]
                    else:
                        emo = lines[e + 3].split('\t')[1].strip(';')
                        return emo.split(';')[0]
            else:
                return emo_info[2]
e82576633632e0f3cffd8d4187607e955dade594
698,673
import functools


def expand_args(function):
    """Expand arguments passed inside the CallContext object.

    Converts internal method signature 'method(ctx)' to whatever is
    appropriate to the client code.

    Args:
        function: Async function expecting CallContext as a last argument.

    Returns:
        Async function with altered signature.
    """
    @functools.wraps(function)
    async def expand_args_wrapper(*call_args):
        # Valid method signatures are foo(ctx) and foo(self, ctx);
        # the context is always the final positional argument.
        ctx = call_args[-1]
        return await function(*call_args, *ctx.args, **ctx.kwargs)

    return expand_args_wrapper
72e7de46684b2c02d958fad7f9ded71e653bb811
698,674
def derivative_of_binary_cross_entropy_loss_function(y_predicted, y_true):
    """
    Use the derivative of the binary cross entropy (BCE) loss function.

    Parameters
    ----------
    y_predicted : ndarray
        The predicted output of the model.
    y_true : ndarray
        The actual output value.

    Returns
    -------
    float
        The derivative of the BCE loss function.
    """
    # dBCE/dy_pred = (p - y) / (p * (1 - p))
    error = y_predicted - y_true
    scale = y_predicted * (1 - y_predicted)
    return error / scale
7803c852f794ab9eae165c639bfb7a04cd0c1028
698,675
def AUC(answers, scores):
    """
    Compute the `AUC <https://en.wikipedia.org/wiki/Area_under_the_curve_(pharmacokinetics)>`_.

    @param      answers     expected answers 0 (false), 1 (true)
    @param      scores      score obtained for class 1
    @return                 number
    """
    paired = list(zip(answers, scores))
    positives = [score for label, score in paired if label == 1]
    negatives = [score for label, score in paired if label != 1]
    # Degenerate case: a missing class yields a conventional 1.0 / 0.0.
    if len(positives) == 0 or len(negatives) == 0:
        return 1.0 if len(negatives) == 0 else 0.0
    # Count pairwise wins (2 points) and ties (1 point), then normalise.
    wins = 0
    for pos in positives:
        for neg in negatives:
            if pos > neg:
                wins += 2
            elif pos == neg:
                wins += 1
    return wins * 1.0 / (len(positives) * len(negatives) * 2)
477cdf2a7b51352dd728465c883dae9cb4ab6e9a
698,676
def factorial(number: int) -> int:
    """
    Calculating factorial of a number using prime decomposition method
    """
    # Sieve of Eratosthenes over [0, number].
    is_prime = [True] * (number + 1)
    result = 1
    for p in range(2, number + 1):
        if not is_prime[p]:
            continue
        # Mark multiples of the prime p as composite.
        multiple = 2 * p
        while multiple <= number:
            is_prime[multiple] = False
            multiple += p
        # Legendre's formula: exponent of p in number! is sum(number // p^k).
        exponent = 0
        power = p
        while power <= number:
            exponent += number // power
            power *= p
        result *= p ** exponent
    return result
2d6a85daa1b324fe419c3e5aecfbfc8373cffdcf
698,677
import os


def filestructure_snapshot(path):
    """
    Create a filestructure snapshot by returning the paths of all files
    inside a path.
    """
    snapshot = []
    for dirpath, _dirnames, filenames in os.walk(path):
        snapshot.append(dirpath)
        snapshot.extend(os.path.join(dirpath, name) for name in filenames)
    return snapshot
36a0e5ee6f05c74e75bdeca7609702735127411e
698,678
def api_failed(response, endpoint, exit_on_error=True):
    """
    Check if api request failed

    Can raise exception
    """
    ok = 200 <= response.status_code < 300
    if ok:
        return False
    if not exit_on_error:
        return True
    raise Exception("Status code {0} calling {1}".format(
        response.status_code, endpoint))
6a9d803444c4be09806eb6c5b588e1e04062c431
698,680
def select_field(X, cols=None, single_dimension=False):
    """
    Select columns from a pandas DataFrame

    Args:
        X (pandas DataFrame): input data
        cols (array-like): list of columns to select
        single_dimension (bool): reduce data to one dimension if only one
            column is requested

    Returns:
        X (numpy array)
    """
    if cols is None:
        return X.values
    n_cols = len(cols)
    if n_cols > 1:
        return X[cols].values
    if n_cols == 1:
        return X[cols[0]].values if single_dimension else X[cols].values
    # An empty cols list falls through and returns None, matching the
    # original behaviour.
4f80af3621af5e0c622f2c880e88d95a6e93034e
698,681
def get_pathless_file_size(data_file):
    """Return the size in bytes of an open file-like object without a path.

    Traditionally one would use an `os` call on a filesystem path, but
    objects such as `TemporaryFileWrapper` have no path. Instead, seek to
    the end, record the position, and seek back to the beginning so the
    file behaves as if freshly opened. The file is deliberately left open
    (closing would make a TemporaryFileWrapper unusable).

    :param data_file: an open, seekable file-like object.
    :return: the size in bytes, or 0 if the file is already closed.
    """
    if data_file.closed:
        return 0
    data_file.seek(0, 2)
    size = data_file.tell()
    # Removed the stray debug `print(size)` left in the original.
    data_file.seek(0, 0)
    return size
df1edfa6cb7b97c6abbdb2252b7dd8d505931768
698,682
import collections


def _make_repolist(c, path):
    """turn the repo log into a history

    input: pathname to the repository
    output: dictionary containing the history

    :param c: connection-like object providing ``cd`` (context manager) and
        ``run`` (command execution returning an object with ``stdout``) --
        presumably a Fabric/Invoke context; confirm against callers.
    :param path: pathname of the Mercurial repository.
    :return: dict mapping changeset number (int) to a dict with 'date',
        'files' and 'desc' entries parsed from ``hg log -v``.
    """
    ## logging.info('processing {}'.format(item))
    with c.cd(path):
        result = c.run('hg log -v', hide=True)
        data = result.stdout
    outdict = collections.defaultdict(dict)
    in_description = False
    key = ''  # just to satisfy the linters
    for line in data.split('\n'):
        line = line.strip()
        words = line.split()
        if line == '':
            # A blank line terminates the free-form description section.
            in_description = False
        elif in_description:
            outdict[key]['desc'].append(line)
        elif words[0] == 'changeset:':
            # "changeset: 12:abcdef" -> numeric revision becomes the key.
            key, _ = words[1].split(':', 1)
            key = int(key)
        elif words[0] == 'date:':
            outdict[key]['date'] = ' '.join(words[1:])
        elif words[0] == 'files:':
            outdict[key]['files'] = words[1:]
        elif words[0] == 'description:':
            in_description = True
            outdict[key]['desc'] = []
    return outdict
5c3d27bcfd52f8b0656d5bf770ce83b0fcbdf5d0
698,683
def fizz_buzz_one(start, end):
    """
    Note: This method returns a long string not a list.
    """
    words = []
    # range() excludes the stop value, so add 1 to make `end` inclusive.
    for n in range(start, end + 1):
        token = ""
        if n % 3 == 0:
            token += "fizz"
        if n % 5 == 0:
            token += "buzz"
        if not token:
            token = str(n)
        words.append(token)
    return " ".join(words)
9000ed93629bff15a74171565f562eee29814dd0
698,684
from functools import reduce
import operator


def factorial(n):
    """Calculate n factorial"""
    # Multiply 2..n; an empty range (n < 2) leaves the product at 1.
    product = 1
    for factor in range(2, n + 1):
        product = operator.mul(product, factor)
    return product
a58e0aad4e3a8baf06bbd1a6929a3aab2ab4e66e
698,685
import argparse
import time


def parse_input():
    """
    Sets up the required input arguments and parses them.

    Fixes the original option strings such as ``'-e, --n_epochs'``: argparse
    registered that whole string (comma, space and all) as a single literal
    flag, so neither ``-e`` nor ``--n_epochs`` actually worked. Short and
    long forms are now passed as separate strings.

    :return: argparse.Namespace with log_file, n_epochs, out_dir, name_model.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('log_file', help='CSV file of log data')
    parser.add_argument('-e', '--n_epochs', dest='n_epochs',
                        help='number of training epochs', metavar='',
                        type=int, default=5)
    parser.add_argument('-o', '--out_dir', dest='out_dir', metavar='',
                        default=time.strftime("%Y%m%d_%H%M%S"),
                        help='directory where the model is stored')
    parser.add_argument('-n', '--name_model', dest='name_model', metavar='',
                        help='name of the trained model', type=str,
                        default='model.h5')
    return parser.parse_args()
3d701c6a91312ee597d8d25149ffdf2f6a0ea8cc
698,686
from typing import Optional
from typing import Tuple
from typing import Match
import re


def is_conversion_err(error: Exception) -> Optional[Tuple[str, int]]:
    """Check if error is caused by generic conversion error.

    Return a tuple of the converter type and parameter name if True,
    otherwise return None.
    """
    message = str(error)
    found: Optional[Match] = re.search(
        r'Converting to "(.+?)" failed for parameter "(.+?)"\.', message)
    if found is None:
        return None
    return (found.group(1), found.group(2))
2e53420bb8a5715c75ee30a549453f40202d4ff7
698,687
def txt_file_to_list(genomes_txt):
    """
    Read ids from a one column file to a list.

    Args:
        genomes_txt (str): Path to file with the ids.
    """
    with open(genomes_txt, 'r') as fin:
        return [line.strip() for line in fin]
f79874dfbcb6a2a71be87b180e80513d480fcc8e
698,688
def oxfordcomma(listed, condition):
    """Format a list into a sentence, joining the last item with *condition*."""
    quoted = [f"'{str(item)}'" for item in listed]
    count = len(quoted)
    if count == 0:
        return ""
    if count == 1:
        return quoted[0]
    if count == 2:
        return f"{quoted[0]} {condition} {quoted[1]}"
    return f"{', '.join(quoted[:-1])} {condition} {quoted[-1]}"
c64ec05df35ccb6169363bf276fb874e61525c85
698,689
def get_all_user_attributes(client):
    """Fetch all users, take the first, and return its attribute names.

    Runs client.get_all_users(), picks the first record's Id, then asks
    for that user's full record and returns its keys as a list.  Returns
    an empty list when no usable record/Id is found.
    """
    attributes = []
    user_id = ""
    records = client.get_all_users().get("searchRecords")
    if isinstance(records, list):
        user_id = records[0].get("Id")
    if user_id:
        attributes = list(client.get_user(user_id).keys())
    return attributes
d92ccab35a61bec96e34d3ca4a4035d7688001a0
698,690
def setup_loading():
    """Establish how recordings are loaded.

    Returns:
        (load_all, to_load, select_recordings):
        load_all -- load all information on a recording in bulk (True)
            or as needed (False).
        to_load -- when load_all is True, which categories to bulk-load;
            a subset of ["signals", "spatial", "units"].
        select_recordings -- whether to pick a subset of recordings;
            True opens a console to help choose, or a list of indices
            can be passed instead.
    """
    return True, ["signals"], False
8855e02ba57a79c72861796de3d0c3178232c2a8
698,691
def dense_rank(x, na_option="keep"):
    """Return the dense rank of a pandas Series.

    Ranks run from 1 to the number of unique entries; tied values all
    receive the same rank.

    Example:
        >>> dense_rank(pd.Series([1,3,3,5]))
        0    1.0
        1    2.0
        2    2.0
        3    3.0
        dtype: float64
    """
    return x.rank(na_option=na_option, method="dense")
53bd64e112741f20c0e62f44ba7f5bcf8cdb2f83
698,692
def erbb2_context():
    """Create test fixture for ERBB2 Gene Context.

    Returns a GeneDescriptor dict for the ERBB2 (HER2) gene as produced
    by the gene normalizer: primary id, xrefs, alternate labels, and
    extensions (symbol status, approved name, associated database
    references, cytoband location, previous symbols).
    """
    return {
        "id": "normalize.gene:ERBB2",
        "type": "GeneDescriptor",
        "label": "ERBB2",
        "gene": {
            "gene_id": "hgnc:3430",
            "type": "Gene"
        },
        # Cross-references to other gene registries.
        "xrefs": [
            "ncbigene:2064",
            "ensembl:ENSG00000141736"
        ],
        "alternate_labels": [
            "NGL",
            "CD340",
            "HER2",
            "NEU",
            "TKR1",
            "HER-2",
            "HER-2/neu",
            "VSCN2",
            "MLN 19"
        ],
        "extensions": [
            {
                "type": "Extension",
                "name": "symbol_status",
                "value": "approved"
            },
            {
                "name": "approved_name",
                "value": "erb-b2 receptor tyrosine kinase 2",
                "type": "Extension"
            },
            {
                "type": "Extension",
                "name": "associated_with",
                "value": [
                    "ucsc:uc002hso.4",
                    "ena.embl:X03363",
                    "ccds:CCDS77017",
                    "vega:OTTHUMG00000179300",
                    "ccds:CCDS77016",
                    "uniprot:P04626",
                    "refseq:NM_004448",
                    "ccds:CCDS74052",
                    "hcdmdb:CD340",
                    "omim:164870",
                    "ccds:CCDS32642",
                    "ccds:CCDS45667",
                    "cosmic:ERBB2",
                    "iuphar:2019"
                ]
            },
            {
                # Cytogenetic location on chromosome 17 (band q12).
                "type": "Extension",
                "name": "chromosome_location",
                "value": {
                    "_id": "ga4gh:VCL.pS7M3aeNymozN9LKeAwVDEB5H1nt4Kqy",
                    "type": "ChromosomeLocation",
                    "species_id": "taxonomy:9606",
                    "chr": "17",
                    "interval": {
                        "end": "q12",
                        "start": "q12",
                        "type": "CytobandInterval"
                    }
                }
            },
            {
                "name": "previous_symbols",
                "value": [
                    "NGL"
                ],
                "type": "Extension"
            }
        ]
    }
0a4ef3bf47d8ac622d6c2778bd33199f5da2a747
698,693
import codecs
def decode_rot13(cipher: str):
    """Decode a ROT13-enciphered string.

    Letters are rotated 13 places; all other characters pass through
    unchanged.
    """
    decoder = codecs.getdecoder('rot_13')
    plain, _consumed = decoder(cipher)
    return plain
3c20989614c6691c74294a93268745bbaccecf42
698,694
from unittest.mock import Mock
def mock_channel_file(
    offered_by, channel_id, playlist_id, create_user_list=False, user_list_title=None
):
    """Mock video channel github file.

    Builds a YAML front-matter document describing a channel with a
    single playlist entry, and wraps it in a Mock whose
    ``decoded_content`` attribute holds the text (mimicking a PyGithub
    ContentFile).

    Args:
        offered_by: value for the ``offered_by`` YAML field.
        channel_id: value for the ``channel_id`` YAML field.
        playlist_id: id of the single playlist entry.
        create_user_list: when True, emits ``create_user_list: true``
            under the playlist; otherwise the line is left blank.
        user_list_title: when truthy, emits ``user_list_title: <title>``
            under the playlist; otherwise the line is left blank.
    """
    # NOTE(review): template indentation reconstructed — verify the
    # playlist entry lines align with the consumer's YAML expectations.
    content = f"""---
offered_by: {offered_by}
channel_id: {channel_id}
playlists:
  - id: {playlist_id}
    {"create_user_list: true" if create_user_list else "" }
    { "user_list_title: " + user_list_title if user_list_title else "" }
"""
    return Mock(decoded_content=content)
3042a7fc6f0fb622db8bf035a408672ddd6408ab
698,695
from functools import wraps
def run_only_once(resolve_func):
    """
    Make sure middleware is run only once, this is done by setting a
    flag in the `context` of `ResolverInfo`

    Example:

        class AuthenticationMiddleware:
            @run_only_once
            def resolve(self, next, root, info, *args, **kwargs):
                pass
    """

    @wraps(resolve_func)
    def wrapper(self, nextFn, root, info, *args, **kwargs):
        has_context = info.context is not None
        # Flag name is derived from the middleware class so different
        # middlewares don't clobber each other's run markers.
        decorator_name = "__{0}_run__".format(self.__class__.__name__)
        if has_context:
            # dict-style context: store the flag as a key.
            if isinstance(info.context, dict) and not info.context.get(
                decorator_name, False
            ):
                info.context[decorator_name] = True
                return resolve_func(self, nextFn, root, info, *args, **kwargs)
            # object-style context: store the flag as an attribute.
            elif not isinstance(info.context, dict) and not getattr(
                info.context, decorator_name, False
            ):
                # Graphene: it could be a Context or WSGIRequest object
                setattr(info.context, decorator_name, True)
                return resolve_func(self, nextFn, root, info, *args, **kwargs)

        # No context (or flag already set): skip this middleware and
        # delegate straight to the next resolver in the chain.
        # No context, run_only_once will not work
        return nextFn(root, info, *args, **kwargs)

    return wrapper
9742318ca44ec92d7826d561968c87cea62db1bf
698,696
def ldapmask2filemask(ldm):
    """Takes the access mask of a DS ACE and transform them in a
    File ACE mask.

    Directory-service read/write/create/delete rights are expanded into
    their file-system equivalents; standard rights carry over unchanged.
    """
    # Directory-service rights (input bits).
    RIGHT_DS_CREATE_CHILD = 0x00000001
    RIGHT_DS_DELETE_CHILD = 0x00000002
    RIGHT_DS_LIST_CONTENTS = 0x00000004
    RIGHT_DS_READ_PROPERTY = 0x00000010
    RIGHT_DS_WRITE_PROPERTY = 0x00000020

    # File rights (output bits).
    FILE_READ_DATA = 0x0001
    FILE_LIST_DIRECTORY = 0x0001
    FILE_WRITE_DATA = 0x0002
    FILE_ADD_FILE = 0x0002
    FILE_APPEND_DATA = 0x0004
    FILE_ADD_SUBDIRECTORY = 0x0004
    FILE_READ_EA = 0x0008
    FILE_WRITE_EA = 0x0010
    FILE_EXECUTE = 0x0020
    FILE_DELETE_CHILD = 0x0040
    FILE_READ_ATTRIBUTES = 0x0080
    FILE_WRITE_ATTRIBUTES = 0x0100
    SYNCHRONIZE = 0x00100000
    STANDARD_RIGHTS_ALL = 0x001F0000

    READ_BITS = (SYNCHRONIZE | FILE_LIST_DIRECTORY | FILE_READ_ATTRIBUTES |
                 FILE_READ_EA | FILE_READ_DATA | FILE_EXECUTE)
    WRITE_BITS = (SYNCHRONIZE | FILE_WRITE_DATA | FILE_APPEND_DATA |
                  FILE_WRITE_EA | FILE_WRITE_ATTRIBUTES | FILE_ADD_FILE |
                  FILE_ADD_SUBDIRECTORY)

    # Standard rights map across unchanged.
    filemask = ldm & STANDARD_RIGHTS_ALL

    # Reading requires both property-read and list-contents rights.
    if (ldm & RIGHT_DS_READ_PROPERTY) and (ldm & RIGHT_DS_LIST_CONTENTS):
        filemask |= READ_BITS
    if ldm & RIGHT_DS_WRITE_PROPERTY:
        filemask |= WRITE_BITS
    if ldm & RIGHT_DS_CREATE_CHILD:
        filemask |= FILE_ADD_SUBDIRECTORY | FILE_ADD_FILE
    if ldm & RIGHT_DS_DELETE_CHILD:
        filemask |= FILE_DELETE_CHILD

    return filemask
a461fd5bdc6498df345491f38885a63b0dcbefc2
698,697
import re
def get_input_size(filename):
    """Gets input size and # of channels.

    Scans the file for the first line of the form
    ``input... (None, X, Y, C)`` and returns (X, C).  Returns None
    implicitly when no such line is found.
    """
    pattern = re.compile(r'^input.*\(None, (\d*), (\d*), (\d*)\)')
    with open(filename) as handle:
        for row in handle:
            found = pattern.match(row)
            if found is not None:
                return int(found.group(1)), int(found.group(3))
3e753e5ac74babcbe4f48830cfde7a3002f7cc37
698,698
import os
from pathlib import Path
def get_generated_keys():
    """Locate generated key files in the current working directory.

    Scans '.' for ``*.key`` (private) and ``*.pub`` (public) files.

    Returns:
        (priv_key, pub_key): resolved Path of the first match of each
        kind, or '' when no file of that kind exists.
    """
    private, public = [], []
    for entry in os.listdir("."):
        if entry.endswith(".key"):
            private.append(Path(entry).resolve())
        if entry.endswith(".pub"):
            public.append(Path(entry).resolve())
    priv_key = private[0] if private else ''
    pub_key = public[0] if public else ''
    return priv_key, pub_key
f287a38964c1ce6b63180c861a106095cb49d5f3
698,699
def indexOfSmallestInt(listOfInts):
    """Return index of the smallest element of a non-empty list of ints,
    or False otherwise.

    False is returned when the parameter is not a list, is empty, or
    contains any non-int element (bool does not count as int here).
    When the minimum occurs more than once, the lowest such index wins.

    >>> indexOfSmallestInt([])
    False
    >>> indexOfSmallestInt('foo')
    False
    >>> indexOfSmallestInt([3,5,4.5,6])
    False
    >>> indexOfSmallestInt([40])
    0
    >>> indexOfSmallestInt([-90,40,70,80,20])
    0
    >>> indexOfSmallestInt([10,30,-50,20,-50])
    2
    """
    if type(listOfInts) != list or listOfInts == []:
        return False
    best = 0
    for idx, value in enumerate(listOfInts):
        # Exact type check (not isinstance) so bools are rejected too.
        if type(value) != int:
            return False
        if value < listOfInts[best]:
            best = idx
    return best
63afb453b13ad4da75c83e9a7f6b409f58739d7e
698,700
def field_with_classes(field, *classes):
    """Render the form field with the given CSS classes on its widget.

    The classes are joined with spaces into the widget's ``class``
    attribute.
    """
    css = ' '.join(classes)
    return field.as_widget(attrs={'class': css})
2b9929b2629fafaafbe1d2537afc0a9a601eed53
698,701
import tkinter as Tkinter
import tkinter.filedialog as tkFileDialog
def directory_from_gui(
        initialdir='.', title='Choose directory'):  # pragma: no cover
    """
    Opens dialog to one select directory

    Parameters
    ----------
    initialdir : str, optional
        Initial directory, in which opens GUI (default: '.')
    title : str, optional
        Title of GUI (default: 'Choose directory')

    Returns
    -------
    str
        Selected directory (empty string if the dialog is cancelled)

    Examples
    --------
    .. code-block:: python

        if not idir:
            idir = directory_from_gui()
            if not idir:
                raise ValueError('Error: no directory given.')

    """
    root = Tkinter.Tk()
    root.withdraw()                                 # hide root window, i.e. white square

    # always on top
    # focus on (hidden) window so that child is on top
    root.tk.call('wm', 'attributes', '.', '-topmost', 1)

    direcs = tkFileDialog.askdirectory(
        parent=root, title=title, initialdir=initialdir)
    # Tear down the hidden root so no Tk interpreter lingers.
    root.destroy()

    return direcs
e0b43a044c0e05815023de2275cc0ff1ffb01d8e
698,702
def _test_path_to_file_url(path): """ Convert a test Path to a "file://" URL. Args: path: a tests.lib.path.Path object. """ return 'file://' + path.resolve().replace('\\', '/')
b13025da535fe43e074f79411ab4be55c766a572
698,703
def instantiate(repo, name=None, filename=None):
    """
    Instantiate the generator and filename specification

    Builds a transformer mapping from the repo's options, then expands
    each transformer's file patterns into concrete matching files.

    Parameters
    ----------
    repo : repository object providing ``options`` and
        ``find_matching_files``.
    name : optional transformer name; when given, only that transformer
        is instantiated.
    filename : optional pattern; when given, its matches override every
        transformer's "files" entry.
    """
    default_transformers = repo.options.get('transformer', {})

    # If a name is specified, then lookup the options from dgit.json
    # if specified. Otherwise it is initialized to an empty list of
    # files.
    transformers = {}
    if name is not None:
        # Handle the case generator is specified..
        if name in default_transformers:
            transformers = { name : default_transformers[name] }
        else:
            transformers = { name : { 'files': [], } }
    else:
        transformers = default_transformers

    #=========================================
    # Map the filename patterns to list of files
    #=========================================

    # Instantiate the files from the patterns specified
    input_matching_files = None
    if filename is not None:
        input_matching_files = repo.find_matching_files([filename])

    for t in transformers:
        for k in transformers[t]:
            # NOTE(review): substring test — any key *containing*
            # "files" passes this filter, not just the exact key.
            if "files" not in k:
                continue
            if k == "files" and input_matching_files is not None:
                # Use the files specified on the command line..
                transformers[t][k] = input_matching_files
            else:
                # Try to match the specification
                if transformers[t][k] is None or len(transformers[t][k]) == 0:
                    transformers[t][k] = []
                else:
                    matching_files = repo.find_matching_files(transformers[t][k])
                    transformers[t][k] = matching_files

    return transformers
6a280b91598d9e0dd41d99165d0474b3dcdde7f3
698,704
def create_model_name(src):
    """Generate a name for a source object given its spatial/spectral
    properties.

    Parameters
    ----------
    src : `~fermipy.roi_model.Source`
       A source object.

    Returns
    -------
    name : str
       A source name.
    """
    spatial = src['SpatialModel'].lower()
    parts = [spatial]
    # Gaussian sources encode their width in the name.
    if spatial == 'gaussian':
        parts.append('_s%04.2f' % src['SpatialWidth'])
    if src['SpectrumType'] == 'PowerLaw':
        index = float(src.spectral_pars['Index']['value'])
        parts.append('_powerlaw_%04.2f' % index)
    else:
        parts.append('_%s' % src['SpectrumType'].lower())
    return ''.join(parts)
d305dd26bc6017f3fce5db2a5267fa6e74df3bc6
698,705
def csv_to_list(value):
    """
    Converts the given value to a list of strings, splitting by ',' and
    removing empty strings.

    :param str value: the value to split to strings.
    :return: a list of strings.
    :rtype: List[str]
    """
    stripped = (piece.strip() for piece in value.split(','))
    return [item for item in stripped if item]
dbfb90dccf8d48a46f528ca02e958306e8dcc266
698,707
from pathlib import Path
import os
def collect_leaf_paths(root_paths):
    """Collects all paths to leaf folders (folders without sub-folders)."""
    leaves = []
    for candidate in Path(root_paths).glob('**'):
        # os.walk's first yield lists the immediate subdirectories.
        _dirpath, dirnames, _filenames = next(os.walk(candidate))
        if not dirnames:
            leaves.append(candidate)
    return leaves
0c75bb94211710b156460702a8135448543f938d
698,708
import os
def store_uploaded_file(title, uploaded_file):
    """
    Stores a temporary uploaded file on disk

    Moves the upload's temporary file into the static uploads folder
    under the user-supplied ``title`` and returns its static URL path.

    :param title: user-supplied target filename (unsanitized)
    :param uploaded_file: Django upload object exposing
        ``temporary_file_path()``
    """

    upload_dir_path = '%s/static/taskManager/uploads' % (
        os.path.dirname(os.path.realpath(__file__)))
    if not os.path.exists(upload_dir_path):
        os.makedirs(upload_dir_path)

    # A1: Injection (shell)
    # WARNING: user-controlled `title` is interpolated straight into a
    # shell command — this is a deliberate injection vulnerability
    # (appears intentional per the A1 marker in this training app);
    # do not copy into production code.
    # Let's avoid the file corruption race condition!
    os.system(
        "mv " +
        uploaded_file.temporary_file_path() +
        " " +
        "%s/%s" %
        (upload_dir_path,
         title))

    return '/static/taskManager/uploads/%s' % (title)
5c6234a3259ea5ed682046ffb0cd3e00d1e839cd
698,709
def Main():
    """Add two fixed values and return their sum (always 3).

    :return: the integer 3
    """
    first = 1
    second = 2
    return first + second
6482b416105d1ba23f564d6a47a52b7cba9c38bc
698,710
def trim_walkers(res, threshold=-1e4):
    """Remove walkers with probability below some threshold.

    Useful for removing stuck walkers.  A walker is kept when its final
    log-probability exceeds ``threshold``.
    """
    keep = res['lnprobability'][:, -1] > threshold
    return {
        'chain': res['chain'][keep, :, :],
        'lnprobability': res['lnprobability'][keep, :],
        'model': res['model'],
    }
ba8e3ebbb2c6948be06d4d1bbd3871011763d71c
698,711
def _str_eval_true(eval, act, ctxt) : """Returns false.""" return [False]
eb0fd9b576b55133319752f86750159c9684ba0c
698,712
import os
def get_cred_from_file(key):
    """Read credential information from a dot-file in the home directory.

    Reads ``~/.<key>`` and returns its contents with trailing
    whitespace stripped.
    """
    home = os.path.expanduser('~')
    cred_path = os.path.join(home, '.' + key)
    with open(cred_path) as handle:
        return handle.read().rstrip()
b19d2bd27d336a939dab51d9781768cee30bdbd3
698,713
def swapaxes(dat, ax1, ax2):
    """Swap axes of a Data object.

    Swaps two axes by swapping the appropriate ``.data``, ``.names``,
    ``.units``, and ``.axes`` of a copy of ``dat``.

    Parameters
    ----------
    dat : Data
    ax1, ax2 : int
        the indices of the axes to swap

    Returns
    -------
    dat : Data
        a copy of ``dat`` with the appropriate axes swapped.

    Examples
    --------
    >>> dat.names
    ['time', 'channels']
    >>> dat = swapaxes(dat, 0, 1)
    >>> dat.names
    ['channels', 'time']

    See Also
    --------
    numpy.swapaxes
    """
    swapped = dat.data.swapaxes(ax1, ax2)
    # Copy the metadata lists before mutating so `dat` stays untouched.
    axes, units, names = dat.axes[:], dat.units[:], dat.names[:]
    for seq in (axes, units, names):
        seq[ax1], seq[ax2] = seq[ax2], seq[ax1]
    return dat.copy(data=swapped, axes=axes, units=units, names=names)
576ec10826c3ab92465052edcadf38cf04952c02
698,714
def update_weight(y, unsupervised_weight, next_weight):
    """Update the weight of the unsupervised part of the loss.

    Writes ``next_weight`` into the last column of ``y`` and into the
    whole of ``unsupervised_weight`` (both mutated in place).
    """
    unsupervised_weight[:] = next_weight
    y[:, -1] = next_weight
    return y, unsupervised_weight
c961714c248ba4c0f752be568115e444e1d963e2
698,715
import re
# Compiled once at import time instead of on every call (the pattern is
# constant; previously it was rebuilt inside the function).
_URL_REGEX = re.compile(
    r"^"
    # startchar <
    r"<?"
    # protocol identifier
    r"(?:(?:https?|ftp)://)"
    r"(?:"
    r"(localhost)"
    r"|"
    # host name
    r"(?:(?:[a-z\u00a1-\uffff0-9]-?)*[a-z\u00a1-\uffff0-9]+)"
    # domain name
    r"(?:\.(?:[a-z\u00a1-\uffff0-9]-?)*[a-z\u00a1-\uffff0-9]+)*"
    # TLD identifier
    r"(?:\.(?:[a-z\u00a1-\uffff]{2,}))"
    r")"
    # port number
    r"(?::\d{2,5})?"
    # resource path
    r"(?:/\S*)?"
    # query string
    r"(?:\?\S*)?"
    # endchar >
    r">?"
    r"$",
    re.UNICODE | re.IGNORECASE
)
def is_url(value):
    """Return whether or not given value is a valid URL.

    :param value: string to test
    :return: a truthy ``re.Match`` when ``value`` is a valid
        http/https/ftp URL (optionally wrapped in ``<...>``),
        otherwise ``None``.
    """
    return _URL_REGEX.match(value)
3a50e541a9e62156a89693d15d248080abb3718f
698,716
def ysum_ysq_count(ysum, ysq, counts):
    """Calculate mean and error given accumulated sum of val and sq.

    Args:
        ysum (np.array): accumulated sum of values, (nentry,)
        ysq (np.array): accumulated sum of squares, (nentry,)
        counts (np.array): number of hits for each entry, (nentry,)
    Return:
        (np.array, np.array, np.array): (sel, ym, ye),
        (valid entries, mean, error)

    Entries with fewer than two hits are excluded (the error estimate
    divides by counts-1); ``sel`` marks the entries that were kept.
    """
    sel = counts > 1  # need to divide by counts-1
    n = counts[sel]
    ym = ysum[sel] / n
    mean_sq = ysq[sel] / n
    # Standard error of the mean from <y^2> - <y>^2.
    ye = ((mean_sq - ym ** 2) / (n - 1)) ** 0.5
    return sel, ym, ye
ecdb04c291b28bb2ad8385be3b09a45a32a72c05
698,717
def remote_call(method):
    """Decorator to set a method as callable remotely (remote call).

    Marks the function with a ``remote_call`` attribute and returns it
    unchanged.
    """
    setattr(method, 'remote_call', True)
    return method
ab276cbdb190185c7e46c5205df25f022a56a097
698,719
from pathlib import Path
def node(ph5, path, class_name):
    """
    Get the node handle of a given path and class name

    :param ph5: an open ph5 object
    :type ph5: ph5 object
    :param path: path to node
    :type path: string
    :param class_name: name of class to get
    :type class_name: string
    """
    target = Path(path)
    # Look the node up by its parent directory and leaf name.
    return ph5.get_node(str(target.parent), name=target.name,
                        classname=class_name)
2a8090952fdec3dda7490844990f41c8e154c3af
698,721
import os
def individuals_all():
    """Return path to .ttl-file containing _all_ triples.

    The file lives in the ``data`` directory next to this module.
    """
    here = os.path.dirname(__file__)
    raw = os.path.join(here, "data", "individuals_with_reasoning.ttl")
    return os.path.normpath(raw)
d51c5cf49acc4eab107429964518ecd52dbe916f
698,722
def indian_word_currency(value):
    """
    Converts a large integer number into a friendly text representation.
    Denominations used are: Hundred, Thousand, Lakh, Crore

    Examples:
    1000 becomes 1 Thousand
    15000 becomes 15 Thousands
    15600 becomes 15.60 Thousands
    100000 becomes 1 Lakh
    1125000 becomes 11.25 Lakhs
    10000000 becomes 1 Crore
    56482485 becomes 5.64 Crore
    :return: String
    """
    # Small numbers are returned verbatim rather than worded.
    if isinstance(value, int) and value < 100:
        return str(value)
    if isinstance(value, float) and value < 99:
        return str(value)

    try:
        # String inputs: pass small numerics through unchanged; any
        # non-numeric string falls into the except and is returned as-is.
        if isinstance(value, str):
            if '.' not in value and int(value) < 99:
                return value
            if float(value) < 99:
                return value
    except (ValueError, TypeError):
        return value

    # Work on the integer part only; the length picks the denomination.
    value_integer = str(value).split('.')[0]
    value_len = len(value_integer)
    if value_len > 7:
        # Crores: 1,00,00,000 and above.
        crores = value_integer[:-7]
        lakhs = value_integer[-7:-5]
        if crores == '1' and lakhs == '00':
            return '1 Crore'
        if lakhs == '00':
            return '%s Crores' % crores
        return '%s.%s Crores' % (crores, lakhs)
    elif value_len > 5:
        # Lakhs: 1,00,000 - 99,99,999.
        lakhs = value_integer[:-5]
        thousands = value_integer[-5:-3]
        if lakhs == '1' and thousands == '00':
            return '1 Lakh'
        if thousands == '00':
            return '%s Lakhs' % lakhs
        return '%s.%s Lakhs' % (lakhs, thousands)
    elif value_len > 3:
        # Thousands: 1,000 - 99,999.
        thousands = value_integer[:-3]
        hundreds = value_integer[-3:-1]
        if thousands == '1' and hundreds == '00':
            return '1 Thousand'
        if hundreds == '00':
            return '%s Thousands' % thousands
        return '%s.%s Thousands' % (thousands, hundreds)
    else:
        # Hundreds: 100 - 999.
        # NOTE(review): a value with fewer than 3 digits that reaches
        # this branch (e.g. the string '99') yields an empty `hundreds`
        # slice and an odd result like '.99 Hundreds' — confirm intended.
        hundreds = value_integer[:-2]
        tens_ones = value_integer[-2:]
        if hundreds == '1' and tens_ones == '00':
            return '1 Hundred'
        if tens_ones == '00':
            return '%s Hundreds' % hundreds
        return '%s.%s Hundreds' % (hundreds, tens_ones)
d162725f588d96a7dc3ead80b5278d71187bf4c1
698,723
import os
def listdir_matches(match):
    """Returns a list of filenames contained in the named directory.

    Only filenames which start with `match` will be returned.
    Directories will have a trailing slash.
    """
    cut = match.rfind('/')
    if cut == -1:
        # No directory component: search the current directory.
        dirname, match_prefix, result_prefix = '.', match, ''
    elif cut == 0:
        # Absolute path at the root.
        dirname, match_prefix, result_prefix = '/', match[1:], '/'
    else:
        dirname = match[:cut]
        match_prefix = match[cut + 1:]
        result_prefix = dirname + '/'

    results = []
    for filename in os.listdir(dirname):
        if not filename.startswith(match_prefix):
            continue
        candidate = result_prefix + filename
        # 0x4000 is the S_IFDIR bit of st_mode.
        if os.stat(candidate)[0] & 0x4000:
            candidate += '/'
        results.append(candidate)
    return results
8b47709f09453060739215a1b2ac275a4dfc7f11
698,724
def txfDate(date):
    """Returns a date string in the TXF format, which is MM/DD/YYYY."""
    return '{:%m/%d/%Y}'.format(date)
fd1f19b4080447379ec3ec57be0f2af047099673
698,726
import datetime
def sobject_to_dict(obj, key_to_lower=False, json_serialize=False):
    """
    Converts a suds object to a dict. Includes advanced features.

    :param json_serialize: If set, changes date and time types to iso string.
    :param key_to_lower: If set, changes index key name to lower case.
    :param obj: suds object
    :return: dict object
    """
    # Leaf values (anything without suds' __keylist__) are returned
    # as-is, except temporal types when ISO serialization is requested.
    if not hasattr(obj, '__keylist__'):
        is_temporal = isinstance(
            obj, (datetime.datetime, datetime.time, datetime.date))
        if json_serialize and is_temporal:
            return obj.isoformat()
        return obj

    result = {}
    for name in obj.__keylist__:
        value = getattr(obj, name)
        if key_to_lower:
            name = name.lower()
        if isinstance(value, list):
            result[name] = [
                sobject_to_dict(item, json_serialize=json_serialize)
                for item in value
            ]
        else:
            result[name] = sobject_to_dict(value, json_serialize=json_serialize)
    return result
5915fdd52bee26a0f4bfac67fc1140ab3d6a49a3
698,727
def allInstances(cls):
    """Return cls instances for each of this class's set

    Builds one instance per entry in ``cls.set`` (keyed by choice name)
    and returns them ordered by the choice's ``value``.
    """
    # Pair each choice value with a freshly constructed instance so the
    # sort below orders instances by value.
    items = [
        (choice.value, cls(name=choice.name))
        for choice in cls.set.values()
    ]
    # NOTE(review): tuple sort falls back to comparing the instances
    # themselves when two values are equal — assumes values are unique
    # or instances are comparable; confirm.
    items.sort()
    # Drop the sort keys, keeping only the instances.
    items = [v[1] for v in items]
    return items
702a77b9c247ec7976201cd0829a728c6c5fdd1f
698,728
from typing import Union
def get_json_url(year: Union[str, int], kind: str = "photos") -> str:
    """Returns url for the json data from news-navigator for given
    `year` and `kind`"""
    base = "https://news-navigator.labs.loc.gov/prepackaged"
    return f"{base}/{year}_{kind}.json"
0c7650f667cb1fceebbd73e7a6eaf00145061d38
698,729
def move_right(l: list) -> list:
    """Rotate the list one step to the right: the last element moves to
    the front.  Raises IndexError for an empty list."""
    last = l[-1]
    return [last] + l[:-1]
c645595532fa2bc89b58cc38eb79feaeb43cf9b2
698,730
import torch
def kl_energy(potential_fn, rates_fn, passive_rates_fn):
    """
    Calculates the mean log-likelihood ell of a trajectory.
    Compares Feynman-Kac measure with parametrized measure.
    Normalizes mean log-likelihood by dividing by total time and number
    of sites.

    :param potential_fn: callable(traj, passive_rates) -> per-step potential
    :param rates_fn: parametrized rates network applied to flattened states
    :param passive_rates_fn: callable giving the passive/reference rates
    :return: validate_fn(traj, dts, transition_indices) -> (energy, log_rn)
    """
    def validate_fn(traj, dts, transition_indices):
        # pre-process: drop the final state — each remaining step pairs
        # with the transition that leaves it.
        traj = traj[:-1]
        # assumes traj is (T, B, ..., L, L) — TODO confirm against caller
        s = traj.shape
        # compute rates, view as (TxB, L, L) to use net, then view as (T, B, DxLxL)
        rates = rates_fn((traj ).view((-1,)+s[2:])).view(s[0:2]+(-1,))
        passive_rates = passive_rates_fn(traj)
        potential = potential_fn(traj, passive_rates)
        potential = torch.sum(potential * dts, dim=0) # integrate over time
        # Flatten passive rates to match the (T, B, DxLxL) rate layout.
        passive_rates = passive_rates.view(s[0:2]+(-1,))

        def get_log_rn():
            """
            Calculate log_rn between parametrized and passive dynamics.
            """
            # Calculate time integral (continuous) part of log_rn
            # sum over lattice sites and directions of rates
            kinetic = (passive_rates - rates).sum(dim=-1)
            continuous_log_rn = torch.sum(kinetic * dts, dim=0) # integrate over time

            # Calculate transition part of log_rn: gather the rate of the
            # transition actually taken at each step and accumulate the
            # log-ratio of parametrized to passive rate.
            transition_indices_ = transition_indices.unsqueeze(dim=-1)
            chosen_rates = rates.gather(dim=-1, index=transition_indices_)
            chosen_passive_rates = passive_rates.gather(dim=-1, index=transition_indices_)
            transition_log_rn = torch.sum( # over time
                torch.log(chosen_rates / chosen_passive_rates), dim=0).squeeze()
            return continuous_log_rn + transition_log_rn

        log_rn = get_log_rn()
        T = dts.sum(dim=0).mean(dim=0) # sum over time, average over batch
        N = s[-1] * s[-2] # number of lattice sites
        # Normalize by lattice size and total time.
        energy = (potential + log_rn) / (N * T)
        return energy, log_rn
    return validate_fn
0d081d08e379c7cb5021f565e949dc7e7a48a7d5
698,731
def id_for_dst(value):
    """generate a test id for dest"""
    return "dst->{}".format(value.comment)
8db759514d720fd15f437a45ad320edaf3b51c93
698,732
def does_not_contain(token, value):
    """
    Validate that `token` is not a substring of `value`

    :param: token: string e.g. : | .
    :param: value: dictionary, list, or str
    :raises ValueError: when token occurs in the string, in any list
        element, or in any string key or string value of a dict
    :return: True when the token is absent
    """
    if isinstance(value, str) and token in value:
        raise ValueError(f"{value} cannot contain {token}")
    if isinstance(value, list):
        # Evaluate every element (a non-str element raises TypeError).
        hits = [token in v for v in value]
        if any(hits):
            raise ValueError(str(value) + f" cannot contain {token}")
    if isinstance(value, dict):
        hits = [
            (isinstance(key, str) and token in key)
            or (isinstance(val, str) and token in val)
            for key, val in value.items()
        ]
        if any(hits):
            raise ValueError(str(value) + f" cannot contain {token}")
    return True
06acdf6643a84f2991180c54cb116d5450bf0a9c
698,733
def get_dummy_metadata():
    """
    Incase of a temporal gap, the following metadata is used
    """
    return {"product_file": "None"}
71a8a053579a153b9f78fffefe935ed4e9e40d01
698,734
def get_callback_result(callback, res_str):
    """Wrap a serialized response in a JSONP callback invocation.

    :param callback: JSONP callback name, or None for a plain response
    :param res_str: serialized response body
    :return: ``res_str`` unchanged when callback is None, otherwise
        ``"callback(res_str)"``
    """
    if callback is None:
        return res_str
    # Bug fix: the previous code interpolated callback.encode("utf-8"),
    # which on Python 3 renders the bytes repr ("b'...'") into the output.
    return '%s(%s)' % (callback, res_str)
c78d7ff07b9fa07499f9dad41805c67308aa8646
698,735
def vdecomp(v, m=None, minlen=None, maxlen=None):
    """
    Decompose a vector into components.  An nD stack of m-element
    vectors will return a tuple with up to m elements, each of which
    will be an nD stack of scalars.

    :param v: nD stack of m-element vectors, a numpy (n+1)D array with
        shape (n_stack0,n_stack1,...,n_stackn-2,m,n_stackn-1)
    :param minlen: If passed, this will pad out the returned vector
        components with zero scalars such that the returned tuple has
        minlen components. We do zero scalars rather than zero arrays of
        the same size as the other components to save memory, since a
        scalar is compatible by broadcasting with an array of any size.
    :param maxlen: If passed, this will restrict the returned vector
        components to the given size, even if the input vector has more
        components.
    :param m: If passed, treat the input as if it were an nD stack of
        m-element vectors. If the actual stack has more components,
        don't return them. If it has less, return scalar zeros for the
        missing components.
    :return: A tuple. Each element is a vector component. Vector
        components pulled from the vector will be an nD stack of
        scalars; made-up components are scalar zeros.

    Note: If you pass maxlen<minlen, the result is still well-defined,
    since maxlen is applied first, then minlen.  E.g. a vector with
    m=4, minlen=7 and maxlen=2 yields the first two components followed
    by 5 zeros.

    Example:
        v=np.zeros((24,3,50)) #Suitable for holding multiple trajectories
        #OR
        v0=np.zeros((3,50)) #Initial conditions for 50 trajectories
        t=np.arange(24) #Time steps
        v=rk4(x0=v0,t=t) #Result shape will be (t.size,)+v0.shape, IE (24,3,50)
        x,y,z=vdecomp(v) #x, y, z are each numpy arrays of shape (24,50)
    """
    # m is shorthand for setting both bounds at once; explicit
    # minlen/maxlen take precedence when given.
    if maxlen is None and m is not None:
        maxlen = m
    if minlen is None and m is not None:
        minlen = m
    # More than 2 dims => vector components live on the second-to-last
    # axis; a plain 2D array keeps components on axis 0.
    ndStack = len(v.shape) > 2
    efflen = v.shape[-2 if ndStack else 0]
    if maxlen is not None and maxlen < efflen:
        efflen = maxlen
    # Pull each component out as a stack of scalars.
    result = tuple([v[..., i, :] if ndStack else v[i, ...] for i in range(efflen)])
    if minlen is not None and minlen > efflen:
        # Pad with broadcastable scalar zeros (not full-size arrays).
        result = result + (0,) * (minlen - efflen)
    return result
cbc846be6a07082711e5c9faa191181c8cdc75f8
698,736