content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def node_region(patched_ast_node):
    """Return the source region recorded on a patched AST node."""
    region = patched_ast_node.region
    return region
870fc43f1b635d5b5eb5e5a4ffa80ab322af168d
18,818
def source2id(vocab, text, sequence_length=7):
    """Convert a token sequence to vocabulary ids, padded to a fixed length.

    Unknown tokens map to vocab['<UNK>']; the result is right-padded with
    vocab['<PAD>'] up to ``sequence_length``.  Inputs longer than
    ``sequence_length`` are returned unpadded and untruncated (matching the
    original behavior, where the negative pad count produced no padding).

    :param vocab: mapping token -> id; must contain '<UNK>' and '<PAD>'
    :param text: iterable of tokens
    :param sequence_length: target padded length (previously hard-coded to 7)
    :return: list of ids
    """
    ids = [vocab.get(word, vocab['<UNK>']) for word in text]
    pad_count = max(0, sequence_length - len(text))
    return ids + [vocab['<PAD>']] * pad_count
682eda8d67dd3a88e7c6c61abef754b1669d3760
18,819
def get_NM_number(tags):
    """Return the number of mismatches found in the NM tag.

    Arguments:
    - `tags`: iterable of (tag_name, value) pairs

    Returns the NM value, or 10 if no NM tag is present.
    """
    nm_values = (value for name, value in tags if name == 'NM')
    return next(nm_values, 10)
ce192b22ee73bcb71d85a3ff22dd8cd005ca2633
18,821
def _dense_q_minus_grad(q, dense_adjacency_matrix, s, alpha): """Computes q-grad on dense arguments.""" return (1. - alpha) * dense_adjacency_matrix.T @ q - alpha * s
9ac5c097eeef66f203e06368db7ef7e0778cb847
18,822
def _extract_target(data, target_col): """Removes the target column from a data frame, returns the target col and a new data frame minus the target.""" target = data[target_col] train_df = data.copy() del train_df[target_col] return target, train_df
726a76e254c1d6dc120b590d9d7128986ddc9227
18,823
def score_compressed_state(total, trumps):
    """Calculate score from compressed state.

    Tries the largest trump count first, dropping trumps until the
    score fits within 31 (or none remain).
    """
    score = 0
    remaining = trumps
    while remaining >= 0:
        score = total + remaining * 10
        if score <= 31:
            break
        remaining -= 1
    return score
0d6652174c2f3f409431c15f38874b8057b94921
18,824
def site(client):
    """Returns a Site object created through the client's sites endpoint."""
    payload = {'name': 'Foo', 'description': 'Foo site.'}
    return client.sites.post(payload)
f7f23d95c151f14b8a0782b7fc16e0172a3dfe74
18,827
from typing import List def _collapse_data(table: List[List[List[str]]]) -> List[List[str]]: """combine data rows to return a simple list of lists""" result: List[List[str]] = [] for row in table: new_row: List[str] = [] for line in row: if new_row: for i, item in enumerate(line): new_row[i] = (new_row[i] + '\n' + item).strip() else: new_row = line result.append(new_row) return result
117a6446c006cbd232aad2e018be3fe336faa550
18,828
def title_format(name):
    """Title-case the words of a personal name while keeping short
    connective words such as "de" and "das" (3 letters or fewer) in
    lowercase."""
    formatted = []
    for part in name.split(' '):
        formatted.append(part.title() if len(part) > 3 else part)
    return ' '.join(formatted)
e9d0501ddfc0714f9ca4acba5b1bdf3f379b0243
18,831
from typing import List
from typing import Dict
import collections


def merge_series(list_of_metas: List[Dict]) -> Dict:
    """Group series metadata dicts by their SeriesInstanceUID."""
    grouped = collections.defaultdict(list)
    for meta in list_of_metas:
        uid = meta["SeriesInstanceUID"]
        grouped[uid].append(meta)
    return grouped
42b23b504c6e973913f93da2e86f71f8ea9baec2
18,833
def _rlimit_min(one_val, nother_val): """Returns the more stringent rlimit value. -1 means no limit.""" if one_val < 0 or nother_val < 0 : return max(one_val, nother_val) else: return min(one_val, nother_val)
ba286aa6b36a53d691a13b775bbbcfd8722c8b8e
18,834
def nspath_eval(xpath, nsmap):
    """Return an etree friendly xpath with prefixes expanded to namespaces."""
    expanded = []
    for chunk in xpath.split('/'):
        prefix, localname = chunk.split(':')
        expanded.append('{%s}%s' % (nsmap[prefix], localname))
    return '/'.join(expanded)
1e859c10314d3381a9186f1118bc692887e35ffe
18,835
from typing import Dict
from typing import Any
from typing import Callable
import argparse


def arg_map(**mapping: Dict[Any, Any]) -> Callable[[Any], Any]:
    """
    Create function to map arguments using given `mapping`.

    :param mapping: mapping between input arguments and their desired values.
    :return: function to perform argument mapping.
    """
    def parse_argument(arg: Any) -> Any:
        if arg not in mapping:
            choices = ", ".join(
                sorted(repr(choice) for choice in mapping.keys())
            )
            msg = "invalid choice: {!r} (choose from {})"
            raise argparse.ArgumentTypeError(msg.format(arg, choices))
        return mapping[arg]
    return parse_argument
d24d94be7feafc86d2842fd94a961a2257deefe9
18,836
def multi_find(s, r):
    """
    Internal function used to decode the Formants file generated by Praat.

    Returns every index in ``s`` where substring ``r`` starts, including
    overlapping matches.  Cleanup: removed a dead ``n = -1`` assignment
    and a no-op ``else: i = i + 1`` that pointlessly reassigned the loop
    variable; behavior is unchanged.
    """
    r_len = len(r)
    matches = []
    if len(s) >= r_len:
        for i in range(len(s)):
            if s[i:i + r_len] == r:
                matches.append(i)
    return matches
c5cb9bbc8ed629b68ac1d7d6cbb2c887c8de1070
18,837
def select_sign(pathways, sign):
    """Selects all pathways matching the overall sign.

    A positive ``sign`` selects pathways with positive sign; otherwise
    pathways with strictly negative sign are selected.
    """
    if sign > 0.0:
        return [pway for pway in pathways if pway.sign > 0.0]
    return [pway for pway in pathways if pway.sign < 0.0]
b936ee7c18034ab869f26036f01e7c2d1399ff2a
18,838
def _get_fields(line, delimiter=' ', remove_fields=['', '\n']): """Gets the fields from a line delimited by delimiter and without entries in remove_fields Parameters ---------- line : str Line to find the fields delimiter : str Text separating fields in string remove_fields : list of str Fields to delete Returns ------- fields : list of str Fields of the line """ for remove_field in remove_fields: line = line.replace(remove_field, '') all_fields = line.split(delimiter) fields = [] for field in all_fields: if all([field != remove_field for remove_field in remove_fields]): # Add field if it does not match any of the remove_fields fields.append(field) return fields
adfa2e6a1be18049b7b956bef7a0f3c121120641
18,839
def dict_alert_msg(form_is_valid, alert_title, alert_msg, alert_type):
    """
    Build the payload for an internal alert message to the user.

    :param form_is_valid: True/False (lowercase true/false once serialized
        to JSON)
    :param alert_title: string
    :param alert_msg: string
    :param alert_type: one of 'success', 'error', 'warning', 'info'
    :return: dict carrying the four alert fields
    """
    return {
        'form_is_valid': form_is_valid,
        'alert_title': alert_title,
        'alert_msg': alert_msg,
        'alert_type': alert_type,
    }
f9f82c9b809be2ad1d6d872d6e00610a5ebc3493
18,840
def _count_openephys_sessions(filename): """Open-ephys can have multiple sessions. We count how many files are in the format: - Continuous_Data.openephys - Continuous_Data_2.openephys - Continuous_Data_3.openephys """ sessions = [] for f in filename.glob('*.openephys'): session_number = f.stem[16:] if session_number == '': sessions.append(1) else: sessions.append(int(session_number)) return sorted(sessions)
2828c5e31cfeafbec51e6ed46994e8a6deeaac5c
18,841
def check_no_overlap(op_start_times: list, machines_ops_map: dict, processing_time: dict):
    """
    Check if the solution violates the no overlap constraint.
    Returns True if the constraint is violated.

    Keyword arguments:
    op_start_times (list): Start times for the operations
    machines_ops_map(dict): Mapping of machines to operations
    processing_time (dict): Operation processing times
    """
    durations = list(processing_time.values())
    for op_ids in machines_ops_map.values():
        starts = [op_start_times[op] for op in op_ids]
        lengths = [durations[op] for op in op_ids]
        # Two operations start at the same time on the same machine.
        if len(set(starts)) != len(starts):
            return True
        # Sort by start time and check consecutive operations for overlap.
        starts, lengths = zip(*sorted(zip(starts, lengths)))
        for j in range(len(lengths) - 1):
            if starts[j] + lengths[j] > starts[j + 1]:
                return True
    return False
90736c066987466adcf641f9999542466c17fc8e
18,842
import os def my_loc() -> str: """ Since moving this to a library file... now it gives an undesired path. """ return os.path.dirname(os.path.realpath(__file__)).replace('lib', '')
0f75605aa265c7fffe87bdeb1249c41cc3709bb3
18,843
def p_stat_repeat(p):
    """stat : stat NEWLINE stat"""
    combined = p[1] + p[3]
    p[0] = combined
    return p
b0a7332fee9cee28debaccc0b6ca948b019c4633
18,844
def filter_for_celltypes(x, y, celltypes):
    """
    Filter data for cells belonging to specified celltypes
    :param x: feature frame (rows are cells)
    :param y: label frame with a 'Celltype' column
    :param celltypes: celltypes to keep
    :return: (x, y) restricted to the matching rows
    """
    mask = [celltype in celltypes for celltype in list(y['Celltype'])]
    return (x.loc[mask, :], y.loc[mask, :])
e4cf59cad4903d128eedace71546777dce8dae46
18,846
def leap_year(year, calendar="standard"):
    """Determine if year is a leap year.

    Args:
        year (int): Year to assess.
        calendar (optional str): Calendar type.

    Returns:
        bool: True if year is a leap year.
    """
    leap = False
    # Divisible-by-4 rule shared by all supported calendars.
    if (calendar in ["standard", "gregorian", "proleptic_gregorian", "julian"]) and (
        year % 4 == 0
    ):
        leap = True
    # Proleptic Gregorian: century years not divisible by 400 are never leap.
    if (
        (calendar == "proleptic_gregorian")
        and (year % 100 == 0)
        and (year % 400 != 0)
    ):
        leap = False
    # NOTE(review): for "standard"/"gregorian" the century correction is only
    # applied to years before 1583 (pre-Gregorian adoption); century years
    # from 1583 onward with year % 400 != 0 are still reported as leap here —
    # confirm this matches the intended calendar semantics.
    elif (
        (calendar in ["standard", "gregorian"])
        and (year % 100 == 0)
        and (year % 400 != 0)
        and (year < 1583)
    ):
        leap = False
    return leap
d1c569747082e5f75660cda662daab5f4d77269b
18,848
def dosya_oku(dosyaIsmi):
    """
    Read the file at the given path and return its lines.

    Fix: the original left the file handle open — its ``close()`` call was
    placed after ``return`` and therefore unreachable; a ``with`` block now
    guarantees the file is closed.

    @param string dosyaIsmi : file path
    @return list : lines of the file
    """
    with open(dosyaIsmi, 'r', encoding='utf8', errors="ignore") as reader:
        return reader.readlines()
152cc87a02dd6ed25a0cb6f3a43913506e0bdc69
18,849
def index():
    """ This is the display view. """
    greeting = "gutn tag!"
    return greeting
a1401ffa3635d613e9b02ae04834823a9a2ecaee
18,850
import requests


def shock_download(url, token):
    """ Download data from a Shock node.

    Parameters
    ----------
    url : str
        URL to Shock node
    token : str
        Authentication token for Patric web services

    Returns
    -------
    str
        Data from Shock node
    """
    # NOTE(review): verify=False disables TLS certificate checking —
    # presumably required for the Patric endpoints; confirm before reuse.
    response = requests.get(url + '?download', headers={'Authorization': 'OAuth ' + token}, verify=False)
    if response.status_code != requests.codes.OK:
        # raise_for_status() raises requests.HTTPError for 4xx/5xx codes.
        response.raise_for_status()
    return response.text
7a2e9855ca807892cef16af370755eac3644f9f5
18,851
import time


def date_filename():
    """Provides an html file name based on the current date and time.

    Returns:
        str -- File name
    """
    timestamp = time.strftime('%Y%m%d%H%M%S')
    return timestamp + '.html'
72012a6cef29567a9fb4e1642db29ab720904aa5
18,852
def trim_patch_boundary(img, patch_boundary, h, w, pH, sH, pW, sW, sf): """ Remove both rows and columns to reduce edge effect around patch edges. """ # trim rows if pH * sH >= patch_boundary: img = img[:, patch_boundary * sf:, :, :] if (pH + 1) * sH + patch_boundary <= h: img = img[:, :-patch_boundary * sf, :, :] # trim columns if pW * sW >= patch_boundary: img = img[:, :, patch_boundary * sf:, :] if (pW + 1) * sW + patch_boundary <= w: img = img[:, :, :-patch_boundary * sf, :] return img
8ebdad74d227098a286a16ed9678ca24b992e402
18,853
def read_all_file(all_file,components=None,verbose=False):
    """Read in JXP-style .all file in an appropriate manner

    NOTE: If program breaks in this function, check the all file
    to see if it is properly formatted.

    Fills components if inputted

    Parameters
    ----------
    all_file : str
        Full path to the .all file
    components : list, optional
        List of AbsComponent objects
    """
    # Deliberately disabled: this code path has been superseded.
    assert False  # USE LINETOOLS
    # Read.  NOTE(review): `ascii` and `np` are not imported in this block —
    # the code below is unreachable and would fail if re-enabled as-is.
    if verbose:
        print('Reading {:s}'.format(all_file))
    names=('Z', 'ion', 'logN', 'sig_logN', 'flag_N', 'flg_inst')  # was using flg_clm
    table = ascii.read(all_file, format='no_header', names=names)
    # Fill components by matching (Z, ion) pairs against the table rows.
    if components is not None:
        allZ = np.array([comp.Zion[0] for comp in components])
        allion = np.array([comp.Zion[1] for comp in components])
        # Loop over table rows
        for row in table:
            mt = np.where((allZ==row['Z'])&(allion==row['ion']))[0]
            if len(mt) == 0:
                pass
            elif len(mt) == 1:
                # Unique match: copy column density fields onto the component.
                components[mt[0]].flag_N = row['flag_N']
                components[mt[0]].logN = row['logN']
                components[mt[0]].sig_logN = row['sig_logN']
            else:
                raise ValueError("Found multiple component matches in read_all_file")
    # Return the parsed table.
    return table
e1c1396cc1efde43b2d250bb5870af99fcd2f31c
18,854
def url_add(tag=None, **kwargs):
    """
    Displays a box containing an "add page" entry box and its form

    If tag is specified, it is the slug of a tag which will be assigned
    to the URL when it is submitted.
    """
    slug = tag.slug if tag else tag
    kwargs["tag"] = slug
    return kwargs
03e2be366d51c1b0990110fb7e41626ad1a9106c
18,856
import os


def find_images_from_tree(path):
    """ Collect images from a tree with one folder per identity """
    print("Searching for images in {}".format(path))
    image_files = []
    image_extensions = ("jpg", "jpeg", "png", "bmp")
    for root, dirs, files in os.walk(path):
        for filename in files:
            if filename.lower().endswith(image_extensions):
                image_files.append(root + os.sep + filename)
    return image_files
f63e83d55ef599a4d52588061710af27d9ee14b0
18,857
def create_html_email_href(email: str) -> str:
    """
    HTML version of an email address

    :param email: the email address
    :return: email address for use in an HTML document
    """
    if not email:
        return ""
    return f'<a href="mailto:{email}">{email}</a>'
c72bb76f3b1fc3d30571a879d1b8172d8c4e77cd
18,858
def mock_listdir(dir_map, dir):
    """ mock os.listdir() using a prepared directory mapping; unknown
    directories yield an empty listing. """
    if dir in dir_map:
        return dir_map[dir]
    return []
e274b10dcf3ea25538dd712a3cb91ce873d83bdb
18,859
def dummyListener(source, intent):
    """Sample intent listener

    Intent listeners (see :any:`registerIntentListener`) follow this
    prototype.  A listener may handle `intent` and perform an appropriate
    action.  Returning a truthy value marks the intent as handled, stopping
    further callbacks; if :any:`Intent.accept` was not called, the intent is
    auto-accepted and the returned value becomes the result
    (:any:`Intent.result`).  Returning False gives other callbacks a chance
    to handle it.

    :param source: object which sent the intent
    :type source: QObject
    :param intent:
    :type intent: IntentEvent
    :returns: True if the listener handled the intent, else False
    :rtype: bool
    """
    return False
c994b08f5d409debe80fb9f8a29380355a190e12
18,860
def compute_velocities(U_100):
    """Compute the secondary air flow rates (25-150%) based on the 100%
    airflow rate.

    Returns ten strings: the RHS velocities (positive, truncated to at most
    5 characters) for 25/50/100/125/150% of U_100, followed by the LHS
    velocities (negated, truncated to at most 6 characters to accommodate
    the minus sign).
    """
    quarter_scale = 0.25        # 25 percent scale
    half_scale = 0.50           # half scale
    five_quarter_scale = 1.25   # adding 25% to U
    six_quarter_scale = 1.5     # adding 50% to U
    # Surrounding velocities RHS
    U_25_RHS = U_100*quarter_scale
    U_50_RHS = U_100*half_scale
    U_100_RHS = U_100
    U_125_RHS = U_100*five_quarter_scale
    U_150_RHS = U_100*six_quarter_scale
    # Surrounding velocities LHS (mirrored, hence negated)
    U_25_LHS = -1*U_100*quarter_scale
    U_50_LHS = -1*U_100*half_scale
    U_100_LHS = -1*U_100
    U_125_LHS = -1*U_100*five_quarter_scale
    U_150_LHS = -1*U_100*six_quarter_scale
    # format RHS velocities as strings with max length 5
    U_25_RHS_str = str(U_25_RHS)[:5]
    U_50_RHS_str = str(U_50_RHS)[:5]
    U_100_RHS_str = str(U_100_RHS)[:5]
    U_125_RHS_str = str(U_125_RHS)[:5]
    U_150_RHS_str = str(U_150_RHS)[:5]
    # format LHS velocities as strings with max length 6 (extra char for '-')
    U_25_LHS_str = str(U_25_LHS)[:6]
    U_50_LHS_str = str(U_50_LHS)[:6]
    U_100_LHS_str = str(U_100_LHS)[:6]
    U_125_LHS_str = str(U_125_LHS)[:6]
    U_150_LHS_str = str(U_150_LHS)[:6]
    return U_25_RHS_str, U_50_RHS_str, U_100_RHS_str, U_125_RHS_str, U_150_RHS_str, U_25_LHS_str, U_50_LHS_str, U_100_LHS_str, U_125_LHS_str, U_150_LHS_str
1a5274100dbb8c316bf7506a8213bcfa66036994
18,861
import sqlite3


def isSqlite3DB(filepath):
    """ Returns whether file at filepath is a sqlite3 database.

    Args:
        filepath (str): The file to check.

    Returns:
        bool: Whether the database could be opened and queried.
    """
    try:
        connection = sqlite3.connect(filepath)
        connection.execute("pragma schema_version;")
    except Exception:
        return False
    connection.close()
    return True
54a0c42faea75ffa9d2571b78976db9116c81207
18,862
def _resolve_refs(variables: dict, ref_table: dict) -> dict: """Replaces `ref_table` refs while copying `variables` to the output.""" new_vars = {} for num, kvpair in enumerate(variables.items()): k, v = kvpair new_vars[k] = ref_table[str(v)] return new_vars
d7c3110aa50f061234e9cc9f2c4a225ff4cbba41
18,864
def get_sea_attribute_cmd(seaname):
    """
    Get pvid, pvid_adapter, and virt_adapters from the configured SEA
    device, plus the state of the SEA.

    :param seaname: sea device name
    :returns: A VIOS command to get the sea adapter's attributes.
    """
    template = ("ioscli lsdev -dev %(sea)s -attr pvid,pvid_adapter,virt_adapters;"
                "ioscli lsdev -type sea | grep %(sea)s")
    return template % {'sea': seaname}
18224e14e45b73ff716f4282aaff3e06c4584866
18,867
import numpy as np


def calculateNorm(var):
    """ Calculate normalized polar cap anomalies along axis 0 """
    climatology = np.nanmean(var, axis=0)
    spread = np.nanstd(var, axis=0)
    anom = (var - climatology) / spread
    print('Completed: Calculate normalized anomalies for polar cap!')
    return anom
e9e8db2286ced83aa3083722809cd6e11822486f
18,868
def input_vector():
    """Prompt for vector elements on stdin.

    Returns:
        list: the ints parsed from the entered line
    """
    raw = input('Элементы вектора: ')
    return [int(token) for token in raw.split()]
a50e733aa180ec2877fb9219f9062b7eac7f5475
18,869
import numpy


def s2dctmat(nfilt, ncep, freqstep):
    """Return the 'legacy' not-quite-DCT matrix used by Sphinx"""
    bins = numpy.arange(0.5, float(nfilt) + 0.5, 1.0, 'double')
    melcos = numpy.empty((ncep, nfilt), 'double')
    for row in range(0, ncep):
        omega = numpy.pi * float(row) / nfilt
        melcos[row] = numpy.cos(omega * bins)
    # Halve the first column, as in the legacy Sphinx formulation.
    melcos[:, 0] = melcos[:, 0] * 0.5
    return melcos
295aef6530ed350c9ebf90f3d276551aa50f8583
18,870
import sys import pkgutil def _importable_libraries(*libraries): """ Of all traceable_libraries, separate those available and unavailable in current execution path. """ available = [] unavailable = [] for library in libraries: if library in sys.modules or pkgutil.find_loader(library) is not None: available.append(library) else: unavailable.append(library) return available, unavailable
9152ae1ff82fdac1a199b3c8a43c46b2c9402efd
18,871
def askbookingInfoPlaces():
    """ Prompt repeatedly for the required room capacity until an integer
    is entered.

    Returns
    -------
    places : int

    Note: You can only pick dates from 01/01/2022, 00 till 12/31/2022, 23"
    """
    while True:
        raw = input("Please enter the capacity of the room: \n")
        try:
            return int(raw)
        except ValueError:
            # Non-numeric input: report and ask again.
            print("Error. Please provide numbers only.")
90630673d4646dfdfffd73bcb5fd1fcc359691b1
18,872
import itertools


def groupby():
    """Group sorted elements by their square."""
    def square(value):
        """Calculate the square of a number."""
        return value**2
    ordered = sorted(range(-2, 3), key=square)
    return {key: list(group)
            for key, group in itertools.groupby(ordered, key=square)}
e8e6ec4a4f34e0745a7f1b2f91cad808908a9587
18,873
import os


def get_progress(out_corenlp_dir: str) -> int:
    """
    Get the current progress of the NLP tool
    :param out_corenlp_dir: output dir; counts the .json files created so far
    :return: number of processed documents
    """
    return sum(1 for name in os.listdir(out_corenlp_dir)
               if name.endswith('.json'))
c59c08b890d33ef4ddd30a0e6c25ac225e32d64a
18,874
def remove_indent(code_block):
    """
    Strip the first line's indentation from every line of the block.

    :returns: str - the block unindented
    """
    lines = code_block.strip('\n').split('\n')
    first = lines[0]
    margin = len(first) - len(first.lstrip())
    return '\n'.join(line[margin:] for line in lines)
360af8f8ca15f9f73e3904402f521b67850902be
18,876
def zzx_neg(f):
    """Negate a polynomial in Z[x]. """
    return [coeff * -1 for coeff in f]
c813597dc9540c8d85221352da10db894de4aa4c
18,877
import inspect
import sys


def run_module(module_name_string, args_dict):
    """Run a module specified via CLI.

    Args:
        module_name_string (str): Module name to execute.
        args_dict (dict): CLI arguments dictionary.

    Returns:
        [any type returned by module]: Data returned by module. If any.
    """
    args = list()
    kwargs = dict()
    # Inspect the target callable's signature to split the CLI args into
    # positional (no default) and keyword (has default) arguments.
    # NOTE(review): eval() resolves the name in this module's globals; only
    # names starting with 'runAM' are actually invoked below — confirm that
    # module_name_string is always vetted upstream.
    module_signature = inspect.signature(eval(module_name_string))
    for module_param_name in module_signature.parameters.keys():
        if module_signature.parameters[module_param_name].default is inspect._empty:
            # if default value is not defined, the parameter is positional and mandatory
            if module_param_name in args_dict.keys():
                args.append(args_dict[module_param_name])
            else:
                # Abort the whole process on a missing mandatory argument.
                sys.exit('ERROR: Mandatory positional parameter {} is missing.'.format(module_param_name))
        else:
            if module_param_name in args_dict.keys():
                kwargs.update({
                    module_param_name: args_dict[module_param_name]
                })
    if module_name_string.startswith('runAM'):  # simple way to keep eval() safe
        return eval(module_name_string)(*args, **kwargs)
45c21cf66f560339019b6b2e224046afaa1148dc
18,878
def check_undirected(graph):
    """
    dict -> boolean
    Sanity check that every edge has its reverse edge present.
    """
    return all(node in graph[neighbor]
               for node in graph
               for neighbor in graph[node])
3ea285e60ec1e18a6ff641274124d4c9e8dfd95a
18,880
def get_all_indices(n):
    """get all the row, col indices for an (n, n) array"""
    pairs = [(row, col) for row in range(n) for col in range(n)]
    return map(list, zip(*pairs))
25db8d87540966afea0d2f3e6ee83397f716576b
18,884
def Initial_T_exponential(process_time, n, m, a):
    """[generate temperature declining process]

    Returns:
        [1D-np.array]: [temperature declining array]
    """
    initial = process_time.sum() / (5 * n * m)  # initial temperature
    decline = 1 - 2 * a / (n * (n - 1))         # declining rate
    temperatures = [initial]
    current = initial
    # Append geometrically declining temperatures until below 1e-3.
    while current >= 1e-3:
        current = current * decline
        temperatures.append(current)
    return temperatures
b0f2e39cf4a55ecf449ef90d73981909735af17c
18,885
def _secant(a, b, dfa, dfb): """Returns the secant interpolation for the minimum. The secant method is a technique for finding roots of nonlinear functions. When finding the minimum, one applies the secant method to the derivative of the function. For an arbitrary function and a bounding interval, the secant approximation can produce the next point which is outside the bounding interval. However, with the assumption of opposite slope condtion on the interval [a,b] the new point c is always bracketed by [a,b]. Note that by assumption, f'(a) < 0 and f'(b) > 0. Hence c is a weighted average of a and b and thus always in [a, b]. Args: a: A scalar real `Tensor`. The left end point of the initial interval. b: A scalar real `Tensor`. The right end point of the initial interval. dfa: A scalar real `Tensor`. The derivative of the function at the left end point (i.e. a). dfb: A scalar real `Tensor`. The derivative of the function at the right end point (i.e. b). Returns: approx_minimum: A scalar real `Tensor`. An approximation to the point at which the derivative vanishes. """ return (a * dfb - b * dfa) / (dfb - dfa)
50edfa4d3dfb214e6c3f0db595afe1282cb73f10
18,886
def join_by_dash(name):
    """ if the name 'Room 1' is given, the output would be 'Room-1' """
    words = name.split()
    return "-".join(words)
6819f447c4b2616007fae9f94e427b8df4c57c0b
18,888
def get_q_values(node):
    """
    Return all triples (state, action, q) found by recursively
    traversing the tree rooted at `node`.

    Parameters
    ----------
    node : Node
        Initial node.
    """
    if node.children == None:
        # Leaf: its own triple is the only contribution.
        return [[node.state_val, node.action_val, node.qVal]]
    triples = []
    for child in node.children:
        triples.extend(get_q_values(child))
    return triples
8c3015f3b9c44ec7bfaaaf8833b39d167598f2bc
18,890
import inspect
import os


def get_importing_module_filename(level=2):
    """Run this during the initialization of a module to return the absolute
    pathname of the module that it is being imported from.

    :param level: frames to walk up the call stack (default 2: the caller
        of the caller).  NOTE(review): the correct level depends on how this
        helper is wrapped — confirm at each call site.
    """
    # getouterframes(...)[level] is the frame `level` steps above this call;
    # frameinfo[0] is that frame's source filename.
    module_filename = inspect.getframeinfo(
        inspect.getouterframes(inspect.currentframe())[level][0])[0]
    return os.path.abspath(module_filename)
2d68e2e24af0bd6367ce95e13c496743208ad356
18,892
def change_linewidth(ax, lw=3):
    """change linewidth for each line plot in given axis

    Parameters
    ----------
    ax : mpl.axis
        axis whose line plots to modify
    lw : float, optional
        new line width, by default 3

    Returns
    -------
    ax : mpl.axis
        the same axis, for chaining
    """
    for line in ax.lines:
        line.set_linewidth(lw)
    return ax
b92294878351b6f251c99f43295a8e8e56995cd1
18,893
import torch def _category_weights(): """ Sum up the number of each category """ category_sizes = torch.Tensor( [[131., 199., 177., 157., 3446., 1689., 14838.,186.], [379., 366., 1705., 1297., 9746., 873., 1475.,4982.], [232., 257., 241., 3422., 126., 11225., 5105., 215.], [137., 115., 142., 3192., 3066., 10036., 3983., 152.], [781., 594., 1183., 4753., 664., 9038., 1394., 2416.], [140., 214., 150., 4644., 11075., 1974., 182., 2444.], [169., 133., 150., 104., 2090., 12701., 2157., 3319.]]) weight = category_sizes.sum(1, keepdim=True) / category_sizes return weight
6a18c71bd42eff90b5ab2f8d8a2d0fe9fdbf6086
18,895
def find_upper_bound(x):
    """
    find_upper_bound returns an integer n with n >= len(x), without using
    the len() function. The complexity is O(log(len(x)).
    """
    n = 1
    while True:
        try:
            x[n]
        except IndexError:
            return n
        n *= 2
4251791d0e270f29bc9f4d26bbf81c2292ffa50c
18,896
def yes_or_no(question):
    """Asks a yes or no question, and captures input. Blank input is
    interpreted as Y."""
    reply = str(input(question + ' (Y/n): ')).capitalize().strip()
    if not reply or reply[0] == 'Y':
        return True
    if reply[0] == 'N':
        return False
    # Anything else: re-prompt recursively.
    return yes_or_no("Please enter Y or N.")
f441e39b85dc7407bedce5c120aa9839f0a3064f
18,897
def get_material_nodes_by_type(material, bl_idname):
    """ Find material nodes with bl_idname type name """
    nodes = material.node_tree.nodes
    return (candidate for candidate in nodes if candidate.bl_idname == bl_idname)
4406bf09a0d44ecb29e917dc7ff7d78b179cfbf8
18,898
import os


def getFileExtension(filename):
    """Return the extension part of a filename, sans period, in lowercase."""
    _, extension = os.path.splitext(filename)
    return extension[1:].strip().lower()
d2fbedfe31cc9baec378d8332d4386c3fe2e7409
18,899
import requests


def _good_response(status_code):
    """
    Determines what status codes represent a good response from an API call.
    """
    ok_code = requests.codes.ok
    return status_code == ok_code
dd2aef136390640bae65ce41172b15da6e8fb078
18,900
def matching(tr_synth, tr_seis):
    """
    matching zeroes all values of the seismic trace `tr_seis` outside the
    limits of the synthetic trace `tr_synth`

    Bug fix: the loop bounds are now checked *before* indexing, so a
    synthetic trace that is zero everywhere no longer raises IndexError
    when the leading scan runs off the end of the array.

    Parameters
    ----------
    tr_synth : numpy.array
        The synthetic trace
    tr_seis : numpy.array
        The seismic trace

    Returns
    -------
    tr_seis_new: numpy.array
        Returns the new seismic trace (with zeros outside the area of
        comparison)
    """
    tr_seis_new = tr_seis.copy()
    # Zero the leading samples while the synthetic trace is still zero.
    i = 0
    while i < len(tr_seis) and tr_synth[i] == 0:
        tr_seis_new[i] = 0
        i += 1
    # Zero the trailing samples while the synthetic trace is still zero.
    i = len(tr_seis) - 1
    while i >= 0 and tr_synth[i] == 0:
        tr_seis_new[i] = 0
        i -= 1
    return tr_seis_new
55527b1302fd099e88353ea7c1ee447633a5a494
18,904
import random


def generate_trace_id():
    """
    Create a random number formatted as a hexadecimal string, suitable
    for use as a trace identifier.
    """
    value = random.randint(0, 2 ** 128 - 1)
    return format(value, "x")
ffab4ce96b03903f70ba831dadb069aa4ab9c312
18,905
import os
import re


def get_current_container_id(read_from='/proc/self/cgroup'):
    """
    Get the ID of the container the application is currently running in,
    otherwise return `None` if not running in a container.
    This is a best-effort guess, based on cgroups.

    :param read_from: the cgroups file to read from (default: `/proc/self/cgroup`)
    """
    if not os.path.exists(read_from):
        return
    with open(read_from, 'r') as cgroup:
        for line in cgroup:
            candidate = line.strip()
            # A 64-hex-digit path component is taken to be the container id.
            if re.match('.*/[0-9a-f]{64}$', candidate):
                return re.sub('.*/([0-9a-f]{64})$', '\\1', candidate)
5a536fc31f7abd2a5d4e081bceaef13aebbf5561
18,908
import numpy as np def _convert_to_colorscale(cmap): """ Return a colour scale list, as converted from a colour map. PURPOSE: This is a helper function used to convert a colour map into a colour scale list, as used by the Plotly colorscale parameter. DESIGN: Returned format: [(0.00, u'#f7fbff'), (0.33, u'#abd0e6'), (0.66, u'#3787c0'), (1.00, u'#08306b')] DEPENDENCIES: - numpy """ return [i for i in zip(np.linspace(0, 1, num=len(cmap)), cmap)]
3d1aeb4bc02fb9553c27708596e8bafe4cd3b084
18,910
def robot_move(row, col, k):
    """
    :param row: row of matrix, m
    :param col: col of matrix, n
    :param k: bit sum limit
    :return: num of blocks can reach
    """
    def digit_sum(num):
        """Sum of the decimal digits of num."""
        total = 0
        while num:
            total += num % 10
            num = num // 10
        return total

    if row < 1 or col < 1:
        raise Exception('Invalid Matrix')
    # Level-order traversal moving only right/down; per-level set dedupes
    # cells reachable from two parents.
    frontier = [(0, 0)]
    reached = 0
    while frontier:
        next_frontier = set()
        for r, c in frontier:
            reached += 1
            if c + 1 < col and digit_sum(r) + digit_sum(c + 1) <= k:
                next_frontier.add((r, c + 1))
            if r + 1 < row and digit_sum(r + 1) + digit_sum(c) <= k:
                next_frontier.add((r + 1, c))
        frontier = list(next_frontier)
    return reached
edf463b050deffd9de7c42816d672c7a3edc24b4
18,911
def booleanize_if_possible(sample):
    """Boolean-ize truthy/falsey strings; other strings pass through.

    Fix: the membership lists contained the *integers* 1 and 0, which can
    never equal the lowercased string and were therefore dead; they are
    now the strings '1'/'0', so '1' -> True and '0' -> False as intended.
    """
    lowered = sample.lower()
    if lowered in ('true', 'yes', 'on', '1'):
        return True
    if lowered in ('false', 'no', 'off', '0'):
        return False
    return sample
10ffb3481f15a7548512f01266027977a61a7e13
18,912
def normalize_feature_for_target_col(in_df, str_norm_target_col, abs_max_num):
    """
    normalize designated column
    e.g. normalize col with max "60"
         [20, 30, 70, 65, -90] -> [0.333..., 0.5, 1.0, 1.0, -1.0]

    :param in_df : pandas.DataFrame — NOTE(review): modified in place
        (``df = in_df`` below does not copy) as well as returned; confirm
        callers expect the mutation.
    :param str_norm_target_col : string,
    :param abs_max_num : float, absolute max number (normalize col with this number)
    :return out_df : pandas.DataFrame
    """
    assert(abs_max_num > 0), (
        'Please set positive number in "abs_max_num".'
    )
    # find target exceeding abs_max_num and overwrite the num with abs_max_num
    df = in_df
    # clip values above +abs_max_num
    cond = (df.loc[:, str_norm_target_col] >= abs_max_num)
    df.loc[cond, str_norm_target_col] = abs_max_num
    # clip values below -abs_max_num
    cond = (df.loc[:, str_norm_target_col] <= -abs_max_num)
    df.loc[cond, str_norm_target_col] = -abs_max_num
    # normalization: after clipping, values land in [-1, 1]
    df.loc[:, str_norm_target_col] = df.loc[:, str_norm_target_col] / abs_max_num
    out_df = df
    return out_df
63eaa8065d6485d6bdacb850d318ed993225204f
18,913
import collections
import operator


def dnsbl_hit_count(log_data):
    """Counts how many hosts were found in each dnsbl."""
    counts = collections.Counter()
    for blacklists in log_data.values():
        counts.update(blacklists)
    return sorted(counts.items(), key=operator.itemgetter(1), reverse=True)
6a03054b7f50b1cbb27e58e4edc1212bb7cc2abf
18,915
def resize_lane(lane, x_ratio, y_ratio):
    """Resize the coordinate of a lane accroding image resize ratio.

    :param lane: the lane need to be resized (list of point dicts)
    :param x_ratio: correspond image resize ratio in x axes.
    :param y_ratio: correspond image resize ratio in y axes.
    :return: resized lane (list of dicts with 'x'/'y' floats)
    """
    resized = []
    for point in lane:
        resized.append({"x": float(point['x']) / x_ratio,
                        "y": float(point['y']) / y_ratio})
    return resized
23dacea6ac9820b73fad124433630a991ea14d37
18,916
def clean_up_database_str(string_to_transform):
    """Removes unneeded chars (three at each end) from a string retrieved
    from the database.

    Args:
        string_to_transform: the string that should be parsed

    Returns:
        The fixed string
    """
    without_prefix = string_to_transform[3:]
    return without_prefix[:-3]
45bc809f9e60efad69a4df0d6937cbf542966a95
18,917
from pathlib import Path


def unique_filepath(filepath):
    """Generate a unique filename to ensure existing files are not overwritten.

    :param filepath: File-name, including path, to the file location
    :type filepath: str
    :return: Unique filepath (filename and path to file location)
    :rtype: str
    """
    candidate = Path(filepath)
    suffix = candidate.suffix
    stem = candidate.name[: -len(candidate.suffix)]
    folder = candidate.parent
    counter = 0
    # Keep appending _1, _2, ... until the name is free.
    while candidate.is_file():
        counter += 1
        candidate = Path("{}/{}_{}{}".format(folder, stem, counter, suffix))
    return candidate
7ca638b1cd0f2ac4f0a9e720e659179da3e03e6a
18,918
def check_user_mask(input, verbose=False):
    """
    Checks user-defined soft constraints by ensuring that input is a list
    of (low, high) numeric pairs with high >= low.

    Fix: removed a misspelled dead assignment (``ouput = []``) in the
    falsy-input branch; the behavior (returning ``[]``) is unchanged.

    :param input: list/tuple of 2-element numeric tuples, or a falsy value
    :param verbose: unused; kept for interface compatibility
    :return: validated list of constraints ([] for falsy input)
    :raises ValueError: if the mask is malformed
    """
    if not input:
        return []
    if not isinstance(input, (list, tuple)):
        raise ValueError("\n User mask must be in the form of a list of tuples, each of length 2. \n")
    output = []
    for con in input:
        if (not isinstance(con, (list, tuple))) or (len(con) != 2) \
                or (not isinstance(con[0], (int, float))) \
                or (not isinstance(con[1], (int, float))) \
                or (con[1] < con[0]):
            raise ValueError("\n User mask must be in the form of a list of tuples, each of length 2. \n")
        output.append(con)
    return output
d36b79d7f12dade7f56aaafbcdec642b235e03c1
18,919
def shift(df, shift_map=None):
    """Shift selected columns of a dataframe.

    Args:
        df (dataframe): origin dataframe
        shift_map (dict, optional): column name -> number of periods to
            shift that column. Defaults to None (no-op).

    Returns:
        dataframe: shifted dataframe; the index is extended to cover the
        largest backward and forward shifts so no values fall off.
    """
    if shift_map is None:
        return df
    shifted = df.copy()
    lag = min(0, min(shift_map.values()))
    lead = max(0, max(shift_map.values()))
    extended = shifted.index.union(shifted.index.shift(lag)).union(shifted.index.shift(lead))
    shifted = shifted.reindex(extended)
    for column, periods in shift_map.items():
        shifted[column] = shifted[column].shift(periods)
    return shifted
41a9948fe90a00f42a02a85a0f5c308b2c03954b
18,920
def merge_spending_diff(df_spending_current, df_spending_2013, df_spending_difference):
    """Merge 2013 spending data with spending differences and attach the
    current average spending per dosage unit.

    :param df_spending_current: frame with an
        'average_spending_per_dosage_unit' column, indexed by drug
    :param df_spending_2013: 2013 spending frame; per-claim, per-beneficiary
        and manufacturer columns are dropped before merging
    :param df_spending_difference: frame of spending differences, sharing
        the index of df_spending_2013
    :return: merged frame with a 'current_avg_spending_per_dose' column
    """
    df_2013 = df_spending_2013.drop(
        ['average_spending_per_claim', 'average_spending_per_beneficiary', 'manufacturer'],
        axis=1)
    df_spending_tot = df_2013.merge(df_spending_difference,
                                    left_index=True, right_index=True, how='inner')
    # BUG FIX: the old code renamed the column on df_spending_current with
    # inplace=True, mutating the caller's frame as a side effect. Assigning
    # by index alignment gives the same result without the mutation.
    df_spending_tot['current_avg_spending_per_dose'] = \
        df_spending_current['average_spending_per_dosage_unit']
    return df_spending_tot
5924dbccd278cbf9a1f5cd0a12dcef4bb293b895
18,921
def _attrfilter(label, value, expr): """ Build an `attrfilter(<label>, <value>, <expr>)` type query expression. `expr` is a query expression of any supported type. """ return "attrfilter({label}, {value}, {expr})".format( label = label, value = value, expr = expr, )
38dfc19bf043a9c327665c324f96d7f205ea6416
18,922
def isEmbeddedInOtherArc(arc, arcs, startIndex=0, stopIndex=-1):
    """
    Check whether an arc is embedded within another arc between two indices.
    """
    # Candidate arcs are those (other than `arc` itself) that lie fully
    # inside the [startIndex, stopIndex] window.
    candidates = [
        other for other in arcs
        if other[0] >= startIndex and other[-1] <= stopIndex and other != arc
    ]
    return any(arc[0] >= other[0] and arc[-1] <= other[-1] for other in candidates)
dec7d73b98e13b5f43b2aa62897dfea62afa5154
18,923
def genererate_principal(primary: str, instance: str, realm: str) -> str:
    """
    Generate a Kerberos principal from the three different components.
    """
    # Only two-component principals carry the "primary/instance" prefix.
    prefix = f"{primary}/{instance}" if instance else primary
    return f"{prefix}@{realm.upper()}"
a39055e2029f044ce50107cb58860465491a5333
18,926
def foo(a, b):
    """Return the sum of ``a`` and ``b``."""
    total = a + b
    return total
ca70214cc351b9807d472a3e94ff6291e6a054b9
18,927
import re def check_password_complexity(email, password): """ Check that a password meets the minimum complexity requirements, returning True if the requirements are satisfied, False otherwise. The rules are: - minimum length 10 - at least one lowercase letter - at least one uppercase letter - at least one numeric character - at least one symbol character - a maximum of 3 consecutive characters from the email address :param email: the user's email address :param password: the input plain-text password :return: boolean """ if len(password) < 10: return False if not re.search(r'[a-z]', password): return False if not re.search(r'[A-Z]', password): return False if not re.search(r'[0-9]', password): return False if not re.search(r'''[`~!@#$%^&*()\-=_+\[\]{}\\|;:'",.<>/?]''', password): return False for i in range(len(email) - 3): if email[i:(i + 4)] in password: return False return True
16bd2d99777a7d0764ce3b697b1c3319c60ccf87
18,928
def autoname(index, sizes):
    """
    Given an index and list of sizes, return a name for the layer.

    >>> autoname(0, sizes=4)
    'input'
    >>> autoname(1, sizes=4)
    'hidden1'
    >>> autoname(2, sizes=4)
    'hidden2'
    >>> autoname(3, sizes=4)
    'output'
    """
    if index == 0:
        return "input"
    if index == sizes - 1:
        return "output"
    # With exactly one hidden layer no numeric suffix is needed.
    if sizes == 3:
        return "hidden"
    return "hidden%d" % index
c7b426f7b3865472e64b84cab4ddff003cd47576
18,933
def _standardize(dataframe): """Transform features by centering the distribution of the data on the value 0 and the standard deviation to the value 1. The transformation is given by: scaled_value = (value - mean) / standard deviation Parameters ---------- dataframe : pandas.DataFrame The data frame to be used for EDA. Returns ------- res : pandas.core.frame.DataFrame Scaled dataset """ res = dataframe.copy() for feature_name in dataframe.columns: mean = dataframe[feature_name].mean() stdev = dataframe[feature_name].std() res[feature_name] = (dataframe[feature_name] - mean) / stdev return res
8db61585170223056e176e8b444a33a785cec591
18,934
def deform_conv_openvino(ctx,
                         g,
                         input,
                         offset,
                         weight,
                         stride,
                         padding,
                         dilation,
                         groups,
                         deform_groups,
                         bias=False,
                         im2col_step=32):
    """Rewrite symbolic function for OpenVINO backend."""
    assert not bias, 'The "bias" parameter should be False.'
    assert groups == 1, 'The "groups" parameter should be 1.'
    kernel_height, kernel_width = weight.type().sizes()[2:]
    # ONNX pads are [x1_begin, x2_begin, x1_end, x2_end]-style pairs, so
    # each spatial padding value is repeated twice.
    pads = [p for pair in zip(padding, padding) for p in pair]
    return g.op(
        'org.openvinotoolkit::DeformableConv2D',
        input,
        offset,
        weight,
        strides_i=stride,
        pads_i=pads,
        dilations_i=dilation,
        groups_i=groups,
        deformable_groups_i=deform_groups,
        kernel_shape_i=[kernel_height, kernel_width])
226b1af110dd621710ce7eccbc4ad8f51e79f1db
18,935
import os


def from_env(env_var, default):
    """Return an environment variable's value, or a fallback.

    Args:
        env_var: name of the environment variable
        default: value returned when the variable is unset or empty
    """
    value = os.environ.get(env_var)
    # Empty strings deliberately fall through to the default.
    return value if value else default
b9e1b248ff690bf2d6e1e65cfedb3a05b3a09e8f
18,936
def issues_data(record):
    """Retrieve issues data from record.

    Returns a ``(total, with_issues, without_issues)`` tuple of ints.
    """
    total_files = int(record["total_files"])
    files_with_issues = int(record["files_with_issues"])
    return total_files, files_with_issues, total_files - files_with_issues
9ff63711f50ef7df1c274d93eec3bd5780d2337d
18,937
import typing
import random


def shuffle_dependent_lists(*lists: typing.Iterable):
    """Shuffle multiple lists in unison, preserving element pairing."""
    paired = list(zip(*lists))
    # Fixed seed keeps results consistent between runs.
    random.Random(123).shuffle(paired)
    return zip(*paired)
44727399539a3864b19f7271d9f6506331a24d6d
18,938
from typing import Dict, List, Optional


def systemd_run(
    cpus: int = 4,
    memory_gigabytes: int = 8,
    env: Optional[Dict[str, str]] = None,
) -> List[str]:
    """Build a systemd-run command line with CPU and memory limits.

    Since we limit memory inside our VM we also limit the number of CPUs
    for the benchmark.

    :param cpus: number of CPUs the unit may use (CPUs 0..cpus-1)
    :param memory_gigabytes: hard memory cap; the soft cap is 0.5 GB lower
    :param env: extra environment variables passed via ``--setenv``
    :return: argv list ending with ``--``, ready for a command to be appended
    """
    assert memory_gigabytes >= 1
    # BUG FIX: the old signature used a mutable default (env={}); default
    # to None and create a fresh mapping per call instead.
    if env is None:
        env = {}
    # if 0 this is an empty string, which means no restrictions
    mask = ",".join(map(str, range(cpus)))
    high_mem = (memory_gigabytes - 0.5) * 1000
    cmd = [
        "systemd-run",
        "--pty",
        "--wait",
        "--collect",
        "-p",
        f"MemoryHigh={high_mem}M",
        "-p",
        f"MemoryMax={memory_gigabytes}G",
        "-p",
        f"AllowedCPUs={mask}",
    ]
    for key, value in env.items():
        cmd.append(f"--setenv={key}={value}")
    cmd.append("--")
    return cmd
7619f0ac600b0687182633b0bcc9181e40fa432a
18,939
import inspect def _wrapped_fn_argnames(fun): """Returns list of argnames of a (possibly wrapped) function.""" return tuple(inspect.signature(fun).parameters)
d68d744051c45c5992e700f06d91121af8738486
18,940
import logging


def get_log(log_id):
    """Return a logger whose registered name contains ``log_id``.

    Scans the logging manager's registry for an existing logger whose name
    contains ``log_id`` as a substring and returns that logger; otherwise a
    new logger named ``log_id`` is created.

    NOTE(review): when several registered names contain ``log_id``, the
    first match in registry order wins — confirm this is intended.
    """
    # (Removed the commented-out debug prints from the original.)
    for name in logging.Logger.manager.loggerDict.keys():
        if log_id in name:
            log_id = name
            break
    return logging.getLogger(log_id)
9533e2d35c487331234df40e93c0e50b6b1f4960
18,942
def modify_cert(conf):
    """Flatten the CA cert, client cert and client key into single-line
    strings (lines joined with a literal ``\\n``) to make them compatible
    with Grafana.

    :param conf: dict with 'trustFile', 'certFile' and 'keyFile' paths
    :return: dict with 'tls_ca_cert', 'tls_client_cert', 'tls_client_key'
    """
    def read_single_line(path):
        # One-line purpose: read a PEM file and join its stripped lines
        # with a literal backslash-n. Using a context manager also fixes
        # the original's three leaked file handles.
        with open(path, 'r') as handle:
            return "\\n".join(line.strip() for line in handle)

    return {
        'tls_ca_cert': read_single_line(conf["trustFile"]),
        'tls_client_cert': read_single_line(conf["certFile"]),
        'tls_client_key': read_single_line(conf["keyFile"]),
    }
18a8a8628371d02874cfd5146bb7191eede3de91
18,944
import os
import re


def get_scenarios(fixtures_path, in_ext='yaml', out_ext='xml'):
    """Return test scenarios discovered in ``fixtures_path``.

    Each scenario is ``(input_filename, params)`` where params holds the
    input filename, its expected-output counterpart and, when present, an
    optional ``.conf`` configuration file.

    :param fixtures_path: directory containing the fixture files
    :param in_ext: extension of input fixture files
    :param out_ext: extension of expected-output fixture files
    :raises Exception: when an input file has no output counterpart
    """
    scenarios = []
    files = os.listdir(fixtures_path)
    input_files = [f for f in files if re.match(r'.*\.{0}$'.format(in_ext), f)]
    for input_filename in input_files:
        output_candidate = re.sub(r'\.{0}$'.format(in_ext),
                                  '.{0}'.format(out_ext), input_filename)
        # Make sure the input file has a output counterpart
        if output_candidate not in files:
            raise Exception(
                "No {0} file named '{1}' to match {2} file '{3}'"
                .format(out_ext.upper(), output_candidate,
                        in_ext.upper(), input_filename))
        # BUG FIX: derive the conf name from in_ext instead of a hard-coded
        # '.yaml'; the old code returned the input file itself as the conf
        # file for any non-yaml extension.
        conf_candidate = re.sub(r'\.{0}$'.format(in_ext), '.conf',
                                input_filename)
        # If present, add the configuration file
        if conf_candidate not in files:
            conf_candidate = None
        scenarios.append((input_filename, {
            'in_filename': input_filename,
            'out_filename': output_candidate,
            'conf_filename': conf_candidate,
        }))
    return scenarios
6b78bef22611bc41eeb3781e13d6f34ab196b637
18,945
def clean_sql_statement(original: str) -> str:
    """
    Cleans up SQL statements so that they end with a semicolon
    and don't have any leading or trailing whitespace
    """
    statement = original.strip()
    return statement if statement.endswith(";") else statement + ";"
cf8bc73da26cd4cad363b8560170a37c6d5228de
18,946
def solution(N, A):
    """Codility MaxCounters: apply a sequence of operations to N counters.

    Values 1..N increase that counter by one; any other value raises every
    counter to the current maximum. Propagating the "max all" operation
    lazily keeps the complexity at O(len(A) + N).
    """
    counters = [0] * N
    floor = 0    # value every counter is implicitly raised to
    ceiling = 0  # largest counter value seen so far
    for op in A:
        if 1 <= op <= N:
            idx = op - 1
            counters[idx] = max(counters[idx], floor) + 1
            ceiling = max(ceiling, counters[idx])
        else:
            # max_counter(X): record the level instead of touching N cells
            floor = ceiling
    # Apply the pending "raise to floor" to every counter at the end.
    return [max(c, floor) for c in counters]
e70e6b24d566d93cf6dd58004c7668856a9f1cc8
18,947
def sort_keypoints(kps):
    """
    Sort a list of cv2.KeyPoint based on their response, strongest first.
    """
    return sorted(kps, key=lambda kp: kp.response, reverse=True)
7b1cc49498571715b2715118fb3887384d4e386c
18,948
def render_revert_function(autogen_context, op):
    """
    Collect the function definition currently live in the database and use
    its definition as the downgrade revert target
    """
    autogen_context.imports.add(op.target.render_import_statement())
    engine = autogen_context.connection.engine
    with engine.connect() as connection:
        db_target = op.target.get_database_definition(connection)
        variable_name = db_target.to_variable_name()
        return db_target.render_self_for_migration() + "op.replace_entity({})".format(variable_name)
91f36487b4871141c69737cb35960bc3b87f5f99
18,949
import base64
import os


def gen_private_key():
    """Generate 32 byte random private key"""
    raw = os.urandom(32)
    return base64.urlsafe_b64encode(raw)
08e1558897480d8aac74c115f1138816775d380e
18,950
def normalize_none(value):
    """
    Normalize a none string value to a None.
    """
    if value is None:
        return value
    if isinstance(value, str) and value.lower().strip() == "none":
        return None
    raise ValueError("Cannot convert {} to None".format(value))
7ed3de84dfa0c1c0cf3b1013eab2d978084769b9
18,951
def check_result_size(result: dict, parameters: dict) -> dict:
    """Returns the results according to the result_size.

    Only the targeted entities are trimmed: for a drug search the drugs
    are cut down (and gene edges re-filtered to the surviving drugs); for
    a drug-target search the non-seed genes are cut down.

    :param dict result: Dictionary of the result ("drugs" and "genes").
    :param dict parameters: Dictionary of the task parameters ("target",
        "resultSize" and optionally "algorithm").
    :return: Dictionary of the results, according to the result_size.
    """
    target = parameters["target"]
    result_size = parameters["resultSize"]
    # BUG FIX: the old code assigned the literal list ["algorithm"] here
    # instead of reading the parameter, so the proximity branch below was
    # unreachable. .get() keeps callers without an "algorithm" key working.
    algorithm = parameters.get("algorithm")
    drugs = result["drugs"]
    genes = result["genes"]

    if target == "drug":
        if len(drugs) <= result_size:
            return {"drugs": drugs, "genes": genes}
        sorted_drugs_list = sorted(drugs.values(), key=lambda item: item['score'])
        if algorithm != "proximity":
            # drugs are sorted from low to high score; usually the highest
            # scored drugs are wanted, so the list is reversed. Only with
            # the proximity algorithm are the lowest scored drugs wanted.
            sorted_drugs_list.reverse()
        resized_drugs = {drug["label"]: drug for drug in sorted_drugs_list[0:result_size]}

        # Keep only gene edges that still point at a surviving drug.
        drug_filtered_genes = {}
        for gene, detail in genes.items():
            new_edges = [edge for edge in detail["has_edges_to"] if edge in resized_drugs]
            drug_filtered_genes[gene] = {**detail, "has_edges_to": new_edges}

        all_edges = []
        for gene_detail in drug_filtered_genes.values():
            all_edges.extend(gene_detail["has_edges_to"])

        # Drop genes that are neither seeds nor connected to anything.
        filtered_genes = {
            gene: detail
            for gene, detail in drug_filtered_genes.items()
            if detail["is_seed"] or detail["has_edges_to"] or gene in all_edges
        }
        return {"drugs": resized_drugs, "genes": filtered_genes}

    if target == "drug-target":
        resized_genes = {}
        non_seed = []
        for gene, detail in genes.items():
            if detail["is_seed"]:
                resized_genes[gene] = detail
            else:
                non_seed.append(detail)
        if len(non_seed) <= result_size:
            for gene in non_seed:
                resized_genes[gene["symbol"]] = gene
        else:
            # Keep only the highest-scored non-seed genes.
            sorted_genes = sorted(non_seed, key=lambda item: item['score'])
            sorted_genes.reverse()
            for i in range(result_size):
                resized_genes[sorted_genes[i]["symbol"]] = sorted_genes[i]
        # Drop edges that point at genes which were cut.
        for gene, detail in resized_genes.items():
            detail["has_edges_to"] = [
                edge for edge in detail["has_edges_to"] if edge in resized_genes
            ]
        return {"drugs": drugs, "genes": resized_genes}

    return {"drugs": drugs, "genes": genes}
0a06cfb2e23ce4505291eb20e8f71d66251fae16
18,957
def aumento_salarial(salario, porcentagem):
    """Apply a percentage raise to a salary and return the new salary,
    rounded to two decimal places."""
    novo_salario = salario + (salario * porcentagem / 100)
    return round(novo_salario, 2)
7578f69bf486ba58374b70be673c1453694b3a72
18,958