content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def compress_dataframe_time_interval(processed_df, interval):
    """Resample a dataframe to a coarser time interval, averaging values.

    If the data is originally in 1-minute intervals, the number of rows
    can be reduced by resampling to e.g. 15 minutes; the mean is taken
    over each new interval to preserve data quality.

    Args:
        processed_df: Pandas dataframe containing a "Time" column with date ranges
        interval: Integer, the new interval length in minutes

    Returns:
        Pandas dataframe with compressed time interval
    """
    rule = '{}min'.format(interval)
    return processed_df.resample(rule, on='Time').mean()
ffbb35719e33f445ba4b5c91acf8a069cd4902a6
700,277
def iseast(bb1, bb2, north_vector=None):
    """Return True if bounding box bb1 is east of bounding box bb2.

    With a north vector of [0, 1, 0], bb1 is east of bb2 when the minimum
    X of bb1 is greater than the maximum X of bb2.

    Args:
        bb1: (min_corner, max_corner) bounding box; corners are (x, y, z).
        bb2: (min_corner, max_corner) bounding box.
        north_vector: direction of north. Defaults to [0, 1, 0]; the
            mutable list default was replaced by None to avoid the
            shared-mutable-default pitfall.

    Raises:
        NotImplementedError: for any north vector other than [0, 1, 0].
    """
    if north_vector is None:
        north_vector = [0, 1, 0]
    # Currently a North Vector of 0,1,0 (North is in the positive Y direction)
    # is assumed. At some point this should be updated to allow for
    # non-traditional North and for directions based on perspective.
    if north_vector != [0, 1, 0]:
        raise NotImplementedError
    bb1_min, _ = bb1
    _, bb2_max = bb2
    x1, y1, z1 = bb1_min
    x2, y2, z2 = bb2_max
    return x1 > x2
9764d373d14530fca2d26d8c7855cc0620e14496
700,278
import itertools


def concat_list(in_list: list) -> list:
    """Concatenate a list of lists into a single flat list."""
    return list(itertools.chain.from_iterable(in_list))
5a58e8e1899fce99f8dabe681206507ae8ad4b8c
700,279
def is_sale(line):
    """Determine whether a given line describes a sale of cattle.

    A sale record is identified by having exactly five fields.
    """
    field_count = len(line)
    return field_count == 5
e4ff4ae2ea7ea14a2975eaf87852eed2fad0abff
700,280
def _is_recipe_fitted(recipe): """Check if a recipe is ready to be used. Fitting a recipe consists in wrapping every values of `fov`, `r`, `c` and `z` in a list (an empty one if necessary). Values for `ext` and `opt` are also initialized. Parameters ---------- recipe : dict Map the images according to their field of view, their round, their channel and their spatial dimensions. Can only contain the keys `pattern`, `fov`, `r`, `c`, `z`, `ext` or `opt`. Returns ------- _ : bool Indicates if the recipe is fitted or not """ # all keys should be initialized in the new recipe, with a list or a string for key in ['fov', 'r', 'c', 'z']: if key not in recipe or not isinstance(recipe[key], list): return False for key in ['ext', 'opt']: if key not in recipe or not isinstance(recipe[key], str): return False if 'pattern' not in recipe or not isinstance(recipe['pattern'], str): return False return True
77e438dd00ac5606c52c88518c6932a09dff75df
700,281
def view_event(user, event):
    """
    Check whether a user may view a specified event.

    :param User user:
    :param Event event:
    :return: bool
    """
    # Previously returned None for a missing event; the documented contract
    # is bool, so return False explicitly (both are falsy, but callers
    # comparing with `is` now get a consistent type).
    if event is None:
        return False
    return user.has_perm("booking.view_hidden_events") or event.visible is True
0aca52c9a60449ab0711a2291c5f12f42c8b3f96
700,282
def parse_filename(fname, return_ext=True, verbose=False):
    """
    Parses `fname` (in BIDS-inspired format) and returns dictionary

    Parameters
    ----------
    fname : str or os.PathLike
        Filename to parse
    return_ext : bool, optional
        Whether to return extension of `fname` in addition to key-value
        dict. Default: True
    verbose : bool, optional
        Whether to print status messages. Default: False

    Returns
    -------
    info : dict
        Key-value pairs extracted from `fname`
    ext : str
        Extension of `fname`, only returned if `return_ext=True`
    """
    try:
        base, *ext = fname.split('.')
        # Entities are underscore-separated "key-value" tokens; the bare
        # token 'feature' is not an entity and is skipped.
        fname_dict = dict([
            pair.split('-') for pair in base.split('_')
            if pair != 'feature'
        ])
    except ValueError:
        print('Wrong filename format!')
        return
    if verbose:
        print(fname_dict)
    if return_ext:
        return fname_dict, '.'.join(ext)
    return fname_dict
1512b50fa6d07a0bcbb69831418a935f28abe2d8
700,283
def get_ls(omega_list):
    """Return the Solar longitude of each OMEGA/MEx observation in omega_list.

    Parameters
    ==========
    omega_list : array of OMEGAdata
        The input array of OMEGA observations.

    Returns
    =======
    ls : list
        The Ls value of every observation, in input order.
    """
    return [omega.ls for omega in omega_list]
c8be1927a55ff9aac0134d52691b3b2bdd049724
700,284
def run_intcode(memory, noun, verb):
    """Install *noun* and *verb*, then run the intcode program in *memory*.

    Opcode 1 adds, opcode 2 multiplies (both use two positional operands
    and a destination address), and opcode 99 halts. Returns memory[0].
    """
    memory[1] = noun
    memory[2] = verb
    ip = 0
    while memory[ip] != 99:
        op = memory[ip]
        src_a, src_b, dest = memory[ip + 1], memory[ip + 2], memory[ip + 3]
        if op == 1:
            memory[dest] = memory[src_a] + memory[src_b]
        elif op == 2:
            memory[dest] = memory[src_a] * memory[src_b]
        ip += 4
    return memory[0]
b4f0c6762a9a27021ce1cabd03b950fb8fabcd48
700,285
def fix_term_scope(term):
    """Hard-coded scope fix-up for a term.

    Inspects term.left and forces the scope to 'profile' for a few special
    cases; otherwise the scope stays 'realtime'. The result is stored on
    term.scope and also returned.

    :param term: term whose scope should be fixed
    :return: the scope string that was assigned
    """
    left = term.left
    is_profile = False
    if left:
        if left.type == 'func' and left.subtype == 'getvariable' and 'profile' in left.variable[1]:
            is_profile = True
        # hard code
        if left.subtype == 'setblacklist' and left.check_type == 'USER(register_channel)':
            is_profile = True
        if left.subtype == 'spl':
            is_profile = True
    scope = 'profile' if is_profile else 'realtime'
    term.scope = scope
    return scope
bf67f5839a402281c2ef943983e857e10d5da627
700,286
import pkg_resources


def version():
    """Return the current version string of the CySCS Python wrapper."""
    dist = pkg_resources.get_distribution("cyscs")
    return dist.version
b530c798702dfb62a041a6a9c1fb8ff1dc1e6365
700,288
import random


def miller_rabin_primality_testing(n):
    """Probabilistic Miller-Rabin primality test.

    Calculates whether n is composite (which is always correct) or prime
    (which theoretically is incorrect with error probability 4**-k), where
    the number of rounds k is chosen from the bit size of n.

    For reference and implementation example, see:
    https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test

    :param n: Integer to be tested for primality.
    :type n: int
    :return: False if the number is composite, True if it's probably prime.
    :rtype: bool
    """
    if n < 2:
        return False
    # 2 and 3 are prime, but random.randint(2, n - 2) would raise for them
    # because the witness range is empty; handle them (and other even
    # numbers) before the witness loop.
    if n in (2, 3):
        return True
    if n % 2 == 0:
        return False

    # Set number of rounds from the bit size of n.
    bitsize = n.bit_length()
    if bitsize >= 1536:
        k = 3
    elif bitsize >= 1024:
        k = 4
    elif bitsize >= 512:
        k = 7
    else:
        # For smaller bitsizes, set arbitrary number of rounds.
        k = 10

    # Decompose (n - 1) as (2 ** r) * d with d odd.
    d = n - 1
    r = 0
    while not (d & 1):
        r += 1
        d >>= 1

    # Test k witnesses.
    for _ in range(k):
        # Generate random integer a, where 2 <= a <= (n - 2).
        a = random.randint(2, n - 2)
        x = pow(a, d, n)
        if x == 1 or x == n - 1:
            continue
        for _ in range(r - 1):
            x = pow(x, 2, n)
            if x == 1:
                # n is composite.
                return False
            if x == n - 1:
                # Exit inner loop and continue with next witness.
                break
        else:
            # If loop doesn't break, n is composite.
            return False
    return True
6f7263f261bf20b851aa40e0c616a68e9936f16d
700,289
import functools
from datetime import datetime


def busy_try(delay_secs: int, ExceptionType=Exception):
    """
    A decorator that repeatedly attempts the function until the timeout
    specified has been reached. This is different from timeout-related
    functions, where the decorated function is called only *once*.

    Because the decorated function is called repeatedly, the delay period
    should not be long. Use `timeout` if you hope to avoid occupying
    system resources.

    Parameters
    ----------
    delay_secs : time allowed for retries, in seconds.
    ExceptionType : optional
        exception caught when the attempted function raises an error.
        Default to `Exception`.

    Returns
    -------
    The return value of the decorated function, as soon as any call
    succeeds.

    Raises
    ------
    TimeoutError : when the function has been retried for more than
        `delay_secs` seconds without a successful call.
    """
    def _busy_try(func):
        @functools.wraps(func)
        def busy_try_wrapper(*args, **kwargs):
            start = datetime.now()
            while True:
                try:
                    # A successful call returns immediately; the previous
                    # version checked the timeout in a `finally` and could
                    # raise TimeoutError even after success.
                    return func(*args, **kwargs)
                except ExceptionType:
                    # total_seconds() (not .seconds, which wraps at one
                    # day) measures the true elapsed time.
                    if (datetime.now() - start).total_seconds() > delay_secs:
                        raise TimeoutError
        return busy_try_wrapper
    return _busy_try
0d005935ffa8b7f594da692edfa39ec4342ed4e1
700,290
def MakeDeclarationString(params):
    """Given a list of (name, type, vectorSize) parameters, make a C-style
    parameter declaration string.

    Ex return: 'GLuint index, GLfloat x, GLfloat y, GLfloat z'.
    """
    if not params:
        return 'void'
    return ', '.join('{} {}'.format(ptype, pname) for (pname, ptype, _) in params)
1b009bce0d6c25b25e4830b3a021dda877519ea3
700,291
from typing import Tuple
from typing import Union


def min_avg(arr: list, k: int) -> Tuple[list, Union[float, int]]:
    """Return the length-k window of *arr* with the smallest average.

    Maintains a running window average: each step adjusts it by the
    difference between the entering and leaving elements, divided by k.

    Time Complexity: O(n)
    space complexity: O(1)
    """
    best_avg: Union[int, float] = sum(arr[:k]) / k
    lo, hi = 0, k - 1
    window_avg = best_avg
    for i in range(k, len(arr)):
        window_avg = window_avg + (arr[i] - arr[i - k]) / k
        if window_avg < best_avg:
            best_avg = window_avg
            lo, hi = i - k + 1, i
    return arr[lo : hi + 1], best_avg
1a2cdafd54eda4ed1078fe72f369fb8750634b84
700,292
import numpy as np


def rand_jitter(arr, sensib = 0.01, lowerLimit=None, upperLimit=None):
    """Add random jitter to a one-dimensional data array.

    The jitter standard deviation is *sensib* times the data range. For an
    array made of a single repeated value, supply lowerLimit and
    upperLimit to define the range explicitly.
    """
    if len(arr) == 0:
        return []
    if lowerLimit is not None and upperLimit is not None:
        spread = upperLimit - lowerLimit
    else:
        spread = np.amax(arr) - np.amin(arr)
    stdev = sensib * spread
    return arr + np.random.randn(len(arr)) * stdev
1e7e6f67a660508effedb1b68cb4034acaa5e0c2
700,293
def make_text_list(postings_dict, first_n_postings=100):
    """
    Extract the texts from postings_dict into a list of strings

    Parameters:
        postings_dict: mapping of string indices to {'posting': text} records
        first_n_postings: highest index (inclusive) to extract

    Returns:
        text_list: list of job posting texts
    """
    text_list = []
    for i in range(0, first_n_postings + 1):
        # Since some numbers could be missing due to errors in scraping,
        # skip absent indices (or records without a 'posting' field).
        # The previous bare `except` silently hid every error type.
        try:
            text_list.append(postings_dict[str(i)]['posting'])
        except KeyError:
            continue
    return text_list
0d2a4e0f2d904b246942508e03cfd97cf5d43ea0
700,294
import json


def get_file(projectCode, branch, filepath):
    """
    :projectCode: project identifier
    :branch: required branch
    :folderPath: GET parameter, path to the folder; obtain via request.args.get('filePath')

    **Response:**
    ```
    {
        "name": "myfile.md",
        "full_path": "/folder/myfile.md",
        "parent": "/folder/",
        "attributes": {
            "attrib1": "val1",
            "attrib2": "val2",
            "attrib3": 123,
            "attrib4": ["one", "two"],
            "attrib5": "true",
            "attrib6": "false"
        },
        "text": "many many many many many words in text"
    }
    ```
    """
    # Stub implementation: always returns the same example payload.
    attributes = {
        "attrib1": "val1",
        "attrib2": "val2",
        "attrib3": 123,
        "attrib4": ["one", "two"],
        "attrib5": "true",
        "attrib6": "false",
    }
    stub_response = {
        "name": "myfile.md",
        "full_path": "/folder/myfile.md",
        "parent": "/folder/",
        "attributes": attributes,
        "text": "many many many many many words in text",
    }
    return json.dumps(stub_response)
1765c422e2f9ced254775f1da354f424071b7763
700,295
def calculate_iou(confusion_matrix):
    """Per-class intersection-over-union from a confusion matrix.

    https://github.com/ternaus/robot-surgery-segmentation/blob/master/validation.py
    """
    cm = confusion_matrix.astype(float)
    ious = []
    for cls in range(cm.shape[0]):
        tp = cm[cls, cls]
        fp = cm[:, cls].sum() - tp
        fn = cm[cls, :].sum() - tp
        union = tp + fp + fn
        # An empty class (no predictions, no ground truth) scores 0.
        ious.append(float(tp) / union if union != 0 else 0)
    return ious
7f8eb6f957b031c808bb4ef0f921de26a2c855eb
700,296
def cards_db(db):
    """Empty the CardsDB object after each function"""
    # Fixture-style helper: wipes every card so each use starts from a
    # clean database, then hands the same CardsDB instance back.
    # NOTE(review): assumes db exposes delete_all() — confirm on CardsDB.
    db.delete_all()
    return db
1f62dc919e860b9d4db31f611a8109356c1c6c1b
700,297
def invert0(x):
    """ Invert 0 -> 1 if OK = 0, FALSE > 1 """
    if x > 0:
        return 0
    return 1
1c7a71885cdc84f12b3e2214aa74f99ff2aab326
700,298
import torch


def bounded_iou_loss(loc1, size1, loc2, size_):
    """Bounded-IoU style loss between predicted and target boxes.

    loc1/loc2: [(l, t), ...] locations; size1/size_: [(w, h), ...] sizes,
    where the tensors suffixed 2/_ are the targets. Returns 0 if the
    boxes are identical and grows with the difference. SINGLE_IMAGE.
    """
    loc_delta = torch.abs(loc1 - loc2)
    iou_loc = torch.div(size_ - loc_delta, size_ + loc_delta)
    iou_size = torch.min(torch.div(size1, size_ + 1e-5),
                         torch.div(size_, size1 + 1e-5))
    # average over localization and size iou
    bounded_iou = torch.mean(iou_loc + iou_size) / 2
    return 1 - bounded_iou
67964833f5a190065b843f1c89c1eccd16824245
700,299
def one_or_more(amount, single_str, multiple_str):
    """
    Return a string which uses either the single or the multiple form.

    @param amount the amount to be displayed
    @param single_str the string for a single element
    @param multiple_str the string for multiple elements

    @return the string representation
    """
    template = single_str if amount == 1 else multiple_str
    return template.format(amount)
8c3495614cd8c718e243383bcc72cc7daa8fa286
700,300
import torch


def accuracy(output, target, topk=(1,), exact=False):
    """
    Computes the top-k accuracy for the specified values of k

    Args:
        output (ch.tensor) : model output (N, classes) or (N, attributes)
            for sigmoid/multitask binary classification
        target (ch.tensor) : correct labels (N,) [multiclass] or
            (N, attributes) [multitask binary]
        topk (tuple) : for each item "k" in this tuple, this method
            will return the top-k accuracy
        exact (bool) : whether to return aggregate statistics (if
            False) or per-example correctness (if True)

    Returns:
        A list of top-k accuracies (or per-example correctness tensors
        when `exact=True`).
    """
    with torch.no_grad():
        # Binary Classification
        if len(target.shape) > 1:
            assert output.shape == target.shape, \
                "Detected binary classification but output shape != target shape"
            return [torch.round(torch.sigmoid(output)).eq(torch.round(target)).float().mean()], [-1.0]

        maxk = max(topk)
        batch_size = target.size(0)

        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        res_exact = []
        for k in topk:
            # correct[:k] slices a transposed (non-contiguous) tensor, on
            # which .view(-1) can raise; .reshape(-1) copies if needed.
            correct_k = correct[:k].reshape(-1).float()
            ck_sum = correct_k.sum(0, keepdim=True)
            res.append(ck_sum.mul_(100.0 / batch_size))
            res_exact.append(correct_k)

        if not exact:
            return res
        else:
            return res_exact
cc2194bb72460ff39e3648e173d52875f64abeab
700,301
import itertools


def enumerate_hyperparameter_combinations(parameter_to_options):
    """
    Returns a list of dictionaries of all hyperparameter options

    :param parameter_to_options: a dictionary that maps parameter name to
        a list of possible values
    :return: a list of dictionaries that map parameter names to set values
    """
    names = list(parameter_to_options.keys())
    value_lists = [parameter_to_options[name] for name in names]
    combos = []
    for chosen in itertools.product(*value_lists):
        combos.append(dict(zip(names, chosen)))
    return combos
8665ef66b7cb1467599ff0a56f47ac60042e0e9a
700,302
def get_new_pvars(opvs, epvs):
    """Returns a list of new projection variables from a list of old
    PVar's opvs, based on a list of existing PVar's epvs.

    Args:
        opvs: Old projection variables.
        epvs: Existing projection variables.

    Returns:
        A list of projection variables.
    """
    if not epvs:
        # Nothing to avoid colliding with: keep the old variables.
        return opvs
    base = max(epvs) + 1
    return [base + i for i in range(len(opvs))]
b344e4fb60daa0452c944065b3164d90e7698a21
700,303
def count_gaps(sequence):
    """Count the gap characters ('-') in a sequence string.

    In order to calculate the correct percent divergence between two
    strings where gaps are present, the gaps need to be counted so they
    can be deducted from the total number of differences.

    Args:
        sequence (str): Sequence string to have gap characters counted

    Returns:
        int: Number of gap characters present in the sequence
    """
    # str.count performs the scan in C, replacing the manual per-character loop.
    return sequence.count('-')
a0124ef8ee77d5b1c39da96131a9a0dff7301bcc
700,304
def suffix(pattern, k):
    """Return SUFFIX(pattern): the last (k-1) characters of a k-mer.

    Note: for k == 1 the slice start is -0 == 0, so the whole string is
    returned rather than an empty one; callers are expected to pass k >= 2.
    """
    start = -(k - 1)
    return pattern[start:]
d6cec61ba024f551071a6ba9d6409963ff2ffe7d
700,305
def runSingleThreaded(runFunction, arguments):
    """
    Small overhead-function to iteratively run a function with
    pre-determined input arguments

    :param runFunction: The (``partial``) function to run, accepting ``arguments``
    :param arguments: The arguments to passed to ``runFunction``, one run at a time
    :return: List of any results produced by ``runFunction``
    """
    return [runFunction(*arg) for arg in arguments]
9cdc34f5e44751667ec5a3bddcf2958b3302f4d1
700,306
def comp(*args) -> bool:
    """Compare string lengths: True when all arguments have equal length."""
    lengths = {len(arg) for arg in args}
    return len(lengths) == 1
c3bf069c723b37030531ffbbb84a0a91488a7345
700,308
import sysconfig


def get_build_cflags():
    """Synthesize a CFLAGS env var from the current python env for
    building of C modules."""
    base = sysconfig.get_config_var('BASECFLAGS')
    opt = sysconfig.get_config_var('OPT')
    include = sysconfig.get_path('include')
    return '{} {} -I{}'.format(base, opt, include)
4449c19a6f1758c4db415f76a50fd7749afd8b57
700,309
def clip(num, num_min=None, num_max=None):
    """Clip to max and/or min values. To not use a limit, give argument None

    Args:
        num (float): input number
        num_min (float): minimum value; if less than this return this num.
            Use None to designate no minimum value.
        num_max (float): maximum value; if more than this return this num.
            Use None to designate no maximum value.

    Returns
        float: clipped version of input number
    """
    result = num
    if num_min is not None:
        result = max(result, num_min)
    if num_max is not None:
        result = min(result, num_max)
    return result
fe46f5a200ab24d517c57c5e1d93d4bf86192e13
700,310
def Singleton(name, bases, dict):
    """Use this metaclass on Converter subclasses to create an instance.

    Builds the class as usual, then immediately instantiates it, so the
    name in the defining module is bound to the single instance rather
    than the class itself.
    """
    cls = type(name, bases, dict)
    return cls()
d7a196f0a645dac8150d9fba46b426882358a9a6
700,311
def calculate_fraction_of_sn_discovered(
        log,
        surveyCadenceSettings,
        snSurveyDiscoveryTimes,
        redshifts,
        peakAppMagList,
        snCampaignLengthList,
        extraSurveyConstraints,
        zmin,
        zmax):
    """
    *Given a list of the snSurveyDiscoveryTimes calculate the
    discovery/non-discovery ratio for a given redshift range*

    **Key Arguments:**
        - ``log`` -- logger
        - ``surveyCadenceSettings`` -- the cadence settings for the survey
        - ``snSurveyDiscoveryTimes`` -- the discovery times of the SN relative to the survey year
        - ``redshifts`` -- redshifts of the sne
        - ``peakAppMagList`` -- list of the SN peak magnitude in each filter
        - ``snCampaignLengthList`` -- a list of campaign lengths in each filter
        - ``extraSurveyConstraints`` -- some extra survey constraints
        - ``zmin`` -- minimum redshift in the range
        - ``zmax`` -- maximum redshift in the range

    **Return:**
        - (discoveryFraction, tooFaintFraction, shortCampaignFraction)
    """
    filters = ['g', 'r', 'i', 'z']
    faintMagLimit = extraSurveyConstraints['Faint-Limit of Peak Magnitude']
    minCampaignLength = extraSurveyConstraints['Observable for at least ? number of days']
    lunarMonth = 29.3
    surveyYear = 12. * lunarMonth
    observableFraction = surveyCadenceSettings["Observable Fraction of Year"]

    discovered = []
    tooFaint = []
    shortCampaign = []
    discoveredRedshift = []
    tooFaintRedshift = []
    notDiscoveredRedshift = []
    shortCampaignRedshift = []

    dataDictionary = {}
    for idx, redshift in enumerate(redshifts):
        if redshift > zmax or redshift <= zmin:
            continue
        discoveryTimes = snSurveyDiscoveryTimes[idx]
        if discoveryTimes['any'] is not True:
            notDiscoveredRedshift.append(redshift)
            continue
        discoveryDayList = []
        faintDayList = []
        shortCampaignDayList = []
        for band in filters:
            discoveryDay = discoveryTimes[band]
            if not discoveryDay:
                continue
            if not (peakAppMagList[idx][band] < faintMagLimit):
                # Too faint at peak to constrain as a transient.
                faintDayList.append(discoveryDay)
                continue
            if snCampaignLengthList[idx]['max'] < minCampaignLength:
                shortCampaignDayList.append(discoveryDay)
            elif (surveyYear * observableFraction - discoveryDay) > minCampaignLength:
                # Enough of the observable year remains after discovery.
                discoveryDayList.append(discoveryDay)
            elif (snCampaignLengthList[idx][band]
                  - (surveyYear - discoveryDay)) > minCampaignLength:
                # Lightcurve tail still observable after the seasonal gap.
                discoveryDayList.append(discoveryDay)
            else:
                # Campaign cut short (object unobservable, e.g. behind the sun).
                shortCampaignDayList.append(discoveryDay)
        if discoveryDayList:
            discovered.append(min(discoveryDayList))
            discoveredRedshift.append(redshift)
        elif shortCampaignDayList:
            shortCampaign.append(min(shortCampaignDayList))
            shortCampaignRedshift.append(redshift)
        else:
            tooFaint.append(min(faintDayList))
            tooFaintRedshift.append(redshift)

    if len(notDiscoveredRedshift) > 0:
        dataDictionary["Undiscovered"] = notDiscoveredRedshift
    if len(tooFaintRedshift) > 0:
        dataDictionary[
            "Detected - too faint to constrain as transient"] = tooFaintRedshift
    if len(discoveredRedshift) > 0:
        dataDictionary["Discovered"] = discoveredRedshift
    if len(shortCampaignRedshift) > 0:
        dataDictionary[
            "Detected - campaign to short to constrain as transient"] = shortCampaignRedshift

    totalWithinVolume = float(
        len(discoveredRedshift) + len(notDiscoveredRedshift)
        + len(tooFaintRedshift) + len(shortCampaignRedshift))

    if len(discoveredRedshift) == 0:
        discoveryFraction = 0.
    else:
        discoveryFraction = float(len(discoveredRedshift)) / totalWithinVolume
    if len(tooFaintRedshift) == 0:
        tooFaintFraction = 0.
    else:
        tooFaintFraction = float(len(tooFaintRedshift)) / totalWithinVolume
    if len(shortCampaignRedshift) == 0:
        shortCampaignFraction = 0.
    else:
        shortCampaignFraction = float(
            len(shortCampaignRedshift)) / totalWithinVolume

    return discoveryFraction, tooFaintFraction, shortCampaignFraction
70409bba2f7dd2601ed073ad140d75c8ec863942
700,312
def create_model_info(config, loss_func, accuracy):
    """Create a dictionary of relevant model info.

    Parameters
    ----------
    config : dict
        Model/run configuration to log.
    loss_func
        The loss function (or its name) used during training.
    accuracy : ndarray
        Array of shape (epochs, 3) whose columns are the training,
        validation and testing accuracies.

    Returns
    -------
    dict
        Mapping with per-split accuracy columns, the configuration and
        the loss function.
    """
    model_info = {
        'training_accuracy': accuracy[:, 0],
        'validation_accuracy': accuracy[:, 1],
        'testing_accuracy': accuracy[:, 2],
        'model_parameters': config,
        'loss function': loss_func
    }
    return model_info
25cd8f49a0c7c7fd9b52f3d33c8a8ac24167fbff
700,313
def grid_coordinates(roi, x_divisions, y_divisions, position):
    """Map a position inside *roi* to integer grid-cell coordinates.

    The region of interest is split into x_divisions * y_divisions cells
    and the cell containing *position* is returned. The position is
    assumed to always lie inside the grid.

    :param roi: region of interest to be gridized, ((x0, x1), (y0, y1))
    :param x_divisions: number of divisions in the x axis
    :param y_divisions: number of divisions in the y axis
    :param position: (x, y) position to transform into grid coordinates
    """
    (x0, x1), (y0, y1) = roi
    cell_w = float(x1 - x0) / x_divisions
    cell_h = float(y1 - y0) / y_divisions
    col = int((position[0] - x0) / cell_w)
    row = int((position[1] - y0) / cell_h)
    return (col, row)
1db6e8ed8b1c0abde965e3a5536867ae32ac2228
700,314
def splitmessage(message):
    """Returns a tuple containing the command and arguments from a message.

    Returns None if there is no first word found.
    """
    assert isinstance(message, str)
    parts = message.split()
    if not parts:
        return None
    return (parts[0], parts[1:])
d8db56ef55097f9f8858de95ee3d7799c0dc127e
700,315
def dec_to_str(total):
    """Converts decimals to strings for more natural speech."""
    # Common fractions get spoken names.
    named = {0.125: "an eighth", 0.25: "a quarter",
             0.5: "a half", 0.75: "three quarters"}
    if total in named:
        return named[total]
    if total % 1 == 0:
        return str(int(total))
    if total % 0.5 == 0:
        return "{0:.1f}".format(total)
    return "{0:.2f}".format(total)
05e170eb21f5f0b188a32a634b5c617536c64a11
700,317
def maybe_cast_list(value, types):
    """
    Try to coerce list values into more specific list subclasses in types.
    """
    if not isinstance(value, list):
        return value
    candidates = types if type(types) in (list, tuple) else (types,)
    for candidate in candidates:
        if issubclass(candidate, list):
            try:
                return candidate(value)
            except (TypeError, ValueError):
                pass
    # No list subclass accepted the value: hand it back unchanged.
    return value
c3078c42087103ee8b0a8b3257d345e7d73c0fd7
700,318
def read_wi_table(wi_file, wi_column):
    """ Reads in the relative adaptiveness file """
    wi_by_codon = {}
    with open(wi_file, 'r') as infile:
        for line_number, raw_line in enumerate(infile, start=1):
            if line_number == 1:
                continue  # skip header
            fields = raw_line.strip("\n").split("\t")
            wi_by_codon[fields[0]] = fields[wi_column]
    return wi_by_codon
89a1b38e375678b8d221b7386dd742fe6745223c
700,319
from os import makedirs
from os.path import exists


def check_path(path):
    """Check if path ends with a slash ('/'); add one if not.

    The function also creates the directory if it does not exist.

    Parameters
    ----------
    path : str
        A path

    Returns
    -------
    path : str
        A functional path (trailing slash, directory created)
    """
    if path and not path.endswith('/'):
        path += '/'
    if not exists(path):
        makedirs(path)
    return path
f583d720d04d84f62fef4ca022a1b7d71c5d4493
700,320
import numpy


def w_conj_kernel_fn(kernel_fn):
    """Wrap a kernel function for which we know that
    kernel_fn(w) = conj(kernel_fn(-w))

    Negative w values are folded onto positive ones, so the underlying
    function is only ever evaluated for positive w. This is beneficial
    when the underlying kernel function does caching, as it improves the
    cache hit rate.

    :param kernel_fn: Kernel function to wrap
    :returns: Wrapped kernel function
    """
    def wrapped(theta, w, *args, **kw):
        if w >= 0:
            return kernel_fn(theta, w, *args, **kw)
        return numpy.conj(kernel_fn(theta, -w, *args, **kw))
    return wrapped
c5ba5bb741e9e696a94535c4373e84268a7ab95d
700,321
def loadWorld(worldName, store):
    """
    Load an imaginary world from a file.

    The specified file should be a Python file defining a global callable
    named C{world}, taking an axiom L{Store} object and returning an
    L{ImaginaryWorld}. This world (and its attendant L{Store}) should
    contain only a single L{Actor} instance, which will be used for the
    player character.

    WARNING: this executes arbitrary code from ``worldName``; only load
    world files from trusted sources.

    @param worldName: The path name to a Python file containing a world.
    @type worldName: L{str}

    @param store: The axiom data store to read the world into.
    @type store: L{Store}
    """
    with open(worldName, "rb") as f:
        codeobj = compile(f.read(), worldName, "exec")
    namespace = {}
    # exec (not eval) is the idiomatic way to run an "exec"-mode code object.
    exec(codeobj, namespace, namespace)
    return namespace['world'](store)
801c98b5be82e9d5c9ca19db9adbe3078f500674
700,322
import torch


def full(*args, **kwargs):
    """Create a tensor filled with a single value.

    Thin pass-through to ``torch.full`` — all positional and keyword
    arguments are forwarded unchanged, e.g. ``full((2, 3), 2.3)`` is the
    same as ``torch.full((2, 3), 2.3)``. (In ``treetensor`` this wrapper
    is presumably further decorated to support tree-structured size
    specs — confirm at the call sites.)
    """
    return torch.full(*args, **kwargs)
8c6df708b76a799c27a45979e9a43b3d3678ac8d
700,323
import math


def tan(x):
    """Get tan(x), with x in radians (delegates to math.tan)."""
    return math.tan(x)
112b52faee2f08262515086fe59b2ff978001200
700,324
def piece_placed(x, y, player, board):
    """Place a player's piece on the board.

    Player 0 marks cell (x, y) with 1 and player 1 marks it with 2; any
    other player number leaves the board untouched. Returns the (mutated)
    board.
    """
    marks = {0: 1, 1: 2}
    if player in marks:
        board[x][y] = marks[player]
    return board
ffcd46e11c3e5b0704ed66d6010dfc227106c752
700,325
def get_max_unsecured_debt_ratio(income):
    """Return the maximum unsecured-debt-to-income ratio, based on income."""
    if not isinstance(income, (int, float)):
        raise TypeError("Expected a real number.")
    # Below this income, you should not have any unsecured debt.
    min_income = 40000
    if income <= min_income:
        return 0
    slope = 1 / 600000
    min_ratio = 0.1
    # The maximum unsecured-debt-to-income ratio, for any income.
    max_ratio = 0.4
    return min((slope * income) + min_ratio, max_ratio)
ffff63807842197e2f60ebfd29b54ecf895f6279
700,326
def pubkey_to_merkletree(key, hashfunction, salt, prefix=""):
    """Convert a full signing-key pubkey into a merkletree dictionary.

    Recursively halves *key*, hashing the two child values with the
    salted *hashfunction*; *prefix* encodes a node's path ("0" = left,
    "1" = right). The top-level call returns just the tree dict;
    recursive calls return (node_hash, subtree_dict).
    """
    tree = {}
    left, right = key[0], key[1]
    if len(key) > 2:
        mid = int(len(key) / 2)
        left, left_tree = pubkey_to_merkletree(
            key[:mid], hashfunction, salt, prefix + "0")
        right, right_tree = pubkey_to_merkletree(
            key[mid:], hashfunction, salt, prefix + "1")
        tree.update(left_tree)
        tree.update(right_tree)
    node_hash = hashfunction(left + right, salt)
    tree[prefix] = node_hash
    if len(key) == 2:
        # Leaf pair: record the raw parts as the two children.
        tree[prefix + "0"] = left
        tree[prefix + "1"] = right
    if prefix:
        return node_hash, tree
    return tree
50e5283a7189aa0202e93201d70f7343719f1f13
700,328
from typing import Any
import json


def is_jsonable(x: Any):
    """
    Check if an object is json serializable.
    Source: https://stackoverflow.com/a/53112659
    """
    try:
        json.dumps(x)
    except (TypeError, OverflowError):
        return False
    return True
3735de8bd1940d84c185142c0a4387366d7cd9c2
700,329
def add_number_of_different_roles(dev_type: str) -> int:
    """
    INPUT
        dev_type - dev_type answer (separated by ';')
    OUTPUT
        numeric value - number of different dev types; 0 when the input
        is not a string (e.g. NaN/None from a survey dataframe).
    """
    try:
        return len(dev_type.split(';'))
    except AttributeError:
        # Non-string input (NaN, None, numbers) has no .split; the
        # previous bare `except` hid every other error type too.
        return 0
78872b9101b128cc107a0194fc85b353f1d2f836
700,330
def GetBigQueryTableID(tag):
    """Returns the ID of the BigQuery table associated with tag.

    This ID is appended at the end of the table name.
    """
    # BigQuery table names can contain only alpha numeric characters and
    # underscores, so strip everything else.
    allowed = [ch for ch in tag if ch.isalnum() or ch == '_']
    return ''.join(allowed)
0fe659fd3c7ca3df5f061289dad5635841146901
700,331
def _linear_transform(src, dst): """ Parameters of a linear transform from range specifications """ (s0, s1), (d0,d1) = src, dst w = (d1 - d0) / (s1 - s0) b = d0 - w*s0 return w, b
7f55a2617721fdefcc724bcb8ce9f880d7bcd846
700,332
def feature_within_s(annolayer, list_of_s):
    """Extracts all <annolayer> elements from all sentence-elements in
    list_of_s; returns a flat list of <annolayer>-elements."""
    found = []
    for sentence in list_of_s:
        found.extend(sentence.findall('.//' + annolayer))
    return found
df6ed3603381a4b8d2ea12fc483fa37ea3068372
700,333
def showname(keyvalue):
    """Filter that returns the display value for one of the given status keys."""
    if keyvalue == 'P':
        return 'Accepted'
    if keyvalue == 'C':
        return 'Created'
    if keyvalue == 'Z':
        return 'Closed'
    if keyvalue == 'O':
        return 'On Wait'
    # Unknown keys raise KeyError, matching the original dict lookup.
    raise KeyError(keyvalue)
59453d5e0dd31696b99d01c8b927297adddec10c
700,334
def processPostMessage(post_message, status_type):
    """Shorten post texts longer than 500 characters.

    The text is cut at the last space before the 500-character mark and an
    ellipsis is appended.

    Output: a tuple of strings: read_more prefix (empty if not shortened),
    post text.
    """
    if len(post_message) <= 500:
        return "", post_message
    truncated = post_message[:500]
    truncated = truncated[:truncated.rfind(' ')] + "..."
    if status_type == 'added_video':
        return "\nЧитати повністю і дивитися відео:\n", truncated
    return "\nЧитати далі:\n", truncated
2d21ec04ef863b57f95bb4b8256f2195559e6f8e
700,335
def data_for_keys(data_dict, data_keys):
    """Build a dict mapping each requested key to its value, or '' when
    the key is missing from ``data_dict``."""
    result = {}
    for key in data_keys:
        result[key] = data_dict.get(key, '')
    return result
b844ae2dba804e179e7e8dd08166f392a90e7f7a
700,336
def distinct_values_bt(bin_tree):
    """Return the values that appear exactly once in a binary tree.

    Values are reported in the order they are first encountered during an
    in-order traversal.

    Big O: time O(N), space O(N).
    """
    counts = {}

    def _visit(node):
        if node is None:
            return
        _visit(node.left)
        counts[node.val] = counts.get(node.val, 0) + 1
        _visit(node.right)

    _visit(bin_tree.root)
    return [value for value, seen in counts.items() if seen == 1]
8d84d57559a0c813e7ac12199680172a9b591be9
700,337
import subprocess


def runshell(cmd):
    """Run a shell command, raising RuntimeError on a non-zero exit.

    Returns the CompletedProcess with captured stdout/stderr.
    """
    proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if proc.returncode == 0:
        return proc
    message = "Subprocess: \"{0}\" failed, std err = {1}".format(str(cmd), str(proc.stderr))
    raise RuntimeError(message)
d5a617cec03fe70d601f496f9f62b6c30fcdbd1e
700,338
def fix_iobtag(iob, DESC_DECISION):
    """Normalize a BBN Corpus entity label.

    Fixes known labelling errors in the original corpus and folds
    under-populated categories into larger ones, then applies the
    DESC_DECISION policy to the *_DESC labels (FAC_DESC, GPE_DESC,
    ORG_DESC, PRODUCT_DESC, PER_DESC):

        'keep'   -- leave DESC labels untouched
        'merge'  -- relabel the DESC entity with the corresponding
                    entity label (e.g. PER_DESC -> PERSON)
        'remove' -- relabel the DESC entity as O

    Raises:
        ValueError: if DESC_DECISION is not one of the three policies.
    """
    relabel = {
        # Corrections of errors in the original labelling.
        'FAC_DESC:STREET_HIGHWAY': 'FAC_DESC:HIGHWAY_STREET',
        'PRODCUT:OTHER': 'PRODUCT:OTHER',
        'FAC:HOTEL': 'ORGANIZATION:HOTEL',
        'DATE': 'DATE:DATE',
        'LOCATION': 'LOCATION:OTHER',  # 5 mentions (districts)
        # Categories with too few mentions, dropped or folded into neighbours.
        'CONTACT_INFO:OTHER': 'O',  # 3 mentions
        'SUBSTANCE:NUCLEAR': 'O',  # 3 mentions
        'CONTACT_INFO:ADDRESS': 'O',  # 4 mentions
        'LOCATION:BORDER': 'LOCATION:OTHER',  # 1 mention
        'LOCATION:CITY': 'GPE:CITY',  # 2 mentions
        'ORGANIZATION:CITY': 'GPE:CITY',  # 1 mention
        'ORGANIZATION:STATE_PROVINCE': 'GPE:CITY',  # 1 mention
        'QUANTITY:TEMPERATURE': 'QUANTITY:OTHER',  # 1 mention
        'QUANTITY:SPEED': 'QUANTITY:OTHER',  # 14 mentions
        # 20 mentions; 3 of these are mislabelled (kilobytes, megabytes).
        'QUANTITY:ENERGY': 'QUANTITY:OTHER',
        'PRODUCT:FOOD': 'PRODUCT:OTHER',  # 1 mention
        'PRODUCT:DRUG': 'SUBSTANCE:DRUG',  # 2 mentions
        'WORK_OF_ART:PAINTING': 'WORK_OF_ART:OTHER',  # 13 mentions
    }
    fixed_iob = relabel.get(iob, iob)

    if DESC_DECISION not in {'merge', 'keep', 'remove'}:
        raise ValueError("DESC_DECISION must be 'merge', 'keep' or 'remove'.")
    if DESC_DECISION == 'remove' and 'DESC' in fixed_iob:
        fixed_iob = 'O'
    elif DESC_DECISION == 'merge' and 'DESC' in fixed_iob:
        if 'ORG_DESC' in fixed_iob:
            fixed_iob = 'ORGANIZATION:' + fixed_iob.split(':')[1]
        elif fixed_iob == 'PER_DESC':
            fixed_iob = 'PERSON'
        else:
            fixed_iob = ''.join(fixed_iob.split('_DESC'))
    return fixed_iob
2743e7d36c7d8153a7cf6694a69e9a212219ae8f
700,339
from typing import List


def get_answered_questions(question_list: List[List[bytes]]) -> list:
    """Filter the parsed question blocks down to those with an answer.

    Dont let the type hint confuse you — each question is a list of byte
    lines (the output of get_question_list(file_list)). A question counts
    as answered when at least one line sits between its last
    ``</summary>`` line and the final line of the block.

    Args:
        question_list: list of question blocks.

    Returns:
        The answered question blocks, in their original order.
    """
    answered = []
    for question in question_list:
        summary_end = 0
        # Remember the position of the last closing-summary line.
        # Fix: the previous code used list.index(line), which returns the
        # FIRST occurrence and therefore mis-located duplicated lines.
        for pos, line in enumerate(question):
            if b'</summary>' in line:
                summary_end = pos
        if question[summary_end + 1:len(question) - 1]:
            answered.append(question)
    return answered
d485b374721f445ab62853eaa67de65bd2a893e2
700,340
def split_data_list(list_data, num_split):
    """Split a list of data items into ``num_split`` roughly equal parts.

    When the length is not divisible, the remainder is appended to the
    last part.

    list_data: list of data items
    returning: list with num_split elements, each a list of data items
    """
    total = len(list_data)
    chunk = total // num_split
    print("num_data_all: %d" % total)
    #
    data_split = [list(list_data[i * chunk:(i + 1) * chunk])
                  for i in range(num_split)]
    # Hand any leftover items to the final part.
    consumed = chunk * num_split
    if consumed < total:
        data_split[-1].extend(list_data[consumed:])
    #
    print("list_data split: {}".format([len(part) for part in data_split]))
    #
    return data_split
7282d1ae89f830d5b48fa73ea3d355cd63344f5c
700,341
def patch_set_approved(patch_set):
    """Return True if the patchset has been approved.

    :param dict patch_set: De-serialized dict of a gerrit change
    :return: True if one of the patchset reviews approved it.
    :rtype: bool
    """
    for review in patch_set.get('approvals', []):
        if review['type'] == 'Approved':
            return True
        if review['type'] == 'Workflow' and int(review['value']) > 0:
            return True
    return False
af7e56be45e537be9308f0031fe3923425afd48c
700,343
import os


def get_files_with_ext(path, str_ext, flag_walk=False):
    """List entries under ``path`` whose names end with ``str_ext``.

    Args:
        path: directory to inspect.
        str_ext: required filename suffix (e.g. ".txt").
        flag_walk: when True, recurse into all subdirectories (files
            only); when False, look at the direct children of ``path``
            (files and directories alike).

    Returns:
        List of matching paths.
    """
    candidates = []
    if flag_walk:
        # Walk the whole tree and gather every file path.
        for root, dirs, files in os.walk(path):
            candidates.extend(os.path.join(root, name) for name in files)
    else:
        # Only the immediate children.
        candidates = [os.path.join(path, name) for name in os.listdir(path)]
    #
    return [item for item in candidates if item.endswith(str_ext)]
1a8ade0b5efead0b6260430145e601d98f6fa057
700,344
def PySet_Pop(space, w_set):
    """Return a new reference to an arbitrary object in the set, and removes the
    object from the set. Return NULL on failure. Raise KeyError if the set is
    empty. Raise a SystemError if set is an not an instance of set or its
    subtype."""
    # NOTE(review): "pop" is looked up on the set *type* (space.w_set) with
    # w_set passed as self — i.e. an unbound call set.pop(w_set). This
    # presumably bypasses subclass overrides to match CPython's C-level
    # behavior; confirm that it is not meant to be
    # space.call_method(w_set, "pop").
    return space.call_method(space.w_set, "pop", w_set)
5303f9b7afd4d4cfdb6ee065e860720af5b88b8e
700,345
import random


def random_mac_address(local=True):
    """Generate a random MAC address using a known virtualization OUI.

    Args:
        local: when True, set the locally-administered bit in the first
            octet.

    Returns:
        MAC address string such as "00:50:56:0A:0B:0C".
    """
    # OUIs of common virtualization vendors.
    vendors = (
        (0x00, 0x05, 0x69),  # VMware
        (0x00, 0x50, 0x56),  # VMware
        (0x00, 0x0C, 0x29),  # VMware
        (0x00, 0x16, 0x3E),  # Xen VMs
        (0x00, 0x03, 0xFF),  # Microsoft Hyper-V, Virtual Server, Virtual PC
        (0x00, 0x1C, 0x42),  # Parallells
        (0x00, 0x0F, 0x4B),  # Virtual Iron 4
        (0x08, 0x00, 0x27),  # Sun Virtual Box
    )
    oui = random.SystemRandom().choice(vendors)
    octets = [
        oui[0],
        oui[1],
        oui[2],
        random.randint(0x00, 0x7f),
        random.randint(0x00, 0xff),
        random.randint(0x00, 0xff),
    ]
    if local:
        octets[0] |= 2
    return ":".join("{0:02X}".format(octet) for octet in octets)
eb89ab223d0e8ae9f011277d92f3cd01d27627e8
700,346
def check_win(game, players): """ -1: ongoing 0: tie > 0: id of winner """ # print(status) # print(status) for player in players: if len(player.destination_cards) == 0: return player.id, 1 if game.card_index >= len(game.cards) or player.trains == 0: if len(players[0].destination_cards) == len(players[1].destination_cards): # print(player.routes, player.destination_cards) return 0, 0 elif len(players[0].destination_cards) > len(players[1].destination_cards): return players[1].id, 0 else: return players[0].id, 0 for u in range(len(game.status)): for v in range(len(game.status)): if 0 in game.status[u][v]: return -1, -1 # print(status) return 0
0127de10c9b6b9b08dbf07b3747d92a2a9b4115c
700,347
def message_has_label(message, label):
    """Tests whether a message has a label

    Args:
        message: message to consider.
        label: label to check.

    Returns:
        True/False.
    """
    attached = message.get('labelIds', [])
    return label['id'] in attached
634808b2533469daa42779a3563f127d06ce1b14
700,348
def indent_func_def(func_def):
    """Wrap a long function signature so parameters line up under '('.

    Signatures shorter than 80 characters are returned untouched,
    following the style guide's max-column rule.
    """
    if len(func_def) < 80:
        return func_def
    pad = func_def.index('(') * ' '
    head, *rest = func_def.split(',')
    return head + ''.join(',\n{}{}'.format(pad, part) for part in rest)
371bde9c8580982894759d6716a9be6d4f8521a5
700,349
import sys


def good_default_options():
    """Return platform-appropriate default scroll-acceleration options.

    Probably very subjective, but these are reasonable defaults which
    might be used. Usage in your config might be like::

        import common
        globals().update(common.good_default_options())
    """
    if sys.platform != "darwin":
        # Common desktop platforms (Linux, Windows) have no built-in mouse
        # scroll wheel acceleration. (Android does, but we don't expect
        # that here...)
        return {"multiplier": 0.5, "exp": 2}
    # Darwin (macOS) already accelerates the scroll wheel, so apply much
    # less extra acceleration.
    return {"exp": 0.5}
29a739081f70bdce5fda4070036823854a22f53f
700,350
def transform_case(input_string):
    """Normalize a string field to lowercase."""
    lowered = input_string.lower()
    return lowered
4d15f33781c1b58d3a04a52fcc8e5f5042e33bdf
700,352
from typing import Union


def format_numbers_consistently(number: Union[int, float]) -> Union[int, float]:
    """Normalize a numeric value: whole numbers become int, others float.

    Prevents inconsistencies with how numbers are formatted
    (e.g. '12.0' vs '12').

    :param number: numeric value to format
    :return: number as an integer if applicable, otherwise float
    """
    as_float = float(number)
    return int(as_float) if as_float.is_integer() else as_float
ebf7acdca53ac331ac7a5e1e8ba2ee416cc7112b
700,353
import re

# Compiled once at import time; the raw string makes "\d" a proper regex
# escape instead of relying on Python passing an unknown escape through.
_DATE_PATTERN = re.compile(r"\d{4}-\d{2}-\d{2}")


def extract_date_from_date_time(date_time: str) -> str:
    """
    Given a date in format YYYY-MM-DDTHH:MM:SS, extract the date part
    (i.e. YYYY-MM-DD)

    :param date_time : str (DATETIME_FORMAT)
    :return str a date in DATE_FORMAT
    :raises ValueError: if the input does not contain exactly one date.
    """
    assert type(date_time) == str, "date_time must be of type str. Got %s" % str(type(date_time))
    match = _DATE_PATTERN.findall(date_time)
    if len(match) == 1:
        return match[0]
    raise ValueError("Invalid date_time input given. Got (%s)" % date_time)
4727273615fa38a48eace841c4ff8760ab10d08e
700,354
def cli(ctx, role_name, description, user_ids="", group_ids=""):
    """Create a new role.

    Output:

        Details of the newly created role.
        For example::

            {'description': 'desc',
             'url': '/api/roles/ebfb8f50c6abde6d',
             'model_class': 'Role',
             'type': 'admin',
             'id': 'ebfb8f50c6abde6d',
             'name': 'Foo'}

    .. versionchanged:: 0.15.0
        Changed the return value from a 1-element list to a dict.
    """
    roles_api = ctx.gi.roles
    return roles_api.create_role(role_name, description,
                                 user_ids=user_ids, group_ids=group_ids)
7b9b9f0d72dd2312448eee96dc9743b2b9657271
700,356
import numpy


def sentence_to_weight_matrix(sentence):
    """
    Converts the dependency graph of a sentence of tokens into a weight
    matrix.
    weight[u, v] = 0   iff u == v
    weight[u, v] = 1   iff u != v and
                       are_bidirectionaly_directly_connected(u, v) == True
    weight[u, v] = inf otherwise
    """
    size = len(sentence)
    weight = numpy.full([size, size], numpy.inf)
    for token in sentence:
        links = (token.features['dependency_to'] +
                 token.features['user_dependency_to'])
        for target, _ in links:
            u = token.features['tmp_id']
            v = target.features['tmp_id']
            weight[u, v] = weight[v, u] = 1
    # Zero the diagonal last, so even a self-edge ends up with weight 0.
    numpy.fill_diagonal(weight, 0)
    return weight
e413de859ce825b8fc0c00b4241d74746d26b0b0
700,358
import csv


def find_by_column(filename, column, value):
    """Find interactions registered in the DLT matching one column value.

    Args:
        filename: path to the CSV file (with a header row).
        column: column name to test.
        value: value the column must equal.

    Returns:
        List of row dicts whose ``column`` equals ``value``.
    """
    with open(filename) as f:
        reader = csv.DictReader(f)
        # Renamed the accumulator from "list", which shadowed the builtin.
        return [row for row in reader if row[column] == value]
928f53e72c7b5e3e63316748545a530c20559f8b
700,359
from datetime import datetime


def dt_to_dec(dt):
    """Convert a datetime to a decimal year (e.g. mid-2021 -> 2021.5)."""
    start_of_year = datetime(dt.year, 1, 1)
    start_of_next = start_of_year.replace(year=dt.year + 1)
    elapsed = (dt - start_of_year).total_seconds()  # seconds so far
    year_length = float((start_of_next - start_of_year).total_seconds())
    return dt.year + elapsed / year_length
0841f21c245b0f3a2a1404c7c8c5bff9a26aae21
700,360
def enhanceEntries(entriesList, feedId, feedName):
    """Attach the source feed's id and name to every entry, so each item
    then carries all the information that we need on its own.

    Parameters
    ----------
    entriesList : list
        A List of RSSEntries (FeedParserDicts)
    feedId : string
        The URL of the source feed
    feedName : string
        The clear text name of the source

    Returns
    -------
    entriesList : list
        The enhanced entriesList
    """
    for item in entriesList:
        # Item assignment (not dict.update) so any overridden __setitem__
        # on FeedParserDict keeps working.
        item["source"] = feedId
        item["feed_name"] = feedName
    return entriesList
db4bf6a82ca4fe41ee0797d361b962126836a7b8
700,361
def tf_out():
    """Static equilibrium results from threebar funicular."""
    xyz = {
        0: [0.29289321881345254, -0.7071067811865475, 0.0],
        1: [1.0, 0.0, 0.0],
        2: [2.5, 0.0, 0.0],
        3: [3.207106, -0.7071067, 0.0],
    }
    force = {(0, 1): -1.414213561, (1, 2): -1.0, (2, 3): -1.41421356}
    length = {(0, 1): 1.0, (1, 2): 1.5, (2, 3): 1.0}
    residual = {0: [1.0, 1.0, -0.0], 3: [-1.0, 1.0, 0.0]}
    return {"xyz": xyz, "force": force, "length": length, "residual": residual}
1f60894de3cb14b147baa552e4485aed0544a2b0
700,363
from bs4 import BeautifulSoup


def get_text_from_XML_without_saving(path):
    """Extract the visible text from an XML/HTML file.

    Script and style elements are dropped, lines are stripped, multi-space
    runs are split into phrases and empty chunks removed.

    :param path: path to the XML file
    :return: Text extracted from the path
    """
    # Fix: use a context manager so the file handle is always closed
    # (the previous version leaked it).
    with open(path, 'r', encoding='utf8') as handle:
        soup = BeautifulSoup(handle)
    for script in soup(["script", "style"]):
        script.extract()
    text = soup.get_text()
    lines = (line.strip() for line in text.splitlines())
    chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
    return '\n'.join(chunk for chunk in chunks if chunk)
f6fd435ae75ca6cc7743e8d0b7da6afca24b0719
700,364
import os


def find(dir_name :str) -> list:
    """Recursively list the regular files stored in a given directory or
    one of its subdirectories.

    Args:
        dir_name: A String corresponding to an existing directory.

    Returns:
        A list of String, each of them corresponding to a file.
    """
    collected = []
    for entry in os.listdir(dir_name):
        full_path = os.path.join(dir_name, entry)
        if os.path.isdir(full_path):
            collected.extend(find(full_path))
        else:
            collected.append(full_path)
    return collected
369b9997bdf36949972140a5c7a54fac72c3d25d
700,365
def pysiphash(uint64):
    """Convert SipHash24 output to Py_hash_t

    Returns the (signed 32-bit, signed 64-bit) reinterpretations of the
    unsigned 64-bit input.
    """
    assert 0 <= uint64 < (1 << 64)
    # Reinterpret the unsigned 64-bit value as signed.
    int64 = uint64 - (1 << 64) if uint64 > (1 << 63) - 1 else uint64
    # Fold the high word into the low word (mangle to uint32) ...
    uint32 = (uint64 ^ uint64 >> 32) & 0xffffffff
    # ... then reinterpret as signed 32-bit.
    int32 = uint32 - (1 << 32) if uint32 > (1 << 31) - 1 else uint32
    return int32, int64
a0a4bb7703aef9a95146c519aa1ace751bcf532e
700,366
import random
import string


def get_unique_key(str_len: int) -> str:
    """Return a random lowercase ASCII string of length ``str_len``.

    Args:
        str_len (int): Length of string.

    Returns:
        str: Random string
    """
    letters = string.ascii_lowercase
    return "".join(random.choice(letters) for _ in range(str_len))
de8a7743ac5b802e3fe5687e4305ec14e26987a8
700,367
from typing import OrderedDict


def sort_by_key(value, reverse=False):
    """
    sorts a dictionary by its keys.

    :param dict value: dict to be sorted.
    :param bool reverse: sort by descending order. defaults to False if
                         not provided.
    :rtype: OrderedDict
    """
    ordered_pairs = sorted(value.items(), key=lambda pair: pair[0],
                           reverse=reverse)
    return OrderedDict(ordered_pairs)
df200eaf2810e04281e8f8379ded09f21ed6df74
700,369
def transformation(job_title):
    """Reduce a gendered "masculine / feminine" job title to its unisex
    form.

    Examples of main transformations:
        "Débardeur / Débardeuse"          --->>> "Débardeur"
        "Ingénieur / Ingénieure halieute" --->>> "Ingénieur halieute"
        "Élagueur-botteur / Élagueuse-botteuse" --->>> "Élagueur-botteur"
        "Accessoiriste" (no '/')          --->>> unchanged

    Arguments : job title
    Return : modified job title (unchanged when the title does not split
             into exactly two '/'-separated parts)
    """
    try:
        masculine, feminine = (part.strip() for part in job_title.split('/'))
    except ValueError:
        return job_title
    # The feminine part repeats the leading words of the masculine part;
    # keep only whatever tail it adds (e.g. "en céramique", "halieute").
    skip = masculine.count(' ') + 1
    tail = ' '.join(feminine.split()[skip:])
    return masculine + (' ' + tail if tail else '')
f8580e9a64070ba96cfd540a58327f904c311f9b
700,370
import random


def coin_flip():
    """Randomly return 'heads' or 'tails'."""
    return "tails" if random.randint(0, 1) else "heads"
c46afd1e6f6b899448501043400d1a7570aecabe
700,371
import requests


def spotlight(text):
    """Annotate *text* through the DBpedia Spotlight API.

    Sends the text (e.g. "What is a car?", "President Obama") to the
    public Spotlight endpoint and returns the parsed JSON response.
    """
    endpoint = 'http://api.dbpedia-spotlight.org/en/annotate'
    payload = {"text": text, 'confidence': '0.35'}
    response = requests.post(endpoint,
                             headers={'Accept': 'application/json'},
                             data=payload)
    return response.json()
dcf47994ae343cba9102d26e3cb86954e6f6c213
700,373
def detect_anomalies_cons(residuals, threshold, summary=True):
    """
    Compares residuals to a constant threshold to identify anomalies.
    Can use a set threshold level or the threshold determined by the
    set_cons_threshold function.

    Arguments:
        residuals: series of model residuals.
        threshold: dict with 'low'/'high' constant threshold values.
        summary: if True, will print the ratio of detections.

    Returns:
        detected_anomaly: boolean series where True (1) = anomalous point
    """
    low, high = threshold['low'], threshold['high']
    # True wherever the residual escapes the [low, high] band.
    detected_anomaly = (residuals[0] < low) | (high < residuals[0])
    if summary:
        print('ratio of detections: %f' % ((sum(detected_anomaly) / len(detected_anomaly)) * 100), '%')
    return detected_anomaly
4c37ca93ab8cbde85b57cb94b44ac1e234809be6
700,374
import redis


def connect_redis(dsn):
    """
    Return the redis connection

    :param dsn: The dsn url (e.g. ``redis://host:port/db``)
    :return: Redis
    """
    # NOTE(review): from_url only parses the DSN; redis-py clients
    # typically connect lazily on the first command — confirm for the
    # installed version if eager connection checks are needed here.
    return redis.StrictRedis.from_url(url=dsn)
0b17418c36cd9a6eb5c0b4ea40a40762c1e41259
700,375
def _tag_depth(path, depth=None): """Add depth tag to path.""" # All paths must start at the root if not path or path[0] != '/': raise ValueError("Path must start with /!") if depth is None: depth = path.count('/') return "{}{}".format(depth, path).encode('utf-8')
6746ce97ac2569e2775cbdc13510263df0860deb
700,376
def calc_sl_price(contract_price, sl_percent):
    """Returns price that is sl_percent below the contract price"""
    discounted = contract_price * (1 - sl_percent)
    return round(discounted, 2)
487690208c81dcc708b526630929f1ba424b0c0f
700,377
import hashlib
import hmac


def verifyhash_password_hashlib(password: str, hash: str) -> bool:
    """check if password is correct using hashlib

    WARNING: unsalted MD5 is cryptographically broken for password
    storage; it is kept here only for compatibility with existing hashes.
    Prefer a real password KDF (bcrypt/scrypt/argon2) for new code.

    Args:
        password (str): user password
        hash (str): user password encrypted

    Returns:
        bool: True if correct password else False
    """
    digest = hashlib.md5(password.encode('utf-8')).hexdigest()
    # Constant-time comparison avoids leaking match position via timing.
    return hmac.compare_digest(digest, hash)
a3ca663abc33777df4f33d0483ba598cef8c5c3b
700,379
def riscale_coordination(coordinates, x, y, z):
    """Shift each coordinate row by (-x, -y, -z), stringifying the label.

    Robert: I do not understand why you would want this quantity.
    """
    return [[str(row[0]), row[1] - x, row[2] - y, row[3] - z]
            for row in coordinates]
cee155c66b857f9bee6a2e6975f3797de40dfdcc
700,380
def get_bonds(input_group):
    """Utility function to get indices (in pairs) of the bonds."""
    atoms = input_group.bond_atom_list
    return [(atoms[2 * i], atoms[2 * i + 1])
            for i in range(len(input_group.bond_order_list))]
4f39d9d588a1d3e919fcd5e369cd72c6dbac3442
700,381
def read_data(file, delimiter="\n"):
    """Read a text file and return its contents split on ``delimiter``."""
    with open(file, 'rt', encoding="utf-8") as handle:
        contents = handle.read()
    return contents.split(delimiter)
e0854a2f7ac2190f3b296725b36da0bb4ad14ce3
700,383
def get_sources(dataframe):
    """
    extract sources

    :param pandas.core.frame.DataFrame dataframe:
    :rtype: set
    :return: set of archive.org links
    """
    links = set()
    for source_map in dataframe['incident_sources']:
        links.update(source_map.keys())
    return links
468c0cf6428833c9b05c06415917a516471189a5
700,385
from datetime import datetime


def set_mediafile_attrs(mediafile, ufile, data, user):
    """Copy metadata from the uploaded file into the model instance.

    Returns the mutated ``mediafile`` for convenience.
    """
    mediafile.name = ufile.name
    mediafile.original_filename = ufile.name
    mediafile.filesize = ufile.size
    mediafile.original_path = data['pathinfo0']
    # jupload sends the modification date as "dd/MM/yyyy HH:mm:ss".
    mediafile.original_filedate = datetime.strptime(
        data['filemodificationdate0'], "%d/%m/%Y %H:%M:%S")
    mediafile.md5sum = data['md5sum0']
    mediafile.mime_type = data['mimetype0']
    mediafile.uploaded_by = user
    return mediafile
e7c373dadb3cd0087184fc725fb749e1a13e0b57
700,386
import ipaddress


def is_valid_ipv6_address(ip):
    """Return True if valid ipv6 address"""
    try:
        ipaddress.IPv6Address(ip)
    except ipaddress.AddressValueError:
        return False
    return True
33f4785e768f5117c6fe43c320e2290791fc86a5
700,387
import argparse


def parse_args():
    """Parses arguments for semantic-boundary training."""
    parser = argparse.ArgumentParser(
        description='Train semantic boundary with given latent codes and '
                    'attribute scores.')
    parser.add_argument('-o', '--output_dir', type=str, required=True,
                        help='Directory to save the output results. (required)')
    parser.add_argument('-c', '--latent_codes_path', type=str, required=True,
                        help='Path to the input latent codes. (required)')
    parser.add_argument('-s', '--scores_path', type=str, required=True,
                        help='Path to the input attribute scores. (required)')
    # Fix: the help text previously claimed "(default: 0.2)" although the
    # actual default is 0.5; the message now matches the code.
    parser.add_argument('-n', '--chosen_num_or_ratio', type=float, default=0.5,
                        help='How many samples to choose for training. '
                             '(default: 0.5)')
    parser.add_argument('-r', '--split_ratio', type=float, default=0.7,
                        help='Ratio with which to split training and validation '
                             'sets. (default: 0.7)')
    parser.add_argument('-V', '--invalid_value', type=float, default=None,
                        help='Sample whose attribute score is equal to this '
                             'field will be ignored. (default: None)')
    return parser.parse_args()
c5315ef2e68214403c4168074108401318b4e22e
700,388
def get_list_view_name(model):
    """ Return list view name for model. """
    object_name = model._meta.object_name
    return '{}-list'.format(object_name.lower())
765f4b2456d319a6cc5657dbe1e04d3eab471b42
700,389
import copy


def render_plugins(plugins, context, placeholder, processors=None):
    """
    Renders a collection of plugins with the given context, using the
    appropriate processors for a given placeholder name, and returns a
    list containing a "rendered content" string for each plugin.

    This is the main plugin rendering utility function, use this function
    rather than Plugin.render_plugin().
    """
    rendered = []
    num_plugins = len(plugins)
    for position, plugin in enumerate(plugins):
        # Each plugin learns its position within the batch before render.
        plugin._render_meta.total = num_plugins
        plugin._render_meta.index = position
        rendered.append(plugin.render_plugin(copy.copy(context), placeholder,
                                             processors=processors))
    return rendered
2217033cea70a0c88dd6ab378cfc60f71ccfaa4f
700,390