content (string, 35–416k chars) · sha1 (string, 40 chars) · id (int64, 0–710k)
def Pi_VH(phi, cond_GT):
    """Osmotic pressure using the van 't Hoff linear approximation."""
    rho = phi / cond_GT['Va']
    kBT = cond_GT['kT']
    return rho * kBT
6dd35f8da091a5598633954ead3cfea7104635c0
696,303
def subtract_vect(a, b):
    """Subtract vector b from vector a.

    Deprecated, use mpmath instead!

    :param a: [float, float, float]
    :param b: [float, float, float]

    >>> subtract_vect([1, 2, 3], [3, 2, 2])
    (-2, 0, 1)
    """
    return (a[0] - b[0], a[1] - b[1], a[2] - b[2])
3465a670158a0ae34879a7d21599a9b098733f4d
696,304
def _join_matchers(matcher, *matchers):
    """Joins matchers of lists correctly."""
    output = {k: [e for e in v] for k, v in matcher.items()}
    for matcher_to_join in matchers:
        has_default = False
        for key, value in matcher_to_join.items():
            if key == "default":
                # Default has a special case.
                has_default = True
            if key in output:
                output[key].extend(value)
            else:
                # Important to add the "default" value, if it exists, as the new
                # key was a "default", but is not anymore.
                output[key] = value
                if "default" in output and key != "default":
                    value.extend(output["default"])
        if has_default:
            for key in output.keys():
                # All keys from output that are not in matcher_to_join were part
                # of "default" in matcher_to_join, so the default needs to be
                # added to them.
                if key not in matcher_to_join:
                    output[key].extend(matcher_to_join["default"])
    return output
226419a14f7609b5ba8aa8010d41b029bca5104e
696,305
import re

def generate_root_filename(rawname, add="_mass"):
    """Generate the appropriate root filename based on a file's LJH name.

    Takes /path/to/data_chan33.ljh --> /path/to/data_mass.root
    """
    fparts = re.split(r"_chan\d+", rawname)
    prefix_path = fparts[0]
    return prefix_path + add + ".root"
412056f37ffea1835b2f63b346ff4168d8391d2f
696,306
def skills_detector(text, skills=None):
    """Search for known skills in text."""
    if skills is None:
        skills = {
            'data analyst', 'sql', 'аналитик', 'разработчик', 'pandas', 'numpy',
            'scipy', 'excel', 'matplotlib', 'seaborn', 'statistics', 'tableau',
            'sas', 'power bi', 'powerbi', 'ltv', 'cac', 'retention',
            'data engineer', 'kubernetes', 'asyncio', 'flask', 'django', 'api',
            'linux', 'bash', 'mongodb', 'sqlalchemy', 'fastapi', 'kafka', 'etl',
            'spark', 'python', 'data warehousing', 'pig', 'hive', 'hadoop',
            'hbase', 'elastic', 'jenkins', 'git', 'docker', 'airflow', 'scala',
            'java', 'lucene', 'data science', 'random forest',
            'logistic regression', 'machine learning', 'scikit',
            'computer vision', 'data mining', 'matlab', 'cnn', 'rnn',
            'linear algebra', 'keras', 'tensorflow', 'pytorch', 'torch', 'bert',
            'theano', 'deep learning', 'image processing',
            'digital signal processing', 'opencv', 'uplift', 'lgd', 'catboost',
            'xgboost', 'lightgbm', 'ocr', 'tensorrt', 'openvino',
            'object detection', 'unet', 'u-net', 'vgg', 'resnet', 'nltk',
            'gensim',
        }
    detected_skills = []
    text = text.lower()
    for skill in skills:
        if skill in text:
            detected_skills.append(skill)
    return detected_skills
54aa08dfb7c86fca9fb1b1b3e6a2b5fdfc7745b2
696,307
import pprint

def lookup(unit_str):
    """Look up the input keyword in the dictionary and return the standard synonym."""
    unit_dict = {
        "T": ["T", "T_b", "T_cmb", "T_CMB", "K", "K_CMB"],
        "T_RJ": ["T_rj", "T_RJ", "s_nu", "K_RJ", "K_rj"],
        "I": ["I", "I_nu", "MJy/sr"],
    }
    try:
        unit_synonym = [key for key, value in unit_dict.items() if unit_str in value][0]
    except IndexError:
        pprint.pprint("Use a valid keyword from:")
        pprint.pprint(unit_dict)
        raise
    return unit_synonym
6b3d69cad8c38ea0cd28e311f262795a962dc7c0
696,308
def gnn_forward(asts, model, num_passes=1, test_acc=None):
    """Forward pass for Graph Neural Network, set up so that
    pytorch fold can be used for dynamic batching
    """
    # reset
    for ast, acc in asts:
        ast.reset(model)
        ast.annotate()
    # first upward pass
    for ast, acc in asts:  # the different conj in the disj
        for leaf in acc['constraints']:  # constraint in conj
            leaf.up_first(model)
    for passes in range(num_passes):
        # downward
        for ast, acc in asts:  # the different conj in the disj
            for leaf in acc['constraints']:  # constraint in conj
                leaf.down(model)
            # update logic variables
            for lvar in acc['lvar'].values():
                lvar.actually_update(model)
            for leaf in acc['constraints']:  # constraint in conj
                leaf.up(model)
    # read out the logit
    out = []
    for ast, acc in asts:  # conj
        leaf_logit = ast.logit(model)
        out.append(leaf_logit)
    out = model.get_cat(len(out), *out)
    return out
9cf59671b46861a65efdc23c3efa3740bb33e18b
696,309
def get_top10(recommendations_list):
    """Returns the first 10 elements of a list."""
    return recommendations_list[:10]
32be743fda0a8eb3932416ef487993ec686d7bc8
696,310
import os

def my_dirname(filename):
    """A wrapper of the function: dirname."""
    d = os.path.dirname(filename)
    if d == "":
        d = "."
    return d
db83d8dfc627a166cea26723642e9ccdfa4ec516
696,311
def roi_to_matlab(rois):
    """ROIs to MATLAB. Adds one to each index (MATLAB indexing is 1-based).

    :param rois: All ROIs in experiment
    :return: New ROIs
    """
    for roi in rois:
        roi.index += 1
    return rois
5a6fe428eaea89498f62e46469c925c51e56e2b3
696,312
def read_xsc_step_number(xsc_filename):
    """Read a NAMD .xsc file to extract the latest step to use as input
    for restarts and such.
    """
    last_step = 0
    with open(xsc_filename, "r") as f:
        for line in f:
            if line.startswith("#"):
                continue
            last_step = int(line.strip().split(" ")[0])
    return last_step
115db280e4b7cf43e9b781b4f8855e0af8792b06
696,313
import time

def handle_play_like_call(func):
    """This method is used internally to wrap the passed function into a
    function that actually writes to the video stream. Simultaneously, it
    also adds to the number of animations played.

    Parameters
    ----------
    func : function
        The play() like function that has to be written to the video file stream.

    Returns
    -------
    function
        The play() like function that can now write to the video file stream.
    """
    # NOTE: This is only kept for the OpenGL renderer. The play logic of the
    # cairo renderer has been refactored and does not need this function
    # anymore. When the OpenGL renderer has a proper testing system, its play
    # logic has to be refactored in the same way the cairo renderer's was,
    # and this method has to be deleted.

    def wrapper(self, scene, *args, **kwargs):
        self.animation_start_time = time.time()
        self.file_writer.begin_animation(not self.skip_animations)
        func(self, scene, *args, **kwargs)
        self.file_writer.end_animation(not self.skip_animations)
        self.num_plays += 1

    return wrapper
e86034bffea9ca1cd3045511b20c6f30f134746e
696,314
def read_file(filename):
    """Read a file and return its lines as a list.

    Parameters:
        filename - a text file to read

    Returns:
        list: lines
    """
    with open(filename) as file:
        lines = file.read().splitlines()
    return lines
f16fa3e1923a2687f72438fa4223da0c82a0043e
696,315
def has_lower(s: str) -> bool:
    """Returns True if the string contains one or more lower case
    characters, False otherwise.
    """
    if isinstance(s, str):
        return len(s) > 0 and not s.isupper()
    raise TypeError("invalid input - not a string")
17c442bf2339a7d39017ffbb65db00bdbd7d9e52
696,316
def _mock_authenticate_user(_, client=None):
    """Mock Pycognito authenticate user method. This code is from Pycognito's test suite."""
    return {
        "AuthenticationResult": {
            "TokenType": "admin",
            "IdToken": "dummy_token",
            "AccessToken": "dummy_token",
            "RefreshToken": "dummy_token",
        }
    }
dccbdf5138eea63c543a824de3c003efb5af6210
696,317
def iter_first(sequence):
    """Get the first element from an iterable or raise a ValueError if
    the iterator generates no values.
    """
    it = iter(sequence)
    try:
        return next(it)
    except StopIteration:
        raise ValueError()
007648dcbc903572ca33221c5884febc7e78d956
696,318
def compareRule(origFileName):
    """Applies a rule to a file name to make it comparable to other file
    names. Basically it extracts the file name part to compare with others.
    Example: tif files that only differ in one character at the end, like
    038_FJB_1904-001a.tif and 038_FJB_1904-001b.tif
    """
    # strip the literal '.tif' extension explicitly; rstrip('.tif') would
    # remove any trailing '.', 't', 'i' or 'f' characters instead
    name = origFileName.decode('utf-8')
    if name.endswith('.tif'):
        name = name[:-len('.tif')]
    compareFileName = name[:-1]
    return compareFileName
5e7bdad032d5475eb988637210f951bb97bdc0f7
696,319
import math

def multinomLog2(selectors):
    """Calculates the base-2 logarithm of a kind of multinomial coefficient.

    selectors: list of integers
    """
    ln2 = 0.69314718055994528622
    noAll = sum(selectors)
    lgNf = math.lgamma(noAll + 1.0) / ln2  # log2(N!)
    lgnFac = []
    for selector in selectors:
        if selector == 0 or selector == 1:
            lgnFac.append(0.0)
        elif selector == 2:
            lgnFac.append(1.0)
        elif selector == noAll:
            lgnFac.append(lgNf)
        else:
            lgnFac.append(math.lgamma(selector + 1.0) / ln2)
    return lgNf - sum(lgnFac)
684df63a5c371a6cb524a05643cf695d938df7f5
696,320
def plural(word, items):
    """Returns "N words" or "1 word"."""
    count = len(items) if isinstance(items, (dict, list, set, tuple)) else items
    return "%s %s%s" % (count, word, "s" if count != 1 else "")
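A quick usage sketch with hypothetical calls, showing that plural accepts either a collection or a plain count, and that the pluralization is naive:

# hypothetical example calls
print(plural("word", ["a", "b", "c"]))  # -> 3 words
print(plural("file", 1))                # -> 1 file
print(plural("entry", {}))              # -> 0 entrys (the function just appends "s")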
a40493ff2cf09dc5e033962037b544f02d9f4666
696,321
def nillable_string(func):
    """Decorator that returns None if input is None."""
    def wrapper(cls, string):
        if string is None:
            return None
        else:
            return func(cls, string)
    return wrapper
e4dc2fda61334e6ed1368dfca431bdc5b8479e6c
696,322
import re

def get_master_names(desired_master_state, name_regex):
    """Returns masters found in <desired_master_state> that match <name_regex>.

    Args:
        desired_master_state: A "desired_master_state" object, e.g. as
            returned by desired_state_parser
        name_regex: Regular expression that master names must match.

    Returns:
        [str1, str2, ...] All masters found in <desired_master_state>
    """
    # Modify regex to allow for optional "master." prefix
    name_regex = r'(master\.)?' + name_regex
    master_matcher = re.compile(name_regex)
    return [m for m in desired_master_state["master_states"].keys()
            if master_matcher.match(m)]
9343964103d1e93ff0d6de7d019c1fd206e84d3b
696,323
def merge_list_entries(list_to_merge):
    """Merge overlapping tuples in a list.

    This function takes a list of tuples containing exactly two numbers (as
    floats) with the smaller number first. It sorts them by lower bound, and
    then compares them to see if any overlap. Ultimately it returns a list of
    tuples containing the union of any tuples that overlap in range.

    Parameters
    ----------
    list_to_merge : list
        A list of tuples of floats, denoting regions on the number line.

    Returns
    -------
    list
        A list containing all the overlapping regions found in the input list.
    """
    merged = []
    sorted_by_lower_bound = sorted(list_to_merge, key=lambda tup: tup[0])
    for higher in sorted_by_lower_bound:
        if not merged:
            merged.append(higher)
        else:
            lower = merged[-1]
            if higher[0] <= lower[1]:
                upper_bound = max(lower[1], higher[1])
                merged[-1] = (lower[0], upper_bound)
            else:
                merged.append(higher)
    return merged
e507a855e7b6dc0330ac21dfe159a793d6e5cd8c
696,324
def get_organic_aerosols_keys(chem_opt):
    """Return the anthropogenic and biogenic keys."""
    asoa_keys = None
    bsoa_keys = None
    if chem_opt == 106:
        asoa_keys = ('orgaro1i', 'orgaro1j', 'orgaro2i', 'orgaro2j',
                     'orgalk1i', 'orgalk1j', 'orgole1i', 'orgole1j')  # SOA Anth
        bsoa_keys = ('orgba4i', 'orgba4j', 'orgba3i', 'orgba3j',
                     'orgba2i', 'orgba2j', 'orgba1i', 'orgba1j')  # SOA Biog
    elif chem_opt == 108 or chem_opt == 100:
        asoa_keys = 'asoa1j,asoa1i,asoa2j,asoa2i,asoa3j,asoa3i,asoa4j,asoa4i'.split(',')  # SOA Anth
        bsoa_keys = 'bsoa1j,bsoa1i,bsoa2j,bsoa2i,bsoa3j,bsoa3i,bsoa4j,bsoa4i'.split(',')  # SOA Biog
    else:
        print("PP: this chem_opt {} is not implemented, don't know how to combine organics".format(chem_opt))
    return asoa_keys, bsoa_keys
68b342adde5c0dd1de9e81de12de99c0cab40d0b
696,325
def mean_wikipedia_frequency(frequency_cache, lemmatizer, tokens):
    """Retrieves the frequency for a list of tokens and returns the mean frequency.

    :param frequency_cache: a frequency lookup table
    :param lemmatizer: a lemmatizer
    :param tokens: a sequence of tokens (strings)
    """
    freq_sum = 0
    for token in tokens:
        lemma = lemmatizer.lemmatize(token)
        # accumulate the per-lemma frequencies
        freq_sum += frequency_cache.get(lemma, 1)
    return freq_sum / len(tokens)
d92334cd99127ee60a323db39b71970ad4b1c1f2
696,326
def benefits(income, n_children, params):
    """Calculate benefits according to income, number of children and params.

    Args:
        income (pd.Series)
        n_children (pd.Series): Same length as income.
        params (pd.Series): Must contain "benefit_per_child" and "benefit_cutoff"

    Returns:
        pd.Series: The benefits.
    """
    raw_benefits = n_children * params.benefit_per_child
    benefits = raw_benefits.where(income <= params.benefit_cutoff, 0)
    return benefits
beb6f3f3a695ee4ae2b76ce7058906ca14ccebeb
696,327
def collect_first_sep(_, nodes):
    """Used for:  Elements = Elements "," Element;"""
    e1, _, e2 = nodes
    if e2 is not None:
        e1 = list(e1)
        e1.append(e2)
    return e1
378dc75f20d0e5a03c2c34c1fd02feea651e5fb7
696,328
def camelcase_to_underscores(argument):
    """Converts a camelcase param like theNewAttribute to the equivalent
    python underscore variable like the_new_attribute"""
    result = ""
    prev_char_title = True
    if not argument:
        return argument
    for index, char in enumerate(argument):
        try:
            next_char_title = argument[index + 1].istitle()
        except IndexError:
            next_char_title = True

        upper_to_lower = char.istitle() and not next_char_title
        lower_to_upper = char.istitle() and not prev_char_title

        if index and (upper_to_lower or lower_to_upper):
            # Only add underscore if char is capital, not first letter, and next
            # char is not capital
            result += "_"
        prev_char_title = char.istitle()
        if not char.isspace():  # Only add non-whitespace
            result += char.lower()
    return result
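A usage sketch with hypothetical inputs illustrating the conversion, including how a run of capitals is kept together until the case changes:

# hypothetical example calls
print(camelcase_to_underscores("theNewAttribute"))  # -> the_new_attribute
print(camelcase_to_underscores("HTTPResponse"))     # -> http_response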
d50d77cf0952c06f1d2ea003d4e6b2e534ef84f7
696,329
import os
import logging

def _find_h5_data(filename):
    """Because we have legacy data and new data re-processed for QuickNXS,
    we have to ensure that we get the proper data file.
    """
    if filename.endswith('.nxs'):
        _new_filename = filename.replace('_histo.nxs', '.nxs.h5')
        _new_filename = _new_filename.replace('_event.nxs', '.nxs.h5')
        _new_filename = _new_filename.replace('data', 'nexus')
        if os.path.isfile(_new_filename):
            logging.warning("Using %s" % _new_filename)
            return _new_filename
    return filename
44743a00eed95a33372c9122b4a20d9507da2e2a
696,330
def pfreduce(func, iterable, initial=None):
    """A pointfree reduce / left fold function: Applies a function of two
    arguments cumulatively to the items supplied by the given iterable, so as
    to reduce the iterable to a single value. If an initial value is supplied,
    it is placed before the items from the iterable in the calculation, and
    serves as the default when the iterable is empty.

    :param func: A function of two arguments
    :param iterable: An iterable yielding input for the function
    :param initial: An optional initial input for the function
    :rtype: Single value

    Example::

        >>> from operator import add
        >>> sum_of_squares = pfreduce(add, initial=0) * pfmap(lambda n: n**2)
        >>> sum_of_squares([3, 4, 5, 6])
        86
    """
    iterator = iter(iterable)
    try:
        first_item = next(iterator)
        # compare against None explicitly so falsy initials such as 0 still count
        if initial is not None:
            value = func(initial, first_item)
        else:
            value = first_item
    except StopIteration:
        return initial
    for item in iterator:
        value = func(value, item)
    return value
621b48d894c2c510a713f6948e623c791cd429f5
696,331
def get_volumes(volumes, **kwargs):
    """Returns a list of volumes.

    Arguments:
    :param volumes: a list of volumes that needs to be filtered.

    Keyword arguments:
    :param vm_login_id: owning user's VM login ID.
    :param email: owning user's email address.
    :param group_id: owning group's group ID.
    :param budget_id: budget ID.
    :param size: minimum size of the volume.
    :returns: a list of filtered volumes.
    :rtype: list
    """
    filtered_volumes = volumes
    if 'vm_login_id' in kwargs and kwargs['vm_login_id'] is not None:
        filtered_volumes = [volume for volume in volumes
                            if hasattr(volume, 'owning_user')
                            and 'vm_login_id' in volume.owning_user
                            and volume.owning_user['vm_login_id'] == kwargs['vm_login_id']]
    if 'email' in kwargs and kwargs['email'] is not None:
        if filtered_volumes is not None:
            volumes = filtered_volumes
        filtered_volumes = [volume for volume in volumes
                            if hasattr(volume, 'owning_user')
                            and 'email' in volume.owning_user
                            and volume.owning_user['email'] == kwargs['email']]
    if 'group_id' in kwargs and kwargs['group_id'] is not None:
        if filtered_volumes is not None:
            volumes = filtered_volumes
        filtered_volumes = [volume for volume in volumes
                            if hasattr(volume, 'owning_groups')
                            for group in volume.owning_groups
                            if group['group_id'] == int(kwargs['group_id'])]
    if 'budget_id' in kwargs and kwargs['budget_id'] is not None:
        if filtered_volumes is not None:
            volumes = filtered_volumes
        filtered_volumes = [volume for volume in volumes
                            if hasattr(volume, 'budget')
                            and volume.budget == int(kwargs['budget_id'])]
    if 'size' in kwargs and kwargs['size'] is not None:
        if filtered_volumes is not None:
            volumes = filtered_volumes
        filtered_volumes = [volume for volume in volumes
                            if volume.size_in_gb >= int(kwargs['size'])]
    return filtered_volumes
0321bac0b0f1da902c4e25567bf7555ab7e47fb3
696,332
import torch

def micro_f1(prediction, labels, class_num):
    """Micro-averaged F1 over all classes."""
    _, predicted = torch.max(prediction, 1)
    tps = []
    fps = []
    fns = []
    for i in range(class_num):
        pred = (predicted == i).squeeze()
        gt = (labels == i).squeeze()
        tp = torch.sum((pred & gt).squeeze()).item()
        fp = torch.sum(pred).item() - tp
        fn = torch.sum(gt).item() - tp
        tps.append(tp)
        fps.append(fp)
        fns.append(fn)
    mic_precision = sum(tps) / (sum(tps) + sum(fps))
    mic_recall = sum(tps) / (sum(tps) + sum(fns))
    micro_f1 = 2 * (mic_precision * mic_recall) / (mic_precision + mic_recall)
    return micro_f1
65d2c98c7a268965d791e4ab9158945e75f35da9
696,333
from collections import Counter

def get_most_common(exercises, n=3):
    """Get the n most common sports."""
    exes = [e.sport for e in exercises]
    cnt = Counter()
    for e in exes:
        cnt[e] += 1
    commons = cnt.most_common(n)
    commons_array = [co[0] for co in commons]
    return commons_array
204684d2d284cc902b5e64b55757d486af71a8ad
696,335
def get_substrings(text, substrings=None, index=0, next_sub=""):
    """Returns list of all unique substrings (letters only) in text."""
    # use None as the default to avoid a mutable default argument that
    # would be shared across calls
    if substrings is None:
        substrings = []
    # Base case: return all substrings
    if index == len(text):
        return substrings
    else:  # index < len(text)
        next_char = text[index]
        # add on to the next substring being added
        if next_char not in next_sub:
            next_sub += next_char
        # repeat letter found
        else:
            next_sub = ""
        # move on in traversing text
        index += 1
        substrings.append(next_sub)
        return get_substrings(text, substrings, index, next_sub)
48882b7c0b64a62d8e95889c00963b731f73cc9e
696,336
def colorize(shape, fill, stroke, strokeWidth):
    """Change the color of the input shape."""
    if shape is None:
        return None
    new_shape = shape.clone()
    new_shape.fillColor = fill
    if strokeWidth > 0:
        new_shape.strokeColor = stroke
        new_shape.strokeWidth = strokeWidth
    else:
        new_shape.strokeColor = None
    return new_shape
1c5344e9c7f8ca3e623fcf326be44207283591f4
696,338
import importlib

def _decoder_object_hook(data):
    """Helper function called by JSON decoder to reverse `_encoder_default`.

    This function is typically supplied as the `object_hook` argument to a
    JSON decoder in order to reconstruct object instances of customized
    classes. More details are outlined in the module-level docstring.
    """
    try:
        module = importlib.import_module(data["__module__"])
        cls = getattr(module, data["__class__"])
        s_args = data.get("__args__", ())
        s_kwargs = data.get("__kwargs__", {})
        s_dict = data.get("__dict__", {})
    except Exception:
        # fall back to returning the raw data if reconstruction fails
        return data
    else:
        o = cls(*s_args, **s_kwargs)
        o.__dict__.update(s_dict)
        return o
217c48e63e7cfadb2221d1c42ed2f6b1e4a4b675
696,339
def __validateInputForGrid(request, isConcernAlternativeResponseGrid):
    """Validate the input that will be used to create a grid for the user.

    Arguments:
        isConcernAlternativeResponseGrid: boolean
        request: HttpRequest object; this argument is needed as the concerns,
            alternatives, ratings and weights will be contained in the
            HttpRequest object.

    Return:
        Type: tuple
        Information: The tuple contains 5 positions. Position zero is where
        the number of concerns is located, position one contains the number of
        alternatives, position two contains the array of tuples containing the
        concern data, position three contains the array of alternative names
        and position four contains the array of values. The array of tuples
        used to store concern data has 3 positions. Position zero contains the
        left pole name of the concern, position one contains the name of the
        right pole and position two contains the weight of the concern.
    """
    concernValues = []  # will contain tuples with 3 values: (leftPole, rightPole, weight)
    alternativeValues = []
    ratioValues = []
    usedConcernNames = []
    i = 0
    j = 0
    nAlternatives = int(request.POST['nAlternatives'])
    nConcerns = int(request.POST['nConcerns'])

    # check if the keys with the alternatives are present
    while i < nAlternatives:
        keyName = 'alternative_' + str(i + 1) + '_name'
        if keyName not in request.POST:
            raise KeyError('Invalid request, request is missing argument(s)',
                           'Error key not found: ' + keyName)
        else:
            # alternative names should be unique in a grid, so check for that
            temp = request.POST[keyName].strip()
            if temp != '':
                if temp not in alternativeValues:
                    alternativeValues.append(temp)
                else:
                    raise ValueError("The name " + request.POST[keyName] +
                                     " is being used more than one time")
            else:
                raise ValueError("No empty values are allowed for alternatives")
        i += 1

    i = 0
    # check if all the keys for the left and right pole are present
    while i < nConcerns:
        leftPole = None
        rightPole = None
        # check the left pole first
        keyName = 'concern_' + str(i + 1) + '_left'
        if keyName not in request.POST:
            raise KeyError('Invalid request, request is missing argument(s)',
                           'Error key not found: ' + keyName)
        else:
            # the right and left pole can be None, so convert the empty string into None
            leftPole = request.POST[keyName]
            if leftPole == '':
                leftPole = None
            # the names of the left and right pole should be unique in a grid.
            # If the left pole is None, allow it to be saved
            if leftPole not in usedConcernNames or leftPole is None:
                usedConcernNames.append(leftPole)
            else:
                raise ValueError("The name " + request.POST[keyName] +
                                 " is being used more than one time")

        # check the right pole
        keyName = 'concern_' + str(i + 1) + '_right'
        if keyName not in request.POST:
            raise KeyError('Invalid request, request is missing argument(s)',
                           'Error key not found: ' + keyName)
        else:
            # the right and left pole can be None, so convert the empty string into None
            rightPole = request.POST[keyName].strip()
            if rightPole == '':
                rightPole = None
            # the names of the left and right pole should be unique in a grid.
            # If the right pole is None, allow it to be saved
            if rightPole not in usedConcernNames or rightPole is None:
                usedConcernNames.append(rightPole)
            else:
                raise ValueError("The name " + request.POST[keyName] +
                                 " is being used more than one time")

        # if it is a response grid of the alternative/concern we don't need to
        # check for the weights as they will not be there
        if not isConcernAlternativeResponseGrid:
            # check if the weight key is present
            keyName = 'weight_concern' + str(i + 1)
            if keyName not in request.POST:
                raise KeyError('Invalid request, request is missing argument(s)',
                               'Error key not found: ' + keyName)
            else:
                # allowed values are None, '', ' ' and numbers
                keyValue = request.POST[keyName]
                if not (keyValue is None or keyValue == ' ' or keyValue == ''):
                    try:
                        value = float(keyValue)
                        concernValues.append((leftPole, rightPole, value))
                    except ValueError:
                        raise ValueError("Invalid input " + keyValue)
                else:
                    raise KeyError('Invalid request, request is missing argument(s)',
                                   'Error key not found: ' + keyName)
        else:
            concernValues.append((leftPole, rightPole, None))
        i += 1

    # check the ratios now; the response grid for the alternative/concern
    # doesn't have ratios, so we don't need to check for them there
    if not isConcernAlternativeResponseGrid:
        i = 0
        j = 0
        hasEmptyConcern = False
        while i < nConcerns:
            ratios = []
            # it is not allowed to have ratios in a concern that has no leftPole or rightPole
            if concernValues[i][0] is not None and concernValues[i][1] is not None:
                hasEmptyConcern = False
            else:
                hasEmptyConcern = True
            while j < nAlternatives:
                keyName = 'ratio_concer' + str(i + 1) + '_alternative' + str(j + 1)
                if keyName not in request.POST:
                    raise KeyError('Invalid request, request is missing argument(s)',
                                   'Error key not found: ' + keyName)
                else:
                    keyValue = request.POST[keyName].strip()
                    # valid values are None, ' ', '' and numbers; anything else is not allowed
                    if not (keyValue is None or keyValue == ''):
                        if hasEmptyConcern:
                            raise ValueError('It is not allowed to have ratings while the concern is empty')
                        else:
                            try:
                                value = float(keyValue)
                                ratios.append(value)
                            except ValueError:
                                raise ValueError("Invalid value: " + keyValue)
                    else:
                        raise KeyError('Invalid request, request is missing argument(s)',
                                       'Error rating not found: ' + keyName)
                j += 1
            ratioValues.append(ratios)
            j = 0
            i += 1

    return nConcerns, nAlternatives, concernValues, alternativeValues, ratioValues
c0831b53e5f6fb02e967dbf1f11561a40aed4bfe
696,340
def available_colors(G, vertex, number_of_colors):
    """Returns all the available colors for vertex.

    Parameters:
        G: a networkx graph with Graph Nodes
        vertex: the vertex number (int)
        number_of_colors: the number of colors (int)

    Returns:
        colors: list of available colors (list)
    """
    colors = [x for x in range(0, number_of_colors)]
    for neighbor in G.neighbors(vertex):
        try:
            index = colors.index(G.nodes[neighbor]['node'].color)
            colors.pop(index)
        except Exception:
            pass
    return colors
b19dfe9516eb7a74d259a3d69b868e78fe56d3e9
696,341
def parseTextFile(file_name, delimiter=",", header=0):
    """Parse a text file to a list. The file contents are delimited
    and have a header.

    :param file_name: The path to the file
    :type file_name: str
    :param delimiter: The delimiter to use to parse the file
    :type delimiter: str
    :param header: The number of lines at the top of the file to ignore
    :type header: int
    :return: Text file parsed into a list
    :rtype: list
    """
    with open(file_name) as f:
        # Skip the header
        for i in range(header):
            next(f)
        data = []
        # Parse file contents
        for line in f:
            # Remove the newline char
            line = line.replace("\n", "").replace("\r", "")
            # Split the line by the delimiter
            line = line.split(delimiter)
            # Strip whitespaces from individual entries in the line
            for i, entry in enumerate(line):
                line[i] = entry.strip()
            # Add the contents of the line to the data list
            data.append(line)
    return data
fe711396e13f2dd6a7bb688b570f59d3a23a850a
696,342
def _cast_types(args):
    """Performs casting on all types of inputs passed via cmd.

    :param args: argparse.ArgumentParser object.
    :return: argparse.ArgumentParser object.
    """
    args.x_val = None if args.x_val == 'None' else int(args.x_val)
    args.test_size = float(args.test_size)
    args.C = float(args.C)
    # kernel: remains a string
    args.degree = int(args.degree)
    # gamma: remains a string
    args.coef0 = float(args.coef0)
    args.shrinking = (args.shrinking in ['True', 'true'])
    args.probability = (args.probability in ['True', 'true'])
    args.tol = float(args.tol)
    args.cache_size = float(args.cache_size)
    # class_weight
    if args.class_weight == 'None':
        args.class_weight = None
    args.verbose = (args.verbose in ['True', 'true'])
    args.max_iter = int(args.max_iter)
    # decision_function_shape: remains a string
    args.random_state = None if args.random_state == 'None' else int(args.random_state)
    return args
92518cb5abb951c9d26ac3222ad047cf02e411b1
696,343
def is_m_to_n_pandigital(num, bound_m, bound_n):
    """Determine if a number is m-to-n pandigital."""
    digit_count = dict()
    list_form = list(str(num))
    for _digit in list_form:
        # return early if any digit shows up more than once
        if _digit in digit_count:
            return False
        digit_count[_digit] = 1
    target_count = dict()
    for _d in range(bound_m, bound_n + 1):
        target_count[str(_d)] = 1
    # compare the two dicts
    return digit_count == target_count
ab0fb7b1e8369ea7118408dac108c87d17b07eef
696,345
def findpeak(x, thresh, diff=10, bundle=20):
    """Find peaks in vector x above the input threshold and attempt to
    associate a fiber index with each, depending on spacing.
    """
    j = []
    fiber = []
    f = 0
    for i in range(len(x)):
        if 0 < i < len(x) - 1 and x[i] > x[i - 1] and x[i] > x[i + 1] and x[i] > thresh:
            j.append(i)
            fiber.append(f)
            # print(i, f)
            if len(j) > 1 and j[-1] - j[-2] > diff and f % bundle != 0:
                print(j[-1], j[-2], f)
                f = f + 1
            f = f + 1
    return j, fiber
57a6ffdbb32f2cb2e7c615512c7cc1056e810969
696,346
def log_user(func):
    """Simple decorator for a function with one argument."""
    def wrap(user):
        """Wrapper function that logs the user that is logged in."""
        print("## User - {0} ##".format(user))
        return func(user)
    return wrap
bb224e5b8eb5fbd1d3a6aec55a1a097a200dfeac
696,347
def hours_mins_2_mins(time):
    """Converts a time consisting of hours & minutes to minutes.

    Parameters
    ------------
    time : str
        Time, in hours & minutes (e.g. "0230"), to be converted to minutes

    Returns
    ------------
    mins_tot : int
        Time converted from hours:minutes to minutes
    """
    if not isinstance(time, str):
        print("Error: Time must be of type str")
        return -1
    hrs = int(time[:2])
    mins = int(time[2:])
    mins_tot = (hrs * 60) + mins
    return mins_tot
acbbbdea7617f2db5390e01436127bb8c423c634
696,348
import os
import subprocess
import json

def kaldi_ali(task_id, audio_path):
    """Use Kaldi to produce word-level time alignments.

    :param task_id:
    :param audio_path:
    :return: word-level alignment from the CTM output
    """
    print(task_id)
    os.chdir('/root/tools/kaldi/egs/aidatatang_200zh/s5')
    cmd = "python align/run.py chinese {} ".format(audio_path)
    status, ret = subprocess.getstatusoutput(cmd)
    print(status)
    ret = ret.split('\n')[-1]
    ret = json.loads(ret)
    word_ali = ret['ctm']
    return word_ali
e68e3c6a52af33f1b34290446862243020fa88b5
696,349
def next_multiple(x: int, k: int = 512) -> int:
    """Calculate x's closest higher multiple of base k."""
    if x % k:
        x = x + (k - x % k)
    return x
fbf8cf548851d0c57867292f9ddcfc33de9b03c0
696,350
def convert_to_DNA(sequence):
    """Converts RNA to DNA."""
    sequence = str(sequence)
    sequence = sequence.upper()
    return sequence.replace('U', 'T')
2a69a3102df8f5a16b2b049fb1d80dae720b10e3
696,351
def fit(model, data_bunch, **kwargs):
    """Fits an H2O Model.

    :param model: An H2O Model
    :param data_bunch: A DataBunch with "train" and "valid" datasets that
        consist of H2ODataWrapper or H2OSparklingDataWrapper
    :param kwargs: Keyword arguments to be passed to model.train
    :return: A (fitted) H2O Model
    """
    train_df = data_bunch.train.full_data.underlying
    valid_df = data_bunch.valid.full_data.underlying
    features_list = data_bunch.train.features.field_names
    targets_list = data_bunch.valid.targets.field_names[0]
    model.train(x=features_list,
                y=targets_list,
                training_frame=train_df,
                validation_frame=valid_df,
                **kwargs)
    return model
f9d0dd6835f145d00b7da6a99acca9ccc90653a8
696,352
def lib2vocab(libels, vocab, padding_idx=0, length=20):
    """Transforms a list of strings to lists of integers using the vocab dict.

    @param libels (lst of str): list of libelle strings
    @param vocab (dict): output of vocabtoidx func
    """
    libelsidx = []
    for lib in libels:
        sized_lib = lib.split()[0:length]
        sized_lib += [padding_idx] * (length - len(sized_lib))
        libelsidx.append([vocab[word] if word in vocab else padding_idx
                          for word in sized_lib])
    return libelsidx
c38f701f5d3a8675a8b7e46b2dfc76195be72825
696,353
import yaml

def pretty_yaml(value, file_=None):
    """Dump an object to a YAML string (or to an open file object).

    :param value: object to dump
    :param file_: Open, writable file object
    :return: str (YAML)
    """
    return yaml.dump(value, stream=file_, indent=2, allow_unicode=True,
                     default_flow_style=False)
6c59ac3b34a0e4fdd8878074298787d30a8404ff
696,354
import functools
import operator

def get_dict_element(data, path, delimiter='.'):
    """Traverse a dict using a 'delimiter'-separated path string.

    operator.getitem(a, b) returns the value of a at index b.
    """
    return functools.reduce(operator.getitem, path.split(delimiter), data)
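A short usage sketch with a hypothetical nested dict, showing the dotted-path traversal:

# hypothetical example
config = {"db": {"host": "localhost", "port": 5432}}
print(get_dict_element(config, "db.port"))  # -> 5432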
2699c6c514f894a9d38e92de982efb9c27ddfa46
696,355
def ass_vali(text):
    """Validate whether the text is legal ASS subtitle content.

    :param text: text content
    :return: True|False
    """
    return "V4+ Styles" in text
925744ed632e3ae1fa35f475682e2d34266ad544
696,356
def get_c_and_cpp_testcasesupport_dir():
    """Used to get the path to the C/C++ test case support directory."""
    return "..\\..\\testcasesupport"
ce8c4f48612a0cbcc33305f471f4e9f3de78c541
696,357
def solution(socks):
    """Return an integer representing the number of pairs of matching socks."""
    # build a histogram of the socks
    sock_colors = {}
    for sock in socks:
        if sock not in sock_colors:
            sock_colors[sock] = 1
        else:
            sock_colors[sock] += 1
    # count the amount of pairs
    pairs = 0
    for count in sock_colors.values():
        pairs += count // 2
    return pairs
1e64be91018ee633f2637ff768346f9bc6f39603
696,358
import click

def validate_slashes(param, value, minimum=2, maximum=None, form=None,
                     allow_blank=False):
    """Ensure that parameter has slashes and minimum parts."""
    try:
        value = value.split("/")
    except ValueError:
        value = None
    if value:
        if len(value) < minimum:
            value = None
        elif maximum and len(value) > maximum:
            value = None
    if not value:
        form = form or "/".join("VALUE" for _ in range(minimum))
        raise click.BadParameter(
            "Must be in the form of %(form)s" % {"form": form}, param=param
        )
    value = [v.strip() for v in value]
    if not allow_blank and not all(value):
        raise click.BadParameter("Individual values cannot be blank", param=param)
    return value
f3e317b2d497dfe01b5cb7fed3b522fe9733a666
696,359
def multi_valued(annotations):
    """Return set of multi-valued fields."""
    return {name for name, tp in annotations.items()
            if getattr(tp, '__origin__', tp) is list}
09d2b61b96e3bc56758a27375c1e0616995d8554
696,360
import re

def get_cxx_close_block_regex(semicolon=False, comment=None, at_line_start=False):
###############################################################################
    """
    >>> bool(get_cxx_close_block_regex().match("}"))
    True
    >>> bool(get_cxx_close_block_regex(at_line_start=True).match("}"))
    True
    >>> bool(get_cxx_close_block_regex().match(" } "))
    True
    >>> bool(get_cxx_close_block_regex(at_line_start=True).match(" }; "))
    False
    >>> bool(get_cxx_close_block_regex(at_line_start=True).match(" } "))
    False
    >>> bool(get_cxx_close_block_regex(comment="hi").match(" } "))
    False
    >>> bool(get_cxx_close_block_regex(comment="hi").match(" } // hi"))
    True
    >>> bool(get_cxx_close_block_regex(comment="hi").match("} // hi "))
    True
    >>> bool(get_cxx_close_block_regex(semicolon=True).match(" } "))
    False
    >>> bool(get_cxx_close_block_regex(semicolon=True).match(" } ; "))
    True
    >>> bool(get_cxx_close_block_regex(semicolon=True).match("};"))
    True
    >>> bool(get_cxx_close_block_regex(semicolon=True, comment="hi", at_line_start=True).match("}; // hi"))
    True
    >>> bool(get_cxx_close_block_regex(semicolon=True, comment="hi", at_line_start=True).match("}; // hi there"))
    False
    """
    semicolon_regex_str = r"\s*;" if semicolon else ""
    line_start_regex_str = "" if at_line_start else r"\s*"
    comment_regex_str = r"\s*//\s*{}".format(comment) if comment else ""
    # build the full pattern string, then compile it once
    close_block_regex_str = r"^{}}}{}{}\s*$".format(
        line_start_regex_str, semicolon_regex_str, comment_regex_str)
    return re.compile(close_block_regex_str)
c62cd254aaa329358a33ba96fc8d23004c950d85
696,361
def urlpath(*parts):
    """Join URL path parts; there is no real equivalent in the stdlib."""
    return '/'.join(s.strip('/') for s in parts)
1900c6565364bec0e653f4a7026ab7eff061ff79
696,362
def init_make_parser(subparsers):
    """make testcases: parse command line options and run commands."""
    parser = subparsers.add_parser(
        "make", help="Convert YAML/JSON testcases to pytest cases.",
    )
    parser.add_argument(
        "testcase_path", nargs="*",
        help="Specify YAML/JSON testcase file/folder path"
    )
    return parser
64fd66ef63ef75121d3f001ddc956b77f6200b06
696,364
def get_forward_fill_targets(node):
    """All columns with at least one missing value that do not have the
    first element missing.
    """
    # NOTE: This description is incorrect and in contrast with the slides.
    # Forward filling can certainly be done even if the first element is
    # empty, but this leads to erroneous results in the tree search, so we
    # skip it. Moreover, we add the restriction that it can't be done if
    # *all* elements are empty or if the last element is the only non-empty
    # one.
    if node.table.is_empty:
        return []
    colset = []
    for c in range(node.table.n_col):
        col = node.table.get_col(c)
        if all((c.is_empty for c in col)):
            continue
        if all((c.is_empty for c in col[:-1])) and not col[-1].is_empty:
            continue
        if col[0].is_empty:
            continue
        if any((c.is_empty for c in col)):
            colset.append(c)
    return colset
40248e41acf935cf151183ee4e80d45101f0a837
696,365
def encode_truncate(text, limit, encoding='utf8', return_encoded=True):
    """Given a string, return a truncated version of the string such that
    the UTF8 encoding of the string is smaller than the given limit.

    This function correctly truncates even in the presence of Unicode code
    points that encode to multi-byte encodings which must not be truncated
    in the middle.

    :param text: The (Unicode) string to truncate.
    :type text: str
    :param limit: The number of bytes to limit the UTF8 encoding to.
    :type limit: int
    :param encoding: Truncate the string in this encoding (default is ``utf-8``).
    :type encoding: str
    :param return_encoded: If ``True``, return the string encoded into bytes
        according to the specified encoding, else return the string as a string.
    :type return_encoded: bool

    :returns: The truncated string.
    :rtype: str or bytes
    """
    assert(text is None or type(text) == str)
    assert(type(limit) == int)
    assert(limit >= 0)

    if text is None:
        return

    # encode the given string in the specified encoding
    s = text.encode(encoding)

    # when the resulting byte string is longer than the given limit ..
    if len(s) > limit:
        # .. truncate, and
        s = s[:limit]
        # decode back, ignoring errors that result from truncation
        # in the middle of multi-byte encodings
        text = s.decode(encoding, 'ignore')
        if return_encoded:
            s = text.encode(encoding)

    if return_encoded:
        return s
    else:
        return text
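A usage sketch with a hypothetical input, showing that truncation never splits a multi-byte code point ('é' is two bytes in UTF-8):

# hypothetical example
print(encode_truncate("héllo", 2))                        # -> b'h' (the split-up 'é' is dropped)
print(encode_truncate("héllo", 3, return_encoded=False))  # -> 'hé'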
e73bc332e16f932e609ff40299366e146947c430
696,366
def sort_function_weight(listmodel, iter1, iter2, data):
    """Sorts the weight column correctly, i.e. interprets the weight data
    as floats instead of strings.
    """
    weight1 = float(listmodel.get_value(iter1, 2))
    weight2 = float(listmodel.get_value(iter2, 2))
    return int(100 * (weight2 - weight1))
40e4a5f0f06603d301e795c6c33dea2406d75025
696,367
import itertools
from collections import Counter

def unigram_counts(sequences):
    """Return a dictionary keyed to each unique value in the input sequence
    list that counts the number of occurrences of the value in the sequences
    list. The sequences collection should be a 2-dimensional array. For
    example, if the tag NOUN appears 275558 times over all the input
    sequences, then you should return a dictionary such that
    your_unigram_counts[NOUN] == 275558.

    output format: out = {tag: occurrence}
    """
    sequences = itertools.chain.from_iterable(sequences)

    # Counting the occurrences
    cnt = Counter()
    for x in sequences:
        cnt[x] += 1

    # Converting the Counter to a list of (sequence, occurrence) pairs, like ('NOUN', 1)
    word_count = list(cnt.items())
    print("Word count: ", word_count, "\n")

    # Sum of the occurrences (for normalization)
    sum_corpus = 0
    for i in word_count:
        sum_corpus += i[1]
    print("Total number of tokens: ", sum_corpus, "\n")

    dictionary = dict()
    for index, i in enumerate(word_count):
        dictionary[i[0]] = i[1]  # divide by sum_corpus here to normalize
    return dictionary
bf1a42bc8204b7dc48c19a671c0acfbedd338b25
696,369
import string
import re

def stemmer(word):
    """Split a word into its leading consonants and the rest.

    Example: 'CHAIR' -> ('ch', 'air')
             'apple' -> ('', 'apple')
    """
    consonants = ''.join(filter(lambda c: c not in 'aeiou', string.ascii_lowercase))
    regex = f'([{consonants}]+)?([aeiou])(.+)'
    match = re.match(regex, word.lower())
    if match:
        p1 = match.group(1) or ''
        p2 = match.group(2)
        p3 = match.group(3)
        return p1, p2 + p3
    else:
        return word.lower(), ''
dbb3d377ad19c7781976030f9c8e41ed62ae3178
696,370
import torch

def to_cwh_form(boxes):
    """Convert corner-form boxes to center-width-height form.

    :param boxes: (n, 4) tensor, (xmin, ymin, xmax, ymax) form.
    :return: (n, 4) tensor, (cx, cy, w, h) form
    """
    cx = (boxes[:, 0] + boxes[:, 2]) / 2
    cy = (boxes[:, 1] + boxes[:, 3]) / 2
    w = boxes[:, 2] - boxes[:, 0] + 1
    h = boxes[:, 3] - boxes[:, 1] + 1
    return torch.stack([cx, cy, w, h], 1)
df7f09b848bf2c767b96708add03857245422822
696,371
def default_metric_cmp_fn(current_metric: float, prev_best: float) -> bool:
    """The default function to compare metric values between the current
    metric and the previous best metric.

    Args:
        current_metric: metric value of current round computation.
        prev_best: the best metric value of previous rounds to compare with.
    """
    return current_metric > prev_best
e99b346e3196ba9987b490c220ef43817ab0ce1f
696,372
def find_idx_scalar(scalar, value):
    """Retrieve indexes of the value in the nested list scalar.

    The index of the sublist corresponds to a K value.
    The index of the element corresponds to a kappa value.

    Parameters:
        scalar -- list, size 2^n x 2^n
        value -- float

    Return:
        indexes -- list of integer
    """
    # Give sublist index and element index in scalar if element == value
    indexes = [[sublist_idx, elem_idx]
               for sublist_idx, sublist in enumerate(scalar)
               for elem_idx, elem in enumerate(sublist)
               if elem == value]
    return indexes
90032e41eb84084bf9f4b7e02c38ac950ade3f33
696,373
import struct
import socket

def int2ip(n):
    """Convert a 32-bit integer to a dotted-quad IP string."""
    packet_ip = struct.pack("!I", n)
    return socket.inet_ntoa(packet_ip)
c35cc6a64b57f4879f9580c77abbb05686276824
696,374
import codecs

def convert_r_hash_hex_bytes(r_hash_hex_bytes):
    r"""Convert raw hash bytes to a hex string.

    >>> convert_r_hash_hex_bytes(b'\xf9\xe3(\xf5\x84\xdad\x88\xe4%\xa7\x1c\x95\xbe\x8baJ\x1c\xc1\xad*\xed\xc8\x158\x13\xdf\xffF\x9c\x95\x84')
    'f9e328f584da6488e425a71c95be8b614a1cc1ad2aedc8153813dfff469c9584'
    """
    r_hash_hex_bytes = codecs.encode(r_hash_hex_bytes, 'hex')
    return r_hash_hex_bytes.decode()
303165e648285407f1159a382d810e8ebdab6370
696,375
from decimal import Decimal
from math import floor

def get_pow1000(num):
    """Determine the exponent for which the significand of a number is
    within the range [1, 1000).
    """
    # Based on algorithm from http://www.mail-archive.com/
    # matplotlib-users@lists.sourceforge.net/msg14433.html, accessed 2010/11/7
    # by Jason Heeris 2009/11/18
    dnum = Decimal(str(num))
    if dnum == 0:
        return 0
    elif dnum < 0:
        dnum = -dnum
    return int(floor(dnum.log10() / 3))
f809994c5023196f7124a3f46bc494c4ba0f654a
696,376
def interpolate_data(data):
    """Interpolates the data in order to have a daily array of data.

    Args:
        data ([type]): [description]

    Returns:
        [type]: [description]
    """
    # Work in progress
    aux = data
    return aux
2fdee4eeba958b0b2570b1df5df0b18c8175bb3c
696,377
def remove_spades(hand):
    """Returns a hand with the Spades removed."""
    spadeless_hand = hand[:]
    for card in hand:
        if "Spades" in card:
            spadeless_hand.remove(card)
    return spadeless_hand
3b273ddd6c5011c6bb4a5a40ff0d2fc426b40dc5
696,378
import subprocess

def is_asciidoctor_installed():
    """Checks whether asciidoctor is installed by looking it up on PATH;
    returns the path to the executable (empty string if not found).
    """
    # cmd = "gem list asciidoctor -i"
    cmd = "which asciidoctor"
    process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
    output, error = process.communicate()
    # return path to asciidoctor
    return output.decode("utf-8").strip()
d65e1cbef0c58bd14c816a8135003cfe4a7643a7
696,379
def escape_string(s):
    """Escape backslashes and double quotes.

    Logic taken from the official rcon client. There are probably plenty of
    nicer and more bulletproof approaches.
    """
    st = ""
    for ch in s:
        if ch == '"':
            st += '\\"'
        elif ch == '\\':
            st += '\\\\'
        else:
            st += ch
    return st
fafd62a3e79b4638c50a933f3e81101700a8b915
696,380
import functools

def deco_cache():
    """Enable caching for a method or function.

    Put after a possible static/class method decorator.
    Can change to functools.cache when 3.8 support is dropped.
    """
    return functools.lru_cache()
0c375bf314cc11c2e060b4fbcff84a8d67c8d2b3
696,381
def _quadratic_bezier(y_points, t):
    """Makes a single quadratic Bezier curve weighted by y-values.

    Parameters
    ----------
    y_points : Container
        A container of the three y-values that define the y-values of the
        Bezier curve.
    t : numpy.ndarray, shape (N,)
        The array of values between 0 and 1 defining the Bezier curve.

    Returns
    -------
    output : numpy.ndarray, shape (N,)
        The Bezier curve for `t`, using the three points in `y_points` as
        weights in order to shift the curve to match the desired y-values.

    References
    ----------
    https://pomax.github.io/bezierinfo (if the link is dead, the GitHub repo
    for the website is https://github.com/Pomax/BezierInfo-2).
    """
    one_minus_t = 1 - t
    output = (
        y_points[0] * one_minus_t**2
        + y_points[1] * 2 * one_minus_t * t
        + y_points[2] * t**2
    )
    return output
7c5cf27ce2fadb0843039729dc0f01473dfa946c
696,382
import struct

def bytes_to_long(byte_stream):
    """Convert an 8-byte big-endian byte string to an integer."""
    return struct.unpack(">Q", byte_stream)[0]
f5b7ec07b44b4c218a02c9cb9367e38fef311d30
696,383
def cast_tensor_type(tens, dtype):
    """Cast a tensor to the given dtype.

    tens: pytorch tensor
    dtype: string, e.g. 'float', 'int', 'byte'
    """
    if dtype is not None:
        assert hasattr(tens, dtype)
        return getattr(tens, dtype)()
    else:
        return tens
378154acebad9ff080090b6dfad803c03c9ea11b
696,384
def topo_param_string(p, exclude=['print_level', 'name'], sep=", "):
    """Formats global parameters nicely for use in version control (for example)."""
    strings = ['%s=%s' % (name, val) for (name, val) in p.get_param_values()
               if name not in exclude]
    return sep.join(strings)
d05220ce36d8ba7a22d94a0daef164d74b4d4a87
696,385
def change_name_of_compressed_op(x: str):
    """Splits the op name and appends 'kernel:0' to it.

    :param x: Name of op
    :return:
    """
    return x.split('/')[0] + '/kernel' + ':0'
6829a49458b6308e06f67021f561295f2b05bad2
696,386
def parse_source_to_dict(source: str) -> str:
    """Extract the package_dir dict literal from a source string.

    :param source:
    :return:
    """
    line = source.replace("\n", "")
    line = line.split("package_dir")[1]
    fixed = ""
    for char in line:
        fixed += char
        if char == "}":
            break
    line = fixed
    simplified_line = line.strip(" ,").replace("'", '"')
    parts = simplified_line.split("=")
    dict_src = parts[1].strip(" \t")
    return dict_src
135c230d092af9dade2d87e07a8d6cfb64acf1b1
696,387
def uprint(text):
    """Underlined <text>."""
    return "\033[4m{0}\033[0m".format(text)
80793e26ac44c4ae3b5724685a22aa4c0a79a89b
696,388
def LimitVlanRange(self, vlanrange, range=2):
    """Limits the length of a vlan range."""
    vlan_endpoints = str(vlanrange).split("-")
    # start from the first endpoint of the input range
    vlan_startid = int(vlan_endpoints[0])
    vlan_endid = vlan_startid + (range - 1)
    return str(vlan_startid) + "-" + str(vlan_endid)
1266fdb3bbef0d2da71c102e91e912215454c4a9
696,389
def file_option(file):
    """Map a selection keyword to its prices file."""
    switcher = {
        'crypto': 'prices_crypto.txt',
        'sp500': 'prices_snp500.txt',
        'yahoo_fin': 'prices_yahoo.txt',
    }
    return switcher.get(file, "Invalid Selection")
3f0cb70b41de0e63efefd5dd7623a2983d1f8fcd
696,390
from typing import Dict

def merge_two_dicts(x: Dict, y: Dict) -> Dict:
    """Given two dicts, merge them into a new dict as a shallow copy, e.g.

    .. code-block:: python

        z = merge_two_dicts(x, y)

    If you can guarantee Python 3.5, then a simpler syntax is:

    .. code-block:: python

        z = {**x, **y}

    See http://stackoverflow.com/questions/38987.
    """
    z = x.copy()
    z.update(y)
    return z
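A brief usage sketch with hypothetical dicts, showing that the second argument wins on key collisions:

# hypothetical example
a = {"x": 1, "y": 2}
b = {"y": 20, "z": 30}
print(merge_two_dicts(a, b))  # -> {'x': 1, 'y': 20, 'z': 30}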
6e4f45ffdb7e231f59b2fda1a3d0931cd1cc1a85
696,391
def wrap_code_block(message):
    """Wrap a message in a discord code block.

    :param message: The message to wrap
    :return: A message wrapped in a code block
    """
    return "```\r\n{0}\r\n```".format(message)
48cf6f8c58a6260dbd68750c12339aa578302e4d
696,392
def get_parameter_list_from_kwds(force, kwds, paramlist):
    """Return the kwds values for force's parameters, in paramlist order."""
    # We passed in an instance, not a class
    name = force.__class__.__name__
    ordered = []
    for p in paramlist[name]:
        ordered.append(kwds[p])
    return ordered
1033e012a3a0b8b2a4b5e413fb4a8f69cab04c7c
696,393
def get_units_part(*taxonomic_units):
    """Return a portion of the SQL used in the aggregation query."""
    taxonomic_units = list(taxonomic_units) if taxonomic_units else ["species"]
    select_fragments = []  # for use in the SELECT clause
    group_fragments = []   # for use in the GROUP BY clause
    for unit in taxonomic_units:
        part = "t.upper_ranks #>> '{%(unit)s, name}'" % {"unit": unit}
        select_fragments.append(part + " AS %(unit)s" % {"unit": unit})
        group_fragments.append(part)
    select_part = ", ".join(select_fragments)
    group_part = ", ".join(group_fragments)
    ordering_part = ", ".join(group_fragments[::-1])
    return select_part, group_part, ordering_part
7ca1069fc5d7fc7c4fa443a34ce86767cb50a13c
696,394
def getConfidence(probPathFilename):
    """Given a probabilities file (.prob), returns the recognized text, the
    spaced text, the number of symbols and their average confidence.

    :type probPathFilename: string
    :param probPathFilename:
    """
    # Read file
    lines = []  # List of lines of the file
    with open(probPathFilename) as f:
        lines = [line.rstrip() for line in f]
    # Compute the summation of the uncertainty
    total = 0.0
    n = 0
    text = ""
    text_s = ""
    for line in lines:
        words = line.split('\t')
        if words[0] != ' ':
            text = text + words[0]
            text_s = text_s + words[0]
            if len(words) > 1:
                total = total + float(words[1])
                n = n + 1
        else:
            text_s = text_s + " "
    if n == 0:
        return "", "", 0, 0.0
    else:
        return text, text_s, n, total / n
05faf056a2f1e040a0fa76c16cbe09a8c5ee3d19
696,395
def isOverlap1D(box1, box2):
    """Check if two 1D boxes overlap.

    Reference: https://stackoverflow.com/a/20925869/12646778

    Arguments:
        box1, box2: format: (xmin, xmax)

    Returns:
        res: bool, True for overlapping, False for not
    """
    xmin1, xmax1 = box1
    xmin2, xmax2 = box2
    return xmax1 >= xmin2 and xmax2 >= xmin1
898331f7f0aced6a86b244e22ebb069423fee719
696,396
def xml_fixture(request, xml_files):
    """Parametrized backend instance."""
    return xml_files[request.param]
1dc907cb66d938c026a0096013b9286fe01001a6
696,398
import unicodedata

def unc_to_url(unc: str, as_file: bool = False):
    """Convert UNC to file or smb URL."""
    # normalize for the Alfred Workflow
    unc = unicodedata.normalize("NFC", unc).strip()  # pylint: disable=I1101
    url = unc.replace("\\\\", "smb://").replace("\\", "/")
    if as_file:
        # for Windows 10
        url = url.replace("smb://", "file://")
        url = url.replace(" ", "%20")  # don't encode Umlauts
    return url
ba8828707d2c0ff940ebaa17d95c1b73d1b43c6f
696,399
def make_gcta_ma_line(genetic_association):
    """Makes a GCTA ma line for the genetic variant.

    Will only return a string, not newline-terminated; will not write to a
    file, the user is expected to do this himself.

    :param genetic_association: genetic association class object.
    :return: tab separated string that can be part of an ma file
    """
    # make sure the data that we need is available.
    if (not genetic_association.has_position_data
            or not genetic_association.has_allele_data
            or not genetic_association.has_frequency_data):
        raise RuntimeError("Cannot write an Ma line. Does not contain the necessary data")
    if genetic_association.p_val is None:
        raise RuntimeError("No p value present")
    return "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}".format(
        genetic_association.snp_name,
        genetic_association.minor_allele,
        genetic_association.major_allele,
        genetic_association.minor_allele_frequency,
        genetic_association.beta,
        genetic_association.se,
        genetic_association.p_val,
        genetic_association.n_observations)
f81fc51d5b51e0b254430ff59118e663224b264e
696,401
def determine_pos_neu_neg(compound):
    """Based on the compound score, classify a sentiment into positive,
    negative, or neutral.

    Arg:
        compound: A numerical compound score.

    Return:
        A label in "positive", "negative", or "neutral".
    """
    if compound >= 0.05:
        return 'positive'
    elif -0.05 < compound < 0.05:
        return 'neutral'
    else:
        return 'negative'
6c36913ed4399a8bede622a18c06b46a1fb94c0b
696,402
import base64
import zlib
import pickle

def DecodeObjFromText(obj):
    """Decode object from text string."""
    compressed = obj.startswith('=')
    if compressed:
        obj = obj[1:]
    unhexed = base64.urlsafe_b64decode(obj)
    if compressed:
        unhexed = zlib.decompress(unhexed)
    unpickled = pickle.loads(unhexed)
    return unpickled
3c223d894cba04b12d7aab0978c335cc381bc518
696,403
def starts(epsilon=0):
    """Returns a function that computes whether a temporal interval has the
    same start time as another interval (+/- epsilon), and ends before it.

    The output function expects two temporal intervals (dicts with keys 't1'
    and 't2' for the start and end times, respectively). It returns ``True``
    if the first interval starts at the same time as the second interval
    (+/- ``epsilon``), and the first interval ends before the second interval.

    Args:
        epsilon: The maximum difference between the start time of the first
            interval and the start time of the second interval.

    Returns:
        An output function that takes two temporal intervals and returns
        ``True`` if the first interval starts at the same time as the second
        interval, and ends before the second interval ends.
    """
    return lambda intrvl1, intrvl2: (
        abs(intrvl1['t1'] - intrvl2['t1']) <= epsilon
        and intrvl1['t2'] < intrvl2['t2'])
c26d9d05dac253d010d4d689c80552eb81714929
696,404
import sys

def python_version():
    """python_version() -> (major, minor, micro, release, serial)

    Returns python version info.
    """
    return sys.version_info
3d08aeea0fca5eb48639cb9baf98b952730cc949
696,405
import json

def _json_valid(val):
    """Return a JSON-serializable value."""
    try:
        json.dumps(val)
    except TypeError:
        if hasattr(val, 'to_json'):
            return val.to_json()
        if hasattr(val, 'tolist'):
            return val.tolist()
        if hasattr(val, 'tostring'):
            return val.tostring()
        # string is always good
        val = str(val)
    return val
c0297a67bccb4ea3c2220e45f98e49c49f0d6d67
696,406
import argparse

def get_predict_input_args():
    """Command Line Arguments:
    1. Image path
    2. Checkpoint path with default value checkpoints/checkpoint_best_accuracy.pth
    3. Number of the top k most likely classes as --top_k with default value 1
    4. JSON file to map categories to real names as --category_names with default value cat_to_name.json
    5. Use gpu if available as -g or --gpu

    This function returns these arguments as an ArgumentParser object.

    Parameters:
        None - simply using argparse module to create & store command line arguments

    Returns:
        parse_args() - data structure that stores the command line arguments object
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('image_path', type=str,
                        help='Path of the prediction image')
    parser.add_argument('checkpoint_path', type=str,
                        default='checkpoints/checkpoint_best_accuracy.pth',
                        help='Checkpoint path of the prediction model')
    parser.add_argument('-k', '--top_k', type=int, default=1,
                        help='Number of the top k most likely classes')
    parser.add_argument('-json', '--category_names_path', type=str,
                        default="cat_to_name.json",
                        help='JSON file to map categories to real names')
    parser.add_argument('-g', '--gpu', action='store_true', required=False,
                        help='Use gpu if available')
    in_args = parser.parse_args()
    if in_args is None:
        print("* Doesn't check the command line arguments because 'get_input_args' hasn't been defined.")
    else:
        print("Command Line Arguments:\n image_path =", in_args.image_path,
              "\n checkpoint_path =", in_args.checkpoint_path,
              "\n top_k =", in_args.top_k,
              "\n category_names_path =", in_args.category_names_path)
        if in_args.gpu:
            print("\n Use gpu if available")
    return in_args
0ac37044ca6dda1494c8449d5063f7b356cfb019
696,407
import re

def add_space_when_digit(line):
    """Add a space when seeing a digit, except for arguments of id_list."""
    id_list = ['ARG', 'op', 'snt', '-']
    spl = line.split(':')
    for idx in range(1, len(spl)):
        if spl[idx].strip().replace(')', ''):
            if (spl[idx].strip().replace(')', '')[-1].isdigit()
                    and not any(x in spl[idx] for x in id_list)):
                # if there is a digit after quant or value, put a space so we
                # don't error, e.g. :value3 becomes :value 3, but not for op,
                # snt and ARG
                new_string = ''
                added_space = False
                for ch in spl[idx]:
                    if ch.isdigit():
                        if not added_space:
                            new_string += ' ' + ch
                            added_space = True
                        else:
                            new_string += ch
                    else:
                        new_string += ch
                spl[idx] = new_string
            elif spl[idx].replace(')', '').replace('ARG', '').isdigit():
                # change ARG2444 to ARG2 444
                spl[idx] = re.sub(r'(ARG\d)([\d]+)', r'\1 \2', spl[idx])
    return ':'.join(spl)
c0238406c3690115f3d070eafefab61924186226
696,408
def maxDepth(dg):
    """Get the maximum depth.

    >>> maxDepth(dict([(0, ([2], [])), (1, ([1], [])), (2, ([0], []))]))
    2
    """
    return max(map(lambda x: x[0], dg.items()))
b040000cbf093b81bdf1f77b4de5c4e197aab585
696,409