content
stringlengths
42
6.51k
def get_tfidf_svd_def(tfidf_def, SVD_def):
    """Return keyword-argument dicts for TfidfVectorizer and TruncatedSVD.

    Args:
        tfidf_def (dict): kwargs for TfidfVectorizer, or None for the defaults.
        SVD_def (dict): kwargs for TruncatedSVD, or None for the defaults.

    Returns:
        tuple: (tfidf kwargs, SVD kwargs).
    """
    default_tfidf = {'stop_words': 'english', 'ngram_range': (1, 2),
                     'min_df': 2, 'max_df': .5}
    default_svd = {'n_components': 20, 'algorithm': 'randomized', 'n_iter': 100}
    return (tfidf_def if tfidf_def is not None else default_tfidf,
            SVD_def if SVD_def is not None else default_svd)
def ip_sent(data=None):
    """Build a template dict describing an outgoing IP packet.

    The optional payload is attached under 'ip-data' only when provided.
    """
    template = {'ip-event': 'sent'}
    if data is not None:
        template['ip-data'] = data
    return template
def assign_devs(roles, devs):
    """Create a dictionary of devs indexed by roles.

    :param roles: List of roles.
    :param devs: Corresponding list of devices.
    :returns: Dictionary mapping each role to its device.
    """
    return {role: dev for role, dev in zip(roles, devs)}
def get_ec2_instances(session, regions):
    """Get EC2 instance information across regions.

    NOTE(review): unimplemented placeholder — `session` and `regions` are
    currently unused and an empty string is always returned.
    """
    return ''
def node_float(node):
    """Parse node.text as a float.

    Returns None for a missing node, empty/whitespace-only text, or the
    literal string 'none' (case-insensitive).
    """
    if node is None:
        return None
    stripped = node.text.strip()
    if not stripped or stripped.lower() == 'none':
        return None
    return float(stripped)
def guarantee_trailing_slash(directory_name: str) -> str:
    """Return ``directory_name``, appending a trailing slash when missing.

    Params:
        directory_name: directory name to normalise.

    Returns:
        The directory name guaranteed to end with '/'.
    """
    return directory_name if directory_name.endswith('/') else directory_name + '/'
def _create_base_lambda_policy(): """Generate base secure policy for handling logs As the policy is created before Lambdas, do not put anything here that needs to be scoped to the Lambda. Instead attach that policy associated with Lambda's execution role after creation, see _generate_lambda_log_event_policy Note: CreateLogGroup can not be restricted by resource https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazoncloudwatchlogs.html """ return { 'Version': "2012-10-17", 'Statement': [ { 'Sid': "WriteLogs", 'Effect': "Allow", 'Action': [ "logs:CreateLogGroup" ], 'Resource': [ "*" ] } ] }
def contain_origin(triangle):
    """Return True when the origin lies inside the given triangle.

    ``triangle`` is a sequence of three (x, y) vertex tuples.  The function
    solves the barycentric decomposition O = A + (B-A)*s + (C-A)*t; the
    origin is inside when 0 <= s <= 1, 0 <= t <= 1 and s + t <= 1.
    """
    s = 0
    t = 0
    [A, B, C] = triangle
    if B[0] != A[0]:
        if C[0] != A[0]:
            # General case: neither AB nor AC is vertical.
            if not (A[1] == 0 and B[1] == 0 and C[1] == 0):
                t = (B[1]*A[0]-A[1]*B[0])/(C[1]*(B[0]-A[0]) + B[1]*(A[0]-C[0])+A[1]*(C[0]-B[0]))
                s = ((A[0]-C[0])*t-A[0])/(B[0]-A[0])
        else:
            # AC is vertical (C.x == A.x).
            if C[1] != A[1]:
                s = A[0]/(A[0]-B[0])
                t = (A[1]*B[0] - B[1]*A[0])/((A[0]-B[0])-(C[1]-A[1]))
    else:
        # AB is vertical (B.x == A.x).
        if A[0] != C[0]:
            t = A[0]/(A[0]-C[0])
            s = (A[1]*C[0]-C[1]*A[0])/((A[0]-C[0])*(B[1]-A[1]))
    # NOTE(review): degenerate configurations fall through with s = t = 0,
    # which reports "inside" — confirm this is the intended behaviour.
    if 0 <= s <= 1 and 0 <= t <= 1 and 0 <= s+t <= 1:
        return True
    return False
def avg_dict(dict1):
    """Replace each list value in ``dict1`` with its mean (in place).

    :param dict1: dict mapping keys to non-empty numeric lists
    :return: the same dict, values rounded to 3 decimal places
    """
    for key in dict1:
        values = dict1[key]
        dict1[key] = round(sum(values) / len(values), 3)
    return dict1
def _async_attr_mapper(attr_name, val): """The `async` attribute works slightly different than the other bool attributes. It can be set explicitly to `false` with no surrounding quotes according to the spec.""" if val in [False, 'False']: return ' {}=false'.format(attr_name) elif val: return ' {}'.format(attr_name) else: return ''
def GetPersistentDeviceList(file_name):
    """Return the list of device serials stored one-per-line in ``file_name``.

    Args:
        file_name: path of the file listing devices.

    Returns:
        List of device serial numbers that were on the bot.
    """
    with open(file_name) as device_file:
        return device_file.read().splitlines()
def _create_uniform_task_map(annotators, tasks, redudancy): """ Creates task maps, uniformly distributed across given annotators. """ _total_tasks = tasks * redudancy if annotators == 0 or _total_tasks % annotators > 0: return None _tasks_per_annotator = _total_tasks // annotators _results = [] _current_task_id = 0 for annotator_id in range(annotators): _annotator_tasks = [] for annotator_task in range(_tasks_per_annotator): task_id = (_current_task_id + annotator_task) % tasks _annotator_tasks.append(task_id) _current_task_id = task_id _current_task_id += 1 _results.append(tuple(_annotator_tasks)) return _results
def read_txt_into_list(file_path):
    """Read a file into a list: one entry per line, split on commas/whitespace.

    Tokens equal to the literal string "None" are dropped.  A line with a
    single remaining token yields that token instead of a one-element list.

    :param file_path: the file path to read
    :return: list with one entry (token or token list) per line
    """
    import re
    # Raw string for the pattern: the original non-raw '\s' escapes trigger
    # invalid-escape-sequence warnings on modern Python.
    splitter = re.compile(r'\s*[,|\s+]\s*')
    lists = []
    with open(file_path, 'r') as f:
        content = f.read().splitlines()
    if len(content) > 0:
        lists = [[x if x != 'None' else None for x in splitter.split(line)]
                 for line in content]
        lists = [list(filter(lambda x: x is not None, items)) for items in lists]
        lists = [item[0] if len(item) == 1 else item for item in lists]
    return lists
def shp2topojson(layer):
    """Build the mapshaper command converting {layer}.shp to TopoJSON (WGS84)."""
    template = ('mapshaper {layer}.shp'
                ' -proj wgs84'
                ' -o format=topojson precision=0.00000001'
                ' {layer}.topojson')
    return template.format(layer=layer)
def transform_region_to_coordinates(x_coord, y_coord, prefix_len, image_bit_level=10):
    """Expand an (x, y) leaf at level ``prefix_len`` to final-level coordinates.

    A leaf on a coarse 2^prefix_len grid maps to a square region of the
    2^image_bit_level image; e.g. the first leaf of a 2x2 grid covers
    (0:512, 0:512) of a 10-bit image.

    Args:
        x_coord: leaf x index.
        y_coord: leaf y index.
        prefix_len: bit-depth of the coarse grid.
        image_bit_level: bit-depth of the final image (default 10).

    Returns:
        (x_bot, x_top, y_bot, y_top) inclusive square bounds.
    """
    shift = image_bit_level - prefix_len
    span = (1 << shift) - 1

    x_bot = x_coord << shift
    y_bot = y_coord << shift
    return (x_bot, x_bot + span, y_bot, y_bot + span)
def get_hexagonal(n: int) -> int:
    """Get the `n`-th hexagonal number, n * (2n - 1).

    Args:
        n: Index of hexagonal number.

    Examples:
        >>> print(get_hexagonal(10))
        190
    """
    return (2 * n - 1) * n
def _rewrite_server_name(server_name, new_port): """Rewrite server port in ``server_name`` with ``new_port`` value.""" sep = ':' if sep in server_name: server_name, port = server_name.split(sep, 1) return sep.join((server_name, new_port))
def lower(text):
    """Return a copy of ``text`` with all cased characters lowercased.

    Uses the lowercasing algorithm of section 3.13 of the Unicode Standard.

    :param text: The string to convert
    :type text: ``str``
    :return: Lowercase copy of ``text``
    :rtype: ``str``
    """
    assert isinstance(text, str), '%s is not a string' % text
    return text.lower()
def add_parent_doc(parent):
    """Prepend a 'Parent Class Documentation' header to ``parent.__doc__``."""
    header = """
Parent Class Documentation
==========================

"""
    return header + parent.__doc__
def _sole_list_element(l, exception_message): """Assures that every element in a list is identical. Args: l: The list to consider. exception_message: A message used to convey failure if every element in l is not identical. Returns: The value of each identical element in the list. """ s = set(l) if len(s) != 1: raise Exception(exception_message) return l[0]
def gen_rg_lib_id(unit):
    """Read-group library id from a readunit; 'LIB-DUMMY' when unset/empty."""
    return unit['library_id'] or "LIB-DUMMY"
def relative_frequency_ratio(irony, non_irony):
    """Relative frequency ratios of n-grams between the two corpora.

    For each (n-gram, frequency) in ``non_irony``, the ratio is its frequency
    in ``irony`` divided by its non-ironic frequency (0 when the n-gram does
    not occur ironically).

    :param irony: (n-gram, frequency) tuples from ironic tweets.
    :param non_irony: (n-gram, frequency) tuples from non-ironic tweets.
    :return: (n-gram, ratio) tuples sorted from highest to lowest ratio.
    """
    ratios = []
    for ngram, non_ironic_frequency in non_irony:
        matches = [entry for entry in irony if entry[0] == ngram]
        # Last matching entry wins, mirroring the [-1][-1] lookup; absent
        # n-grams count as frequency 0.
        ironic_frequency = float(matches[-1][-1]) if matches else 0
        ratios.append((ngram, ironic_frequency / float(non_ironic_frequency)))
    return sorted(ratios, key=lambda pair: pair[1], reverse=True)
def find_all_substrings(string, sub):
    """Start indices of every (non-overlapping) occurrence of ``sub`` in ``string``.

    Recipe:
    http://code.activestate.com/recipes/499314-find-all-indices-of-a-substring-in-a-given-string/
    """
    import re
    return [match.start() for match in re.finditer(re.escape(sub), string)]
def label_convert(label_string: str) -> str:
    """Normalise a label for easier matching: lowercase, spaces removed."""
    return label_string.replace(" ", "").lower()
def mongo_logical_or(document_filters):
    """Combine the truthy document filters under Mongo's $or.

    Returns None when no filter is truthy.
    """
    active = [f for f in document_filters if f]
    if not active:
        return None
    return {"$or": active}
def leading_digit(n):
    """Return the leading (most significant) decimal digit of ``n``.

    Fixes two Python 3 defects in the original: ``/`` true division made the
    loop grind down to float underflow and return a tiny float, and inputs
    below 10 never entered the loop, leaving ``first_digit`` unbound.

    Args:
        n: a non-negative integer.

    Returns:
        int: the first decimal digit of n.
    """
    # str manipulation is slow, so strip digits arithmetically.
    while n >= 10:
        n //= 10
    return n
def SPHERE(X):
    """Sphere benchmark function (D-dimensional): sum of squared components."""
    return sum(x_i ** 2 for x_i in X)
def chooseColorMap(sMin, sMax, difference=None):
    """Choose a colormap name that fits the data extremes.

    A diverging map ('dunnePM') is used for difference plots or when the data
    straddles zero; otherwise a sequential rainbow map is returned.

    :param sMin: minimum of the data.
    :param sMax: maximum of the data.
    :param difference: truthy when the data is a difference field.
    """
    # `difference` is documented as a bool flag; plain truthiness replaces
    # the non-idiomatic `difference == True` comparison (PEP 8 / E712).
    if difference:
        return 'dunnePM'
    if sMin < 0 and sMax > 0:
        return 'dunnePM'
    #elif sMax>0 and sMin<0.1*sMax: return 'hot'
    #elif sMin<0 and sMax>0.1*sMin: return 'hot_r'
    return 'dunneRainbow'
def getfilteredlist(inputlist, acceptedvaluelist):
    """Return only those elements of ``inputlist`` present in ``acceptedvaluelist``."""
    return [value for value in inputlist if value in acceptedvaluelist]
def unk_emb_stats(sentences, emb):
    """Compute some statistics about unknown tokens in sentences
    such as "how many sentences contain an unknown token?".
    emb can be gensim KeyedVectors or any other object implementing
    __contains__

    Returned keys: sentence/token totals, unknown token/type counts (both
    case-sensitive and after lowercasing), per-sentence unknown flags, and
    "types" = number of distinct tokens seen overall.
    """
    from collections import Counter
    stats = {
        "sents": 0,
        "tokens": 0,
        "unk_tokens": 0,
        "unk_types": 0,
        "unk_tokens_lower": 0,
        "unk_types_lower": 0,
        "sents_with_unk_token": 0,
        "sents_with_unk_token_lower": 0}
    all_types = set()
    for sent in sentences:
        stats["sents"] += 1
        any_unk_token = False
        any_unk_token_lower = False
        # Count each distinct token once per sentence, weighted by frequency.
        types = Counter(sent)
        for ty, freq in types.items():
            all_types.add(ty)
            stats["tokens"] += freq
            unk = ty not in emb
            if unk:
                any_unk_token = True
                # NOTE: unk_types counts per-sentence occurrences, so a type
                # unknown in several sentences is counted several times.
                stats["unk_types"] += 1
                stats["unk_tokens"] += freq
            # "lower" stats: still unknown even after lowercasing the token.
            if unk and ty.lower() not in emb:
                any_unk_token_lower = True
                stats["unk_types_lower"] += 1
                stats["unk_tokens_lower"] += freq
        if any_unk_token:
            stats["sents_with_unk_token"] += 1
        if any_unk_token_lower:
            stats["sents_with_unk_token_lower"] += 1
    stats["types"] = len(all_types)
    return stats
def numformat(number: int) -> str:
    """Format a number with thousands separators and no decimals.

    Parameters
    ----------
    number : int
        A number to format

    Returns
    -------
    str
        The formatted number
    """
    return format(number, ',.0f')
def args_to_string(list):
    """Join the items of ``list`` into one space-separated string.

    Each item is rendered via %s, so non-string items are converted.  Using
    str.join replaces the original's quadratic repeated concatenation.

    Note: the parameter name shadows the builtin ``list``; it is kept to
    preserve the call interface.
    """
    return ' '.join('%s' % item for item in list)
def closest_interval(dict_keys, value_to_fit):
    """Return the interval whose two endpoints are jointly closest to ``value_to_fit``.

    The distance of an interval is the sum of the absolute differences of
    both endpoints from the value; ties resolve to the earliest interval.
    """
    def distance(interval):
        return (abs(interval[1] - value_to_fit)
                + abs(interval[0] - value_to_fit))

    return min(dict_keys, key=distance)
def _convert_path_to_ee_sources(path: str) -> str: """Get the remote module path from the 'ee-sources' GCS bucket. Args: path: str Returns: An ee-sources module url. """ if path.startswith("http"): eempath = path else: bpath = path.replace(":", "/") eempath = f"https://storage.googleapis.com/ee-sources/{bpath}" return eempath
def list_remove_duplicates(lst):
    """Remove duplicates from ``lst`` while preserving first-seen order.

    Args:
        lst: list of hashable elements.

    Returns:
        New list with later duplicates dropped.
    """
    seen = set()
    unique = []
    for element in lst:
        if element not in seen:
            seen.add(element)
            unique.append(element)
    return unique
def fibonacci(n):
    """Return the n-th Fibonacci number (fib(0) == 0, fib(1) == 1).

    Iterative implementation: the naive double recursion it replaces runs in
    exponential time.
    """
    if n in (0, 1):
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
def lr_poly(base_lr, curr_iter, max_iter, warmup_iter=0, power=0.9):
    """Polynomial-decay learning rate policy.

    Args:
        base_lr: initial learning rate.
        curr_iter: current iteration.
        max_iter: maximum iteration.
        warmup_iter: iterations during which the rate ramps up linearly
            (from base_lr/10 towards base_lr), capped by the decayed rate.
        power: decay exponent.

    Return:
        The current adjusted learning rate.
    """
    decayed = base_lr * ((1 - float(curr_iter) / max_iter) ** power)
    if curr_iter >= warmup_iter:
        return decayed
    # Warmup: linear interpolation between base_lr/10 and base_lr.
    alpha = curr_iter / warmup_iter
    warmup_lr = base_lr * (1 / 10.0 * (1 - alpha) + alpha)
    return min(warmup_lr, decayed)
def get_aar_url(build_info):
    """Given the json build info, find the URL to the tensorflow.aar artifact.

    Raises:
        ValueError: when the url is missing, the aar artifact is absent, or
            the build class is unknown.
    """
    base_url = build_info.get('url')
    if not base_url:
        raise ValueError('Missing url: %s' % build_info)
    build_class = build_info.get('_class')
    if build_class in ('hudson.model.FreeStyleBuild', 'hudson.matrix.MatrixRun'):
        # next() with a None default: the original bare next() raised
        # StopIteration on a missing artifact, so the intended ValueError
        # below was unreachable.
        aar_info = next(
            (a for a in build_info.get('artifacts')
             if a.get('fileName') == 'tensorflow.aar'),
            None)
        if not aar_info:
            raise ValueError('Missing aar artifact: %s' % build_info)
        return '%s/artifact/%s' % (base_url, aar_info.get('relativePath'))
    raise ValueError('Unknown build_type %s' % build_info)
def IsCorrectConfigVersion(targetversion, configversion):
    """Decide whether the configuration version is compatible with the target.

    Compatibility requires matching major and minor components; the revision
    is ignored.

    @param targetversion: (major, minor, revision) tuple to upgrade to
    @param configversion: (major, minor, revision) of the current configuration
    @rtype: bool
    @return: True if configversion fits the target version.
    """
    return tuple(configversion[:2]) == tuple(targetversion[:2])
def str2ascii(string: str) -> list:
    """Convert a string to the list of its character code points."""
    return list(map(ord, string))
def set_to_dm_limits(ptt_list, limit=5.):
    """Clamp piston/tip/tilt values to the IrisAO hardware limit.

    Values are clipped into [-limit, +limit].  The original capped only the
    positive side with min(), letting values below -limit through even though
    the hardware limit is a magnitude.

    :param ptt_list: list of (piston, tip, tilt) tuples per segment, DM units
    :param limit: float, magnitude limit in DM units. Default = 5.
    :return: list of (piston, tip, tilt) tuples with every value clamped
    """
    return [tuple(max(-limit, min(value, limit)) for value in ptt)
            for ptt in ptt_list]
def logfile_opt_to_str(x):
    """Detype a $XONSH_TRACEBACK_LOGFILE option.

    None detypes to "" rather than 'None', since 'None' is itself a
    perfectly valid filename and retyping it would be ambiguous.
    """
    return "" if x is None else str(x)
def writeFile(fnm, content):
    """Create (or overwrite) a file with the given contents.

    Uses a context manager so the handle is closed even on write failure,
    and catches OSError instead of the original bare ``except`` (which also
    swallowed KeyboardInterrupt/SystemExit).

    Parameters:
        fnm (str): name of the file to be created/written to
        content (str): content to be added to the file

    Returns:
        bool: True if the file was successfully written, False on I/O error.
    """
    try:
        with open(fnm, 'w') as f:
            f.write(content)
    except OSError:
        return False
    return True
def filterWang(item):
    """True when an ASSET CONTROL csv row belongs to Honggang Wang.

    Matches either by custodian name or by office location (rooms 224/209).
    """
    if 'Honggang' in item['Custodian']:
        return True
    location = item['Location']
    return '224' in location or '209' in location
def end_on_root(genotype, weight=20):
    """Fitness contribution of the last note: reward root pitches.

    Args:
        genotype ((int, int)[]): list of (pitch, dur) tuples of a chromosome.
        weight (int, optional): penalty/reward rate. Defaults to 20.

    Returns:
        int: +weight when the last pitch is 1, 8 or 15, otherwise -weight.
    """
    last_pitch = genotype[-1][0]
    return weight if last_pitch in (1, 8, 15) else -weight
def kilos(x, pos):
    """Tick formatter: value in thousands with a LaTeX thin-space K suffix.

    The two args are the tick value and tick position (position unused).
    """
    return f"{x * 1e-3:1.0f}\\,K"
def count_list(arr):
    """Return the number of elements in ``arr``."""
    return len(arr)
def parse_str_with_space(var: str) -> str:
    """Collapse runs of whitespace in ``var`` to single spaces and strip ends.

    ``str.split()`` with no argument splits on any whitespace (spaces, tabs,
    newlines) and drops empty fields, matching the documented intent; the
    original handled only the plain space character.

    Example:
        var = "My  name is  John  "
        Return var = "My name is John"
    """
    return ' '.join(var.split())
def linear_cell_to_tuple(c1, repeat_units):
    """Convert a linear index into an (x, y, z) tuple, lexicographic order.

    :arg c1: (int) linear index.
    :arg repeat_units: (3 int indexable) dimensions of the grid.
    """
    c1 = int(c1)
    nx, ny = repeat_units[0], repeat_units[1]
    x = c1 % nx
    y = (c1 // nx) % ny
    z = c1 // (nx * ny)
    return x, y, z
def rename_context(path, name):
    """Return the context path with its final dotted component replaced by ``name``."""
    parts = path.split('.')
    parts[-1] = name
    return '.'.join(parts)
def get_openslide_query(x, y, level, tile_width, tile_height,
                        level_heights, level_downsamples):
    """Build OpenSlide read_region arguments from OpenGL coordinates.

    OpenGL's y-axis points up while OpenSlide's points down, so the y
    position is flipped against the level height; non-zero levels are
    scaled by the level's downsample factor.
    """
    slide_x = x
    slide_y = level_heights[level] - y - tile_height
    if level != 0:
        scale = level_downsamples[level]
        slide_x *= scale
        slide_y *= scale
    return (slide_x, slide_y), level, (tile_width, tile_height)
def type_meta(class_or_object, prefix="class:"):
    """Extract type metadata from a class or object, usable as Name server metadata.

    Returns a set of prefixed dotted names for every class in the MRO,
    skipping builtins.  For an instance, recurses on its class — now
    forwarding ``prefix``, which the original silently dropped.
    """
    if hasattr(class_or_object, "__mro__"):
        return {prefix + c.__module__ + "." + c.__name__
                for c in class_or_object.__mro__
                if c.__module__ not in ("builtins", "__builtin__")}
    if hasattr(class_or_object, "__class__"):
        return type_meta(class_or_object.__class__, prefix)
    return frozenset()
def clean_travel_info(travel_details: dict) -> dict:
    """Extract and prepare travel time information.

    Converts metres to kilometres and seconds to minutes; an empty dict or
    a "No Result" field passes through as the string "No Result".

    Args:
        travel_details (dict): raw travel info with 'distance'/'duration'.

    Returns:
        dict: travel info in human friendly units.
    """
    def formatted(key, divisor, unit):
        if travel_details == {} or travel_details[key] == "No Result":
            return "No Result"
        return f"{round(travel_details[key] / divisor, 2)}{unit}"

    return {"distance": formatted("distance", 1000, "km"),
            "travel_time": formatted("duration", 60, "mins")}
def length(array) -> int:
    """Length of a 1D array-like; scalars count as 1.

    Falls back through: len() -> .shape[0] (CasADi-style objects) -> 1 for
    plain floats/ints.
    """
    try:
        return len(array)
    except TypeError:
        pass
    # No len(): either a scalar or an object exposing a .shape attribute.
    try:
        shape = array.shape
    except AttributeError:
        return 1
    if len(shape) >= 1:
        return shape[0]
    return 1
def sort_by_onset(sounding_notes):
    """Sort a list of notes by onset time.

    (The original docstring claimed sorting "by pitch", but the sort key is
    the ``onset`` attribute.)

    Parameters
    ----------
    sounding_notes : list
        List of `VSNote` instances

    Returns
    -------
    list
        List of sounding notes sorted by onset
    """
    return sorted(sounding_notes, key=lambda note: note.onset)
def calculate_multiplicative_score(penalties):
    """Multiplicative score over the given penalties.

    Starts from a base of 9, multiplies by (100 - penalty.value)/100 for
    each penalty, adds 1 and rounds to one decimal place.
    """
    score = 9
    for penalty in penalties:
        score *= (100 - penalty.value) / 100.0
    return round(score + 1, 1)
def unflatten(d):
    """Expand a flat mapping with dot-separated keys into nested dicts.

    Args:
        d (dict): flat mapping, e.g. {'a.b': 1}.

    Returns:
        dict: the unflattened object, e.g. {'a': {'b': 1}}.
    """
    nested = {}
    for flat_key, value in d.items():
        *parents, leaf = flat_key.split('.')
        node = nested
        for part in parents:
            node = node.setdefault(part, {})
        node[leaf] = value
    return nested
def make_label(ss, lab='<no_axe_label>', use_mpl=True):
    """
    make a label from title and units

    Builds an axis label from ``ss.title`` (falling back to ``lab``) and
    appends ``ss.units`` — LaTeX-formatted for matplotlib, HTML otherwise.
    NOTE(review): the "{:~L}" / "{:~H}" format specs look like pint unit
    formatting — confirm ``ss.units`` is a pint quantity.
    """
    if ss is None:
        return lab
    if ss.title:
        label = ss.title  # .replace(' ', r'\ ')
    else:
        label = lab
    if '<untitled>' in label:
        label = 'values'
    if use_mpl:
        # matplotlib: LaTeX math with a "/ unit" suffix, escaping percent.
        if ss.units is not None and str(ss.units) not in ['dimensionless', 'absolute_transmittance']:
            units = r"/\ {:~L}".format(ss.units)
            units = units.replace('%', r'\%')
        else:
            units = ''
        label = r"%s $\mathrm{%s}$" % (label, units)
    else:
        # non-mpl (e.g. HTML) rendering: plain "label / unit".
        if ss.units is not None and str(ss.units) != 'dimensionless':
            units = r"{:~H}".format(ss.units)
        else:
            units = ''
        label = r"%s / %s" % (label, units)
    return label
def BRIGHT(n):
    """
    Returns control codes to set or unset bright text.
    Use this in a ``PRINT`` or ``SET`` command.
    Example: ``PRINT("normal",BRIGHT(1),"bright",BRIGHT(0),"normal")``

    Args:
        - n - integer - bright or not (0-1)
    """
    return chr(19) + chr(int(n))
def is_node(relative_link):
    """True for a file entry; directory listings mark folders with a trailing '/'."""
    return not relative_link.endswith('/')
def find_area_perim(array):
    """Signed area (shoelace / 2) and perimeter of a polygon [(x, y), ...].

    NOTE(review): only consecutive edges are accumulated; the closing edge
    from the last vertex back to the first is included only if the caller
    repeats the first vertex at the end — confirm against callers.
    """
    area = 0
    perim = 0
    prev_x, prev_y = array[0]
    for x, y in array[1:]:
        area += x * prev_y - y * prev_x
        # Edge length via the modulus of the complex difference.
        perim += abs((x - prev_x) + (y - prev_y) * 1j)
        prev_x, prev_y = x, y
    return area / 2, perim
def tex_coord(x, y, n=16):
    """Return the four corner (u, v) pairs of texture tile (x, y) on an n-by-n atlas."""
    step = 1.0 / n
    u = x * step
    v = y * step
    return u, v, u + step, v, u + step, v + step, u, v + step
def combIndexBul(combs, weight):
    """Map each weight value in every valid combination to its index in ``weight``.

    (Docstring translated from Turkish.)  For every value in a combination,
    find the index that value has in the user-supplied weight list; when a
    weight occurs more than once, the last matching index wins (as before).

    Fix: the original aliased the result to ``combs`` and overwrote the
    caller's input in place; a fresh nested list is now returned.
    """
    index_rows = []
    for comb in combs:
        row = []
        for value in comb:
            idx = value  # fallback mirrors the original: unmatched values pass through
            for k, w in enumerate(weight):
                if value == w:
                    idx = k
            row.append(idx)
        index_rows.append(row)
    return index_rows
def get_elems_set(entries):
    """Set of element symbols (as str) across all entries' compositions.

    Args:
        entries: iterable of entries exposing ``.composition.elements``.

    Returns:
        set of element name strings.
    """
    symbols = set()
    for entry in entries:
        for element in entry.composition.elements:
            symbols.add(str(element))
    return symbols
def strtobool(val):
    """Convert a string representation of truth to 1 or 0.

    True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
    are 'n', 'no', 'f', 'false', 'off', and '0'.  Raises ValueError for
    anything else.
    """
    truthy = ('y', 'yes', 't', 'true', 'on', '1')
    falsy = ('n', 'no', 'f', 'false', 'off', '0')
    val = val.lower()
    if val in truthy:
        return 1
    if val in falsy:
        return 0
    raise ValueError("invalid truth value %r" % (val,))
def pluralize(args, premise):
    """Return the plural-equivalent POS-tag for args[0].

    Unknown tags pass through unchanged; ``premise`` is unused (kept for
    interface compatibility).
    """
    plural_map = {'NN': 'NNS', 'NNS': 'NNS', 'VBZ': 'VBP', 'VBD': 'VBD',
                  'VB': 'VB', 'VBP': 'VBP', 'NNP': 'NNPS', 'VBN': 'VBN',
                  'MD': 'MD'}
    pos = args[0]
    return plural_map.get(pos, pos)
def _adjust_block_size(width, height, blocksize): """Adjusts blocksize by adding 1 if the remainder from the division of height/width by blocksize is 1. """ if width % blocksize == 1: blocksize += 1 elif height % blocksize == 1: blocksize += 1 return blocksize
def return_code_from_exception(exception):
    """Exit code that raising ``exception`` (a sys.exc_info tuple) would produce.

    None maps to 0, SystemExit propagates its code, anything else maps to 1.
    """
    if exception is None:
        return 0
    exc_value = exception[1]
    if isinstance(exc_value, SystemExit):
        return exc_value.code
    return 1
def string_to_list(string):
    """Split a comma-separated string into a list of stripped entries.

    None or the empty string yields [].
    """
    if string is None or string == "":
        return []
    return [part.strip() for part in string.split(",")]
def expand_dictionary_single_recursive(index: int, key_list: list, value) -> dict:
    """Turn key_list[index:] into a chain of nested dicts ending at ``value``.

    Recursive.  The termination test now compares the *position*
    (``index == len(key_list) - 1``) instead of key equality with the last
    element, which terminated early whenever a key repeated in key_list.

    Parameters
    ----------
    index : int
        Current index into the list of keys.
    key_list : list[str]
        Keys to turn into a nested dictionary.
    value : any
        Value keyed against the last (deepest) key.

    Returns
    -------
    dict
        The nested dictionary.
    """
    key = key_list[index]
    if index == len(key_list) - 1:
        return {key: value}
    return {key: expand_dictionary_single_recursive(index + 1, key_list, value)}
def extract_category(line_array):
    """Classify a csv row (as an array of values) by its last field.

    Returns -1.0 when income is '<=50K', 1.0 otherwise.
    """
    return -1.0 if line_array[-1].strip() == "<=50K" else 1.0
def feature_str2dict(featurestring, position='top'):
    """Convert a per-residue secondary-structure string (VMD-style) to feature dicts.

    'H'/'G'/'I' runs become 'helix' features and 'E'/'B'/'b' runs become
    strand ('-->') features; selections are 0-based inclusive [begin, end]
    and all helices are listed before all strands.

    Fix: a run extending to the end of the string is now flushed — the
    original silently dropped a trailing helix or strand (e.g. 'HHH' -> []).
    """
    features = []

    def collect(chars, style_name):
        # One pass over the string, emitting a feature per maximal run.
        begin = None
        for i, s in enumerate(featurestring):
            if s in chars:
                if begin is None:
                    begin = i
            elif begin is not None:
                features.append({'style': style_name, 'sel': [begin, i - 1],
                                 'position': position})
                begin = None
        if begin is not None:  # flush a run reaching the end of the string
            features.append({'style': style_name,
                             'sel': [begin, len(featurestring) - 1],
                             'position': position})

    collect(('H', 'G', 'I'), 'helix')
    collect(('E', 'B', 'b'), '-->')
    return features
def Flatten(xs, **unused_kwargs):
    """Flatten one level of nested lists/tuples in ``xs`` into a tuple."""
    flat = []
    for item in xs:
        if isinstance(item, (list, tuple)):
            flat.extend(item)
        else:
            flat.append(item)
    return tuple(flat)
def _safe_translate(translations, locale, fallback_locale='en', default=None): """return `locale` translation from `translations`. Fallback on `fallback_locale` if `locale` translation can't be found and eventually fallback on `default` is none of the locales translations can't be found. """ if locale in translations: return translations[locale] if fallback_locale in translations: return translations[fallback_locale] return default
def str2b(data): """Unescape P2/P3 and convert to bytes if Python3.""" # Python2: Unescape control chars try: return data.decode('string_escape') except AttributeError: pass except UnicodeDecodeError: pass # Python3: Unescape control chars and convert to byte try: return data.encode("utf-8").decode('unicode-escape').encode("latin1") except UnicodeDecodeError: pass
def _build_rebind_dict(args, rebind_args): """Build a argument remapping/rebinding dictionary. This dictionary allows an atom to declare that it will take a needed requirement bound to a given name with another name instead (mapping the new name onto the required name). """ if rebind_args is None: return {} elif isinstance(rebind_args, (list, tuple)): rebind = dict(zip(args, rebind_args)) if len(args) < len(rebind_args): rebind.update((a, a) for a in rebind_args[len(args):]) return rebind elif isinstance(rebind_args, dict): return rebind_args else: raise TypeError('Invalid rebind value: %s' % rebind_args)
def find_intersected_neuron_ids(neurons):
    """Sorted neuron ids present in every label dict of ``neurons``.

    :param neurons: mapping of label -> {neuron_id: ...} dicts
    :return: sorted list of ids common to all labels
    """
    key_sets = [set(label.keys()) for label in neurons.values()]
    common = key_sets[0].intersection(*key_sets[1:])
    return sorted(common)
def make_edge(u, v):
    """Canonical undirected edge as a tuple (u, v) with u <= v guaranteed."""
    return (u, v) if u <= v else (v, u)
def get_functions(template): """ Extracts functions and environment variables from a template This returns a dict with the function name as the key and a list of variable names as value. """ # Extract variables from the Globals section try: global_variables = list(template["Globals"]["Function"]["Environment"]["Variables"].keys()) except KeyError: global_variables = [] # Extract functions in the Resources section functions = {} for name, resource in template.get("Resources", {}).items(): if resource.get("Type", "") != "AWS::Serverless::Function": continue try: local_variables = list(resource["Properties"]["Environment"]["Variables"].keys()) except KeyError: local_variables = [] # Don't add functions without environment variables if local_variables or global_variables: functions[name] = global_variables + local_variables return functions
def get_range_values(field, db_object):
    """Retrieve the [start, end] pairs of the address pool's ranges.

    ``field`` is unused; kept for interface compatibility.  An object with
    no ``ranges`` attribute yields an empty list.
    """
    return [[entry.start, entry.end]
            for entry in getattr(db_object, 'ranges', [])]
def cdf_url_and_filename(dmsp_number, year, month, day):
    """Build the NOAA DMSP SSJ CDF filename and its download URL.

    Parameters
    ----------
    dmsp_number : int
        DMSP satellite number (e.g. 15)
    year : int
        Data year
    month : int
        Data month (1-12)
    day : int
        Data day of month

    Returns
    -------
    cdf_url : string
        URL to retrieve file at
    cdffn : string
        CDF filename
    """
    cdffn = ('dmsp-f%02d_ssj_precipitating-electrons-ions_%d%02d%02d_v1.1.2.cdf'
             % (dmsp_number, year, month, day))
    month_dir = 'f%02d/ssj/%d/%02d/' % (dmsp_number, year, month)
    cdf_url = 'https://satdat.ngdc.noaa.gov/dmsp/data/' + month_dir + cdffn
    return cdf_url, cdffn
def equals(a, b):
    """Return True iff each gene in a is identical in b (and vice versa).

    Fix: the original indexed ``b[key]`` directly, raising KeyError when
    ``a`` holds a key missing from ``b`` (possible even with equal lengths);
    missing keys now simply make the comparison False.
    """
    if a == b:
        return True
    if len(a) != len(b):
        return False
    for key, value in a.items():
        if key not in b or b[key] != value:
            return False
    return True
def cmp_lines(path_1, path_2):
    """Return True when the two text files have identical content, line by line."""
    with open(path_1, 'r') as f1, open(path_2, 'r') as f2:
        line_a = line_b = True
        # readline() yields '' at EOF, which terminates the loop.
        while line_a and line_b:
            line_a, line_b = f1.readline(), f2.readline()
            if line_a != line_b:
                return False
    return True
def lmap(*args):
    """Python-2-style map(): return a list instead of a lazy iterator."""
    return [*map(*args)]
def dictionary_union(dict1, dict2):
    """Union of two dictionaries.

    Parameters:
    -----------
    dict1: Python dictionary
    dict2: Python dictionary

    Returns:
    --------
    A new dict holding every key of both inputs.  Values under shared keys
    are assumed identical (dict2's value is kept, as in the original).
    """
    merged = {}
    merged.update(dict1)
    merged.update(dict2)
    return merged
def stderr_is_polluted(line):
    """True when a stderr line is JVM noise, making stderr unreliable.

    May occur depending on the environment in which py4j is executed.
    """
    return "Picked up _JAVA_OPTIONS" in line
def get_partial_match(question_tokens, idx, header_tokens):
    """
    Try to find partial matches (e.g. for the question tokens "release year"
    and the column header "song release year")

    Searches the longest question span starting at ``idx`` whose tokens are
    all contained in exactly one short header; returns (endIdx, header) on a
    unique match, otherwise (idx, None).
    """
    def check_in(list_one, list_two):
        # True when every token of list_one occurs in list_two and the
        # header is short (<= 3 tokens).  Returns None (falsy) otherwise.
        if len(set(list_one) & set(list_two)) == len(list_one) and (len(list_two) <= 3):
            return True
    # Longest-first: try the widest span before narrower ones.
    for endIdx in reversed(range(idx + 1, len(question_tokens))):
        sub_toks = question_tokens[idx: min(endIdx, len(question_tokens))]
        if len(sub_toks) > 1:
            # a match is defined by a minimum of 2 matching tokens
            flag_count = 0
            tmp_heads = None
            for heads in header_tokens:
                if check_in(sub_toks, heads):
                    flag_count += 1
                    tmp_heads = heads
            # Only an unambiguous (single-header) match is accepted.
            if flag_count == 1:
                return endIdx, tmp_heads
    return idx, None
def get_unique_table_configs(test_cases):
    """Unique (setup_cmds, cmd_data) pairs across test cases, first-seen order.

    set() is unusable here because cmd_data may be unhashable (and matters
    for equality).
    """
    unique = []
    for case in test_cases:
        config = (case['ss_cli_setup_cmds'], case['table_setup_cmd_data'])
        if config not in unique:
            unique.append(config)
    return unique
def validate_card(card_number):
    """Luhn algorithm to check a credit card number.

    Args:
        card_number: Card number as a string; spaces are ignored.

    Returns:
        True when the Luhn checksum passes, False otherwise.  An empty
        (or all-space) input returns False — previously it passed
        vacuously because sum([]) % 10 == 0.

    Raises:
        ValueError: if the input contains non-digit, non-space characters.
    """
    # Strip spaces first, then reverse so index 0 is the check digit
    # (equivalent to the original reverse-then-strip order).
    digits = card_number.replace(" ", "")[::-1]
    if not digits:
        return False
    total = 0
    for index, char in enumerate(digits):
        digit = int(char)
        if index % 2 != 0:
            # Double every second digit; subtracting 9 equals summing the
            # two decimal digits of any doubled value in 10..18.
            digit *= 2
            if digit > 9:
                digit -= 9
        total += digit
    return total % 10 == 0
def remove_bad(string):
    """Replace problem characters in *string* with underscores.

    The characters :, ",", (, ), space, |, ; and ' are each mapped to '_'
    in a single str.translate pass instead of eight chained .replace()
    calls.
    """
    bad_to_underscore = str.maketrans(":,() |;'", "_" * 8)
    return string.translate(bad_to_underscore)
def handle_time_period_options(args):
    """
    Normalise the "on_or_before", "on_or_after" and "between" time-period
    options into a single "between" pair.  The three-argument form stays
    in the API for clarity; internally only "between" is used.

    The args dict is modified in place (the two single-ended keys are
    removed) and returned.  Raises ValueError when "between" is combined
    with either other option, or when the result is not a pair.
    """
    if "between" not in args:
        # Caller does not accept time periods at all; leave args untouched.
        return args
    start = args.pop("on_or_after", None)
    end = args.pop("on_or_before", None)
    period = args["between"]
    if period and (start or end):
        raise ValueError(
            "You cannot set `between` at the same time as "
            "`on_or_after` or `on_or_before`"
        )
    if not period:
        period = (start, end)
    if not isinstance(period, (tuple, list)) or len(period) != 2:
        raise ValueError("`between` should be a pair of dates")
    args["between"] = period
    return args
def compress_string_nogroupby(string) -> str:
    """Compress string to custom character-number format (see task2 description).

    A single manual pass collects runs of identical characters (no
    itertools.groupby, per the function name); the encoded pieces are
    assembled with str.join to avoid quadratic concatenation.

    Arguments:
        string -- string to compress

    Returns:
        The run-length encoding when it is no longer than the input,
        otherwise the original string unchanged.

    Raises:
        TypeError -- if string is not a builtins.str (subclasses accepted)
        ValueError -- if string is empty
    """
    if not isinstance(string, str):
        raise TypeError
    if len(string) == 0:
        raise ValueError("Expected len(string) > 0, got 0")
    pieces = []
    run_char = string[0]
    run_len = 1
    for char in string[1:]:
        if char == run_char:
            run_len += 1
        else:
            pieces.append(run_char + str(run_len))
            run_char = char
            run_len = 1
    pieces.append(run_char + str(run_len))  # flush the final run
    encoded = ''.join(pieces)
    return string if len(encoded) > len(string) else encoded
def facts_to_str(user_data):
    """Convert a dict into a text message of "key - value" lines.

    The result is wrapped in a leading and trailing newline, matching the
    original (and obscure) `"\\n".join(facts).join(['\\n', '\\n'])` trick,
    written readably.  An empty dict yields "\\n\\n".
    """
    facts = ['{} - {}'.format(key, value) for key, value in user_data.items()]
    return '\n' + '\n'.join(facts) + '\n'
def file_type(filename, stream=False):
    """
    Detect a potentially compressed file by its magic bytes.

    Args:
        filename: path to a file on disk, or -- when stream=True -- the
            leading bytes of the data itself.
        stream: when True, treat `filename` as raw data, not a path.

    Returns:
        "gz", "bz2" or "zip" if a compression signature is detected,
        else None.
    """
    # BUG FIX: the magic numbers must be bytes and the file opened in
    # binary mode.  The old code opened in text mode and compared against
    # str literals, which never matches binary data in Python 3 and can
    # raise UnicodeDecodeError while reading.
    magic_dict = {
        b"\x1f\x8b\x08": "gz",
        b"\x42\x5a\x68": "bz2",
        b"\x50\x4b\x03\x04": "zip"
    }
    max_len = max(len(x) for x in magic_dict)
    if stream:
        file_start = filename
    else:
        with open(filename, "rb") as f:
            file_start = f.read(max_len)
    for magic, filetype in magic_dict.items():
        if file_start[:len(magic)] == magic:
            return filetype
    return None
def any(iterable):
    """
    any()

    Purpose:
        Returns true if any element in the iterable evaluates to true,
        otherwise returns false (short-circuits on the first truthy item).

    Parameters:
        iterable [type=list,tuple]
            Any item that can be iterated over

    Returns:
        A boolean specifying whether any elements of the iterable
        evaluate to true
    """
    # Lazy generator + next() mirrors the original loop's short-circuit.
    return next((True for element in iterable if element), False)
def get_region_index(data) -> list:
    """Get NEM region index: the '@RegionID' of each region in the case file."""
    # .get() chain preserved so a missing level surfaces the same way as
    # before (AttributeError on None) rather than a KeyError.
    regions = (data.get('NEMSPDCaseFile')
                   .get('NemSpdInputs')
                   .get('RegionCollection')
                   .get('Region'))
    return [region['@RegionID'] for region in regions]
def str_info_noduri(l):
    """
    Display-only helper: render the node list as "[n1 n2 ... ]".
    May be adapted per problem.  (Docstring translated from Romanian.)
    """
    # Each element gets a trailing space, matching the original output
    # exactly (including "[]" for an empty list).
    return "[" + "".join(str(node) + " " for node in l) + "]"
def FieldCrossRefLabel(field_name):
    """Field cross reference label."""
    return 'envoy_api_field_{}'.format(field_name)
def _read_file(path): """Default read() function for use with hash_files().""" with open(path,"rb") as f: return f.read()
def snap_key_to_snapnum(snap_key: str) -> int:
    """
    Given the name of a snapshot key, finds the associated snapshot number.

    This is necessary because the 0th snapshot key may not be snapshot 000 and
    there could be missing snapshots. This function searches backwards for a
    group of digits that identify the snapshot number.  If there are numbers
    outside of this cluster they will be disregarded and a warning raised.

    For example, if the key is "Snap1_030", the function will return 30 and
    issue a warning that there were digits ignored.

    Parameters
    ----------

    snap_key: String.  The name of the snapshot key.

    Returns
    ----------

    snapnum: Integer.  The snapshot number that corresponds to the snapshot key.

    Examples
    ----------
    >>> snap_key_to_snapnum('Snap_018')
    18

    >>> snap_key_to_snapnum('018_Snap')
    18

    >>> snap_key_to_snapnum('Sn3p_018')
    --WARNING--
    For Snapshot key 'Sn3p_018' there were numbers that were not \
clustered together at the end of the key.
    We assume the snapshot number corresponding to this key is 18; \
please check that this is correct.
    18
    """

    snapnum = ""
    # Three-state sentinel: None = no character seen yet, True = previous
    # character was a digit, False = previous character was a non-digit.
    # The explicit `== False` below deliberately excludes the initial None
    # state so a key ending in digits doesn't warn spuriously.
    reached_numbers = None

    for letter in reversed(snap_key):  # Go backwards through the key.
        if letter.isdigit():
            # A digit after a non-digit, when we already collected digits,
            # means this digit is outside the trailing cluster: warn once
            # and keep only the cluster gathered so far.
            if reached_numbers == False and len(snapnum):
                print("--WARNING--")
                print("For Snapshot key '{0}' there were numbers that were not"
                      " clustered together at the end of the key.\nWe assume "
                      "the snapshot number corresponding to this key is {1}; "
                      "please check that this is correct."
                      .format(snap_key, int(snapnum[::-1])))
                break

            # When a number is found, we concatenate it with the others and
            # flag that we have encountered a cluster of numbers.
            snapnum = "{0}{1}".format(snapnum, letter)
            reached_numbers = True

        else:
            # When we reach something that's not a number, turn flag off.
            reached_numbers = False

    snapnum = snapnum[::-1]  # We searched backwards so flip the string around.

    return int(snapnum)