def shorten_name(name, max_length): """ Shortens a name to the given number of characters. """ if len(name) <= max_length: return name q, r = divmod(max_length - 3, 2) return name[:q + r] + "..." + name[-q:]
def getNegBinomParams(mu, alpha): """ From https://stats.stackexchange.com/questions/260580/negative-binomial-distribution-with-python-scipy-stats Convert mean/dispersion parameterization of a negative binomial to the ones scipy supports Parameters ---------- mu : float Mean of NB distribution. alpha : float Overdispersion parameter used for variance calculation. See https://en.wikipedia.org/wiki/Negative_binomial_distribution#Alternative_formulations """ var = mu + alpha * mu ** 2 p = (var - mu) / var r = mu ** 2 / (var - mu) return r, p
def validate_training_proportion_input(input: str) -> float: """ Validates the training proportion input :param input: The training proportion before parsing :return: The training proportion after parsing """ training_proportion = float(input) if training_proportion > 1 or training_proportion <= 0: raise ValueError( "Invalid training proportion input. Must be strictly greater than 0 and less than or equal to 1") return training_proportion
def newman_conway(num):
    """ Returns a space-separated string of the Newman-Conway sequence
    P(1) ... P(num) for the given value.
    Time Complexity: O(n)
    Space Complexity: O(n)
    """
    if num == 0:
        raise ValueError("num cannot be zero")
    if num == 1:
        return "1"
    arr = [0, 1, 1]
    for i in range(3, num + 1):
        arr.append(arr[arr[i - 1]] + arr[i - arr[i - 1]])
    return ' '.join(str(item) for item in arr[1:])
def sorted_d(in_dict): """Sort dicts in alphabetical order; aids in summarizing results from different tools prior to reporting to external agency.""" sortednames = sorted(in_dict.keys(), key=lambda x: x.lower()) out_dict = {k: in_dict[k] for k in sortednames} return out_dict
def determine_duration_and_scale_factor_from_parameters(chirp_mass): """Determine appropriate duration and roq scale factor from chirp mass Parameters ---------- chirp_mass: float The chirp mass of the source (in solar masses) Returns ------- duration: int roq_scale_factor: float """ roq_scale_factor = 1 if chirp_mass > 90: duration = 4 roq_scale_factor = 4 elif chirp_mass > 35: duration = 4 roq_scale_factor = 2 elif chirp_mass > 13.53: duration = 4 elif chirp_mass > 8.73: duration = 8 elif chirp_mass > 5.66: duration = 16 elif chirp_mass > 3.68: duration = 32 elif chirp_mass > 2.39: duration = 64 elif chirp_mass > 1.43: duration = 128 elif chirp_mass > 0.9: duration = 128 roq_scale_factor = 1 / 1.6 else: duration = 128 roq_scale_factor = 1 / 2 return duration, round(1 / roq_scale_factor, 1)
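# Illustrative usage sketch (not part of the original snippet): exercising a few of the
# chirp-mass tiers above. The second return value is round(1 / roq_scale_factor, 1).
for chirp_mass in (40.0, 10.0, 1.0):
    print(chirp_mass, determine_duration_and_scale_factor_from_parameters(chirp_mass))
# 40.0 -> (4, 0.5), 10.0 -> (8, 1.0), 1.0 -> (128, 1.6)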
def list_to_eslist(pylist): """ Converts python list of strings to a string of that list that can be used in ES. Args: pylist (list): Each element is a str. Returns: str: A representation of the list with each item in double quotes. """ eslist = '[ ' for item in pylist[:-1]: eslist += '"' + item + '", ' eslist += '"' + pylist[-1] + '" ]' return eslist
def isdtype(x): """ isdtype(x) -> bool Returns whether an instance is a data type of numpy or pytorch or tensorflow etc. . Args: x (any): the input variable. Example:: >>> isdtype(np.int32) True >>> isdtype(torch.float64) True >>> isdtype(int) False """ return 'dtype' in repr(type(x)).lower()
def upload_file(word_id, body=None): # noqa: E501 """uploads an image # noqa: E501 :param word_id: ID of word to update :type word_id: str :param body: :type body: dict | bytes :rtype: ApiResponse """ """ # READY BUT COMMENTED OUT UNTIL SECURITY IS IMPLEMENTED" if connexion.request.is_json: body = Object.from_dict(connexion.request.get_json()) # noqa: E501 try: doc_ref = config.words_ref.document(word_id) name = doc_ref.get().to_dict()['name'] destination_blob_name = 'images/' + name except: print('Cannot get the filename') return None # Uploads the file to the bucket. # Instantiates a client storage_client = storage.Client() bucket = storage_client.bucket('words-storage-romank') blob = bucket.blob(destination_blob_name) # Upload the file blob.upload_from_string(body) # Update the filename in the Firestore database doc_ref.update({'imageurl': name}) """ return True
def toBool(value): """Convert any type of value to a boolean. The function uses the following heuristic: 1. If the value can be converted to an integer, the integer is then converted to a boolean. 2. If the value is a string, return True if it is equal to 'true'. False otherwise. Note that the comparison is case insensitive. 3. If the value is neither an integer or a string, the bool() function is applied. >>> [toBool(x) for x in range(-2, 2)] [True, True, False, True] >>> [toBool(x) for x in ['-2', '-1', '0', '1', '2', 'Hello']] [True, True, False, True, True, False] >>> toBool(object()) True >>> toBool(None) False """ try: return bool(int(value)) except (ValueError, TypeError): return value.lower() in ['true'] if isinstance(value, str) else bool(value)
def _join_codes(fg, bg): """Join `fg` and `bg` with ; and surround with correct esc sequence.""" colors = ';'.join(filter(lambda c: len(c) > 0, (fg, bg))) if colors: return '\x1b[' + colors + 'm' return ''
def is_stable(generation, max_rounds, prev, curr): """ Function: is_stable ------------------- Checks whether or not the epidemic has stabilized. """ if generation <= 1 or prev is None: return False if generation == max_rounds: return True for node, color in curr.items(): if not prev[node] == curr[node]: return False return True
def min_sec(secs):
    """
    Takes a duration in seconds (e.g. the difference of two epoch timestamps)
    and returns it as a 'minutes:seconds' string.

    :param secs: Duration in seconds

    >>> import time
    >>> start = time.time()
    >>> # Wait a few seconds
    >>> finish = time.time()
    >>> min_sec(finish - start)
    '0:11'
    """
    secs = int(secs)
    return '%d:%02d' % (secs // 60, secs % 60)
def get_range(value):
    """
    Find the max/min ranges for a Nagios range comparison.

    Nagios handles ranges by accepting them as strings. The current rules
    according to the API assume [@]start:end and include:

    * If start is empty, start is 0.
    * If start is ~, start is -infinity.
    * Alert if value is <= start, or >= end.
    * If prepended with @, alert if value is between start and end.

    The fact 0 and : are optional makes it somewhat confusing and counter-
    intuitive; most plugin authors don't know that <= 0 should trigger alerts.

    :param value: Nagios-compatible range string to parse.

    :return tuple: (bottom, top, invert, raw) -- the min and max boundaries
        for the range, whether or not to invert the match, and the raw input
        string.
    """
    raw = value

    # If we find a '@' at the beginning of the range, we should invert
    # the match.
    invert = False
    if value.find('@') == 0:
        invert = True
        value = value.lstrip('@')

    # The : separates a max/min range. If it exists, there is at least
    # a minimum. We'll start our ranges at zero and infinity so we don't
    # have to worry about complex testing logic.
    bottom = 0
    top = float('infinity')
    if value.find(':') > 0:
        (bottom, top) = value.split(':')
        if top == '':
            top = float('infinity')
        else:
            top = float(top)
        if bottom == '':
            bottom = 0
        elif bottom == '~':
            bottom = -float('infinity')
        else:
            bottom = float(bottom)
    else:
        top = float(value)

    return (bottom, top, invert, raw)
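# Illustrative usage sketch (assumes get_range() above is in scope): a few typical
# Nagios range strings and the (bottom, top, invert, raw) tuples they parse to.
print(get_range("10"))      # (0, 10.0, False, '10')
print(get_range("5:"))      # (5.0, inf, False, '5:')
print(get_range("~:10"))    # (-inf, 10.0, False, '~:10')
print(get_range("@5:10"))   # (5.0, 10.0, True, '@5:10')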
def check_almost_complete_rec(node, depth, height): """ Check if given tree is almost complete tree recursively. """ if depth >= height - 1: return True if node.left is None or node.right is None: return False return check_almost_complete_rec(node.left, depth + 1, height) \ and \ check_almost_complete_rec(node.right, depth + 1, height)
def _succ(p, l): """ retrieve the successor of p in list l """ pos = l.index(p) if pos + 1 >= len(l): return l[0] else: return l[pos + 1]
def Ht(vesels):
    """
    *Distribution function of Hematocrit*

    Implementing the proper hematocrit distribution is still pending;
    for now a constant fraction of 0.45 is used.
    """
    htc = vesels * .45
    return htc
def simplify_logger_name(logger_name: str): """Simple function to reduce the size of the loggers name. Parameters: logger_name (str): Name of the logger to simplify. e.g path.to.my_module Examples: simplify_logger_name('path.to.my_module') = 'p.t.mm' """ modules = [module.split('_') for module in logger_name.split('.')] simplified = '.'.join([''.join(element[0] for element in elements) for elements in modules]) return simplified
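# Illustrative usage sketch (assumes simplify_logger_name() above is in scope): each
# dotted segment is reduced to the initials of its underscore-separated parts.
print(simplify_logger_name('path.to.my_module'))         # p.t.mm
print(simplify_logger_name('my_pkg.data_loader.utils'))  # mp.dl.u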
def out_of_china(lng, lat): """No offset when coordinate out of China.""" if lng < 72.004 or lng > 137.8437: return True if lat < 0.8293 or lat > 55.8271: return True return False
def _cost(action): """ action is (movement_str, (forward, h_angle, v_angle)) """ forward, h_angle, v_angle = action[1] cost = 0 if forward != 0: cost += 1 if h_angle != 0: cost += 1 if v_angle != 0: cost += 1 return cost
def int2roman(num):
    """ Convert an integer to a Roman numeral """
    if not isinstance(num, int):
        raise TypeError("expected integer, got %s" % type(num))
    if not 0 < num < 4000:
        raise ValueError("Argument must be between 1 and 3999")
    ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
    nums = ('M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I')
    result = ""
    for i, n in zip(ints, nums):
        count = num // i
        result += n * count
        num -= i * count
    return result
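# Illustrative usage sketch (assumes int2roman() above is in scope): a couple of
# conversions, plus the guard against out-of-range input.
print(int2roman(1994))   # MCMXCIV
print(int2roman(2024))   # MMXXIV
try:
    int2roman(4000)
except ValueError as exc:
    print(exc)           # Argument must be between 1 and 3999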
def file_playable(pathname): """ Returns True if 'pathname' is playable by liquidsoap. False otherwise. """ #currently disabled because this confuses inotify.... return True #remove all write permissions. This is due to stupid taglib library bug #where all files are opened in write mode. The only way around this is to #modify the file permissions os.chmod(pathname, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH) # when there is an single apostrophe inside of a string quoted by # apostrophes, we can only escape it by replace that apostrophe with # '\''. This breaks the string into two, and inserts an escaped # single quote in between them. command = ("airtime-liquidsoap -c 'output.dummy" + \ "(audio_to_stereo(single(\"%s\")))' > /dev/null 2>&1") % \ pathname.replace("'", "'\\''") return_code = subprocess.call(command, shell=True) #change/restore permissions to acceptable os.chmod(pathname, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | \ stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH) return (return_code == 0)
def _batch_aggregation(batch_loss_list): """Returns the aggregated loss.""" loss_sum = 0. weight_sum = 0. for loss, count in batch_loss_list: loss_sum += loss weight_sum += count return loss_sum / weight_sum
def getid(unit_id): """Utility function to return the unit number.""" return unit_id.split("/")[1]
def parse_accelerators(accelerators): """Transforms string that specifies accelerators to dictionary. The string that is parsed has the following format: n1device1,n2device2,n3device3,... and is transformed to the dictionary: {'device1': n1, 'device2': n2, 'device3': n3, ...} Example: 2/GPU:0,2/GPU:1 --> {'/GPU:0': 2, '/GPU:1': 2} """ if accelerators is None: return None def read_digit(x): i = 0 while x[i].isdigit(): i += 1 return x[i:], int(x[:i]) acc_dict = {} for entry in accelerators.split(","): device, n = read_digit(entry) if device in acc_dict: acc_dict[device] += n else: acc_dict[device] = n return acc_dict
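# Illustrative usage sketch (assumes parse_accelerators() above is in scope), following
# the docstring's format; counts for repeated devices are summed.
print(parse_accelerators("2/GPU:0,2/GPU:1"))   # {'/GPU:0': 2, '/GPU:1': 2}
print(parse_accelerators("1/GPU:0,3/GPU:0"))   # {'/GPU:0': 4}
print(parse_accelerators(None))                # None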
def extractMSG(msg): """To extract the message sent in an unprocessed message. :param msg: unprocessed message :type msg: str :return: Processed message :rtype: str :Example: .. code-block:: python extractMSG(x) *x is an unprocessed message* """ try: return msg.split('"text D(ib) Va(t)">')[1].split('</span></div></div>')[0] except: try: return msg.split('Mb(-10px)--ml">')[1].split('</span></div></div>')[0] except: return ''
def add_values_in_dict(sample_dict, key, list_of_values): """Append multiple values to a key in the given dictionary""" if key not in sample_dict: sample_dict[key] = list() sample_dict[key].extend(list_of_values) return sample_dict
def top_k_frequent(words, k): """ Input: words -> List[str] k -> int Output: List[str] """ # Your code here # use the python built in dictionary dictionary = dict() # iterate over each word in the words list for word in words: # if the word is in our dictionary if word in dictionary: # then increment the count of that word dictionary[word] += 1 # otherwise else: # set the count of that word to 1 dictionary[word] = 1 # sort the words / keys in our dictionary in descending order word_list = sorted(dictionary, key=lambda x: (-dictionary[x], x)) # return a slice of the sorted words from start of list up to the k - 1 element return word_list[:k]
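# Illustrative usage sketch (assumes top_k_frequent() above is in scope): ties are
# broken alphabetically because the sort key is (-count, word).
words = ["i", "love", "leetcode", "i", "love", "coding"]
print(top_k_frequent(words, 2))   # ['i', 'love']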
def avp_from_rhmax(svp_tmin, rh_max): """ Estimate actual vapour pressure (*e*a) from saturation vapour pressure at daily minimum temperature and maximum relative humidity Based on FAO equation 18 in Allen et al (1998). :param svp_tmin: Saturation vapour pressure at daily minimum temperature [kPa]. Can be estimated using ``svp_from_t()``. :param rh_max: Maximum relative humidity [%] :return: Actual vapour pressure [kPa] :rtype: float """ return svp_tmin * (rh_max / 100.0)
def key_output(job_name: str) -> str: """Returns the output ID for the cache key""" return f"key-{job_name}"
def skip_dict(args): """Skip specific files""" if args.file: return {} return { "skip_script": args.skip_script, "skip_local": args.skip_local, "skip_access": args.skip_access, }
def _contains_hidden_files(n): """Return True if n contains files starting with a '.', False otherwise.""" for sub in n: name = sub.name if len(name)>1 and name.startswith('.'): return True return False
def manhattanDist(cell1, cell2):
    """
    Manhattan distance calculation suitable for use as a heuristic function.

    Note: for some cost functions, this may not be admissible! Admissibility
    requires that the heuristic never overestimate the true cost,
    i.e. h(n) <= h*(n) for all n.
    """
    return abs(cell1[0] - cell2[0]) + abs(cell1[1] - cell2[1])
def sort_diffs(diff): """Sort diffs so we delete first and create later""" if diff["action"] == "delete": return 1 else: return 2
def _check_histories(history1, history2): """Check if two histories are the same.""" if history1.replace("\n", "").replace(" ", "") == history2.replace( "\n", "" ).replace(" ", ""): return True else: return False
def _gc_commands_to_rmfiles(hsize, files): """Return the history files to remove to get under the command limit.""" rmfiles = [] n = 0 ncmds = 0 for ts, fcmds, f in files[::-1]: if fcmds == 0: # we need to make sure that 'empty' history files don't hang around rmfiles.append((ts, fcmds, f)) if ncmds + fcmds > hsize: break ncmds += fcmds n += 1 rmfiles += files[:-n] return rmfiles
def get_recipients(obj): """ Given obj, return obj's recipients. """ if not obj: return [] model_name = obj._meta.verbose_name if model_name == "contact": return [(obj.first_name, obj.email)] elif model_name == "estimate": return [(i.first_name, i.email) for i in obj.contacts.all()] elif model_name == "note": return [(i.first_name, i.email) for i in obj.contacts.all()] elif model_name == "project": return [ (i.first_name, i.email, i.profile.notifications) for i in obj.team.all() ] elif model_name == "time": return [("Alex", "aclark@aclark.net")]
def calc_cigar_bit(cigar_op): """Given a cigar operation integer, return the cigar bit.""" # taken from htslib bam_cigar_type function return 0x3c1a7 >> (cigar_op << 1) & 3
def _use_regex(on, **kwargs): """ Colour and change the text for simple/regex search """ text = "simple search" if not on else "regular expression" classname = "colour-off" if not on else "colour-on" return text, classname
def parser_CA_identifier_Descriptor(data,i,length,end): """\ parser_CA_identifier_Descriptor(data,i,length,end) -> dict(parsed descriptor elements). This descriptor is not parsed at the moment. The dict returned is: { "type": "CA_identifier", "contents" : unparsed_descriptor_contents } (Defined in ETSI EN 300 468 specification) """ return { "type" : "CA_identifier", "contents" : data[i+2:end] }
def toJsString(s): """ This function converts a string to the Javascript literal syntax """ # [MTAB-366] # If we have unicode entities in this attribute value we must replace # the corresponding unicode character with the javascript escape sequence result = "\"" for c in s: if c=='\"': result += "\\\"" elif ord(c)>=32 and ord(c)<=126: result += c else: result += "\\u"+("%0.4X" % ord(c)) result += "\"" return result
def nuget_deps_helper(frameworks, packages): """Convert frameworks and packages into a single list that is consumable by the nuget_fetch deps attribute A list item is in the format "<package_name>/<version>:tfm,tfm,tfm". For a list of frameworks, we'll just omit the left half of the item. """ res = [",".join(frameworks)] for (pkg, tfms) in packages.items(): res.append(pkg + ":" + ",".join(tfms)) return res
def _process_index(item): """Process and normalize the index.""" if not isinstance(item, (slice, tuple)): if not isinstance(item, int): raise ValueError('The index should be a integer.') item = (item,) if not isinstance(item, tuple): item = tuple([item]) starts, sizes = [], [] for i, elem in enumerate(item): if isinstance(elem, slice): if elem.start is None: starts.append(0) else: starts.append(elem.start) if elem.stop is None: sizes.append(-1) else: sizes.append(elem.stop - starts[-1]) if sizes[-1] == 0: raise ValueError( 'The starts and ends of axis {} can not be equal' ', got {}:{}.'.format(i, starts[-1], elem.stop)) if elem.step is not None: raise NotImplementedError elif isinstance(elem, int): starts.append(elem) sizes.append(0) else: raise TypeError('Unsupported index type: {}'.format(type(elem))) return starts, sizes
def serialize_biomass_table_v2(analysis, type): """Convert the output of the biomass_loss analysis to json""" rows = [] for year in analysis.get('biomassLossByYear'): rows.append({'year': year, 'biomassLossByYear': analysis.get('biomassLossByYear', None).get(year, None), 'cLossByYear': analysis.get('cLossByYear', None).get(year, None), 'co2LossByYear': analysis.get('co2LossByYear', None).get(year, None), 'areaHa': analysis.get('area_ha', None) }) return { 'id': None, 'type': type, 'attributes': rows }
def vector_sub(vector1, vector2): """ Subtracts one vector to another :param vector1: list(float, float, float) :param vector2: list(float, float, float) :return: list(float, float, float) """ return [vector1[0] - vector2[0], vector1[1] - vector2[1], vector1[2] - vector2[2]]
def football_points(win: int, draw: int, loss:int): """Calculate the number of points for a football team.""" return (win * 3 + draw)
def binaryComplement(binaryString: str):
    """Calculate the complement of a binary number.

    Args:
        binaryString (str): string that contains only `1` and `0`.

    Raises:
        ValueError: if the input string contains any character other than `0` or `1`.

    Returns:
        str: complement of the input binary number.
    """
    Answer = []
    for i in binaryString:
        if i not in '01':
            raise ValueError("it's not binary!")
        if i == '1':
            Answer.append('0')
        else:
            Answer.append('1')
    return ''.join(Answer)
def time_serializer(a): """ Simply rounds a floating point value for its input, as we must store dates and times as integers in SQLite. """ return round(float(a))
def find_default_image(images):
    """Search the list of registered images for a default image, if any.

    Return the image if found, otherwise None.
    """
    for img in images:
        if img['default']:
            print("The default image is currently: '%s'" % img['imagename'])
            return img
    return None
def combine_prob(p1,p2): """Returns the probability that event 1 or 2 happens""" return 1. - (1-p1)*(1-p2)
def permute(n, seq, permutation): """ This is the code to permute the solution without having to perform the 10000 instruction sequence. """ for i in range(n): seq = seq[permutation] return seq
def res_compare_hours(dict_res): """Set the alexa response of CompareHours from the dict obtained from the API requests""" result = f"Il tragitto da " \ f"{dict_res['first_route']['start']} a " \ f"{dict_res['first_route']['end']} prevede " \ f"{dict_res['first_route']['duration']} di viaggio partendo alle " \ f"{dict_res['first_route']['hour']}, " \ f"{dict_res['second_route']['duration']} partendo alle " \ f"{dict_res['second_route']['hour']}, " \ f"{dict_res['third_route']['duration']} partendo alle " \ f"{dict_res['third_route']['hour']}, " \ f"{dict_res['fourth_route']['duration']} partendo alle " \ f"{dict_res['fourth_route']['hour']}" return result
def addon(lists, togo, target, size): """Extends each of the lists by a single integer, with the aim of creating lists whose members sum to the target value. Each list contains non-decreasing elements, so the added number must be at least as large as the final element of the input list. The size value determines the largest value that may be added to a list, and the togo value indicates how many more elements will be added to the input lists. Args: lists (list): contains elements which are lists of integers in the range 1 - 9. All members are of equal length. togo (int): how many more elements will be added after this. target (int): the number to which the elements of each list will sum when completed. size (int): the largest integer allowed in a list. Returns: (list): the list of lists. """ return [s + [i] for s in lists for i in range(s[-1],size+1) if (sum(s) + (togo + 1)*i) <= target if (i + sum(s) + togo*size) >= target]
def get_compiler_only(s): """Get only the compiler (removing ccache) from the compiler string ($CC, $CXX)""" l = s.split() return ' '.join( [ i for i in l if i != "ccache" ] )
def to_unicode(text): """Return *text* as a (unicode) str. *text* can be str or bytes. A bytes object is assumed to be encoded as UTF-8. If *text* is a str object it is returned unchanged. """ if isinstance(text, str): return text try: return text.decode("utf-8") except AttributeError: raise TypeError("expected unicode string, got %s value %s" % (type(text), text))
def arrays_avg(values_array, weights_array=None):
    """
    Computes the mean of the elements of the array.

    Parameters
    ----------
    values_array : array.
        The numbers used to calculate the mean.
    weights_array : array, optional, default None.
        Used to calculate the weighted average, indicates the weight of
        each element in the array (values_array).

    Returns
    -------
    result : float.
        The mean of the array elements.
    """
    n = len(values_array)

    if weights_array is None:
        weights_array = [1] * n
    elif len(weights_array) != n:
        raise ValueError(
            "values_array and weights_array must have the same number of rows"
        )

    result = 0
    for i, j in zip(values_array, weights_array):
        result += i * j

    return result / n
def nonEmpty(v): """ If v is a container (tuple, list or dictionary), return None if it is empty, otherwise return v itself. """ if isinstance(v,(tuple,list,dict)): if len(v) == 0: return None return v
def diff_cpu_data(prev, cur, ticks_elapsed): """Calculate the different between two sets of cpu data. A new set with updated data is returned. """ if not prev or not cur: return None diff_cpus = {} for cpu_name, prev_cpu in prev.iteritems(): # If a cpu is not included in both sets, skip it. if cpu_name not in cur: continue cur_cpu = cur[cpu_name] diff_cpu = {'name': cpu_name} diff_cpus[cpu_name] = diff_cpu for column_name in prev_cpu: if column_name == 'name' or column_name not in cur_cpu: continue try: # This calculates the amount of time spent # doing a cpu usage type, in percent. # The diff value (cur-prev) is the amount of # ticks spent on this task since the last # reading, divided by the total amount of ticks # elapsed. diff_cpu[column_name] = float(int(cur_cpu[column_name]) - int(prev_cpu[column_name])) / ticks_elapsed * 100 except ValueError: pass return diff_cpus
def calc_mbar(m_0, m_curr): """ Calculate the ch4 concentration average between the historical ch4 and current ch4 concentrations Parameters ---------- m_0 : float Historical ch4 concentration, in ppm m_curr : float Current ch4 concentration, in ppm Return ------ m_bar : float Averaged concentration, in W m^-2 ppb^-1 """ m_bar = 0.5 * (m_0 + m_curr) return m_bar
def recite(start_verse, end_verse): """ Returns the lyrics to 'The Twelve Days of Christmas' beginning from start_verse,and ending at end_verse """ counting_string = " twelve Drummers Drumming, eleven Pipers Piping, ten Lords-a-Leaping, nine Ladies Dancing, eight Maids-a-Milking, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, a Partridge in a Pear Tree" # I have just copied the last line of the song. I have removed the `and`from the sentence, I will manipulate it later numbers_list = ["first", "second", "third", "fourth", "fifth", "sixth", "seventh", "eighth", "ninth", "tenth", "eleventh", "twelfth"] # I need this for the starting portion of every line counting_list = counting_string.split(",") # this turn all the one ..., two ..., etc into a list # next I reverse it to get it in the ascending order counting_list.reverse() # return_verse is the list of all return_verse = [] # double looping to print all the verses # putting -1 in the for statement means I don't have to worry about Python indexing for i in range(start_verse-1, end_verse): verse_ith = f"On the {numbers_list[i]} day of Christmas my true love gave to me:" # We start very line of the lyrics this way # i here represents the line number in the lyrics if i == 0: # there is no `and` in the first line of the lyrics, so I am treating it as a separate case verse_ith = f"{verse_ith}{counting_list[0]}." else: j = i while j > 0: # starting from the ith count in the list, adding them to our string verse_ith verse_ith = f"{verse_ith}{counting_list[j]}," j -= 1 # there is an `and` from second line onwards in the lyrics verse_ith = f"{verse_ith} and{counting_list[0]}." return_verse.append(verse_ith) return return_verse
def get_dict_optional_value(d,keys_to_try_in_order, default_value=None): """ Tries each key in order, if not value is found, returns default_value """ for key in keys_to_try_in_order: if key in d and d.get(key): return d[key] return default_value
def read_file(filename: str) -> str:
    """Read file

    Nothing special

    Args:
        filename (str): File to read

    Returns:
        str: content of file
    """
    try:
        with open(filename, "r", encoding='utf8') as file_object:
            content = file_object.read()
    except FileNotFoundError:
        print(f"{filename} not found.")
        content = ""
    return content
def pretty_name(name): """Convert 'first_name' to 'First name'.""" if not name: return '' return name.replace('_', ' ').capitalize()
def check_line2(line): """Parse line password policy line and see if it is valid example line: 6-9 z: qzzzzxzzfzzzz """ line = line.strip() indices, target_char, password = line.split(" ") target_char = target_char.strip(":") index1, index2 = tuple(int(x) - 1 for x in indices.split("-")) return ( password[index1] == target_char or password[index2] == target_char ) and not (password[index1] == password[index2])
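# Illustrative usage sketch (assumes check_line2() above is in scope). The policy matches
# the "position" rule of Advent of Code 2020, day 2 part two, which these sample lines
# resemble: exactly one of the two 1-based positions must hold the target character.
print(check_line2("1-3 a: abcde"))      # True  (position 1 is 'a', position 3 is not)
print(check_line2("1-3 b: cdefg"))      # False (neither position holds 'b')
print(check_line2("2-9 c: ccccccccc"))  # False (both positions hold 'c')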
def is_lambda(function): """ Checking function to be a lambda function. :param function: function to be checked. :returns: True when given function is a lambda function. >>> square = lambda value: value**2 >>> is_lambda(square) True >>> def square2(value): return value**2 >>> is_lambda(square2) False >>> is_lambda("hello world") False """ LAMBDA = lambda: 0 return isinstance(function, type(LAMBDA)) and function.__name__ == "<lambda>"
def dumb_input(instring): """ Mock input function. :param instring: Does nothing. :type instring: str """ print(instring) return "snek"
def path_to_location(path): """Messages take a `repeated string` of path components.""" if path.startswith('/'): # `path` is already meant to be relative to repo root path = path[1:] return path.split('/')
def generate_month_work_days(month_first_work_day, month_days, id_picker):
    """
    Generate the list of working days based on the first day configured by
    the user. The id_picker specifies which day of the week (within the
    whole week) the month_first_work_day falls on.
    """
    work_days = []
    id = id_picker
    for count in range(month_first_work_day - 1, len(month_days)):
        if id <= 5:
            work_days.append(month_days[count])
            id += 1
        elif id == 6:
            # skip a free day
            id += 1
        elif id == 7:
            # skip a free day
            # reset the index
            id = 1
    return work_days
def get_height(n: int) -> int: """Returns height of rangoli.""" return 2*n - 1
def optional(*typs): """ Convenience function to specify that a value may be of any of the types in type 'typs' or None """ return tuple(typs) + (type(None),)
def avg_and_sort_contributions(contrib_dict, feature_val_dict):
    """
    Get the mean value (of data for a predictor) and contribution from each
    predictor and sort.

    Args:
    -----------
        contrib_dict: dictionary of per-key contribution data to process
        feature_val_dict: dictionary of per-key feature-value data with
            corresponding indices

    Return:
        a dictionary of mean contributions and a dictionary of mean values
    """
    avg_contrib_dict = {}
    avg_feature_val_dict = {}

    # for hits, misses, etc.
    for key in contrib_dict.keys():
        contrib_df = contrib_dict[key]
        feature_val_df = feature_val_dict[key]

        contrib_series = contrib_df.mean(axis=0)
        feature_val_series = feature_val_df.mean(axis=0)
        feature_val_series["Bias"] = 0.0

        indices = contrib_series.abs().sort_values(ascending=False).index

        sorted_contrib_df = contrib_series.reindex(indices)
        sorted_feature_val_df = feature_val_series.reindex(indices)

        top_contrib = {
            var: contrib_series[var] for var in list(sorted_contrib_df.index)
        }
        top_values = {
            var: feature_val_series[var] for var in list(sorted_feature_val_df.index)
        }

        avg_contrib_dict[key] = top_contrib
        avg_feature_val_dict[key] = top_values

    return avg_contrib_dict, avg_feature_val_dict
def get_filecontent(filename): """Get file data as text.""" with open(filename) as fo: return fo.read()
def same_modules(s1, s2): """Compare two module strings where submodules of an illegal parent module should also be illegal. I.e. blacklisting 'foo.bar' should also make 'foo.bar.baz' illegal. The first argument should 'encompass' the second, not the other way around. I.e. passing same_modules('foo', 'foo.bar') will return True, but same_modules('foo.bar', 'foo') will not. """ modules1 = s1.split(".") modules2 = s2.split(".") return ( len(modules1) <= len(modules2) and all(m1 == m2 for (m1, m2) in zip(modules1, modules2)) )
def calc_production(wind):
    """Calculates the production for a household depending on wind data.

    Args:
        wind: Wind data from SMHI that has noise added to it. Wind is in m/s.

    Returns:
        int: Production in kWh.
    """
    return int(wind * 16.5)
def getFrameworkSet(): """Return the set of supported frameworks """ return set(("tk", "twisted"))
def bytes_to_unicode(ob): """ Byte to unicode with exception... Only for py3, will be removed on future version... """ t = type(ob) if t in (list, tuple): try: l_ = [str(i, 'utf-8') if isinstance(i, bytes) else i for i in ob] except UnicodeDecodeError: l_ = [i for i in ob] # keep as bytes l_ = [bytes_to_unicode(i) if type(i) in ( list, tuple, dict) else i for i in l_] ro = tuple(l_) if t is tuple else l_ elif t is dict: byte_keys = [i for i in ob if isinstance(i, bytes)] for bk in byte_keys: v = ob[bk] del(ob[bk]) try: ob[str(bk, 'utf-8')] = v except UnicodeDecodeError: ob[bk] = v # keep as bytes for k in ob: if isinstance(ob[k], bytes): try: ob[k] = str(ob[k], 'utf-8') except UnicodeDecodeError: ob[k] = ob[k] # keep as bytes elif type(ob[k]) in (list, tuple, dict): ob[k] = bytes_to_unicode(ob[k]) ro = ob else: ro = ob return ro
def max_sub_array(nums): """ Returns the max subarray of the given list of numbers. Returns 0 if nums is None or an empty list. Time Complexity: O(n) Space Complexity: O(1) """ if not nums: return 0 if max(nums) < 0: return max(nums) max_seen = 0 max_ending_here = 0 for num in nums: max_ending_here += num if max_ending_here < 0: max_ending_here = 0 if max_seen < max_ending_here: max_seen = max_ending_here return max_seen
def parse_configurations(payload): """ Parses the configurations to perform from the given payload and returns a list containing tuples with the ID of the sensor to configure and the value to set. Args: payload (list): array of bytes to parse. Returns: A list containing tuples with the ID of the sensor to configure and the value to set. """ # Initialize variables. index = 1 configurations = [] # Get the configurations from the payload. num_configurations = payload[index] index += 1 for i in range(num_configurations): sensor_id = payload[index] index += 1 value = int.from_bytes(bytearray(payload[index:index + 4]), "big") index += 4 configurations.append((sensor_id, value)) return configurations
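# Illustrative usage sketch (assumes parse_configurations() above is in scope) with a
# hand-built payload. Byte 0 is assumed to be a header/command byte that the parser skips;
# byte 1 is the configuration count, and each entry is a 1-byte sensor ID followed by a
# 4-byte big-endian value.
payload = [0x00, 0x02,          # header byte, 2 configurations
           0x01, 0, 0, 0, 100,  # sensor 1 -> 100
           0x02, 0, 0, 1, 0]    # sensor 2 -> 256
print(parse_configurations(payload))   # [(1, 100), (2, 256)]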
def snake_to_camel(name): """Converts Python Snake Case to Zscaler's lower camelCase.""" # Edge-cases where camelCase is breaking edge_cases = { "routable_ip": "routableIP", "is_name_l10n_tag": "isNameL10nTag", "name_l10n_tag": "nameL10nTag", "surrogate_ip": "surrogateIP", "surrogate_ip_enforced_for_known_browsers": "surrogateIPEnforcedForKnownBrowsers", } ret = edge_cases.get(name, name[0].lower() + name.title()[1:].replace("_", "")) return ret
def insensitive_glob(pattern_glob, recursive=False): """This function is the glob.glob() function that is insensitive to the case. Args: pattern_glob: sensitive-to-the-case pattern recursive: recursive parameter for glob.glob() Returns: insensitive-to-the-case pattern """ from glob import glob def either(c): return "[%s%s]" % (c.lower(), c.upper()) if c.isalpha() else c return glob("".join(map(either, pattern_glob)), recursive=recursive)
def join_str(the_list, none_value='-'): """ :return: string """ if not the_list: return '' result = [] for item in the_list: if not item: result.append(none_value) elif type(item) is str: result.append(item) else: result.append(item.__str__()) return ', '.join(result)
def print_sentences(text):
    """Prints each item of the given sequence of sentences, numbered from 1,
    and returns the sequence unchanged.
    """
    sentences = text
    for i, token in enumerate(sentences):
        print('Sentence #%d: %s' % ((i + 1), token))
    return sentences
def get_answers(question): """extract unique answers from question parses.""" answers = set() for parse in question["Parses"]: for answer in parse["Answers"]: answers.add((answer["AnswerArgument"], answer["EntityName"])) return answers
def _BackslashEscape(s):
    """Double up backslashes.

    Useful for strings about to be globbed and strings about to be IFS
    escaped.
    """
    # Similar to GlobEscape and splitter.Escape().
    return s.replace('\\', '\\\\')
def reduce(f, x, iv=None): """Apply associative function f of two arguments to the items of iterable x. The applications of f are arranged in a binary tree of logarithmic depth, thus limiting the overall round complexity of the secure computation. In contrast, Python's functools.reduce() higher-order function arranges the applications of f in a linear chain (a binary tree of linear depth), and in this case f is not required to be associative; the arguments to f may even be of different types. If iv is provided, it is placed before the items of x (hence effectively serves as a default when x is empty). If iv is not given and x contains only one item, that item is returned. """ x = list(x) if iv is not None: x.insert(0, iv) while len(x) > 1: x[len(x)%2:] = (f(x[i], x[i+1]) for i in range(len(x)%2, len(x), 2)) return x[0]
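# Illustrative usage sketch (assumes the reduce() defined above is in scope; note it
# shadows functools.reduce). For associative operations the tree-shaped evaluation
# gives the same result as a left-to-right fold.
import operator
print(reduce(operator.add, [1, 2, 3, 4, 5]))       # 15
print(reduce(operator.add, ["a", "b", "c", "d"]))  # abcd
print(reduce(operator.mul, [], iv=1))              # 1 (iv serves as the default for empty input)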
def create_output(instance, item): """Create Terraform Module for Output the Defined Parameters.""" value = "${module.%s.%s}" % (instance, item) tf_output = {"value": [value]} return tf_output
def string_concatenator(string1, string2, separator):
    """
    Function to concatenate two strings.
    Function taken from A3.
    Inputs: string, string, string
    Outputs: string
    """
    output = string1 + separator + string2
    return output
def mean(numbers):
    """ Calculates the mean of an array """
    return float(sum(numbers)) / max(len(numbers), 1)
def intersection(table1, table2): """ Established intersection function to perform the intersection set operation on tables 1 and 2. Table 3 variable is established to represent the unique rows that appear in both table 1 and table 2. :param table1: a table (a List of Lists) :param table2: a table (a List of Lists) :return: table3: a table with the header from table1/table2 and unique rows that appear in both tables :raises: MismatchedAttributesException: if tables table1 and table2 don't have the same attributes """ table3 = [] i = 0 j = 0 if table1[0] != table2[0]: raise Exception("MismatchedAttributesException") else: while i < len(table1): while j < len(table2): if table1[i] == table2[j]: table3.append(table2[j]) j += 1 else: j += 1 j = 0 i += 1 if len(table3) == 1: table3 = None return table3
def __remove_duplicate_chars(string_input, string_replace): """ Remove duplicate chars from a string. """ while (string_replace * 2) in string_input: string_input = \ string_input.replace((string_replace * 2), string_replace) return string_input
def split_reaches(list_of_reaches, new_reach_pts): """splits l into sections where new_reach_pts contains the starting indices for each slice""" new_reach_pts = sorted(new_reach_pts) sub_list = [list_of_reaches[i1:i2] for i1, i2 in zip(new_reach_pts, new_reach_pts[1:])] last_index = new_reach_pts[-1] sub_list.append(list_of_reaches[last_index:]) return sub_list
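# Illustrative usage sketch (assumes split_reaches() above is in scope): the split indices
# start each new slice, and the tail from the last index onward becomes the final segment.
reaches = list(range(10))
print(split_reaches(reaches, [0, 3, 7]))   # [[0, 1, 2], [3, 4, 5, 6], [7, 8, 9]]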
def get_argnames(func): """Get the argument names from a function.""" # Get all positional arguments in __init__, including named # and named optional arguments. `co_varnames` stores all argument # names (including local variable names) in order, starting with # function arguments, so only grab `co_argcount` varnames. code = func.__code__ argcount = code.co_argcount return code.co_varnames[:argcount]
def unit_to_pc_linear(tensor): """Input assumed to be unit range. Linearly scales to -1 <= x <= 1""" return (tensor * 2.0) - 1.0 # Theoretical range limits -1 : 1
def human_readable_number(number: float) -> str: """Print a large number in a readable format. Return a readable format for a number, e.g. 123 milions becomes 123M. Args: number: a float to be printed in human readable format. Returns: readable_number: a string containing the formatted number. """ number = float('{:.3g}'.format(number)) magnitude = 0 while abs(number) >= 1000 and magnitude < 4: magnitude += 1 number /= 1000.0 readable_number = '{}{}'.format('{:f}'.format(number).rstrip('0').rstrip('.'), ['', 'K', 'M', 'B', 'tn'][magnitude]) return readable_number
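# Illustrative usage sketch (assumes human_readable_number() above is in scope): numbers
# are first rounded to three significant figures, then scaled into K/M/B/tn suffixes.
print(human_readable_number(123_456_789))   # 123M
print(human_readable_number(1234))          # 1.23K
print(human_readable_number(987))           # 987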
def _is_unit_allowed(unit: str): """Allows basic units, like "uM" and composite units, like "ng/ml".""" if unit.count("/") > 1: return False # pragma: no cover for forbidden_character in "().,&*^%# ": if forbidden_character in unit: return False # pragma: no cover return True
def prod(xs): """Computes the product along the elements in an iterable. Returns 1 for empty iterable. Args: xs: Iterable containing numbers. Returns: Product along iterable. """ p = 1 for x in xs: p *= x return p
def get_name(name='', index=0, suffix_name=''): """ generic name return. :param name: <str> base name :param suffix_name: <str> the suffix name to append to the base name. :param index: <int> the index to use for the name. :return: <str> return the modified name. """ index_name = '_{}'.format(index) suffix_name = '_{}'.format(suffix_name) if index_name not in name: name += index_name if not name.endswith(suffix_name): name += suffix_name return name
def early_stopping_monitor(i, est, locals): """Returns True on the 10th iteration. """ if i == 9: return True else: return False
def find_all_cell_types_from_module(module, modules, primitive_cells): """ Determine all of the cells types used in this module. This includes children of this module, and also includes primitive_cells. The primitive_cells lists is used to determine when this search should stop and not examine within a cell. This is to prevent exposing yosys internal logic (e.g. specify cells) to the output logical netlist. Returns a set of all cell types uses within the specified module. """ cells_in_module = set() assert module in modules module_data = modules[module] for cell_name, cell_data in module_data['cells'].items(): cell_type = cell_data['type'] cells_in_module.add(cell_type) if cell_type not in primitive_cells: cells_in_module |= find_all_cell_types_from_module( cell_type, modules, primitive_cells) return cells_in_module
def sample_labels(model, wkrs, imgs): """ Generate a full labeling by workers given worker and image parameters. Input: - `model`: model instance to use for sampling parameters and labels. - `wkrs`: list of worker parameters. - `imgs`: list of image parameters. Output: 1. list [img id, wkr id, label] as provided by `model.sample_label`. """ labels = [[ii, wi, model.sample_label(wkrs[wi], imgs[ii])] \ for ii in range(len(imgs)) for wi in range(len(wkrs))] return labels