content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import argparse


def valid_write_option(string):
    """Validate a file write-mode option for argparse.

    :param string: The write option
    :type string: str
    :return: str
    :raises argparse.ArgumentTypeError: when the option is not "w" or "a".
    """
    if string in ("w", "a"):
        return string
    raise argparse.ArgumentTypeError(
        string + " isn't a valid write option. Must be either w or a")
8784d7af1bf4c35172bf61e8c08f08eed2ce68fa
700,496
from typing import Any, List, Tuple


def min_w_ind(lst: List) -> Tuple[int, Any]:
    """Locate the smallest element of a list.

    :param lst: The list to search for the minimum value.
    :return: Tuple of (index of the minimum value, the minimum value).
    """
    smallest = min(lst)
    return lst.index(smallest), smallest
d0da7d1ab1c762f1af2333de6e8b829f8218fd25
700,498
def count(iterable):
    """ Returns the number of items in `iterable`. """
    total = 0
    for _ in iterable:
        total += 1
    return total
a9bb4ac70cef36613372c1225202f97f70a642cf
700,499
import torch


def xy_to_cxcy(xy):
    """
    Convert bounding boxes from boundary coordinates (x_min, y_min, x_max, y_max)
    to center-size coordinates (c_x, c_y, w, h).

    :param xy: bounding boxes in boundary coordinates, a tensor of size (n_boxes, 4)
    :return: bounding boxes in center-size coordinates, a tensor of size (n_boxes, 4)
    """
    mins = xy[:, :2]
    maxs = xy[:, 2:]
    centers = (maxs + mins) / 2  # c_x, c_y
    sizes = maxs - mins          # w, h
    return torch.cat([centers, sizes], 1)
46eef5bb63c85a84050a57a2fb27618721b31eaa
700,500
def calculate_p_b(*args):
    """Score text difficulty from the share of words likely unknown to readers.

    Unlike formulas based only on word and sentence counts, this one also
    uses the number of "difficult" words.

    args[0] is indexed as: [1] word count, [2] sentence count,
    [-1] difficult-word count.

    :return: difficulty score rounded to 2 decimals.
    """
    words_count, sentences_count, diff_words_count = args[0][1], args[0][2], args[0][-1]
    dif_percent = diff_words_count / words_count * 100
    score = 0.1579 * dif_percent + (0.0496 * words_count / sentences_count)
    # The adjustment constant applies only when at least 5% of the words
    # are difficult. (Removed a duplicated, unreachable return statement.)
    if dif_percent >= 5:
        score += 3.6365
    return round(score, 2)
420b8269390f50b163c89e97f95b27a282f6f829
700,501
def part1(entries: tuple) -> int:
    """part1 solver: brute-force the loop size, then transform the other key."""
    card_key, door_key = entries
    loop_size = 0
    key = 1
    # Repeatedly apply the subject-number transform until we hit a public key.
    while key != card_key and key != door_key:
        key = (key * 7) % 20201227
        loop_size += 1
    # The encryption key comes from transforming the *other* public key.
    other_key = door_key if key == card_key else card_key
    return pow(other_key, loop_size, 20201227)
cb4e38704229c3a67a67d7ffd7d8a7e0d2d72628
700,502
def selection(triple, variables):
    """Apply selection on a RDF triple.

    Pair each non-None variable with the triple component at the same
    position and return the resulting set of bindings.
    """
    bindings = set()
    for position in (0, 1, 2):
        variable = variables[position]
        if variable is not None:
            bindings.add((variable, triple[position]))
    return bindings
7f237f271d7c980e5ece21eacde671ee0f8a1d05
700,503
def daily_std_error(merged_data):
    """Calculates daily seasonal standard error of the timeseries data.

    Parameters
    ----------
    merged_data: DataFrame
        A pandas DataFrame with a datetime index and columns containing
        float type values.

    Returns
    -------
    DataFrame
        A pandas dataframe with a string type index of "MM/DD" date
        representations and the daily seasonal standard error as float
        values in the columns.
    """
    # Group every year's values for the same calendar day together,
    # then take the standard error of the mean per group.
    day_labels = merged_data.index.strftime("%m/%d")
    return merged_data.groupby(day_labels).sem()
11b5f72ff1a68425cd17d7638590290f5ac883d8
700,504
def tanimoto_sparse(str1, str2):
    """! Calculate the tanimoto coefficient for a pair of sparse vectors
    @param str1 str: String of 1s and 0s representing the first compound fingerprint
    @param str2 str: String of 1s and 0s representing the second compound fingerprint
    @return Returns float
    """
    n_a = 0.0  # bits set in str1
    n_b = 0.0  # bits set in str2 (over str1's length)
    n_c = 0.0  # bits set in both
    for i, bit1 in enumerate(str1):
        bit2 = str2[i]
        if bit1 == '1':
            n_a += 1
            if bit2 == '1':
                n_c += 1
        if bit2 == '1':
            n_b += 1
    # Both fingerprints empty: define similarity as 0 rather than divide by 0.
    if n_a + n_b + n_c == 0:
        return 0.000
    return float(n_c / (n_a + n_b - n_c))
2a7a6e7c585ceb3aaaa4dfed7e98b01f151e7c06
700,505
from bs4 import BeautifulSoup
import codecs


def get_saml_response(html):
    """ Parse SAMLResponse from Shibboleth page

    >>> get_saml_response('<input name="a"/>')
    >>> get_saml_response('<body xmlns="bla"><form><input name="SAMLResponse" value="eG1s"/></form></body>')
    'xml'
    """
    soup = BeautifulSoup(html, "html.parser")
    # First (and in practice only) SAMLResponse input field, if any.
    field = soup.find('input', attrs={'name': 'SAMLResponse'})
    if field is None:
        return None
    saml_base64 = field.get('value')
    return codecs.decode(saml_base64.encode('ascii'), 'base64').decode('utf-8')
24800c318f7f5f6000c6652d15421c68a97a04a6
700,506
def _is_namespace_visible(context, namespace):
    """Return true if namespace is visible in this context"""
    # Admins see everything.
    if context.is_admin:
        return True
    # Public and ownerless namespaces are visible to everyone.
    if namespace.get('visibility', '') == 'public':
        return True
    if namespace['owner'] is None:
        return True
    # Otherwise only the owner may see it.
    return context.owner is not None and context.owner == namespace['owner']
4f3119684681aa2f3c34a62e1674aaa58d26105e
700,507
def _exception_to_dict(exception):
    """Convert exception to an error object to be sent as response to APIs.

    :param exception: the exception instance to report.
    :return: dict of the form {"error": {"type": ..., "message": ...}}.
    """
    # str() the exception: the exception object itself is not
    # JSON-serializable, so sending it as a "message" would break
    # response serialization.
    return {"error": {"type": type(exception).__name__,
                      "message": str(exception)}}
4abb81516b604bdfd2f6360a1272556b0b6f6932
700,508
def _any_none(*args):
    """Returns a boolean indicating if any argument is None"""
    return any(arg is None for arg in args)
fadb9330fc1f4ffca2cf0105d8513d0be0d5fae1
700,510
def precision(judged_data):
    """
    Our relevance judgments are on a graded scale of 0-3, where scores of
    1-3 are considered relevant, and less than 1 is irrelevant. We compute
    precision of the result set based on this quantization.

    Args:
        judged_data (pandas.DataFrame): A DataFrame with at a minimum
            query, domain, judgment, result_fxf, and result_position columns

    Returns:
        A floating point value corresponding to the precision of the
        result set (1.0 for an empty result set).
    """
    total = len(judged_data)
    if not total:
        # No results at all: vacuously perfect precision.
        return 1.0
    relevant = int((judged_data["judgment"] >= 1).sum())
    return relevant / float(total)
6d4c4ee7793625e4590c01120318afe33cba0b0c
700,511
def _cycle_filter_matches(action, data_field, requirement):
    """Return True when one placement/drop satisfies a single filter."""
    # Handling for the cargo ship in level 1 placements: when no level is
    # specified, it is a cargo ship placement and counts as level 1.
    if data_field == 'level' and requirement == 1:
        return action.get('level', 1) == 1
    return action.get(data_field) == requirement


def filter_cycles(cycle_list, **filters):
    """Puts cycles through filters to meet specific requirements.

    cycle_list is a list of tuples where the first item is an intake and
    the second item is a placement or drop.

    filters are the specifications that certain data points inside the
    cycles must fit to be included in the returned cycles.
    example for filter - 'level=1' as an argument, '{'level': 1}' inside
    the function."""
    # A cycle survives only when every filter matches its placement/drop.
    return [
        cycle for cycle in cycle_list
        if all(_cycle_filter_matches(cycle[1], field, requirement)
               for field, requirement in filters.items())
    ]
1abdcf76365cbaced4ac1dcfff96a91282c38370
700,512
def ansi_escape(text):
    """ Replace characters with a special meaning. """
    # Neutralise ESC and backspace so they cannot affect the terminal.
    return text.translate(str.maketrans({'\x1b': '?', '\b': '?'}))
86ff3e7cfce8fcc17e826896e32c28e0d30ee83c
700,513
def _GetAndroidVersionFromMetadata(metadata):
    """Return the Android version from metadata; None if it does not exist.

    In Android PFQ, Android version is set to metadata in master
    (MasterSlaveLKGMSyncStage).
    """
    return metadata.GetDict().get('version', {}).get('android')
e1d1ed9d0bbf2f65d646c11007739f6b5a9b78ec
700,514
from functools import reduce


def get_in(d, t, default=None):
    """
    Walk a nested dictionary with a tuple of keys, each item one layer deeper.

    example: get_in({1: {2: 3}}, (1, 2)) -> 3
    example: get_in({1: {2: 3}}, (2, 3)) -> None (default)

    A looked-up value of False is returned as-is; any other falsy result
    yields *default*.
    """
    result = reduce(lambda node, key: node.get(key, {}), t, d)
    if result is False or result:
        return result
    return default
f92e6c94e3485f4b02a8487f832064de7a42eba5
700,515
def divisible_by_five(n: int) -> bool:
    """Return True if an integer is divisible by 5, and False otherwise."""
    return n % 5 == 0
e4cd2adf7067000f10aa655bb9fd819e525527d2
700,516
def _should_package_clang_runtime(ctx):
    """Returns whether the Clang runtime should be bundled."""
    # Crosstool sanitizer features that require packaging some clang
    # runtime libraries.
    sanitizer_features = ("asan", "tsan", "ubsan")
    for enabled_feature in ctx.features:
        if enabled_feature in sanitizer_features:
            return True
    return False
7f54b903f4acc288abca294d857e774ce3daa8b3
700,517
import math


def round_nearest_towards_infinity(x, infinity=1e+20):
    """ Rounds the argument to the nearest integer.

    For ties like 1.5 the ceiling integer is returned; this is called
    "round towards infinity".

    Args:
        x: the value to round
        infinity: the model's infinity value. All values above infinity
            are set to +INF (and symmetrically for -INF).

    Returns:
        an integer value (or +/-infinity for clamped inputs)

    Example:
        round_nearest(0) = 0
        round_nearest(1.1) = 1
        round_nearest(1.5) = 2
        round_nearest(1.49) = 1
    """
    if x == 0:
        return 0
    # Clamp values at or beyond the model's infinity.
    if x >= infinity:
        return infinity
    if x <= -infinity:
        return -infinity
    # floor(x + 0.5) rounds ties upward ("towards infinity").
    return int(math.floor(x + 0.5))
e3edd7dfca255648f02eb9845f411f52c24d3120
700,518
def create_html_popup(friends: list) -> str:
    """creates html popup for marker

    Args:
        friends (list): list of names

    Returns:
        str: html in string format
    """
    entries = [f"""<br> <p>{friend}</p><br> """ for friend in friends]
    return "Friends:" + "".join(entries)
54b1ac02cf59a086a711dcb0b47f14c0ea4e782d
700,519
def euler_step(theta, dtheta, ddtheta, dt):
    """
    Explicit Euler integration step for joint state.

    Parameters
    ----------
    theta (tf.Tensor): Joint angles (N,nq)
    dtheta (tf.Tensor): Joint velocities (N,nq)
    ddtheta (tf.Tensor): Joint accelerations (N,nq)
    dt (float): Delta t

    Returns
    -------
    (thetalistNext, dthetalistNext) (tuple of tf.Tensor):
        Next joint angles and velocities (N,nq), (N,nq)
    """
    theta_next = theta + dt * dtheta
    dtheta_next = dtheta + dt * ddtheta
    return theta_next, dtheta_next
402b30a3f24440707ef56b7113b545eb827c704e
700,520
import subprocess


def git_diff_files(oldrev, newrev):
    """Get list of files in diff."""
    diff_range = oldrev + ".." + newrev
    output = subprocess.check_output(
        ["git", "diff", "--name-only", diff_range])
    return output.splitlines()
4bfe2982a66b2bdb2c186cced1f4619965f58064
700,521
def beta1() -> float:
    """Mock beta1 hyper-parameter value."""
    mocked_value = 0.9
    return mocked_value
1152a45a891c749b196cab780f8f76afc70c2a65
700,522
import base64


def b64s_to_s(b64s: str) -> str:
    """convert base 64 string to string

    :param b64s: input base 64 string
    :type b64s: str
    :return: output string
    :rtype: str
    """
    decoded_bytes = base64.b64decode(b64s.encode('utf8'))
    return decoded_bytes.decode('utf8')
beb49deb87d45da43cc8f12b3aacec37d7bfd1ed
700,523
from typing import Counter


def counter(data, n=None):
    """Count occurrences of each element in *data*.

    Parameters
    ----------
    data : list
        List of elements to count.
    n : int
        Return only the n most common elements (all elements when n
        is falsy).

    Returns
    ----------
    list of (element, count) pairs ordered by descending count.
    """
    counts = Counter(data)
    limit = n if n else len(counts)
    return counts.most_common(limit)
6fed62dd1478992c13c8dee5dd2cbb7556a5d0fe
700,524
def col255_from_RGB(red,green,blue):
    """Return a term256 colour index from an RGB value.

    Maps each channel onto the 6x6x6 xterm colour cube (indices 16-231).
    found at : https://unix.stackexchange.com/questions/269077/
    """
    # Clamp each channel to the 0-255 byte range.
    if red > 255:
        red = 255
    if green > 255:
        green = 255
    if blue > 255:
        blue = 255
    # Channels below 75 snap to cube coordinate 0; otherwise (value-35)/40
    # approximates the cube step.
    # NOTE(review): this is float division, so the cube coordinate stays
    # fractional until the final int() truncation — confirm this matches
    # the intended integer xterm mapping.
    if red < 75:
        red = 0
    else:
        red = ((red -35)/40)
    red = red*6*6  # red advances the cube index in steps of 36
    if green < 75:
        green = 0
    else:
        green = ((green - 35)/40)
    green = green*6  # green advances the cube index in steps of 6
    if blue<75:
        blue = 0
    else:
        blue = ((blue -35)/40)
    # 16 is the offset of the colour cube within the 256-colour table.
    j = int(red+green+blue+16)
    if j>255:
        j=255
    return j
6d896f557d5903708f5072d1a875313633cea6b4
700,525
def first_or_default(iterable, predicate=None, default=None):
    """Return the first value matching a predicate, otherwise a default value.

    :param iterable: The items over which to iterate.
    :param predicate: A predicate to apply to each item; when falsy/absent,
        the first item wins unconditionally.
    :param default: The value to return if no item matches.
    :return: The first value matching the predicate otherwise the default value.
    """
    for candidate in iterable:
        matches = True if not predicate else predicate(candidate)
        if matches:
            return candidate
    return default
94ee1c97cc752b1f5bb0ac4cc7c3a6a8f3fe4675
700,526
def image_md(image_location, caption="", link=None, tooltip=""):
    """ Returns the html code for the individual plot.

    :param image_location: path/URL of the image; its name minus the last
        '-' suffix is reused as the element id.
    :param caption: text shown under the image.
    :param link: optional href target; when given the image is wrapped in
        an anchor, otherwise a bare <img> is emitted.
    :param tooltip: hover text (also used as the img alt text).
    :return: HTML snippet for one gallery cell.
    """
    # Anchor-wrapped image when a link is provided, bare <img> otherwise.
    image = f""" <a href="{link}" name="gallery-href"> <img src="{image_location}" name="gallery-image" alt="{tooltip}" style="width:100%" id="{"".join(image_location.split("-")[:-1])}"/> </a>""" if link else f"""<img src="{image_location}" name="gallery-image" style="width: 100 % " id="{"".join(image_location.split("-")[:-1])}"/>"""
    # Gallery cell with a tooltip block under the image.
    return f"""<div class="gallery"> {image} <div class="gtooltip"> {caption}<span class="gtooltiptext">{tooltip}</span> </div> </div>"""
3e847459f14d8cfd2c66bed633640bbbb0442321
700,527
def fix_models_py(models_py):
    """
    Clean up Django inspectdb output.

    The output of inspectdb is pretty messy, so trim down some of the
    mess: swap CharField for TextField, strip the long repetitive
    comments, and drop the unmanaged Meta lines so the result can be
    used as a managed model.
    """
    cleaned = []
    for line in models_py.split("\n"):
        # Postgres TextFields are preferable: no performance hit compared
        # to CharField, and no max_length to specify.
        line = line.replace("CharField", "TextField")
        # Strip out comments. They're super long and repetitive.
        comment_ix = line.find("#")
        if comment_ix == 0:
            continue
        if comment_ix > 0:
            line = line[0:comment_ix]
        # Skip the unmanaged-Meta parts; we'll use a managed model
        # derived from the auto-generated models.py.
        if "class Meta:" in line:
            continue
        if "managed = False" in line:
            continue
        if "db_table = " in line:
            continue
        cleaned.append(line)
    return "\n".join(cleaned)
428cce3315a2e47d44e35889e0cccd7d840794aa
700,529
def sum_of_years_digits(year):
    """Calculate the sum of years' digits, i.e. 1 + 2 + ... + year.

    Uses the closed form n*(n+1)/2 instead of recursion, so large years
    no longer risk hitting Python's recursion limit.

    Arguments:
        year (``int``): The year to calculate up to.

    Returns:
        int: The sum of years' digits (0 for non-positive years).
    """
    if year <= 0:
        return 0
    return year * (year + 1) // 2
aa3d67cde4af6c8565ef6fdcd8becb0cb3b1fa95
700,531
import subprocess


def resolve_ref(repo_url, ref):
    """
    Return resolved commit hash for branch / tag.

    Return ref unmodified if branch / tag isn't found

    Notes
    -----
    Author: Yuvi Panda
    Copied from https://github.com/yuvipanda/repo2charliecloud/blob/
    44a508b632e801d3b1f0dd1360e38f73d80efc74/repo2charliecloud/__init__.py#L6
    """
    stdout = subprocess.check_output(['git', 'ls-remote', repo_url]).decode()
    # ls-remote output looks like this:
    # <hash>\t<ref>\n
    # <hash>\t<ref>\n
    # Since our ref can be a tag (so refs/tags/<ref>) or branch
    # (so refs/heads/<ref>), we get all refs and check if either exists.
    all_refs = [line.split('\t') for line in stdout.strip().split('\n')]
    # BUG FIX: the loop variable previously shadowed the `ref` parameter
    # and the candidate tuple listed refs/heads twice; compare the remote
    # ref against both the heads and tags form of the requested ref.
    for git_hash, remote_ref in all_refs:
        if remote_ref in (f'refs/heads/{ref}', f'refs/tags/{ref}'):
            return git_hash[:7]
    # Fall back to the first hash in the output (typically HEAD);
    # return only the first 7 characters.
    if stdout:
        return stdout.split()[0][:7]
    return ref[:7]
4def5ab1e791ab0d3179418070580a11ac82f75f
700,532
def get_values():
    """ Retrieves two values to multiply.

    Prompts the user for a comma-separated list of integers on stdin;
    re-prompts from scratch whenever any token is not an integer.

    :return: list of the parsed integers.
    """
    user_values = input("On the next line, enter the values to multiply,\
 separated by commas. \n>> ")
    input_list = user_values.split(",")
    final_list = []
    #We've split the user's input into a list, but have no idea if it's any good.
    #Let's check. If it's good, we'll add the integer to the final list.
    for val in input_list:
        try:
            final_list.append(int(val))
        except ValueError:
            print("We need the list to be all integers!")
            # Any bad token discards everything and re-prompts recursively.
            return get_values()
    #Is it alright to return a list instead of a tuple now? They work similarly, so yes.
    return final_list
ea0fb353d61514e67970d673bbe9fb45a9b20123
700,533
def parse_simple_expression_list(l):
    """
    This parses a comma-separated list of simple_expressions, and returns
    a list of strings. It requires at least one simple_expression be
    present.
    """
    expressions = [l.require(l.simple_expression)]
    # Keep consuming ", <expr>" pairs until either piece is missing.
    while l.match(','):
        expression = l.simple_expression()
        if not expression:
            break
        expressions.append(expression)
    return expressions
8c3772e1bd4cc9587c8095dcba640878c01fc0f4
700,534
from typing import Iterable
from typing import Any
from typing import List
import itertools


def flatten(val: Iterable[Iterable[Any]]) -> List[Any]:
    """ Flattens a list of lists into a single list

    >>> flatten( [['abc','def'],[12,34,46],[3.14, 2.22]])
    ['abc', 'def', 12, 34, 46, 3.14, 2.22]
    """
    return list(itertools.chain.from_iterable(val))
cd9ae9e393569ba7800735d09c8621f0d64beed3
700,535
import io
import csv


def to_csv(members):
    """Convert JSON data structure to CSV string."""
    buffer = io.StringIO()
    writer = csv.writer(buffer)
    if members:
        # Header row comes from the first record's keys.
        writer.writerow(members[0].keys())
        writer.writerows(member.values() for member in members)
    return buffer.getvalue()
0ed85a10417dee1ca98a28e05e1ae2d24dcebfe0
700,536
def split_sentences(inp_text, nlp_model):
    """
    Splits an input string into sentences determined by a spacy model.

    :param inp_text: string with input text
    :param nlp_model: sciSpacy model
    :return: list of sentences in string format
    """
    parsed = nlp_model(inp_text)
    sentences = []
    for sentence in parsed.sents:
        sentences.append(sentence.text)
    return sentences
b5617d9334509edf09bf4a0360d3030e67ddf800
700,537
def _point(x, index=0):
    """Convert tuple to a dxf point"""
    parts = []
    # DXF group codes 10/20/30 (+index) carry the X/Y/Z coordinates.
    for axis, coord in enumerate(x):
        group_code = (axis + 1) * 10 + index
        parts.append(' %s\n%s' % (group_code, float(coord)))
    return '\n'.join(parts)
b5d2b3345b53bf2bd5e7fb93ee57fa9eb2a599b9
700,539
def get_video_dimensions(lines):
    """Extract (width, height) from ffprobe/ffmpeg output lines.

    Has it's own function to be easier to test.

    NOTE(review): on success the values come from map(int, ...) unpacking;
    when no video stream line is found the result is (None, None).
    """
    def get_width_height(video_type, line):
        # Dimensions live in the 4th comma-separated column; for non-h264
        # streams extra data follows after a space and is trimmed off.
        dim_col = line.split(", ")[3]
        if video_type != "h264":
            dim_col = dim_col.split(" ")[0]
        return map(int, dim_col.split("x"))
    width, height = None, None
    video_types = ("SAR", "hevc", "h264")
    # Stop at the first line that mentions any known video type marker.
    for line in lines:
        for video_type in video_types:
            if video_type in line:
                width, height = get_width_height(video_type, line)
                break
        else:
            # necessary to break out of nested loop
            continue
        break
    # A rotation tag or 9:16 display aspect ratio marks portrait video.
    portrait = False
    portrait_triggers = ["rotation of", "DAR 9:16"]
    for line in lines:
        for portrait_trigger in portrait_triggers:
            if portrait_trigger in line:
                portrait = True
    if portrait:
        # Swap so the reported dimensions match the display orientation.
        width, height = height, width
    return width, height
7df1dbc3e190908852684735bc1242db4e3c456b
700,541
def _check_selection(selection):
    """Handle default and validation of selection"""
    available = ["counts", "exposure", "background"]
    # Default to everything when nothing was requested.
    if selection is None:
        return available
    if not isinstance(selection, list):
        raise TypeError("Selection must be a list of str")
    for name in selection:
        if name not in available:
            raise ValueError("Selection not available: {!r}".format(name))
    return selection
454b36833a5117b2d1bab377094e8e3ec0c06969
700,542
def force(value):
    """
    This helper function forces evaluation of a promise.

    A promise for this function is something that has a __force__ method
    (much like an iterator in python is anything that has an __iter__
    method); non-promises pass through unchanged.
    """
    forcer = getattr(value, '__force__', None)
    if not forcer:
        return value
    return forcer()
eccdbfe927eeac54246ac777ed16864a7da38ca7
700,543
import array


def remove_glyf_instructions(tt) -> int:
    """Removes instruction set bytecode from glyph definitions in the glyf table.

    Returns the number of glyphs whose instructions were removed.
    """
    empty_bytecode = array.array("B", [])
    cleared = 0
    for glyph in tt["glyf"].glyphs.values():
        glyph.expand(tt["glyf"])
        # Skip glyphs without a program or with already-empty bytecode.
        if not hasattr(glyph, "program") or glyph.program.bytecode == empty_bytecode:
            continue
        if glyph.isComposite():
            # Composite glyphs may drop the program attribute entirely.
            del glyph.program
        else:
            # Simple glyphs keep a program object, but with empty bytecode.
            glyph.program.bytecode = array.array("B", [])
        cleared += 1
    return cleared
1b73d9b2fd53a2568f953faab65d5bfae5351189
700,544
import sys


def tweet_with_image(twtr, filename, msg, force=False):
    """Post *msg* plus the image in *filename* as a tweet (appends #COVID19).

    :param twtr: authenticated Twitter API client exposing
        statuses.update_with_media.
    :param filename: path of the image file to attach.
    :param msg: tweet text.
    :param force: when False, do nothing if running under IPython.
    :return: id of the created tweet, or None when skipped.
    """
    # Skip when invoked from an interactive IPython session, unless forced.
    if "ipy" in sys.argv[0] and not force:
        return
    #print(msg)
    with open(filename, "rb") as imagefile:
        imagedata = imagefile.read()
    params = {"media[]": imagedata, "status": msg + " #COVID19"}
    req = twtr.statuses.update_with_media(**params)
    print(req['created_at'])
    parent_tweet_id = req['id']
    print("Tweeted:{}.".format(parent_tweet_id))
    return parent_tweet_id
094983d5469a740a63e2f4c74929cce316b1abf0
700,545
from typing import Dict
from typing import Any


def _make_version_config(version, scaling: str, instance_tag: str,
                         instances: int = 10) -> Dict[str, Any]:
    """Creates one version config as part of an API response."""
    scaling_block = {instance_tag: instances}
    return {scaling: scaling_block, 'id': version}
1d4f613f35ace2f540629e6c79b3f571b3c2a255
700,546
import os


def calculate_destination(prefix, cuda, lib, lib_ver):
    """Calculates the installation directory.

    Only *prefix* affects the result; the remaining arguments are kept
    for interface compatibility.
    """
    data_dir = ".data"
    return os.path.join(prefix, data_dir)
c7b471fb191daa345ddb44ef033d18d96198455c
700,547
def sec_from_hms(start, *times):
    """
    Returns a list of times based on adding each offset tuple in times
    to the start time (which should be in seconds).

    Offset tuples can be in any of the forms: (hours), (hours,minutes),
    or (hours,minutes,seconds).
    """
    results = []
    for offsets in times:
        # Pad with zeros so missing minutes/seconds default to 0.
        padded = tuple(offsets) + (0, 0, 0)
        hours, minutes, seconds = padded[0], padded[1], padded[2]
        results.append(start + hours * 3600 + minutes * 60 + seconds)
    return results
4c279736f173cbec4170880cfdf279e801847b5a
700,548
def remove_missing_targets(this_data, target_var):
    """
    Gets raw data and removes rows with missing targets or missing dates.

    Parameters
    ----------
    this_data : dataframe
        The raw data which has been compiled from Yahoo!Finance
    target_var : string
        Column name of target variable

    Returns
    -------
    this_data
        A dataframe without missing targets
    """
    keep = this_data[target_var].notnull() & this_data["Date"].notnull()
    return this_data[keep]
82d9d5f77821c7ee1b6eec53d3b9bb474922beac
700,550
def tee(iterable, n=2):
    """Return n independent iterators from a single iterable.

    :param iterable: the iterator from which to make iterators.
    :param n: the number of iterators to make (default is 2)
    """
    # Each call to iter() restarts iteration for re-iterable inputs
    # (lists, tuples); a one-shot iterator input would be shared.
    iterators = []
    for _ in range(n):
        iterators.append(iter(iterable))
    return iterators
cb35d518b90ddf66627e171b00e977823d4d3d17
700,551
import pandas as pd


def pandas_extract_rows(data_frame, ugridtype_str, index_names):
    """removes the t2-t6 for S and E points

    :param data_frame: multi-index frame whose level 1 is the point letter
        (G/E/S/H/L) and which carries an 'Item' column with t1..t6 rows.
    :param ugridtype_str: string of point letters present in the grid.
    :param index_names: index columns to restore after the rewrite.
    :return: concatenated frame with S/E points reduced to one row each;
        the original frame is returned unchanged when cleanup fails.
    """
    # (letter, rows-per-point): E and S points carry 6 rows apiece of
    # which only the first (t1) is meaningful.
    letter_dims = [
        ('G', 6),
        ('E', 1),
        ('S', 1),
        ('H', 6),
        ('L', 6),
    ]
    cat_keys = []
    for (letter, dim) in letter_dims:
        if letter not in ugridtype_str:
            continue
        if dim == 1:
            # Note that I'm only keeping every 6th row
            eig = data_frame.xs(letter, level=1).iloc[0::6]
            eig = eig.reset_index()
            #print(eig.columns)
            #print(eig)
            #item = eig.loc[:, 1]
            #item = eig.loc[:, 'Item']
            #print(dir(eig))
            #print(eig.loc)
            #item = eig['Item']
            #print(item)
            try:
                # Relabel the surviving 't1' rows with the point letter.
                eig = eig.replace({'Item' : {'t1' : letter}}).set_index(index_names)
            except (TypeError, NotImplementedError):
                print(f'skipping pandas cleanup due to issue with complex {letter} points')
                return data_frame
                #continue
        elif dim == 6:
            eig = data_frame.xs(letter, level=1)
        else:
            raise RuntimeError(dim)
        #log.info('eig = %s' % eig)
        cat_keys.append(eig)
    data_frame = pd.concat(cat_keys)
    return data_frame
2b297aa5e6944d9eaa6b18031045d3b03e7c6d9f
700,552
def reverse_grammar(pgrammar):
    """reverses the order of the keys appearing in the parsed grammar."""
    reversed_items = reversed(list(pgrammar.items()))
    return dict(reversed_items)
f4f1b6a5e9f8ad062dde6f6322cec771e792bc6d
700,553
import fileinput


def read_input() -> str:
    """Reads Brainfuck application from file or stdin"""
    chunks = []
    for line in fileinput.input():
        chunks.append(line)
    return "".join(chunks)
7d3b8776cf95a254a414e91ff9e236575ddfc2ea
700,554
def is_search_in_version(search, version):
    """
    Determine if the given version is a valid member of the given
    version search text.

    Each dot-separated search segment must equal the corresponding
    version segment, or — for segments written as 'low-high' (either
    bound optional) — contain it.

    :param search: dotted search text, e.g. '1.2' or '1.2-4'.
    :param version: dict with a 'full' dotted version string.
    :return: True when every search segment matches.
    """
    search_segments = search.split('.')
    version_segments = version['full'].split('.')
    full_version_count = len(version_segments)
    for i, search_ver in enumerate(search_segments):
        # BUG FIX: was `i > full_version_count`, which read one past the
        # end (IndexError) when the search has more segments than the
        # version; use >= so the extra segment simply fails the match.
        if i >= full_version_count:
            return False
        install_ver = int(version_segments[i])
        if '-' in search_ver:
            # Range segment: missing bounds default to huge sentinels.
            bounds = search_ver.split('-')
            low = int(bounds[0]) if bounds[0] else -99999999
            high = int(bounds[1]) if bounds[1] else 99999999
            if low > high:
                low, high = high, low
            if not (low <= install_ver <= high):
                return False
        else:
            if search_ver != str(install_ver):
                return False
    return True
0a7bc028985d6d18bb4fd1eb745082756be4707f
700,555
def parse_live_msg(user, msg, title, game):
    """Turns the bot's <user> <title> etc format into a readable message.

    Placeholders wrapped in <...> are substituted: user, link, title,
    game, br (newline). Tokens containing & or @ become Discord
    role/everyone mentions. If no <link> placeholder was used and the
    message contains no twitch URL already, the stream link is appended.
    """
    msg = str(msg)
    user = str(user)
    word = ""
    words = []
    r = 0  # set to 1 while inside an &/@ mention token
    # First pass: split msg on < and > into literal chunks and placeholders.
    for i in range(len(msg)):
        if msg[i] == "<" or msg[i] == ">":
            if r == 1:
                if "everyone" in word:
                    # @everyone-style mention.
                    words.append("@" + word)
                else:
                    # Discord role mention syntax.
                    words.append("<@&" + word + ">")
                r = 0
            else:
                words.append(word)
            word = ""
        elif msg[i] == "&" or msg[i] == "@":
            r = 1
        else:
            word += msg[i]
    words.append(word)
    word = ""
    linked = 0
    # Second pass: substitute the known placeholder words.
    for i in range(len(words)):
        if str(words[i]) == "user":
            word += user
        elif str(words[i]) == "link":
            word += "https://twitch.tv/" + user
            linked = 1
        elif str(words[i]) == "title":
            word += title
        elif str(words[i]) == "game":
            word += game
        elif str(words[i]) == "br":
            word += "\n"
        else:
            word += words[i]
    # Append the stream link when none was embedded in the message.
    if linked == 0 and "https://www.twitch.tv/" not in str(msg) and "https://twitch.tv/" not in str(msg):
        word += "\nhttps://twitch.tv/" + user
    return word
07a4269d24df3f69d5ab215c0e9579a3bb691b63
700,556
def remove_child_items(item_list):
    """
    For a list of filesystem items, remove those items that are duplicates
    or children of other items

    Eg, for remove_child_items['/path/to/some/item/child',
    '/path/to/another/item', '/path/to/some/item']
    returns ['/path/to/another/item', '/path/to/some/item']

    If one of the items is root, then it wins

    Also, all items should be the full path starting at root (/). Any that
    aren't are removed
    """
    if '/' in item_list:
        return ['/']
    # Deduplicate, drop non-absolute paths, and sort so that every parent
    # precedes its children.
    candidates = sorted({p for p in item_list if p.startswith('/')})
    survivors = []
    for path in candidates:
        # A path is kept unless it lives under an already-kept path.
        if not any(path.startswith(parent + '/') for parent in survivors):
            survivors.append(path)
    return survivors
02f92094cb697be40a4c16d90e8ee6b33f965438
700,557
def isen_nozzle_mass_flow(A_t, p_t, T_t, gamma_var, R, M):
    """
    Calculates mass flow through a nozzle which is isentropically
    expanding a given flow.

    Input variables:
        A_t       : nozzle throat area
        gamma_var : ratio of specific heats
        p_t       : pressure at throat
        T_t       : temperature at throat
        M         : Mach number at throat
        R         : Perfect gas constant
    """
    # Stagnation-ratio base and its isentropic exponent.
    base = 1 + (((gamma_var - 1) / 2) * (M ** 2))
    exponent = -((gamma_var + 1) / (2 * (gamma_var - 1)))
    return (A_t * p_t * (T_t ** 0.5)) * ((gamma_var / R) ** 0.5) * M * (base ** exponent)
d2cb14d099167c4dbca4ce51a67d248bd15a4a88
700,558
def calc_overturning_stf(ds,grid,doFlip=True): """ Only for simple domains, compute meridional overturning streamfunction Parameters ---------- ds : xarray Dataset from MITgcm output, via e.g. xmitgcm.open_mdsdataset must contain 'V' or 'VVELMASS' fields grid : xgcm grid object defined via xgcm.Grid(ds) doFlip : if true, compute by accumulating from bottom to top Output ------ ov_stf : xarray DataArray containing 2D field with overturning streamfunction in Sv above """ # Grab the right velocity field from dataset if 'V' in ds.keys(): vstr = 'V' elif 'VVELMASS' in ds.keys(): vstr = 'VVELMASS' else: raise TypeError('Could not find recognizable velocity field in input dataset') # Compute volumetric transport v_trsp = ds[vstr] * ds['dxG'] * ds['drF'] if vstr != 'VVELMASS': print(f' *** Multiplying {vstr} by hFacS***') v_trsp = v_trsp * ds['hFacS'] v_trsp = v_trsp.sum(dim=['XC']) # flip dim, accumulate in vertical, flip back if doFlip: v_trsp = v_trsp.isel(Z=slice(None,None,-1)) ov_stf = grid.cumsum(v_trsp,'Z',boundary='fill') if doFlip: ov_stf = -ov_stf.isel(Zl=slice(None,None,-1)) # Convert m/s to Sv ov_stf = ov_stf * 10**-6 return ov_stf
d7d25368268dc16c4603a88a3a11607772f04da4
700,559
from typing import List


def ways_to_fill(amount: int, containers: List[int]) -> int:
    """The number of ways to pour amount of liquid into containers.

    All containers used must be filled up entirely."""
    if amount == 0:
        # Reached an acceptable configuration.
        return 1
    if amount < 0 or not containers:
        # No longer satisfiable.
        return 0
    first, rest = containers[0], containers[1:]
    # Either the first container is used or it is skipped.
    used = ways_to_fill(amount - first, rest)
    skipped = ways_to_fill(amount, rest)
    return used + skipped
098a4b22d29f9527dfcf04d48e2f58784903101d
700,561
def value_at_diviner_channels(xarr):
    """Return value of xarr at each diviner channel."""
    # Diviner channel wavelengths [microns]; broadband channels use the
    # midpoint of their wavelength range.
    dwls = [
        3,
        7.8,
        8.25,
        8.55,
        (13 + 23) / 2,
        (25 + 41) / 2,
        (50 + 100) / 2,
        (100 + 400) / 2,
    ]
    return xarr.interp({"wavelength": dwls})
4897ed8250e2f02409205c02a26d63cfafb50a52
700,562
from typing import List


def txt2list(path: str) -> List:
    """ Opens a text file and converts to list by splitting on new lines,
    dropping empty lines. """
    with open(path, "r") as handle:
        raw_lines = handle.read().split("\n")
    return [line for line in raw_lines if line]
02feac063537e3434e053556f19cf8bf66b5df68
700,563
def obtain_value(entry):
    """Extract value from entry.

    The entries could be like: '81.6', ': ', '79.9 e', ': e'.
    """
    head = entry.split(' ', maxsplit=-1)[0]  # Discard notes.
    if head in ('', ':'):
        return None
    return float(head)
cabb8c9314716fe988102a6226734dd7408be736
700,564
def time_str_fixer(timestr):
    """
    Zero-pad the year and month fields of a hysplit time string.

    timestr : str
    rval : str

    If the year is 2006, hysplit trajectory output writes the year as a
    single digit 6. This must be turned into 06 to be read properly.
    Non-string input is returned unchanged.
    """
    if not isinstance(timestr, str):
        return timestr
    fields = timestr.split()
    fields[0] = str(int(fields[0])).zfill(2)  # year
    fields[1] = str(int(fields[1])).zfill(2)  # month
    return " ".join(fields)
546ede251de8a8e30831851d645cbb0143fc01db
700,565
import torch


def psnr_compute(img_batch, ref_batch, batched=False, factor=1.0, clip=False):
    """Standard PSNR.

    :param img_batch: reconstructed images, shape (B, ...).
    :param ref_batch: reference images, same shape as img_batch.
    :param batched: when True, compute one PSNR over the whole batch;
        otherwise compute per-example PSNRs and return (mean, max).
    :param factor: peak signal value (data range).
    :param clip: clamp img_batch into [0, 1] first.
    :return: scalar tensor on the batched success path; (mean, max) floats
        on the per-example success path; degenerate cases return a
        2-element list of inf/nan tensors.
        NOTE(review): the batched success branch returns a single tensor
        while every other branch returns a pair — confirm callers handle
        both shapes.
    """
    if clip:
        img_batch = torch.clamp(img_batch, 0, 1)
    if batched:
        mse = ((img_batch.detach() - ref_batch) ** 2).mean()
        if mse > 0 and torch.isfinite(mse):
            return 10 * torch.log10(factor ** 2 / mse)
        elif not torch.isfinite(mse):
            # Non-finite error: PSNR undefined.
            return [torch.tensor(float("nan"), device=img_batch.device)] * 2
        else:
            # Zero error: identical images, PSNR infinite.
            return [torch.tensor(float("inf"), device=img_batch.device)] * 2
    else:
        B = img_batch.shape[0]
        # Per-example MSE over all non-batch dimensions.
        mse_per_example = ((img_batch.detach() - ref_batch) ** 2).view(B, -1).mean(dim=1)
        if any(mse_per_example == 0):
            return [torch.tensor(float("inf"), device=img_batch.device)] * 2
        elif not all(torch.isfinite(mse_per_example)):
            return [torch.tensor(float("nan"), device=img_batch.device)] * 2
        else:
            psnr_per_example = 10 * torch.log10(factor ** 2 / mse_per_example)
            return psnr_per_example.mean().item(), psnr_per_example.max().item()
e507e4ae1aa3034319421d8e8bf156f67646fec2
700,566
def extend_dict(d1, d2):
    """Extends d1 with d2, removing duplicates from d1"""
    # Keep d1 entries whose keys d2 does not override, then layer d2 on top.
    merged = {key: value for key, value in d1.items() if key not in d2}
    merged.update(d2)
    return merged
fdbcb83e1f14098d51a6ca77d6b0085dd6ad915c
700,567
import re


def RemoveTime(output):
    """Removes all time information from a Google Test program's output."""
    timing_pattern = re.compile(r'\(\d+ ms')
    return timing_pattern.sub('(? ms', output)
1269b502fdf7d46165b0c0dca3dab50cd2e36550
700,568
def IsInclude(line):
    """Return True if line is an include of another file."""
    include_directive = "@include "
    return line.startswith(include_directive)
42278a2d0ea3582111a9cbae26860e1f229398b3
700,569
def largest_n_element(m_dict, n):
    """Return the n (word, count) pairs with the highest counts.

    Args:
        m_dict (dict): maps word -> occurrence count.
        n (int): number of pairs to return.

    Returns:
        list: up to n pairs ordered by descending count.
    """
    # heapq.nlargest runs in O(len * log n), cheaper than a full sort for small n.
    word_count_pairs = list(m_dict.items())
    return heapq.nlargest(n, word_count_pairs, key=lambda pair: pair[1])
80723dd91aae6ffa837b1c3794a730331798de55
700,570
def err_ratio(predict, dataset, examples=None, verbose=0): """Return the proportion of the examples that are NOT correctly predicted. verbose - 0: No output; 1: Output wrong; 2 (or greater): Output correct""" examples = examples or dataset.examples if len(examples) == 0: return 0.0 right = 0 for example in examples: desired = example[dataset.target] output = predict(dataset.sanitize(example)) if output == desired: right += 1 if verbose >= 2: print(' OK: got {} for {}'.format(desired, example)) elif verbose: print('WRONG: got {}, expected {} for {}'.format( output, desired, example)) return 1 - (right/len(examples))
9159fdb65e47a817381fe08dfdee77f967a47ef5
700,571
def _get_label_from_dv(dv, i): """Return label along with the unit of the dependent variable Args: dv: DependentVariable object. i: integer counter. """ name, unit = dv.name, dv.unit name = name if name != "" else str(i) label = f"{name} / ({unit})" if unit != "" else name return label
fd18e3d4b5f61bec6febc27b1d7d86b378205a0a
700,572
def get_land_only(dtset, tolerance=0.5):
    """Build a boolean land mask from a dataset's LANDFRAC variable.

    A grid cell is "land" when its land fraction exceeds *tolerance*.
    When the mask carries a ``time`` dimension, the first time slice is
    taken and squeezed (land/sea geography does not vary in time).

    :param dtset: xarray-like dataset (mapping with ``in`` support);
        ``dtset['LANDFRAC']`` is expected to support ``>``, ``.dims``,
        ``.isel`` and ``.squeeze``.
    :param tolerance: land-fraction threshold, default 0.5.
    :return: boolean land mask, or None (with a message) when LANDFRAC
        is absent — preserved from the original behavior so existing
        callers checking for None keep working.
    """
    # Fix: the original left a large unreachable triple-quoted block (an
    # obsolete Basemap implementation) after the return paths; removed.
    if 'LANDFRAC' not in dtset:
        print('LANDFRAC not in dataset')
        return None
    mask = dtset['LANDFRAC'] > tolerance
    if 'time' in mask.dims:
        return mask.isel(time=0).squeeze()
    return mask
2d9ae86edbb43f8dcd0402e263b6d732e7e02c03
700,573
def get_seconds(value, scale):
    """Convert *value* expressed in *scale* units to seconds.

    Supported scales: seconds, minutes, hours, days, weeks,
    months (30 days) and years (365 days). An unknown scale
    raises KeyError, matching the original dispatch behavior.
    """
    seconds_per_unit = {
        'seconds': 1,
        'minutes': 60,
        'hours': 60 * 60,
        'days': 60 * 60 * 24,
        'weeks': 60 * 60 * 24 * 7,
        'months': 60 * 60 * 24 * 30,
        'years': 60 * 60 * 24 * 365,
    }
    return value * seconds_per_unit[scale]
92245d0568ea44eb5a906021badcb888535dc5a0
700,574
def points_2_inches(points) -> float:
    """Convert a length in typographic points to inches (72 points per inch)."""
    POINTS_PER_INCH = 72
    return points / POINTS_PER_INCH
8744659807046c892f2d1f36c3dc47a44417ca96
700,576
def estimate_recording(path):
    """Collect .wav files under *path* and estimate total recorded time.

    Walks the directory tree, gathering every file whose name ends in
    "wav", and sums their durations (frames / framerate). Unreadable wav
    files are reported and skipped.

    :param path: root directory to search.
    :return: tuple ``(wav_paths, hours)`` — list of wav file paths and the
        total duration in hours.
    """
    # Fix 1: the walk variable no longer shadows the `path` parameter.
    wav_paths = []
    for dirpath, _, files in os.walk(path):
        for record in files:
            if record.endswith("wav"):
                wav_paths.append(os.path.join(dirpath, record))
    total_duration = 0.0
    for recording in wav_paths:
        try:
            with wave.open(recording, "r") as wav_file:
                frames = wav_file.getnframes()
                rate = wav_file.getframerate()
                total_duration += (1.0 * frames) / rate
        # Fix 2: catch only wav-parsing failures instead of a bare `except:`
        # that also swallowed KeyboardInterrupt/SystemExit.
        except (wave.Error, EOFError):
            print("Faulty file in the directory: ")
            print("The faulty file: ", recording)
    return wav_paths, total_duration / 3600.0
398a95a6958cb944dca69987dd1da7b022e5fb26
700,577
def my_import(class_name):
    """Return a python class (or attribute) given its dotted path.

    Usage example::

        Report = my_import('myclass.models.Report')
        model_instance = Report()

    :param str class_name: Fully-qualified dotted name.
    :returns: Class object.
    :raises ImportError: when the module or attribute cannot be resolved.
    """
    *module_parts, attr_name = class_name.split('.')
    module_path = '.'.join(module_parts)
    try:
        module = importlib.import_module(module_path)
        return getattr(module, attr_name)
    except (ImportError, AttributeError) as e:
        msg = 'Could not import "{}" from {}: {}.'.format(
            attr_name, e.__class__.__name__, e)
        raise ImportError(msg)
214bf9efed78a9e2dcc9897a28f389ef40918063
700,578
def spawn_command(*command):
    """Launch *command* without waiting for completion.

    The child is placed in its own process group, so terminal signals
    (e.g. Ctrl+C) are not forwarded to it.

    Returns the Popen object (which provides .send_signal, .kill(),
    .wait(), etc.), or None when the command could not be started at all.
    """
    sys.stderr.write('Spawning command: "%s".\n' % ' '.join(command))
    try:
        child = subprocess.Popen(command, shell=False, preexec_fn=os.setpgrp)
    except OSError as error:
        sys.stderr.write('  -> Execution failed: %s\n' % error)
        return None
    return child
6e0757379eee2ccb79a2b104f45c40e6705719f2
700,579
def rename_motifs(motifs, stats=None):
    """Renumber motifs in place as GimmeMotifs_1..GimmeMotifs_N.

    When a *stats* mapping (keyed by each motif's string form) is given,
    every entry is copied under the motif's new string key and the updated
    mapping is returned alongside the motif list.
    """
    renamed = []
    for index, motif in enumerate(motifs, start=1):
        previous_key = str(motif)
        motif.id = "GimmeMotifs_{}".format(index)
        renamed.append(motif)
        if stats:
            stats[str(motif)] = stats[previous_key].copy()
    if stats:
        return renamed, stats
    return renamed
bcc7ea61f7791b3c6f9d5b26cde1f8a7ac79bb9b
700,580
def _get_region(zone): """ Get region name from zone :param zone: str, zone :return: """ return zone if 'gov' in zone else zone[:-1]
aa272e6e6e00b444f29ffbe821b2d255196d55f6
700,581
def conv_bright_ha_to_lib(brightness) -> int:
    """Convert Home Assistant brightness (0-255) to the library scale (0-16)."""
    # Treat full HA brightness as 256 so it lands on 16, the library maximum.
    scaled = 256 if brightness == 255 else brightness
    return int(scaled / 16)
45782f53a41605b20230c71c7e2ccf713d10c6dc
700,582
def findIntersection3(p1, p2, p3, p4):
    """Intersection point of the line through p1,p2 with the line through p3,p4.

    Uses the 2x2 determinant (cross-product) formulation; raises
    ZeroDivisionError for parallel lines, same as the original expression.
    """
    x1, y1 = p1[0], p1[1]
    x2, y2 = p2[0], p2[1]
    x3, y3 = p3[0], p3[1]
    x4, y4 = p4[0], p4[1]
    # Cross products of each segment's endpoints.
    cross12 = x1 * y2 - y1 * x2
    cross34 = x3 * y4 - y3 * x4
    denom = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
    px = (cross12 * (x3 - x4) - (x1 - x2) * cross34) / denom
    py = (cross12 * (y3 - y4) - (y1 - y2) * cross34) / denom
    return (px, py)
6605f2756d1812c42b6793468366071f4ed4abb8
700,583
def retrieve_item(item_id):
    """Stubbed resource lookup: return a canned item record for *item_id*.

    This does not hit any real backend.
    """
    item = dict(
        id=item_id,
        brand_name="Clean Breathing",
        name="Air Purifier",
        weight=12.3,
    )
    return item
eedeca7580bd74244c62da798ac19d6b815ba445
700,584
def log_file_name(extension):
    """Build a timestamped file name (YYYYMMDD-HHMMSS + extension).

    Intended for the logfiles directory; requires the machine to have an
    RTC or a synchronised clock.
    """
    now = datetime.now()
    stamp = (f"{now.year:04d}{now.month:02d}{now.day:02d}"
             f"-{now.hour:02d}{now.minute:02d}{now.second:02d}")
    return stamp + extension
795ab0623ee6a563cfc4e2531cd8745f0ff2fac3
700,585
def TreeImportanceArray(reg):
    """Return the feature-importance array of a fitted tree-based regressor."""
    importances = reg.feature_importances_
    return importances
59f3b0bcc9a4c60af71b62163e3e087137c64f49
700,586
from typing import Any from typing import Dict def _check_headers(headers: Any) -> Dict: """Check headers format an validate content type. :param headers: configured headers :return Dict: request headers """ if headers is None: headers = {} if 'Content-Type' not in headers.keys(): headers['Content-Type'] = 'application/json' return headers
f7dc965ffd740ea94ca3485f455cf6427ebc16d8
700,587
def get_middle_value(my_list):
    """Return the middle value from a list after sorting.

    For an even-length list the upper of the two middle values is returned.

    :param list my_list: list of mutually comparable values.
    :return: the middle element of the sorted list.
    :raises IndexError: if the list is empty.
    """
    # Fix: removed unreachable leftover code after the return (a stray
    # docstring plus `return utils.format_genomic_distance(...)` that
    # referenced an undefined `utils` — remnants of a deleted function).
    return sorted(my_list)[len(my_list) // 2]
53e8724f1709429707766db45c34ebd0db62a686
700,588
def rip_svgi(content: str, embed_styles: bool = False) -> str:
    """Rip a single SVG diagram plus its interaction scripts out of HTML.

    :param content: HTML page content as text.
    :param embed_styles: if True, referenced styles would be downloaded and
        embedded into the SVG (currently has no effect).
    :return: text containing the scripts and SVG node — presently the
        unmodified input content.
    """
    return content
d5472b86be70e138128ff527f388a9b1ffa2602d
700,590
def _image_to_ground_plane_perform(r_tgt_coa, r_dot_tgt_coa, arp_coa, varp_coa, gref, uZ):
    """
    Solve for the intersection of R/Rdot contours with a ground plane.

    Parameters
    ----------
    r_tgt_coa : numpy.ndarray
        Target ranges at center of aperture (one entry per point).
    r_dot_tgt_coa : numpy.ndarray
        Target range rates at center of aperture.
    arp_coa : numpy.ndarray
        Aperture reference point positions; assumed shape (N, 3) — inferred
        from the (N,)-vs-(3,) broadcasting below, TODO confirm.
    varp_coa : numpy.ndarray
        Aperture reference point velocities; assumed shape (N, 3).
    gref : numpy.ndarray
        A reference point lying in the ground plane, shape (3,).
    uZ : numpy.ndarray
        Unit normal of the ground plane, shape (3,).

    Returns
    -------
    numpy.ndarray
        Ground-plane intersection points; entries are NaN wherever no valid
        intersection exists.
    """

    # Solve for the intersection of a R/Rdot contour and a ground plane.
    # arpZ: height of the ARP above the ground plane (signed, along uZ).
    arpZ = numpy.sum((arp_coa - gref)*uZ, axis=-1)
    # If the ARP is farther above the plane than the range, the range sphere
    # cannot reach the plane — mark as no solution.
    arpZ[arpZ > r_tgt_coa] = numpy.nan
    # ARP ground plane nadir
    aGPN = arp_coa - numpy.outer(arpZ, uZ)
    # Compute ground plane distance (gd) from ARP nadir to circle of const range
    gd = numpy.sqrt(r_tgt_coa*r_tgt_coa - arpZ*arpZ)
    # Compute sine and cosine of grazing angle
    cosGraz = gd/r_tgt_coa
    sinGraz = arpZ/r_tgt_coa
    # Velocity components normal to ground plane and parallel to ground plane.
    vMag = numpy.linalg.norm(varp_coa, axis=-1)
    vZ = numpy.dot(varp_coa, uZ)
    vX = numpy.sqrt(vMag*vMag - vZ*vZ)  # Note: For Vx = 0, no Solution
    # Orient X such that Vx > 0 and compute unit vectors uX and uY
    uX = (varp_coa - numpy.outer(vZ, uZ))/vX[:, numpy.newaxis]
    uY = numpy.cross(uZ, uX)
    # Compute cosine of azimuth angle to ground plane point
    cosAz = (-r_dot_tgt_coa+vZ*sinGraz) / (vX * cosGraz)
    cosAz[numpy.abs(cosAz) > 1] = numpy.nan  # R/Rdot combination not possible in given plane
    # Compute sine of azimuth angle. Use LOOK to establish sign.
    look = numpy.sign(numpy.dot(numpy.cross(arp_coa-gref, varp_coa), uZ))
    sinAz = look*numpy.sqrt(1-cosAz*cosAz)
    # Compute Ground Plane Point in ground plane and along the R/Rdot contour
    return aGPN + uX*(gd*cosAz)[:, numpy.newaxis] + uY*(gd*sinAz)[:, numpy.newaxis]
d325471ce2f21562dfd38e536ce77eb215069349
700,591
def encode_string_text(text):
    """Replace wiki-special symbols with their entities or magic words.

    The pipe is translated last, because its replacement "{{!}}" contains
    braces that must not themselves be re-encoded.
    """
    replacements = (
        ("<", "&lt;"),
        (">", "&gt;"),
        ("[", "&#91;"),
        ("]", "&#93;"),
        ("{", "&#123;"),
        ("}", "&#125;"),
        ("|", "{{!}}"),
    )
    for symbol, encoded in replacements:
        text = text.replace(symbol, encoded)
    return text
c7887934983efda3a0d1e06451ccd85cab529a30
700,592
def _params_to_ints(qs): """Convert a (string) list of string ids to a list of integers""" return [int(str_id) for str_id in qs.split(',')]
7568eeb6cf28f1c8e696e2fab6e7a89dcbbc07fc
700,593
def est_croissante2(l : List[float]) -> bool:
    """Return True if *l* is strictly increasing (each element < its successor).

    Empty and single-element lists are considered increasing.
    """
    # all() short-circuits on the first violated pair, like the original
    # flag-controlled while loop.
    return all(l[k] < l[k + 1] for k in range(len(l) - 1))
74357fe91d42b3cb0d95e09346504b0701ab7164
700,594
def compose(f, g):
    """Compose two filters: apply g first, then f.

    f is only applied when g's output is non-empty; an empty result is
    passed through unchanged.

    :param f: Outer filter function.
    :type f: filter function.
    :param g: Inner filter function.
    :type g: filter function.
    :return: lambda x: f(g(x))
    :rtype: filter function.
    """
    def composed(df):
        intermediate = g(df)
        return f(intermediate) if len(intermediate) > 0 else intermediate
    return composed
d90cda63eb365219ce5f454265036c7d977da216
700,595
def stop_list_to_link_list(stop_list):
    """Turn a stop sequence into consecutive links: [a, b, c] -> [(a, b), (b, c)]."""
    return [(stop_list[i], stop_list[i + 1]) for i in range(len(stop_list) - 1)]
49046e60664cd9c19ab55c1254684932d937531a
700,596
def compare_metrics(best_eval_result, current_eval_result):
    """Return True when the current evaluation beats the best one on exact_match."""
    return current_eval_result["exact_match"] > best_eval_result["exact_match"]
ce3880c2cc271d9c8c766a2dfbd95d31f5fb651c
700,597
def scrap_allrecipes_recipe(url: str) -> Dict:
    """This function scraps a recipe of Allrecipes, given its URL, and prepare a
    JSON file to index in Elasticsearch.

    :param url: the URL of the recipe
    :return: the recipe as a JSON-like dictionary with keys ``title``,
        ``description``, ``ingredients`` and ``calories`` (an empty dict
        when the page could not be fetched or parsed)
    :raise: ConnectionError, if the connection against Allrecipes crashes

    NOTE(review): the ``finally: return recipe`` at the bottom suppresses
    every exception raised above it (including the documented
    ConnectionError), so in practice this function never raises — confirm
    whether that is intended before relying on the :raise: contract.
    """
    def filter_noisy_chars(text: str) -> str:
        """Filter in a text new line symbols and excessive spaces"""
        return text.replace('\n', '').replace('  ', '').strip()

    # Data schema
    title = ''
    description = ''
    ingredients = []
    calories = 0

    # Recipe dictionary
    recipe = dict()

    try:
        request = requests.get(url)
        if request.ok:
            html = request.text
            soup = BeautifulSoup(html, 'lxml')
            # Title
            title_section = soup.select('h1')
            # Description
            description_section = soup.select('.recipe-summary p')
            # Ingredients
            ingredients_section = soup.select('.ingredients-section')
            # Calories
            nutrition_section = soup.select('.recipe-nutrition-section')

            # Pass the data
            if title_section:
                title = filter_noisy_chars(title_section[0].text)
            if description_section:
                description = filter_noisy_chars(description_section[0].text)
            if ingredients_section:
                ingredient_list = ingredients_section[0].text.split('\n')
                ingredient_list = [filter_noisy_chars(i) for i in ingredient_list]
                # Remove nulls
                ingredient_list = [i for i in ingredient_list if i]
                for ingredient in ingredient_list:
                    ingredients.append(ingredient)
            if nutrition_section:
                nutrition_info = filter_noisy_chars(nutrition_section[0].text)
                # First "<digits> calories" occurrence in the nutrition text.
                calories = re.findall(r'(\d+) calories', nutrition_info)[0]
                calories = int(calories)

            recipe = {'title': title,
                      'description': description,
                      'ingredients': ingredients,
                      'calories': calories}
        else:
            # NOTE(review): message typo "yo" -> "to"; left untouched here
            # because it is a runtime string.
            raise ConnectionError('Exception trying yo connect with Allrecipes')
    except Exception:
        # NOTE(review): re-raising a generic Exception discards the original
        # error; consider `raise ... from e` — left unchanged here.
        raise Exception('Exception while parsing')
    finally:
        # NOTE(review): returning from `finally` swallows any in-flight
        # exception raised above, so callers always receive a dict.
        return recipe
0c060ba40e784b7dad6ab2af40dd6f0aa4600173
700,598
def segmentPlaneIntersection(s0 = "const Dim<3>::Vector&",
                             s1 = "const Dim<3>::Vector&",
                             point = "const Dim<3>::Vector&",
                             normal = "const Dim<3>::Vector&",
                             tol = ("const double", "1.0e-8")):
    """Intersection of a line segment with a plane.
    The line segment is characterized by it's endpoints: seg = (s0, s1)
    The plane is characterized by a point in the plane and a unit normal: plane (point, normal)

    Return values are a tuple<char, Vector>
       The Vector is the intersection point (if any)
       The char is a code characterizing the intersection:
              "p" -> The segment lies in the plane (plane)
              "d" -> The p points do not define a unique plane (degenerate)
              "1" -> The segment intersects the plane properly
              "0" -> The segment does not intersect the plane"""
    # NOTE(review): this appears to be a binding-declaration stub (PYB11-style):
    # the string "defaults" are C++ argument types consumed by a code
    # generator, and the returned string names the C++ return type. No
    # geometry is computed here — confirm against the generator before
    # changing any of these strings.
    return "py::tuple"
eb68974937de575702069565fc3ee34dfadf89cc
700,599
def maybeName(obj):
    """Return an object's __name__ attribute or its string representation.

    @param obj any object
    @return obj name or string representation
    """
    _missing = object()
    # getattr with a default only absorbs AttributeError, exactly like the
    # original try/except.
    name = getattr(obj, '__name__', _missing)
    if name is _missing:
        return str(obj)
    return name
2b83918c49fc6cd19a027c1d2db07fcb0d57166a
700,601
def ped_datasets() -> List[str]:
    """Download the pedestrian datasets if absent and return contained paths.

    Fetches and unpacks datasets.zip into ./datasets on first use; on later
    calls the existing directory is reused. Returns the paths matching
    datasets/*/*.
    """
    if os.path.exists("datasets"):
        print("Directory 'datasets' exists, skipping download")
    else:
        subprocess.call(
            ["wget", "https://www.dropbox.com/s/8n02xqv3l9q18r1/datasets.zip"]
        )
        subprocess.call(["unzip", "-q", "datasets.zip"])
        subprocess.call(["rm", "-rf", "datasets.zip"])
    return glob.glob("datasets/*/*")
b322580b0ef544d0705d792c4be4f7cb13f2824e
700,602
def generate_random_pose():
    """
    generate a random rod pose in the room with center at (0, 0), width
    10 meters and depth 10 meters. The robot has 0.2 meters as radius

    Returns:
        random_pose: [x, y, angle, th_0, th_1] — (x, y) appears to be the
            rod midpoint with the robot at one end (TODO confirm), angle
            is the rod orientation, th_0/th_1 the two robots' headings.
    """
    def angleRange(x, y, room, L):
        """ Compute rod angle based on a given robot position

        room is [x_min, x_max, y_min, y_max]; L is the rod length.
        Returns the (min, max) interval of rod angles keeping the far rod
        end inside the room.
        NOTE(review): the locals `min`/`max` shadow the builtins inside
        this helper — harmless here, but worth renaming.
        """
        min = 0
        max = 0
        # Distances from the robot to each of the four walls.
        dMinX = abs(x-room[0])
        dMaxX = abs(x-room[1])
        dMinY = abs(y-room[2])
        dMaxY = abs(y-room[3])
        if dMinX < L:
            # Close to the left wall.
            if dMinY < L:
                # Bottom-left corner: both walls constrain the angle.
                min = -0.5*math.pi+math.acos(dMinY/L)
                max = math.pi-math.acos(dMinX/L)
            elif dMaxY < L:
                # Top-left corner.
                min = -math.pi+math.acos(dMinX/L)
                max = 0.5*math.pi-math.acos(dMaxY/L)
            else:
                # Left wall only.
                min = -math.pi + math.acos(dMinX/L)
                max = math.pi-math.acos(dMinX/L)
        elif dMaxX < L:
            # Close to the right wall.
            if dMinY < L:
                # Bottom-right corner.
                min = math.acos(dMaxX/L)
                max = 1.5*math.pi-math.acos(dMinY/L)
            elif dMaxY < L:
                # Top-right corner.
                min = 0.5*math.pi+math.acos(dMaxY/L)
                max = 2*math.pi-math.acos(dMaxX/L)
            else:
                # Right wall only.
                min = math.acos(dMaxX/L)
                max = 2*math.pi-math.acos(dMaxX/L)
        else:
            # Away from both vertical walls: only y-walls can constrain.
            if dMinY < L:
                min = -0.5*math.pi+math.acos(dMinY/L)
                max = 1.5*math.pi-math.acos(dMinY/L)
            elif dMaxY < L:
                min = 0.5*math.pi+math.acos(dMaxY/L)
                max = 2.5*math.pi-math.acos(dMaxY/L)
            else:
                # Unconstrained: full circle.
                min = -math.pi
                max = math.pi
        return min, max

    random_pose = []
    mag = 4.5
    len_rod = 2
    room = [-mag, mag, -mag, mag]  # create a room with boundary
    # randomize robot position
    rx = random.uniform(-mag, mag)
    ry = random.uniform(-mag, mag)
    # randomize rod pose
    min_angle, max_angle = angleRange(rx, ry, room, len_rod)
    angle = random.uniform(min_angle, max_angle)
    # Place the reported point half a rod-length from the robot along `angle`.
    x = rx + 0.5*len_rod*math.cos(angle)
    y = ry + 0.5*len_rod*math.sin(angle)
    # randomize robots orientation
    th_0 = random.uniform(-math.pi, math.pi)
    th_1 = random.uniform(-math.pi, math.pi)
    random_pose = [x, y, angle, th_0, th_1]
    return random_pose
81c26bbabb6b4386b4086b20a09fe8691add2fed
700,603
def constant_schedule_with_warmup(epoch, warmup_epochs=0, lr_start=1e-4, lr_max=1e-3):
    """
    Constant learning-rate schedule with linear warmup: the rate climbs
    linearly from {lr_start} to {lr_max} over the first {warmup_epochs}
    epochs, then stays at {lr_max}.
    """
    if epoch >= warmup_epochs:
        return lr_max
    warmup_slope = (lr_max - lr_start) / warmup_epochs
    return warmup_slope * epoch + lr_start
e780c3946f0207f94065c3ce8333896c3513f25a
700,604
def nSpecies():
    """Return the total number of species in the model (fixed at 62)."""
    TOTAL_SPECIES = 62
    return TOTAL_SPECIES
b9e77841cb46bba4faa1fab3c1ea00875753aaf1
700,605
def rom(a, b, f, eps=1e-8):
    """Approximate the definite integral of f over [a, b] by Romberg's method.

    eps is the desired accuracy: iteration stops when two successive
    diagonal estimates agree to within eps.
    """
    # Level 0: single trapezoid over the whole interval.
    table = [[0.5 * (b - a) * (f(a) + f(b))]]
    level = 1
    while True:
        step = float(b - a) / 2 ** level
        row = [None] * (level + 1)
        # Refine the trapezoid estimate: halve the previous one and add
        # the new midpoint samples.
        midpoint_sum = sum(f(a + (2 * k - 1) * step)
                           for k in range(1, 2 ** (level - 1) + 1))
        row[0] = 0.5 * table[level - 1][0] + step * midpoint_sum
        # Richardson extrapolation along the row.
        for m in range(1, level + 1):
            row[m] = row[m - 1] + (row[m - 1] - table[level - 1][m - 1]) / (4 ** m - 1)
        table.append(row)
        if abs(row[level - 1] - row[level]) < eps:
            return row[level]
        level += 1
3dfd00652837b999187a30ace3c3143fa381cb2a
700,606