content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import os
import re


def read_vasp_pressure(path: str) -> float:
    """Read the external pressure (``PSTRESS`` INCAR tag, kbar) of a VASP run.

    Scans INCAR, OUTCAR, then vasprun.xml under ``path`` for a line containing
    ``PSTRESS`` and converts the value so the PV term can be removed from the
    VASP enthalpy (H = E + PV).

    :param path: directory containing the VASP output files
    :return: pressure converted to eV/Angstrom^3, or 0.0 when no PSTRESS tag
        is found in any of the files
    """
    candidates = [
        os.path.join(path, "INCAR"),
        os.path.join(path, "OUTCAR"),
        os.path.join(path, "vasprun.xml"),
    ]
    pstress = None
    for fname in candidates:
        # (removed a leftover debug `print(fname)` from the original)
        if not os.path.isfile(fname):
            continue
        with open(fname, "r") as f:
            for line in f:
                if "PSTRESS" in line:
                    # Keep only digits and the decimal point.
                    # NOTE(review): a negative PSTRESS would lose its sign
                    # here, as in the original — confirm inputs are >= 0.
                    pstress = float(re.sub(r'[^0-9.]', '', line))
                    break
        if pstress is not None:
            break
    if pstress is None:
        return 0.0
    # kbar -> eV/A^3: 1 kbar = 1e8 Pa = 1e-22 J/A^3; divide by e (J per eV).
    return pstress * 1e-22 / 1.602176634e-19
c2cf406abc81061a09972b14f2c9539d15c0a7ee
12,186
def _is_transition_allowed(from_tag: str, from_entity: str, to_tag: str, to_entity: str): """ BIO 是否被允许转移。比如: "B-Per" "I-Per" 这是被允许的; 而 "B-Per" "I-Loc" 或者 "O", "I-Per" 这是不被允许的 :param from_tag: The tag that the transition originates from. For example, if the label is ``I-PER``, the ``from_tag`` is ``I``. :param from_entity: The entity corresponding to the ``from_tag``. For example, if the label is ``I-PER``, the ``from_entity`` is ``PER``. :param to_tag: The tag that the transition leads to. For example, if the label is ``I-PER``, the ``to_tag`` is ``I``. :param to_entity: The entity corresponding to the ``to_tag``. For example, if the label is ``I-PER``, the ``to_entity`` is ``PER``. :return: True: 该转移是被允许的; False: 该转移是不被允许的。 """ if to_tag == "START" or from_tag == "END": # Cannot transition into START or from END return False if from_tag == "START": return to_tag in ('O', 'B') if to_tag == "END": return from_tag in ('O', 'B', 'I') return any([ # Can always transition to O or B-x to_tag in ('O', 'B'), # Can only transition to I-x from B-x or I-x to_tag == 'I' and from_tag in ('B', 'I') and from_entity == to_entity ])
dbbef187e9444eb11b95b4a0bf84d29ccf604bcd
12,187
import sys
import re


def validateInputs(argv):
    """
    Validate the command-line input variables.

    Expects exactly three arguments: author initials, a quoted name and a
    quoted email. Prints a usage/error message and exits with status 2 on
    any validation failure.

    :param argv: list of the three command-line arguments
    :return: tuple (authorInitials, name, email)
    """
    nInputs = len(argv)
    # Validate that exactly 3 input variables are given
    # (the original comment said "2", contradicting the check below).
    if nInputs!=3:
        print("Error: You must specify 3 input parameters.")
        print("newAuthors.py authorInitials \"name\" \"email\"")
        sys.exit(2)
    # Ensure that the author initials only contain the letters a-zA-Z and the
    # numbers 0-9.
    authorInitialsPattern = re.compile('^[a-zA-Z0-9]+$')
    if not re.match(authorInitialsPattern,argv[0]):
        print("Error: The author initials can only consist of the characters "\
              "a-zA-Z0-9.")
        sys.exit(2)
    return argv[0], argv[1], argv[2]
6c040c411c43a14bf9995066964f94a94c237f5f
12,190
def _next_power_of_2(x):
    """
    Return the smallest power of 2 greater than or equal to x.

    Note: despite the original phrasing ("greater than"), a power of two is
    returned unchanged (e.g. 4 -> 4), and x == 0 maps to 1.

    :param x: non-negative integer
    :return: int - the smallest power of 2 that is >= x.
    """
    return 1 if x == 0 else 2**(x - 1).bit_length()
616c01f6aacb7442ce1b2afbcac35b26c8f79701
12,191
import os
import pickle


def load_db(DB_NAME):
    """Load a pickled database from ``DB_NAME``.

    If the file does not exist yet, an empty database is written to that
    path and an empty dict is returned.
    """
    if not os.path.exists(DB_NAME):
        # First run: persist an empty database, then hand it back.
        with open(DB_NAME, "wb") as handle:
            pickle.dump({}, handle)
        return {}
    with open(DB_NAME, 'rb') as handle:
        return pickle.load(handle)
0d61a226c1bfb49249911b8311f8b311740aa281
12,192
def getExtremeN(toSort, N, keyFunc):
    """Return the indices of the N smallest elements of ``toSort``.

    Elements are ranked by ``keyFunc`` applied to each value; the original
    positions (indices) of the N best-ranked elements are returned, in
    ascending rank order.
    """
    ranked = sorted(enumerate(toSort), key=lambda pair: keyFunc(pair[1]))
    return [index for index, _ in ranked[:N]]
0a8d4979cd5179fa65d02f5c8362581134abea8c
12,195
def ffmpeg_get_audio_buffer_size(audio_format):
    """Return an audio buffer size for *audio_format*.

    The buffer is sized to hold one second of audio data.
    """
    one_second = audio_format.bytes_per_second
    return one_second
adfb290b9377d680c0a3b0f79adb8f01dcf131fa
12,196
def set_bdev_options(client, bdev_io_pool_size=None, bdev_io_cache_size=None):
    """Set parameters for the bdev subsystem.

    Args:
        client: RPC client whose ``call`` method sends the request.
        bdev_io_pool_size: number of bdev_io structures in shared buffer pool (optional)
        bdev_io_cache_size: maximum number of bdev_io structures cached per thread (optional)
    """
    requested = {
        'bdev_io_pool_size': bdev_io_pool_size,
        'bdev_io_cache_size': bdev_io_cache_size,
    }
    # Only forward options the caller actually supplied (truthy values,
    # matching the original behavior).
    params = {key: value for key, value in requested.items() if value}
    return client.call('set_bdev_options', params)
1eaa1403a45845a3d742438cb9ff5f8b408f0684
12,197
def hasanyattrs(object, returnlist=False):
    """Check whether *object* has any entries in its ``attrs`` mapping.

    Unlike builtin ``hasattr`` (which tests one named attribute), this asks
    whether the ``attrs`` collection is non-empty.

    args:
        object: object exposing an ``attrs`` mapping with ``.items()``
            (e.g. an h5py node).
        returnlist: kept for interface compatibility; the items are returned
            regardless (as in the original implementation).

    returns:
        tuple (bool, items of ``object.attrs``)

    example usage:
        >>> import typeutils as tu
        >>> print tu.hasanyattrs(o ,True)
    """
    attributes = object.attrs.items()
    has_any = len(attributes) != 0
    return (has_any, attributes)
4ad87d0026cf443e2b968c16c0ac85158149f348
12,198
import random


def roll_nl(n=2, d=6):
    """Roll ``n`` dice with ``d`` faces each and return their total.

    >>> random.seed("test")
    >>> roll_nl()
    12
    """
    # Same RNG consumption as the original: exactly n randint(1, d) draws.
    points = tuple(random.randint(1, d) for _ in range(n))
    return sum(points)
07e2a245820896e26c462ac59251c55591bdff76
12,199
import inspect


def docstring(obj):
    """Return obj's own, un-inherited doc-attribute, or None.

    Inheritance is detected by comparing obj's __doc__ with that of its
    type: when they match, the docstring is considered inherited.
    XXX: this should eventually go into the inspect module!
    """
    own_doc = getattr(obj, '__doc__', '')
    type_doc = getattr(type(obj), '__doc__', '')
    if type_doc != own_doc:
        return inspect.getdoc(obj)
    return None
94f2089b8b25e9d7e83192dd5e3c02992559a2e2
12,201
def isstub(fn):
    """Return True if *fn* is likely only a stub function.

    A stub is a single-basic-block function (both at the native and LLIL
    level) that either has undetermined outgoing edges or exactly one callee.
    """
    several_blocks = len(fn.basic_blocks) > 1 or len(fn.llil.basic_blocks) > 1
    if several_blocks:
        return False
    entry = fn.llil.basic_blocks[0]
    return bool(entry.has_undetermined_outgoing_edges or len(fn.callees) == 1)
c980fdcaef2b1dc24b7e25bbf2230ded08b8615d
12,202
def getGeolocalisationFromJson(image):
    """Extract the geolocalisation data of an image record.

    Parameters:
        image (dict): image record from the validation data.

    Returns:
        tuple: (lng, lat, alt, azimuth, tilt, roll, focal, gcps, width,
        height). All values are floats except ``gcps`` which is the raw
        ``gcp_json`` entry; the three angles are normalised into [0, 360).
    """
    # Angles are wrapped modulo 360 degrees.
    angles = {key: float(image[key]) % 360 for key in ('azimuth', 'tilt', 'roll')}
    return (
        float(image['lng']),
        float(image['lat']),
        float(image['alt']),
        angles['azimuth'],
        angles['tilt'],
        angles['roll'],
        float(image['focal']),
        image['gcp_json'],
        float(image['width']),
        float(image['height']),
    )
4ea02780b4254dfb03ac335ca1214a7b7d6bf521
12,203
def experiment_name_resolution(setup_pickleddb_database, experiment_name_conflict):
    """Create a resolution for an experiment-name conflict.

    ``setup_pickleddb_database`` is only required for its side effects
    (fixture-style); the resolution renames the experiment to "new-exp-name".
    """
    resolution_cls = experiment_name_conflict.ExperimentNameResolution
    return resolution_cls(experiment_name_conflict, new_name="new-exp-name")
1172fc3380cce7190528cf6125ed8ec3a625eaf2
12,204
import os
import csv


def matrix2list(input_file_path, output_file_path, delimiter=',',
                skip_null_rec=True, null_str='',
                infile_encoding='utf-8', outfile_encoding='utf-8',
                vertical_column_name_index=0, vertical_column_ignore_tail=0,
                horizonal_column_name_index=0, horizonal_column_ignore_tail=0,
                data_start_line=1, data_start_column=1):
    """Convert a matrix-style CSV into a record-style CSV.

    Every data cell becomes one output record ``[row_name, column_name,
    value]``. Cells are treated as opaque strings (not necessarily numeric),
    so the matrix is walked cell by cell.

    Parameters
    ----------
    input_file_path: str
        Path of the input CSV.
    output_file_path: str
        Path of the output CSV; parent folders are created as needed.
    delimiter: str
        Field delimiter — ',' for CSV, but e.g. a tab allows TSV input.
    skip_null_rec: bool
        Whether to skip empty cells.
    null_str: str
        The string that marks an empty cell (could be "-" etc.).
    infile_encoding: str
        Encoding of the input file.
    outfile_encoding: str
        Encoding of the output file.
    vertical_column_name_index: int
        Column index read downwards for the per-row names.
    vertical_column_ignore_tail: int
        Number of trailing rows to ignore (e.g. a totals row at the bottom).
    horizonal_column_name_index: int
        Row index read rightwards for the per-column names.
    horizonal_column_ignore_tail: int
        Number of trailing columns to ignore (e.g. a totals column).
    data_start_line: int
        Row index where the data starts.
    data_start_column: int
        Column index where the data starts.

    Raises
    ------
    FileNotFoundError
        If the input file does not exist.
    """
    # Explicit raise instead of `assert`: asserts are stripped under -O.
    if not os.path.exists(input_file_path):
        raise FileNotFoundError(f'Input file not found. ({input_file_path})')

    # Create the output folder if needed. Guard against a bare filename,
    # for which dirname() is '' and os.makedirs('') would raise.
    out_dir = os.path.dirname(output_file_path)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)

    with open(input_file_path, 'r', encoding=infile_encoding) as f:
        reader = csv.reader(f, delimiter=delimiter, doublequote=True,
                            lineterminator='\r\n', quotechar='"',
                            skipinitialspace=True)
        csv_data = [row for row in reader]

    # A positive ignore-tail turns into a negative slice bound.
    row_tail = -vertical_column_ignore_tail if vertical_column_ignore_tail > 0 else None
    col_tail = -horizonal_column_ignore_tail if horizonal_column_ignore_tail > 0 else None

    # Column names, read rightwards along the designated header row.
    # (The original also built the row-name list here — sliced with the wrong
    # start index, data_start_column — but never used it; dropped as dead code.)
    column2 = csv_data[horizonal_column_name_index][data_start_column:col_tail]
    len_column2 = len(column2)

    # Header row of the record-style output, then one record per data cell.
    csv_data_out = [['column1', 'column2', 'value']]
    for rec in csv_data[data_start_line:row_tail]:
        # Row name, read downwards along the designated name column.
        col1 = rec[vertical_column_name_index]
        for i, val in enumerate(rec[data_start_column:col_tail]):
            if skip_null_rec and (val == null_str):
                continue  # skip empty cells
            csv_data_out.append([col1, column2[i % len_column2], val])

    with open(output_file_path, 'w', encoding=outfile_encoding) as f:
        # Other csv.writer options can be added later if ever needed.
        writer = csv.writer(f, delimiter=delimiter, lineterminator='\n')
        writer.writerows(csv_data_out)
    return None
30efb1693e2ea7055c3c0b2d95c6946b63752d99
12,205
def fixquotes(u):
    """
    Given a unicode string, replaces "smart" quotes, ellipses, etc.
    with ASCII equivalents.
    """
    if not u:
        return u
    replacements = {
        '\u201c': '"',    # left double quote
        '\u201d': '"',    # right double quote
        '\u2018': "'",    # left single quote
        '\u2019': "'",    # right single quote
        '\u2014': '--',   # em dash
        '\u2026': '...',  # ellipsis
    }
    # One C-level pass instead of chained .replace() calls.
    return u.translate(str.maketrans(replacements))
28f8ff0068b28ae0cca60ac850c89403bc347346
12,207
def jp_server_config():
    """Allow tests to set up their specific configuration values.

    The default implementation supplies no overrides.
    """
    overrides = {}
    return overrides
20d71f1e271eaf4859e5e6f2c86a80b5b3c2c52a
12,208
def render_hunspell_word_error(
    data,
    fields=("filename", "word", "line_number", "word_line_index"),
    sep=":",
):
    """Renders a mispelled word data dictionary.

    This function allows a convenient way to render each mispelled word data
    dictionary as a string, that could be useful to print in the context of
    spell checkers command line interfaces.

    Args:
        data (dict): Mispelled word data, as it is yielded by the method
            :py:meth:`hunspellcheck.HunspellChecker.check`.
        fields (list): List of fields to include in the response.
        sep (str): Separator string between each field value.

    Returns:
        str: Mispelled word data as a string.
    """
    # Tuple default instead of the original mutable list default (a shared
    # object across all calls); any iterable of field names still works.
    values = [str(data[field]) for field in fields if data.get(field) is not None]
    return sep.join(values)
9bbefd0b998abe25d0a977adfe69ef51185cde37
12,209
import re


def tokenize(document):
    """
    A tokenizer for the Python Textmining Package that works for unicode
    strings and doesn't replace non-ASCII characters (such as 'åäö') with
    spaces.

    Digits and non-word characters become spaces; tokens are lowercased.

    :param document: text to tokenize
    :return: list of lowercase word tokens
    """
    document = document.lower()
    # Raw string: '\w' in a plain literal is a deprecated escape sequence
    # and will become a SyntaxError in future Python versions.
    document = re.sub(r'[^\w]|[0-9]', ' ', document, flags=re.UNICODE)
    return document.strip().split()
1d4a1e8ed05796df3ac797a0a08b82dba5f640e6
12,210
def int2lehex(value, width):
    """Convert an unsigned integer to a little endian ASCII hex string.

    Args:
        value (int): value
        width (int): byte width

    Returns:
        string: ASCII hex string
    """
    little_endian = value.to_bytes(width, byteorder='little')
    return little_endian.hex()
1ce9bb9447236c36bb906560c65ffd8e058c5aa4
12,211
def patient_eval_before_2015(patient_eval_date, patient_phen):
    """Return patient phenotypes with negatives cleared for early evaluations.

    Patients evaluated in 2015 or earlier get an empty "neg" list (cf paper
    for the explanation of the possible bias). Note: despite the name, the
    year 2015 itself is included (``<= 2015``), matching the original logic.

    Parameters:
        patient_eval_date (dict): patient -> evaluation date ("YYYY-MM-DD"
            string, or the literal string "None" when unknown)
        patient_phen (dict): patient -> {"pos": [...], "neg": [...]}

    Returns:
        dict: copy of ``patient_phen`` with updated negative phenotypes.
        The input dict is left unmodified.
    """
    early_patients = []
    for pat, date in patient_eval_date.items():
        if date == "None":
            continue
        if int(date.split("-")[0]) <= 2015:
            early_patients.append(pat)
    # Copy each inner dict too: the original used a shallow .copy(), so
    # clearing "neg" below also mutated the caller's patient_phen.
    patient_phen_wo_2015 = {pat: dict(phen) for pat, phen in patient_phen.items()}
    for pat in early_patients:
        patient_phen_wo_2015[pat]["neg"] = []
    return patient_phen_wo_2015
e9f3c9456dac3b178f3746eb7a923148a1b81763
12,213
import socket
import struct


def ip4_from_int(ip):
    """Convert :py:class:`int` to IPv4 string.

    :param ip: int representing an IPv4
    :type ip: int
    :return: IP in dot-decimal notation
    :rtype: str
    """
    packed = struct.pack(">L", ip)  # big-endian network byte order
    return socket.inet_ntoa(packed)
94091bd650cf15bb216e82072478e73180c0027c
12,214
def overlap(start_1, end_1, start_2, end_2):
    """Return the `range` covered by two sets of coordinates.

    The input coordinates are inclusive (end points belong to the region);
    the returned `range` is exclusive, as ranges conventionally are. When
    the regions do not overlap, the returned range has zero or negative
    length (start >= stop).

    Parameters
    ----------
    start_1, start_2 : int
        The start coordinates of each region.
    end_1, end_2 : int
        The end coordinates of each region.

    Returns
    -------
    range
        The `range` covered by both regions.
    """
    lo = max(start_1, start_2)
    hi = min(end_1, end_2)
    return range(lo, hi + 1)
0bf76a98feaf94fffa2a13eb74f2a16e4fafe350
12,215
import os def _package_fullname_to_path(fullname): """Converts a package's fullname to a file path that should be the package's directory. :param fullname: The fullname of a package, like package_a.package_b :return: A derived filepath, like package_a/package_b """ return fullname.replace(".", os.sep) + os.sep
c05042d50058a008359392f65a4d7f57a8ac208b
12,216
def issubset(a, b):
    """Determine whether every element of ``a`` also occurs in ``b``.

    Args:
        a: sequence a
        b: sequence b

    Returns:
        bool: True if ``set(a)`` is a subset of ``set(b)``, else False.
    """
    return set(a) <= set(b)
39e3c974cb2f3bc3ecfe17589292646f7a1a3383
12,217
def random_explorer_player(bot, state):
    """Least-visited random player.

    Prefers moving to a position it has never (or least recently) seen.
    Per-bot memory lives in ``state[bot.turn]['visited']``, most recent
    position first.
    """
    if bot.turn not in state:
        # initialize this bot's recency list
        state[bot.turn] = {'visited': []}
    visited = state[bot.turn]['visited']

    # Move the current position to the front of the recency list.
    if bot.position in visited:
        visited.remove(bot.position)
    visited.insert(0, bot.position)

    # Candidate moves: all legal positions, minus recently seen ones,
    # discarded most-recent-first.
    candidates = bot.legal_positions[:]
    for seen in visited:
        if len(candidates) == 1:
            # only one candidate left — take it
            return candidates[0]
        if len(candidates) == 0:
            return bot.position
        if seen in candidates:
            candidates.remove(seen)

    # More than one unexplored move left: pick one at random.
    return bot.random.choice(candidates)
95b05d749b9fa7994ddb0b1e6adae5ef5c3b25ea
12,219
import json


def is_master():
    """Return True when executed on the AWS EMR master node.

    Reads the instance metadata file EMR writes on every node.
    :return: value of the "isMaster" field
    """
    with open('/mnt/var/lib/info/instance.json', 'r') as f:
        instance_info = json.load(f)
    return instance_info['isMaster']
e18cba58143cb3bd0a990324ae6bd6013cf47739
12,220
def f(x, y):
    """Right-hand side of the ODE: dy/dx = f(x,y) = 3x^2·y."""
    result = 3. * x ** 2. * y
    return result
4e18d6411be1c1445ea1aa4d9836b8bf5e26e3d6
12,221
def hxlm_factum_to_object(factum) -> dict:
    """Return the factum as a plain object.

    Intentionally a pass-through: wrapping in ``dict(factum)`` was tried
    and does not transpile well.
    """
    return factum
54cba326d6874768f2893f97277a691e7e77a2d7
12,223
def pop(obj, key=0, *args, **kwargs):
    """Pop an element from a mutable collection.

    Parameters
    ----------
    obj : dict or list
        Collection
    key : str or int
        Key or index
    default : optional
        Default value. Raise error if not provided.

    Returns
    -------
    elem
        Popped element
    """
    if isinstance(obj, dict):
        # dict.pop already implements the default-or-raise contract.
        return obj.pop(key, *args, **kwargs)
    try:
        val = obj[key]
        del obj[key]
        return val
    except (IndexError, KeyError, TypeError):
        # Narrowed from the original bare `except:`, which swallowed
        # everything including KeyboardInterrupt/SystemExit.
        if args:
            return args[0]
        if 'default' in kwargs:
            return kwargs['default']
        # No default supplied: propagate, matching the documented contract
        # (the old code silently returned None here).
        raise
71222b35f52a1ee118a352596caefebe9b7070fa
12,224
import argparse


def parse_args():
    """Set up and parse the command-line options.

    Lets the caller tell whether the user supplied a file (-f), a domain
    (-d), and/or an output destination (-o).
    """
    parser = argparse.ArgumentParser(
        description='grabs the domain or file from the user.')
    parser.add_argument('-f', '--file', dest='file')
    parser.add_argument('-d', '--domain', dest='domain')
    parser.add_argument('-o', '--output', dest='output')
    return parser.parse_args()
ec7bfd5e7ee5867509affe82cc3e841e7d8568d5
12,225
import subprocess


def is_date_in_last_line(todays_date, filepath):
    """Check whether a date already appears in the last line of a CSV file.

    Used for data that is appended daily, to avoid writing duplicates.

    :param todays_date: Date as string like "2018-01-01"
    :param filepath: Filename to check
    :return: True if the date is present in the last line of the file

    Example
        is_date_in_last_line("2018-01-01", "data.csv")
    """
    # Delegate to `tail` so huge files are not read into memory.
    tail_output = subprocess.check_output(["tail", "-1", filepath])
    return todays_date in tail_output.decode("utf-8")
63d0af4d32f8588d284fc5c1cddbc1d65b4e84e3
12,226
def route_index():
    """Display an overview of the exporter's capabilities.

    Returns a static HTML page (as a string) listing each metrics endpoint
    and the GET parameters it expects.

    NOTE(review): the page text contains typos ("Prometeus", "tokem") and
    the non-header rows carry four <td> cells against a three-column header.
    This is runtime output, so it is left byte-for-byte untouched here.
    """
    return '''
    <h1>Pure Storage Prometeus Exporter</h1>
    <table>
        <thead>
            <tr>
                <td>Type</td>
                <td>Endpoint</td>
                <td>GET parameters</td>
            </tr>
        </thead>
        <tbody>
            <tr>
                <td>Full metrics</td>
                <td><a href="/metrics?endpoint=host&apitoken=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx">/metrics</a></td>
                <td>endpoint, apitoken (optional, required only if authentication tokem is not provided)</td>
            </tr>
            <tr>
                <td>Volume metrics</td>
                <td><a href="/metrics/volumes?endpoint=host&apitoken=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx">/metrics/volumes</a></td>
                <td>endpoint, apitoken (optional, required only if authentication tokem is not provided)</td>
                <td>Retrieves only volume related metrics</td>
            </tr>
            <tr>
                <td>Host metrics</td>
                <td><a href="/metrics/hosts?endpoint=host&apitoken=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx">/metrics/hosts</a></td>
                <td>endpoint, apitoken (optional, required only if authentication tokem is not provided)</td>
                <td>Retrieves only host related metrics</td>
            </tr>
            <tr>
                <td>Pod metrics</td>
                <td><a href="/metrics/pods?endpoint=host&apitoken=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx">/metrics/pods</a></td>
                <td>endpoint, apitoken (optional, required only if authentication tokem is not provided)</td>
                <td>Retrieves only pod related metrics</td>
            </tr>
        </tbody>
    </table>
    '''
a0d0d505a73f2239f1b0fe30358d52d0dd5be5fe
12,227
import sqlite3


def get_test_database() -> sqlite3.Connection:
    """Build an in-memory SQLite database for unit testing the model classes.

    Applies the project schema from ``ticket/schema.sql`` to a fresh
    ``:memory:`` connection and returns it.
    """
    connection = sqlite3.connect(
        ":memory:", detect_types=sqlite3.PARSE_DECLTYPES
    )
    with open("ticket/schema.sql") as schema_file:
        schema_sql = schema_file.read()
    connection.executescript(schema_sql)
    return connection
aeff27686a7fc4efd66597a5e2264056de9cee8a
12,229
import re


def normalize_text(text):
    """Normalize whitespace: collapse space/tab runs, drop carriage returns
    and strip trailing whitespace.
    """
    # Collapse every run of spaces/tabs into a single space.
    text = re.sub(r"[ \t]+", " ", text)
    # Drop carriage returns (normalizes CRLF to LF).
    text = text.replace("\r", "")
    # Remove spaces/tabs left hanging before each newline.
    text = re.sub(r"[ \t]+\n", "\n", text)
    # Strip whitespace at the very end of the text.
    return text.rstrip()
aec04cc84aa91e16ca0f8ac18530813a6de3c187
12,230
def get_txt(path, version):
    """Return shell commands that install a custom Docker binary.

    Args:
        path (str): host/path prefix the replacement binary is fetched from.
        version (str): version suffix of the hosted ``docker-<version>`` file.

    Returns:
        str: newline-terminated shell script text.
    """
    # The original applied a no-op `.format(path)` to the first line, which
    # has no placeholders; a single format over the whole template is enough.
    txt = (
        'curl -sSL https://get.docker.com/ | sh\n'
        'mv /usr/bin/docker /usr/bin/docker.org\n'
        'wget https://{0}/docker-{1} -O /usr/bin/docker\n'
        'chmod +x /usr/bin/docker\n'
    ).format(path, version)
    return txt
5c0a438945e9f52e56bf44bd1d1c149899dc11aa
12,231
def test_function_pt_write():
    """Writing to an array passed to a function.

    Returns a source-code template (with a ``{dest}`` placeholder for the
    output location) exercising that writes through an array parameter are
    visible to the caller: arr[6] should end up as multwo(6) == 12.
    """
    return """
    fn test(arr: *u8, len: u8) {
        var i : u8 = 0;
        while i++ < len {
            arr[i] = multwo(arr[i]);
        }
    }
    fn multwo(x: u8) -> u8 {
        return x * 2;
    }
    fn main() {
        var arr: [u8] = {0, 1, 2, 3, 4, 5, 6};
        test(arr, 7);
        {dest} = arr[6];
    }
    """
1a06524888f8f32c52f3b2ef0d90ff48048e934c
12,232
def standardize_sizes(sizes):
    """Remove trailing ones from a list of sizes.

    At least two entries are always kept (lists of length <= 2 are returned
    unchanged), matching the original behavior.

    Parameters
    ----------
    sizes: List
        A list of integers.

    Returns
    -------
    List
        ``sizes`` with trailing 1s trimmed (the input list itself when no
        trimming happens; empty input is returned as-is).
    """
    # Length check first: the original tested sizes[-1] before the length,
    # which raised IndexError on an empty list.
    while len(sizes) > 2 and sizes[-1] == 1:
        sizes = sizes[0:-1]
    return sizes
8af9a1589d2cf1fe4f906839daa88de5bf9987c8
12,234
def init_position_markers(ax):
    """Initialize the target marker and its annotation on *ax*.

    Plots a red dot at the origin and annotates it with the label 'target'.
    Returns the (marker, annotation) pair.
    """
    (target_pt,) = ax.plot(0, 0, 'ro', zorder=2)
    target_ant = ax.annotate('target', xy=(0, 0), color='red', zorder=2)
    return target_pt, target_ant
3315e0aabd1965fc76974bbb183f531af96e516c
12,235
def qpm_to_bpm(quarter_note_tempo, numerator, denominator):
    """Converts from quarter notes per minute to beats per minute.

    Parameters
    ----------
    quarter_note_tempo : float
        Quarter note tempo.
    numerator : int
        Numerator of time signature.
    denominator : int
        Denominator of time signature.

    Returns
    -------
    bpm : float
        Tempo in beats per minute.
    """
    tempo_ok = isinstance(quarter_note_tempo, (int, float)) and quarter_note_tempo > 0
    if not tempo_ok:
        raise ValueError(
            'Quarter notes per minute must be an int or float '
            'greater than 0, but {} was supplied'.format(quarter_note_tempo))
    if not (isinstance(numerator, int) and numerator > 0):
        raise ValueError(
            'Time signature numerator must be an int greater than 0, but {} '
            'was supplied.'.format(numerator))
    if not (isinstance(denominator, int) and denominator > 0):
        raise ValueError(
            'Time signature denominator must be an int greater than 0, but {} '
            'was supplied.'.format(denominator))

    if denominator not in (1, 2, 4, 8, 16, 32):
        # Not a whole/half/quarter/.../32nd note denominator: leave as-is.
        return quarter_note_tempo
    if numerator == 3:
        # Simple triple meter.
        return quarter_note_tempo * denominator / 4.0
    if numerator % 3 == 0:
        # Compound meter (6/8*n, 9/8*n, 12/8*n, ...): dotted-quarter beat.
        return quarter_note_tempo / 3.0 * denominator / 4.0
    # Otherwise strongly assume two eighths equal a beat.
    return quarter_note_tempo * denominator / 4.0
e32614978b3632255963e84bb48c8f1fb14e82d1
12,236
def get_stop_type(data, stop_id):
    """Return the stop type of a TransXChange stop, per the GTFS reference.

    Scans ``data.TransXChange.StopPoints.StopPoint`` for the entry whose
    AtcoCode matches *stop_id* and maps its TransXChange StopType onto a
    GTFS code. Unknown stop types yield 999; an unknown *stop_id* falls
    through and returns None (as in the original implementation).
    """
    for point in data.TransXChange.StopPoints.StopPoint:
        if point.AtcoCode.cdata != stop_id:
            continue
        stop_class = point.StopClassification.StopType.cdata
        if stop_class in ('RPL', 'RPLY'):
            return 0
        if stop_class == 'PLT':
            return 1
        if stop_class in ('BCP', 'BCT', 'HAR', 'FLX', 'MRK', 'CUS'):
            return 3
        if stop_class == 'FBT':
            return 4
        return 999
2195e70e1e0279ee10f95def5cd3cf070def1aeb
12,237
import os
import base64


def password(bytelen, urlsafe=True):
    """Create a random password by base64-encoding ``bytelen`` random bytes.

    :param bytelen: number of random bytes to draw (os.urandom)
    :param urlsafe: use the URL-safe base64 alphabet when True
    :return: the encoded password as a str
    """
    raw = os.urandom(bytelen)
    encoder = base64.urlsafe_b64encode if urlsafe else base64.b64encode
    return encoder(raw).decode("utf-8")
160103896e916200f8384b7665bf3a057b6b3a79
12,238
def format_review(__, data):
    """Return a formatted HTML line showing the review state and its
    reference tags.

    The first argument is unused (kept for interface compatibility);
    *data* must provide state, artist_tag, album_tag, artist and album.
    """
    template = (
        "<li>[{state}] <a href='{artist_tag}/{album_tag}.html'>"
        "{artist} - {album}</a></li>\n"
    )
    return template.format(**data)
ab13b83c7ec317b23a7bdcc8e1d205fe2b77afbe
12,241
def Drag(density, velocity, reference_area):
    """Calculate the drag force acting on the rocket.

    Args:
        density (float): From rocket.Atmosphere_Density(), density of the atmosphere
        velocity (float): From rocket.Velocity(), velocity at timestep i
        reference_area (float): Constant cross-section area of the rocket

    Returns:
        drag (float): Drag force in Newtons, drag = 0.5 * Cd * rho * A * v^2
        with a fixed drag coefficient Cd = 0.75.
    """
    drag_coefficient = .75
    return .5 * drag_coefficient * density * reference_area * velocity**2
19626e4c11b21c2cd2c87be2970ced819509ed1b
12,242
def get_field_locs(working_data, fields):
    """
    Finds fields index locations.

    Parameters
    ----------
    working_data: list-like
        the working data
    fields: list-like
        the field names

    Outputs
    -------
    field_locs: dict
        field-index pairs

    Raises
    ------
    ValueError
        if any requested field is missing from ``working_data``
    """
    wanted = set(fields)  # O(1) membership instead of rescanning `fields`
    field_locs = {x: i for i, x in enumerate(working_data) if x in wanted}
    not_found = wanted - set(field_locs)
    if not_found:
        # ValueError instead of the original bare Exception; still an
        # Exception subclass, so existing broad handlers keep working.
        raise ValueError(f'Missing fields {not_found}')
    return field_locs
884c55211e9129222763b04c2e5cfc9ea4e371a3
12,243
def add_category_to_first(column, new_category):
    """Add a new category to the front of a categorical pandas column.

    Keyword arguments:
    column: the categorical pandas Series/column (uses the ``.cat`` accessor)
    new_category: the new category to be added

    Returns:
    A copy of the column whose categories list gains ``new_category`` as the
    first entry; the data values themselves are unchanged.

    Raises Exception when the column is not categorical, or when the
    category already exists.
    """
    if column.dtype.name != "category":
        raise Exception("Object is not a pandas.Categorical")
    if new_category in column.cat.categories:
        raise Exception("%s is already in categories list" % new_category)
    result = column.copy()
    # add_categories appends at the end; rebuild the order with the new
    # category moved to the front.
    result = result.cat.add_categories(new_category)
    ordered = [new_category] + [c for c in result.cat.categories if c != new_category]
    return result.cat.reorder_categories(ordered)
78c2bb09e439bd941653e599f1c872e858195239
12,244
def normalize_padding(value):
    """A copy of tensorflow.python.keras.util.

    Lists/tuples pass through unchanged; strings are lowercased and must be
    one of "valid", "same" or "causal".
    """
    if isinstance(value, (list, tuple)):
        return value
    normalized = value.lower()
    if normalized in {"valid", "same", "causal"}:
        return normalized
    # Note: the message reports the lowercased value, as the original did.
    raise ValueError(
        "The `padding` argument must be a list/tuple or one of "
        '"valid", "same" (or "causal", only for `Conv1D). '
        "Received: " + str(normalized)
    )
43ba467adf0c9c32323dd5bf87f798b640932417
12,245
from pathlib import Path


def x2char_star(what_to_convert, convert_all=False):
    """
    Converts `what_to_convert` to whatever the platform understands as char*.

    Paths and `str` are encoded to `bytes`; `bytes` pass through untouched.
    With `convert_all`, non-string values are converted too: booleans become
    b'true'/b'false' and anything else becomes the bytes of its repr (so `1`
    turns into b'1'). Without `convert_all`, non-string values are returned
    unchanged.
    """
    if isinstance(what_to_convert, Path):
        return str(what_to_convert).encode()
    if isinstance(what_to_convert, bytes):
        return what_to_convert
    if isinstance(what_to_convert, str):
        return what_to_convert.encode()
    if not convert_all:
        return what_to_convert
    if isinstance(what_to_convert, bool):
        return str(what_to_convert).lower().encode()
    return repr(what_to_convert).encode()
a612063372d22e5ef310356c74981b6e5f8f12bd
12,246
def just_words(df):
    """
    Remove all punctuation rows from the given DataFrame.

    Rows whose POS is "y" or a double-quote mark are dropped, as are rows
    with a null POS.

    :param df: DataFrame with a "POS" column
    :return: filtered DataFrame
    """
    no_punct = df.query('POS != "y"').query('POS != "\\""')
    return no_punct[no_punct["POS"].notnull()]
ab69d1ae9a2c55b8ba8b6cc353b0ffe8a734b1b2
12,248
def require_one_of(_return=False, **kwargs):
    """
    Validator that raises :exc:`TypeError` unless one and only one parameter
    is not ``None``. Use this inside functions that take multiple parameters,
    but allow only one of them to be specified::

        def my_func(this=None, that=None, other=None):
            # Require one and only one of `this` or `that`
            require_one_of(this=this, that=that)

            # If we need to know which parameter was passed in:
            param, value = require_one_of(True, this=this, that=that)

            # Carry on with function logic
            pass

    :param _return: Return the matching parameter
    :param kwargs: Parameters, of which one and only one is mandatory
    :return: If `_return`, matching parameter name and value
    :rtype: tuple
    :raises TypeError: If the count of parameters that aren't ``None`` is not 1
    """
    # Collect the names of parameters that were actually supplied.
    provided = [key for key, value in kwargs.items() if value is not None]
    if not provided:
        raise TypeError(
            "One of these parameters is required: " + ', '.join(kwargs.keys())
        )
    if len(provided) != 1:
        raise TypeError(
            "Only one of these parameters is allowed: " + ', '.join(kwargs.keys())
        )
    if _return:
        key = provided[0]
        return key, kwargs[key]
294bbb73136fd722a3b998881ef491977c2d0639
12,249
def delDotPrefix(string):
    """Delete the dot prefix of a file extension, if present."""
    if string.startswith("."):
        return string[1:]
    return string
792d4f72dcbc641de5c3c11f68a78c73fa0d9ecd
12,253
from typing import TextIO
from typing import Optional


def next_line(fh: TextIO) -> Optional[str]:
    """Return the next line from a filehandle, stripped of trailing
    whitespace, or None at end of file.
    """
    for line in fh:
        return line.rstrip()
    return None
92077359720b6ac72637943d86c7ab6250158194
12,255
import os


def read_version(setup_file, name, default_value=None, subfolder=None):
    """
    Extracts version from file `__init__.py` without importing the module.

    :param setup_file: setup file calling this function, used to guess
        the location of the package
    :param name: name of the package
    :param default_value: if not found, falls back to that value
    :param subfolder: if the package is in a subfolder like `src`
    :return: version
    :raise: RuntimeError if the returned version is None,
        FileNotFoundError if the package folder is missing and no
        default_value was given
    """
    version_str = default_value
    # Package root is assumed to be the folder holding the setup file.
    TOP_DIR = os.path.abspath(os.path.dirname(setup_file))
    if not os.path.exists(TOP_DIR):
        # Missing folder: fatal only when no default was supplied.
        if version_str is None:
            raise FileNotFoundError(
                "Unable to find folder %r." % TOP_DIR)
    else:
        if subfolder is None:
            init = os.path.join(TOP_DIR, name, '__init__.py')
        else:
            init = os.path.join(TOP_DIR, subfolder, name, '__init__.py')
        looked = []
        with open(init, "r") as f:
            # Keep only lines that start with `__version__` after stripping
            # newlines/spaces; the first one wins.
            line = [_ for _ in [_.strip("\r\n ") for _ in f.readlines()]
                    if _.startswith("__version__")]
            if len(line) > 0:
                looked = line
                # Take the right-hand side of `__version__ = "..."` and strip
                # surrounding quotes/spaces.
                version_str = line[0].split('=')[1].strip('" \'')
        if version_str is None:
            raise RuntimeError(
                "Unable to extract version from file %r, "
                "interesting lines %r." % (init, looked))
    # Final guard: a None version is never returned.
    if version_str is None:
        raise RuntimeError(
            "Unable to extract version from path %r. Content is %r." % (
                TOP_DIR, os.listdir(TOP_DIR)))
    return version_str
5facdfc402fe4545f5a2f0e8b5de2b8fc4079c64
12,256
def dectobin(x):
    """Convert Decimal to Binary.

    Input is a positive integer (0 and 1 map to '0' and '1'); output is the
    binary digits as a string.
    """
    digits = ''
    while x > 1:
        digits = str(x % 2) + digits
        x //= 2
    return str(x) + digits
21a6c0c161088f1e73afe87707503667b4651c87
12,257
def drop_field(expr, field, *fields):
    """Drop a field or fields from a tabular expression.

    Parameters
    ----------
    expr : Expr
        A tabular expression to drop columns from.
    *fields
        The names of the fields to drop.

    Returns
    -------
    dropped : Expr
        The new tabular expression with some columns missing.

    Raises
    ------
    TypeError
        Raised when ``expr`` is not tabular.
    ValueError
        Raised when a column is not in the fields of ``expr``.

    See Also
    --------
    :func:`blaze.expr.expressions.projection`
    """
    to_remove = {field, *fields}
    kept = []
    # Walk the expression's fields in order, keeping everything not marked
    # for removal; each removal name is consumed on first match (so a
    # duplicate later occurrence is kept, as in the original).
    for name in expr.fields:
        if name in to_remove:
            to_remove.remove(name)
        else:
            kept.append(name)
    if to_remove:
        raise ValueError(
            'fields %r were not in the fields of expr (%r)' % (
                sorted(to_remove), expr.fields
            ),
        )
    return expr[kept]
3007a46d3a0a44f47e10ead1eb0ab3a0ff5be44c
12,259
from collections import Counter


def calc_mode(nums):
    """Calculate the mode(s) of a list of numbers.

    Returns every value whose frequency ties for the maximum, in
    most-common-first order as produced by Counter.
    """
    counts = Counter(nums).most_common()
    top_count = counts[0][1]
    return [value for value, freq in counts if freq == top_count]
dc35c0b792473f00f5e3eb841409959a0bf4ed18
12,260
import torch


def collate_fn_lm(samples):
    """Build a language-model batch from token-id sequences.

    Inputs are every token but the last, targets every token but the
    first (shifted by one); the third slot (lengths/mask) is None.
    """
    batch = torch.LongTensor(samples)
    inputs = batch[:, :-1]
    targets = batch[:, 1:].contiguous()
    return inputs, targets, None
fbaa043dca5898f2df5a9f7367b654c1eedd6c0f
12,264
def deletions_number(s):
    """Count deletions needed so no two adjacent characters of *s* are equal.

    :type s: str
    :rtype: int
    """
    if not s:
        # The original indexed s[0] unconditionally and raised IndexError
        # on an empty string; zero deletions is the correct answer.
        return 0
    prev, del_count = s[0], 0
    for ch in s[1:]:
        if ch == prev:
            # Duplicate of its neighbour: delete it, keep `prev` anchored.
            del_count += 1
        else:
            prev = ch
    return del_count
b63b304510e58ad2191829138c5050e0e661adc5
12,265
def get_omero_dataset_id(conn, openbis_project_id, openbis_sample_id):
    """
    Look up the OMERO dataset id matching an OpenBIS project/sample pair.

    Args:
        conn: Established Connection to the OMERO Server via a BlitzGateway
        openbis_project_id: Id specifying the project information stored on OpenBIS
        openbis_sample_id: Id specifying the sample information stored on OpenBIS
    Returns:
        omero_dataset_id: Id of the matching dataset on OMERO, or -1 when
            no project/dataset pair matches
    """
    omero_dataset_id = -1
    found_id = False
    # NOTE(review): the two values below are never used afterwards; the calls
    # are presumably kept for their side effect of initialising the session /
    # group context on the gateway -- confirm before removing.
    my_exp_id = conn.getUser().getId()
    default_group_id = conn.getEventContext().groupId
    for project in conn.getObjects("Project"):
        # Stop scanning further projects once a dataset was matched.
        if found_id:
            break
        if project.getName() == openbis_project_id:
            for dataset in project.listChildren():
                if dataset.getName() == openbis_sample_id:
                    omero_dataset_id = dataset.getId()
                    found_id = True
                    break
    return omero_dataset_id
ce96afe5c7a6825b2c1b258b9335e0edb145e491
12,266
def check_index_in_list(name_list, index):
    """
    Return whether *index* is a valid (positive or negative) index into
    *name_list*.

    For example, with name_list ``['a', 'b']``: 0, 1, -1 and -2 are all
    valid, while 2 and -3 are not.

    :param name_list: list(str), name split in components
    :param index: int, positive or negative index number
    :return: bool, whether the given index exists in the names list
    """
    n = len(name_list)
    # A negative index -k is valid when k <= n; a positive index i when i < n.
    return -n <= index < n
efedd7511377e29cc28ea4212271a4b6b59cb3b2
12,269
def _normalize_sexpr(text): """ Normalize s-expr by making sure each toplevel expr is on a new line, and sub-expressions are always indented. Starts of block comments are also alwats indented. As a result, the text can now be split on "\n(" to get all expressions. """ # You probably dont want to touch it if it aint broken text = '\n' + text.replace('\r\n', '\n') # Defend against Windows parts = [] in_line_comment = False in_block_comment = False in_string = False level = 0 i0 = i =0 while i < len(text) - 2: i += 1 c = text[i] if in_line_comment: if c in '\r\n': in_line_comment = False elif in_block_comment: if c == '(' and text[i+1] == ';': in_block_comment += 1 if text[i-1] in '\r\n': parts.append(text[i0:i] + ' ') # indent block comment i0 = i i += 1 elif c == ';' and text[i+1] == ')': in_block_comment -= 1 i += 1 elif in_string: if c == '"' and text[i-1] != '\\': in_string = False else: if c == '"': in_string = True if c == ';' and text[i+1] == ';': in_line_comment = True elif c == '(' and text[i+1] == ';': in_block_comment = 1 if text[i-1] in '\r\n': parts.append(text[i0:i] + ' ') # indent block comment i0 = i i += 1 # skip one char! elif c == '(': level += 1 if level == 1 and text[i-1] not in '\r\n': parts.append(text[i0:i] + '\n') # dedent toplevel expr i0 = i elif level > 1 and text[i-1] in '\r\n': parts.append(text[i0:i] + ' ') # indent sub-expr i0 = i elif c == ')': level -= 1 parts.append(text[i0:]); return ''.join(parts)
4e3b35f5021ad7cdb857ac8516876b88bf7281c3
12,270
import os


def write_data(data, destination):
    """Write (overwrite) 'data' to 'destination' file semi-atomically.

    The data is first written and fsync'ed to '<destination>.tmp', then
    moved into place with os.replace, which atomically overwrites an
    existing destination on both POSIX and Windows. This removes the
    original remove-then-rename Windows fallback, whose window between
    remove() and rename() could leave no file at all on a crash.

    Returns 0 on success.
    """
    tmpfile = destination + ".tmp"
    try:
        with open(tmpfile, "w") as fout:
            fout.write(data)
            fout.flush()
            os.fsync(fout.fileno())  # make sure the bytes hit the disk
        os.replace(tmpfile, destination)
    except OSError:  # IOError is an alias of OSError on Python 3
        print("An error occured writing {}".format(destination))
        return -1
    return 0
02fdaf4efbf612b1a36e57b7bbc37091bda2904e
12,271
def put_something():
    """
    Put something
    ---
    responses:
      200:
        description: Success
    """
    # Docstring above is parsed as an OpenAPI spec -- left untouched.
    body = "I put something"
    return body, 200
e5464c86acc852d68c8a78bde3558b0673656636
12,272
import enum


def pascal_case(value):
    """Convert a string, or an Enum member's name, to PascalCase."""
    text = value.name if isinstance(value, enum.Enum) else value
    # title() capitalises each word, then underscores are squeezed out.
    return text.title().replace('_', '')
6e994650755968a3f73b345e1f3e040f0f211aa9
12,273
def format_timedelta(td):
    """
    Format a timedelta object as '{hours}h {minutes}m'.

    A ``None`` input is treated as a zero duration.
    """
    if td is None:
        return '0h 0m'
    total = td.total_seconds()
    # Floor division mirrors divmod(): hours may be negative, the
    # minute remainder is always in [0, 60).
    hours = int(total // 3600)
    minutes = int((total % 3600) // 60)
    return f'{hours}h {minutes}m'
e0b30250eef25db9b905e9e6d6c82a41b742112b
12,274
def get_file_mode_for_reading(context_tar):
    """Get file mode for reading from tar['format'].

    Returns 'r:<format>' when a format is present (r:gz, r:bz2, r:xz),
    or 'r:*' (auto-detect) when the key is absent or None. If the user
    specified something wacky in tar.Format, that's their business.
    """
    fmt = context_tar.get('format', None)
    # Deliberate quirk preserved: an explicit empty string still yields
    # 'r:' (uncompressed) rather than falling back to auto-detection.
    if fmt or fmt == '':
        return f"r:{fmt}"
    return 'r:*'
05908234027070c1479e5159fc16eda267228042
12,275
def touch_files(path):
    """
    Create the set of empty marker files used by Arbiter or FFTB.

    :param path: directory in which to create the files
    :return: True on completion
    """
    import os
    touch = ['parse', 'edge', 'run', 'analysis', 'decision', 'fold',
             'item-phenomenon', 'item-set', 'output', 'parameter',
             'phenomenon', 'preference', 'result', 'rule', 'score',
             'set', 'tree', 'update']
    for name in touch:
        # os.path.join fixes the original `path + f` concatenation, which
        # silently produced files like 'dirparse' when `path` had no
        # trailing separator; `with` guarantees the handle is closed.
        with open(os.path.join(path, name), "w+"):
            pass
    return True
5a6fe27eecfeee834aa0de8977c3f1acb1558623
12,276
def Proxy(f):
    """A helper to create a proxy method in a class.

    Returns a method that forwards all positional AND keyword arguments
    to ``self.<f>`` (the original dropped keyword arguments).
    """
    def Wrapped(self, *args, **kwargs):
        return getattr(self, f)(*args, **kwargs)
    return Wrapped
48ce69670db02691bf20b7d277af5086c76d012d
12,278
def underscore_to_camel(match): """ Cast sample_data to sampleData. """ group = match.group() if len(group) == 3: return group[0] + group[2].upper() else: return group[1].upper()
4a4dafd233f420ba2066d53260e0cd8c508b2bc9
12,280
import torch def test_ae(model, dataloader, gpu, criterion):
    """
    Computes the loss of the model, either the loss of the layer-wise AE or all the AEs in a big graph one time.

    :param model: the network (subclass of nn.Module)
    :param dataloader: a DataLoader wrapping a dataset
    :param gpu: (bool) if True a gpu is used
    :param criterion: loss function comparing the reconstruction to its
        input (e.g. nn.MSELoss) -- presumably reduction over the batch,
        TODO confirm against callers
    :return: loss of the model (float), summed over all batches
    """
    model.eval()
    total_loss = 0
    for i, data in enumerate(dataloader, 0):
        # assumes each batch is a dict with an 'image' tensor -- TODO confirm
        if gpu:
            inputs = data['image'].cuda()
        else:
            inputs = data['image']
        hidden = inputs
        outputs = model(hidden)
        # Compare against a detached copy so the reconstruction target
        # carries no autograd history.
        hidden_requires_grad_no = hidden.detach()
        hidden_requires_grad_no.requires_grad = False
        loss = criterion(outputs, hidden_requires_grad_no)
        total_loss += loss.item()
        torch.cuda.empty_cache()  # release cached GPU memory between batches

        del inputs, outputs, loss

    return total_loss
ffc55268dac0e884e7ef9594d270fb59d9fc19ec
12,281
def common_text(stringlist, kind='prefix'):
    """
    For a list of strings find the common prefix or suffix. Returns None
    when there is no common substring, when ``kind`` is neither 'prefix'
    nor 'suffix', or when the common part equals the whole first string
    (treated as "no prefix/suffix" by design -- see final check below).

    :param stringlist: a list of strings to test
    :param kind: string, either 'prefix' or 'suffix'
    :return: str or None, the common prefix/suffix
    """
    # Candidate starts as the whole first string and is shrunk to fit.
    substring = stringlist[0]
    if kind == 'prefix':
        # loop around strings in list (except first)
        for _str in stringlist[1:]:
            # while substring is not equal in first and Nth shorten
            while _str[:len(substring)] != substring and len(substring) != 0:
                substring = substring[:len(substring)-1]  # drop last char
            # test for blank string
            if len(substring) == 0:
                break
    elif kind == 'suffix':
        # loop around strings in list (except first)
        for _str in stringlist[1:]:
            # while substring is not equal in first and Nth shorten
            while _str[-len(substring):] != substring and len(substring) != 0:
                substring = substring[1:]  # drop first char
            # test for blank string
            if len(substring) == 0:
                break
    # if prefix or suffix is the same as all in list return None - there
    # is no prefix
    if substring == stringlist[0]:
        return None
    # else return the substring
    else:
        return substring
9a85120006abb5483f3a0afd4f8df63f547eb817
12,282
def is_subcmd(opts, args):
    """When sys.argv[1:] matched no getopt options but positional
    arguments remain, assume it is a sub command to execute via onecmd()."""
    return bool(args) and not opts
2158b83c5f1e78760d8d2947d5ab879a1dee020a
12,285
def sample(x, y, c=0, e=0):
    """
    Performs logarithmic sampling on the given data.

    Args:
        x (list of float): The x-values.
        y (list of float): The y-values.
        c (int): The current offset in the data.
        e (int): The current exponent.
    Returns:
        pair: The sampled x and y values, in a pair.
    """
    # Fixed off-by-one: the original tested `c > len(x)`, letting
    # c == len(x) fall through and raise IndexError on x[c].
    if c >= len(x):
        return ([], [])  # Length of data exceeded, return.
    w = 2 ** e  # Calculate sample interval.
    # Recursively bin data.
    nx, ny = sample(x, y, c + w, e + 1)
    return ([x[c]] + nx, [y[c]] + ny)
fbd5c9008aa2dec14b8fee5b848ad32ad774ed60
12,286
def start_menu():
    """
    Interactive menu shown at execution start.

    Returns
    -------
    name: str
        environment name entered by the user, or "test-env" by default
    channel_lst: list or None
        additional conda channels to install from, if the user gave any
    """
    print("Hello :)")
    # What kind of checks to add here?
    name = input("Please provide a name for the new environment: [e.g testenv, test-env, TestEnv] ") or "test-env"
    channel_lst = None
    choice = input("Do you want to specify any additional conda-channels to use? [y/n]")
    if choice.casefold() == "y".casefold():
        raw = input("Please enter the conda-channels you want to use, separated by ',' ")
        channel_lst = raw.split(',')
    return name, channel_lst
4b651ad38e1a163fa0ed75e2b34386149ccba1f0
12,287
from typing import List


def exif_gps_date_fields() -> List[List[str]]:
    """Names of the EXIF GPS tags carrying date information."""
    date_tags = ["GPS GPSDate", "EXIF GPS GPSDate"]
    return [date_tags]
e1911a4c7e79a5817fef0ddfc1a0ff7ad7083c59
12,288
import os


def get_paths(workspace, scenario, model):
    """Convenience function to build paths based on workspace, scenario, model.

    workspace (string): The path to the workspace.
    scenario (string): The scenario we're building (usually either 'paved'
        or 'bare')
    model (string): The string name of the model to run. One of 'sediment'
        or 'nutrient'.

    Returns a dictionary with:
        'base_run': path to the base export raster (note: always the
            nutrient baseline export, regardless of `model` -- preserved
            as-is from the original behaviour).
        'base_static_map': path to the base static map for this model and
            scenario.
        'workspace': path to the scenario workspace on disk.
    """
    base_run = os.path.join(workspace, 'nutrient_base', 'output',
                            'n_export.tif')
    static_map = os.path.join(workspace,
                              '%s_%s_static_map.tif' % (model, scenario))
    return {
        'base_run': base_run,
        'base_static_map': static_map,
        'workspace': os.path.join(workspace, scenario),
    }
6f3141cd2fbbd983d70ef15d1b3fc504ec0a5197
12,289
def divide_by(array_list, num_workers):
    """Divide each element of *array_list* by *num_workers*, in place.

    :param array_list: mutable list of numbers (mutated in place)
    :param num_workers: the divisor
    :return: the same list object, for convenience
    """
    # Slice assignment keeps the mutation in place, as callers expect.
    array_list[:] = [value / num_workers for value in array_list]
    return array_list
d7f1566b017fe859a2188d0efa511822f7e56b6a
12,290
import copy


def merge_dicts(dict1, dict2, make_copy=True):
    """
    Recursively merges dict2 into dict1: when both sides hold a dict for
    the same key the two are merged, otherwise dict2's value overwrites
    dict1's. Note the copy is shallow, so nested dicts of dict1 may be
    mutated even when make_copy is True (original behaviour, preserved).
    """
    if make_copy:
        dict1 = copy.copy(dict1)
    # .items() replaces the Python-2-only .iteritems(), which raises
    # AttributeError on Python 3.
    for key, value in dict2.items():
        if isinstance(dict1.get(key), dict) and isinstance(value, dict):
            value = merge_dicts(dict1[key], value, make_copy=False)
        dict1[key] = value
    return dict1
4da004649b0abacb5213d5a13ea44e2300c88bd3
12,291
import sys


def add_ds_str(ds_num):
    """Normalize a dataset number to the 'dsNNNNN' form, prepending 'ds'
    when missing. Exits the interpreter when the result is not exactly
    7 characters long (i.e. not a valid ds number).
    """
    ds_num = ds_num.strip()
    if not ds_num.startswith('ds'):
        ds_num = 'ds' + ds_num
    if len(ds_num) != 7:
        print("'" + ds_num + "' is not valid.")
        sys.exit()
    return ds_num
d30bad7161cda22e742f4a30b9a06e2ae38feab2
12,292
def get_const_diff_ints(ints, length):
    """f(n) = an + b: extend the arithmetic progression defined by the
    first two elements of *ints* to *length* terms."""
    values = []
    current = ints[0]
    step = ints[1] - ints[0]
    for _ in range(length):
        values.append(current)
        current += step
    return values
2b85b8fe28902239782fa85acfe7472860e639cf
12,293
def remove_invalid_req_args(credentials_dict, invalid_args):
    """
    Move every key listed in *invalid_args* out of *credentials_dict*
    into a separate dictionary.

    Returns:
        credentials_dict: the input dictionary with the listed keys popped
        invalid_args_dict: the popped key/value pairs
    """
    invalid_args_dict = {arg: credentials_dict.pop(arg)
                         for arg in invalid_args}
    return credentials_dict, invalid_args_dict
0dfeaeb9c904489cf9b1deab8f3d179e7891e5c4
12,294
import os


def get_document_namespace(filename, root=None, output_dir=None):
    """Derive the document namespace for a CropObjectList file: the
    basename of *filename* without its extension.

    ``root`` and ``output_dir`` are accepted for interface compatibility
    but are not used.
    """
    base = os.path.basename(filename)
    stem, _extension = os.path.splitext(base)
    return stem
d5b485844c269f7cbfaf4faef98edfa5ec014cab
12,296
def layout_bounding_boxes(canvas_x, canvas_y, canvas_width, line_height, space_widths, y_space, sizes):
    """Layout token bounding boxes on canvas with greedy wrapping.

    Args:
      canvas_x: x coordinate of top left of canvas
      canvas_y: y coordinate of top left of canvas
      canvas_width: width of canvas
      line_height: height for each line
      space_widths: width for space between each word
      y_space: extra space between lines
      sizes: list of width,height tuples of sizes for each word

    Returns:
      boxes: 4-tuple bounding box for each word
      line_breaks: token index for each line break
      line_poss: x,y positions starting each line
    """
    cur_x, cur_y = canvas_x, canvas_y
    cur_size = 0  # index of the token currently being placed
    cur_line = 0
    boxes = []
    line_breaks = []
    line_poss = []
    line_poss.append((cur_x, cur_y))
    while cur_size < len(sizes):
        sz = sizes[cur_size]
        if cur_x + sz[0] > canvas_width + canvas_x:
            # Token overflows the line: wrap without consuming it; the
            # same token is retried on the new line next iteration.
            # Note lines advance in negative y (canvas y grows upward).
            cur_line += 1
            cur_y = canvas_y - cur_line * (y_space + line_height)
            cur_x = canvas_x
            line_poss.append((cur_x, cur_y))
            line_breaks.append(cur_size)
        else:
            boxes.append((cur_x, cur_y, sz[0], sz[1]))
            cur_x += sz[0]
            # Add inter-word spacing when a width is provided for it.
            if cur_size < len(space_widths):
                cur_x += space_widths[cur_size]
            cur_size += 1
    return boxes, line_breaks, line_poss
057eb7976849444efa27d51d2ec1209e0f95eaa8
12,297
def closing():
    """HTML boilerplate closing the results table, adding the
    'Create Playlist' button (wired to the create_playlist() JS handler)
    and closing the document."""
    # NOTE(review): the stray quote after onclick='create_playlist()'" is
    # reproduced verbatim -- changing the markup would change behaviour.
    return """</tr></table>\n<div align='center' style='margin-top: 10px'>
    <button type="button" id="btn" class="btn btn-primary" onclick='create_playlist()'">Create Playlist</button>
    </div>\n</html>"""
51556490c82c04e43182923e3e779f8d3fcc392f
12,298
import functools


def pass_defaults(func):
    """Decorator calling *func* with ``self.defaults`` merged underneath
    the caller's keyword arguments (explicit kwargs win).

    Parameters
    ----------
    func : callable
        The method to wrap; it receives the merged keyword arguments.
    """
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        combined = dict(self.defaults)
        combined.update(kwargs)
        return func(self, *args, **combined)
    return wrapper
0a6ef20daea604e056e53df24dae5c76da84a16f
12,301
def x1x2(farm_id):
    """Map the last digit of *farm_id* onto an 'xAxB' digit pair.

    >>> x1x2(1), x1x2(2), x1x2(3), x1x2(4), x1x2(5)
    ('x1x6', 'x2x7', 'x3x8', 'x4x9', 'x5x0')
    """
    i = int(str(farm_id)[-1]) - 1
    # Use floor division: the original Python-2 `i / 5` produces a float
    # under Python 3, so str() yields e.g. '1.0' and [-1] picks the wrong
    # character -- the function's own doctest fails.
    x1 = str(i - 5 * (i // 5) + 1)[-1]
    x2 = str(i - 5 * (i // 5) + 6)[-1]
    return 'x%sx%s' % (x1, x2)
b0da6f8e92551247ca0ef6f017adbe03c429487b
12,303
import re


def split_verses(verses):
    """
    Split a string containing several '[n]'-numbered verses into a list
    of the verse texts, dropping the numbering and surrounding whitespace.

    >>> split_verses("[1] Verse1 [2] Verse2")
    ["Verse1", "Verse2"]
    """
    pieces = re.split(r"\[\d+\]", verses)
    return [piece.strip() for piece in pieces if piece.strip()]
f965c8a59db21c2c31e9a017d9226e430043c272
12,304
def ServerClass(cls):
    """Decorate classes with for methods implementing OSC endpoints.

    This decorator is necessary on your class if you want to use the
    `address_method` decorator on its methods, see
    `:meth:OSCThreadServer.address_method`'s documentation.
    """
    # Keep a reference to the original constructor so it still runs first.
    cls_init = cls.__init__

    def __init__(self, *args, **kwargs):
        cls_init(self, *args, **kwargs)
        # After normal initialisation, scan bound attributes for the
        # `_address` marker left by `address_method` and register each
        # marked method as an OSC endpoint on its server.
        for m in dir(self):
            meth = getattr(self, m)
            if hasattr(meth, '_address'):
                server, address, sock, get_address = meth._address
                server.bind(address, meth, sock, get_address=get_address)

    # Replace the constructor with the endpoint-registering wrapper.
    cls.__init__ = __init__
    return cls
f28760b2a138f1b95405f95dc122881b6d95d2e0
12,305
def include_tagcanvas(element_id, width, height, url_name='tagcanvas-list', forvar=None, limit=3):
    """
    Build the template context for a TagCanvas inclusion tag.

    Args:
        element_id - str - html id
        width - int - pixels width
        height - int - pixels height
        url_name - if url_name=='' then no links; 'default' is an alias
            for 'tagcanvas-list'
        forvar - optional queryset/iterable passed through to the template
        limit - maximum number of tags
    """
    if url_name == 'default':
        url_name = 'tagcanvas-list'
    context = dict(
        forvar=forvar,
        element_id=element_id,
        width=width,
        height=height,
        url_name=url_name,
        limit=limit,
    )
    return context
ec861bac56c7031e41915c7102c9474accd9a33b
12,307
def _sanitize_bin_name(name): """ Sanitize a package name so we can use it in starlark function names """ return name.replace("-", "_")
37e9a09cf60f83c087734b74ccf0ba7d3c46fea6
12,308
def _line(x, a, b): """ Dummy function used for line fitting :param x: independent variable :param a, b: slope and intercept """ return a*x + b
e883689fb39c51064b1f1e0a34d1ab03cc11efb9
12,309
import zipfile
import os


def make_zip(file_dir_in: str, file_name_out: str) -> None:
    """Recursively archive every file under *file_dir_in* into the
    DEFLATE-compressed zip file *file_name_out*."""
    with zipfile.ZipFile(file_name_out, 'w', zipfile.ZIP_DEFLATED) as archive:
        for root, _dirs, filenames in os.walk(file_dir_in):
            for filename in filenames:
                archive.write(os.path.join(root, filename))
    return None
219ef22c3035d435b1a58230b9a9ce4a02672f07
12,310
def yeet(yeetee_width, grid_width, enter_from="right"):
    """
    Work out the array indices needed to slide a grid across a grid.

    Returns a list of (yeetee-index, grid-offset) tuples; an unrecognised
    ``enter_from`` yields an empty list.
    """
    steps = []
    if enter_from == "right":
        # Grid offset walks down from the far edge to 1...
        for offset in range(grid_width - 1, 0, -1):
            steps.append((0, offset))
        # ...then the yeetee index advances with offset pinned at 0.
        for position in range(yeetee_width):
            steps.append((position, 0))
    elif enter_from == "left":
        for position in range(yeetee_width - 1, 0, -1):
            steps.append((position, 0))
        for offset in range(grid_width):
            steps.append((0, offset))
    return steps
62ca3ee33ed1e06000bc50ef4dd927eda9b886f1
12,312
def normalise_line_tail(line):
    """Strip all trailing whitespace and terminate the line with a single
    newline character."""
    return f"{line.rstrip()}\n"
25c22c0c39a73d5b9a449f202433f77bd0e1bb3b
12,313
def request(method, route, status_code=200, content_type='text/html',
            data=None, data_content_type=None, follow_redirects=True):
    """
    Issue a request through the test client *method* and assert on the
    response's status code and content type before returning it.
    """
    response = method(
        route,
        content_type=data_content_type,
        data=data,
        follow_redirects=follow_redirects,
    )
    # Fail fast with AssertionError when expectations are not met.
    assert response.status_code == status_code
    assert content_type in response.content_type
    return response
dd895f9cc4e655153d5bcb5748a94adddb446c81
12,316
import codecs


def load_vocabulary(vocabulary_path, reverse=False):
    """Load vocabulary from file.

    The vocabulary is stored one item per line, so a file containing
    'd' then 'c' yields {"d": 0, "c": 1}, or, with ``reverse`` set,
    the reversed vocabulary ["d", "c"].

    Args:
      vocabulary_path: path to the file containing the vocabulary.
      reverse: when True return the list form instead of the mapping.

    Returns:
      dict mapping token to index, or the list of tokens when ``reverse``.
    """
    with codecs.open(vocabulary_path, "r", "utf-8") as vocab_file:
        rev_vocab = [line.strip() for line in vocab_file.readlines()]
    if reverse:
        return rev_vocab
    return {token: index for index, token in enumerate(rev_vocab)}
0bbe55a64657f53a66df7f55b3e85dcb579593a5
12,317
def alignments_to_report(alignments):
    """Determine which alignments should be reported and used to call
    variants.

    With zero or one alignment there is nothing to choose. Otherwise,
    prefer the alignments interpretable as a variant (vartype set),
    falling back to all of them, and keep only those with the best score.
    """
    if len(alignments) <= 1:
        return alignments
    candidates = [aln for aln in alignments if aln.vartype is not None]
    if not candidates:
        candidates = alignments
    best_score = max(aln.score for aln in candidates)
    return [aln for aln in candidates if aln.score == best_score]
0a8adfb3146ffee4ac8272e5e515fb75ad2f13b4
12,318
import torch


def rescale_actions(
    actions: torch.Tensor,
    new_min: torch.Tensor,
    new_max: torch.Tensor,
    prev_min: torch.Tensor,
    prev_max: torch.Tensor,
) -> torch.Tensor:
    """Linearly map *actions* from [prev_min, prev_max] onto
    [new_min, new_max], element-wise with broadcasting.

    Raises AssertionError when any action lies outside the previous
    range or when the new range is inverted.
    """
    assert torch.all(prev_min <= actions) and torch.all(
        actions <= prev_max
    ), f"{actions} has values outside of [{prev_min}, {prev_max}]."
    assert torch.all(
        new_min <= new_max
    ), f"{new_min} is (has coordinate) greater than {new_max}."
    # Normalise into [0, 1], then stretch/shift into the new range.
    fraction = (actions - prev_min) / (prev_max - prev_min)
    return fraction * (new_max - new_min) + new_min
1304cee5f4d1f7a50b89842fa1bac6de8ab8bd04
12,320
def tag_repr(tag):
    """Render a tag's group/element pair as '(0xgggg, 0xeeee)'."""
    return f"(0x{tag.group:04x}, 0x{tag.element:04x})"
058f40824c85b834ce759ae1d01275c718a438c6
12,321
def rivers_with_station(stations):
    """Return a sorted, de-duplicated list of the rivers the given
    stations are on."""
    return sorted({station.river for station in stations})
d13044fef2a824d0d08e4419a13a2b22f1732175
12,322