content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import time def sec_to_datetime(timestamp): """ Convert secondes since epoch to a date and time """ return time.strftime("%d/%m/%Y %H:%M:%S %Z", time.localtime(timestamp))
e1e72e2e9e34259fcf2e2d3fcd6a6598dc77e3ae
699,511
def contains_failure(lines):
    """Return True if any line starts with 'FAILED'."""
    return any(line.startswith('FAILED') for line in lines)
c3a74373f351a946e7d8f9d9497c8ee9af32282e
699,512
import random
def shuffle_list(lista, seed=42):
    """
    Shuffle a list in place with a fixed seed and return it.

    Note: this reseeds the *global* random state on every call, so the
    same input and seed always yield the same ordering.

    :param lista: List of elements (mutated in place)
    :type lista: List
    :param seed: Seed number
    :type seed: int
    :return: the shuffled list (the same object that was passed in)
    :rtype: List
    """
    # Fixed defect: the old docstring described a second list ``listb`` and a
    # tuple return value that never existed in the implementation.
    random.seed(seed)
    random.shuffle(lista)
    return lista
743d2ae479422020dcaef8111b2ad12dc635dbcd
699,513
def looper(f, delimiter=''):
    """Build a function that maps *f* over an iterable and joins the results.

    :param f: callable applied to every element
    :param delimiter: string placed between stringified results
    :return: callable taking an iterable and returning a joined string
    """
    def apply_all(iterable):
        return delimiter.join(str(f(item)) for item in iterable)
    return apply_all
fb7e52c0b1e8aac8919f6ed9041d7e2eaaca8816
699,514
def form_concat_RE(re1, re2):
    """Helper for del_one_gnfa_state: concatenate two non-eps REs.

    An empty-string operand acts as the identity; otherwise the pair is
    wrapped in a ('.', (re1, re2)) concatenation node.
    """
    if re1 == "":
        return re2
    if re2 == "":
        return re1
    return ('.', (re1, re2))
621906db027421ffb03569671163fd2f6c2cac8c
699,515
def Jaccard3d(a, b):
    """
    Compute the Jaccard Similarity coefficient for two 3-dimensional volumes.

    Volumes are expected to be of the same size. We are expecting binary
    masks - 0's are treated as background and anything else is counted as data.

    Arguments:
        a {Numpy array} -- 3D array with first volume
        b {Numpy array} -- 3D array with second volume

    Returns: float in [0, 1]; 1.0 when both masks are empty (both volumes
    agree everywhere, and the former code raised ZeroDivisionError here).
    """
    if len(a.shape) != 3 or len(b.shape) != 3:
        raise Exception(f"Expecting 3 dimensional inputs, got {a.shape} and {b.shape}")
    if a.shape != b.shape:
        raise Exception(f"Expecting inputs of the same shape, got {a.shape} and {b.shape}")

    # Binarize: anything non-zero counts as foreground.
    ba = (a > 0).astype(int)
    bb = (b > 0).astype(int)

    n = (ba + bb == 2).sum()   # |A ∩ B|
    magna = ba.sum()
    magnb = bb.sum()

    denom = magna + magnb - n  # |A ∪ B|
    if denom == 0:
        # Both masks empty: define similarity as perfect agreement.
        return 1.0
    return n / denom
0c5e7be04f596736ab0184b2799708350691c240
699,516
def human_size(size):
    """Convert a byte count to a human-readable string.

    Recursively divides by 1024 until the whole part fits one unit, then
    appends the fractional remainder (two decimals) and the unit name.

    :param size: size in bytes
    :return: e.g. '1.5 KB'
    """
    def reduce_size(whole, leftover, level):
        # Keep dividing by 1024, tracking the last remainder and unit level.
        if whole >= 1024:
            leftover = whole % 1024
            whole //= 1024
            return reduce_size(whole, leftover, level + 1)
        return whole, round(leftover / 1024, 2), level

    units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
    whole, frac, level = reduce_size(size, 0, 0)
    if level + 1 > len(units):
        # Out of named units: fall back to the last one (matches original).
        level = -1
    return '{} {}'.format(whole + frac, units[level])
c502a42fc111f6c985e6c5ccdca76b99a02707be
699,517
def strip(value):
    """Remove leading/trailing whitespace and control characters (ord <= 32)."""
    # Fast path: falsy input, or nothing to trim on either end.
    if not value or (ord(value[0]) > 32 and ord(value[-1]) > 32):
        return value
    start = 0
    end = len(value)
    # Advance past leading control/space characters.
    while start < end and ord(value[start]) <= 32:
        start += 1
    if start == end:
        return ""
    # Walk back from the tail to the last printable character.
    for stop in reversed(range(start, end)):
        if ord(value[stop]) > 32:
            return value[start:stop + 1]
    return ""
380f2cdf5580b1dff02ff8e377942d443917842f
699,518
def is_behind_right_down(tile_ref, tile_test):
    """Tell whether tile_test is "behind" tile_ref.

    The is_behind_xxx helpers compare tiles against the Player's conquest
    direction (rightward, downward): behind means at-or-before on both axes.

    :return: True if tile_test is behind tile_ref
    """
    behind_x = tile_test.x <= tile_ref.x
    behind_y = tile_test.y <= tile_ref.y
    return behind_x and behind_y
18677d7d646ada9963bc0a15c88df59c38895605
699,519
def imputation(matrix, nan_filling):
    """Fill NaNs of a matrix DataFrame in place and return it.

    :param matrix: pandas DataFrame to impute (mutated in place)
    :param nan_filling: value used to replace NaNs
    :return: the same, imputed, DataFrame
    """
    matrix.fillna(nan_filling, inplace=True)
    return matrix
a6981e54f6228e9ebfcbb6ea65e5305f50a327ca
699,521
def index_types():
    """Return the list of supported SQL index types."""
    return ["UNIQUE", "CLUSTERED", "NONCLUSTERED"]
e6f8989b23c1800c8aec8aff12555c6ba0fb8c12
699,522
import pkg_resources
def is_version_greater_equal(version1, version2):
    """Return True when version1 >= version2 (PEP 440-aware comparison)."""
    parsed1 = pkg_resources.parse_version(version1)
    parsed2 = pkg_resources.parse_version(version2)
    return parsed1 >= parsed2
0618f18c22121d954ae156af7bb6eff80236ae29
699,524
def get_config(config, group):
    """Return the (song_data, log_data, output_data) paths for a group.

    Arguments:
    config -- ConfigParser object holding the values
    group  -- top-level grouping of config values (AWS or LOCAL), any case
    """
    section = config[group.upper()]
    return section['SONG_DATA'], section['LOG_DATA'], section['OUTPUT_DATA']
62af332b29e0a901c14143bd8b77834229ce0013
699,525
def singleton(cls):
    """Class decorator: make the decorated class yield one shared instance.

    Usage::

        @singleton
        class Xxx: ...
    """
    instances = {}

    def _singleton(*args, **kw):
        """Closure executed on each instantiation attempt."""
        try:
            return instances[cls]
        except KeyError:
            # First call: build and cache the unique instance.
            instances[cls] = cls(*args, **kw)
            return instances[cls]
    return _singleton
7a599472b7d1ba4a6f1b2a13fb600c98144c6ef5
699,527
import re
def sanitize_jinja(arg):
    """Reject strings containing Jinja-like expression or statement markup.

    Non-string arguments pass through untouched; strings are returned
    unchanged if clean, otherwise ValueError is raised.
    """
    if isinstance(arg, str):
        # {{ ... }} expression syntax
        if re.search(r'\{\{[^}]+}}', arg):
            raise ValueError('Inline Jinja variables are not allowed.')
        # {% ... %} statement / control-flow syntax
        if re.search(r'\{%[^%]+%}', arg):
            raise ValueError('Inline Jinja variables are not allowed.')
    return arg
7c28e987b18e4395d2d80dead85f1ee699515309
699,528
def ancestor(synset1, synset2):
    """Return the first common ancestor of both synsets (or None).

    Example: synsets("cat")[0].ancestor(synsets("dog")[0]) => Synset("carnivore")
    """
    lineage1 = synset1.hypernyms(recursive=True)
    lineage2 = synset2.hypernyms(recursive=True)
    # First hit in synset1's lineage that also appears in synset2's.
    for candidate in lineage1:
        if candidate in lineage2:
            return candidate
    return None
4f37b5895c7455e560847f00358b3538b43aff03
699,529
import asyncio
def run_coro(coro):
    """Run a coroutine on the default event loop and return its result."""
    loop = asyncio.get_event_loop()
    return loop.run_until_complete(coro)
62f109ef4440ecb2807640ff91b27f07e91d8f9f
699,530
from typing import Dict
from typing import Any
import os
import json
import yaml
def _read_data(path: str) -> Dict[str, Any]:
    """Load a JSON or YAML file into a dict, dispatching on the extension.

    :raises ValueError: for any extension other than .json / .yaml / .yml
    """
    ext = os.path.splitext(path)[-1]
    with open(path, "r") as fh:
        if ext == ".json":
            return json.load(fh)
        if ext in (".yaml", ".yml"):
            return yaml.load(fh, Loader=yaml.SafeLoader)
        raise ValueError("Invalid file type: `{}`".format(ext))
d3ff635c028cef9bc1be05a5c0c9be7e6d4f30c2
699,531
import numpy
def charge_exchange(ods, nc=10):
    """Add FAKE CER channel locations so overlay-plot tests have data.

    Dummy data is fine for tests as long as it is clearly not real.  This
    WILL overwrite existing entries; deepcopy first if you need the
    original.  The passed ODS is modified and returned.

    :param ods: ODS instance
    :param nc: number of channels to add
    :return: the same ODS, with fake CER hardware information added
    """
    radii = numpy.linspace(1.4, 2.2, nc)
    heights = numpy.linspace(0.05, -0.07, nc)
    for idx in range(nc):
        channel = ods['charge_exchange.channel'][idx]
        channel['identifier'] = 'FAKE_CER_{:02d}'.format(idx)  # F for fake
        channel['name'] = 'Fake CER channel for testing {}'.format(idx)
        for axis in ('r', 'z', 'phi'):
            channel['position'][axis]['time'] = numpy.array([0])
        channel['position.phi.data'] = numpy.array([6.5])
        channel['position.r.data'] = numpy.array([radii[idx]])
        channel['position.z.data'] = numpy.array([heights[idx]])
    return ods
1e0f169ffa12263715fffdd6476da37300340c66
699,532
import subprocess
def run_pedr2tab(pedrlist):
    """Invoke the external "pedr2tab" program via subprocess.

    Argument validation is left to the external program.  STDERR is piped
    to STDOUT; a non-zero exit raises CalledProcessError (check=True).

    Parameters
    ----------
    pedrlist : str
        path to file containing list of PEDR binaries
    """
    command = ["pedr2tab", pedrlist]
    return subprocess.run(command, check=True,
                          stderr=subprocess.STDOUT, encoding='utf-8')
05e16ebd12468a1e0c1bf30a6e928ece16c09022
699,533
def buildaff(ulx: float, uly: float, pixelres: float) -> tuple:
    """Build a GDAL GeoTransform tuple for a north-up raster.

    Args:
        ulx: projected geo-spatial upper-left x reference coord
        uly: projected geo-spatial upper-left y reference coord
        pixelres: pixel resolution (y resolution is negated)

    Returns:
        affine tuple (ulx, xres, 0, uly, 0, -yres)
    """
    return (ulx, pixelres, 0, uly, 0, -pixelres)
43e593050a6b3ebef876f4347d3df787e2a49795
699,534
def parse_search_result_data(api_response):
    """Flatten Tokopedia search responses into a list of product dicts.

    Arg:
    - api_response (list): list of raw response dicts

    Returns:
    - product_list (list): list of compiled product properties
    """
    products = []
    for response in api_response:
        # Standard tokopedia response style
        raw_products = response.get('data').get('ace_search_product_v4').get('data').get('products')
        for item in raw_products:
            products.append({
                'product_name': item.get('name'),
                'product_image_link': item.get('imageUrl'),
                'product_price': item.get('price'),
                'product_rating': item.get('rating'),
                'product_average_rating': item.get('ratingAverage'),
                'product_merchant': item.get('shop').get('name'),
                # Used later to crawl the product page for a description.
                'product_target_url': item.get('url'),
            })
    return products
ee1314589e1599f2d4a58523077abc8a7f270f88
699,535
def check_annulus(args):
    """Check and normalize the annulus values on the parsed arguments.

    Ensures args.annulus ends up a two-element list (or None) and that
    args.annulus1/annulus2 mirror its endpoints.

    :param args: argparse.ArgumentParser Namespace
    :returns: the same Namespace, updated
    """
    pair_given = args.annulus1 and args.annulus2
    if pair_given:
        args.annulus = [args.annulus1, args.annulus2]
    elif not args.annulus:
        # Neither form supplied: record the absence explicitly.
        args.annulus = None
    else:
        # Only the combined option was given: split it into the pair.
        args.annulus = list(args.annulus)
        args.annulus1 = args.annulus[0]
        args.annulus2 = args.annulus[1]
    return args
cd99b7200d226d9e36c644351d8e696a25a1a81d
699,536
def computeSignalValueInFrame(startbit, ln, fmt, value):
    """Pack *value* (ln bits wide, starting at *startbit*) into an integer frame.

    fmt == 1 selects Intel (little-endian) packing; any other value is
    treated as Motorola (big-endian).  The result is the value shifted into
    frame position, to be OR-accumulated by the caller.
    """
    frame = 0
    if fmt == 1:  # Intel
        # using "sawtooth bit counting policy" here
        pos = ((7 - (startbit % 8)) + 8*(int(startbit/8)))
        while ln > 0:
            # How many bits can we stuff in current byte?
            # (Should be 8 for anything but the first loop)
            availbitsInByte = 1 + (pos % 8)
            # extract relevant bits from value
            valueInByte = value & ((1<<availbitsInByte)-1)
            # stuff relevant bits into frame at the "corresponding inverted bit"
            posInFrame = ((7 - (pos % 8)) + 8*(int(pos/8)))
            frame |= valueInByte << posInFrame
            # move to the next byte
            # NOTE(review): a fixed "+= 0xf" only lands on the next byte's
            # sawtooth LSB when pos % 8 == 0; for multi-byte unaligned
            # signals this looks suspicious — confirm against known frames.
            pos += 0xf
            # discard used bytes
            value = value >> availbitsInByte
            # reduce length by how many bits we consumed
            ln -= availbitsInByte
    else:  # Motorola
        # Work this out in "sequential bit counting policy"
        # Compute the LSB position in "sequential"
        lsbpos = ((7 - (startbit % 8)) + 8*(int(startbit/8)))
        # deduce the MSB position
        msbpos = 1 + lsbpos - ln
        # "reverse" the value
        # NOTE(review): reversing the *minimal* binary string drops trailing
        # zero bits (e.g. 0b10 -> 0b01) instead of doing a width-ln bit
        # reversal — verify this is the intended behaviour.
        cvalue = int(format(value, 'b')[::-1],2)
        # shift the value to the proper position in the frame
        frame = cvalue << msbpos
    # Return frame, to be accumulated by caller
    return frame
f35a63ea4811b248430ee457c64fda98ea17134d
699,537
import re
def parse_show_spanning_tree_mst_config(raw_result):
    """
    Parse the 'show spanning-tree mst-config' command raw output.

    :param str raw_result: vtysh raw result string.
    :rtype: dict
    :return: The parsed result of the show spanning-tree \
        mst-config command in a dictionary of the form:

     ::

        {
            'mst_config_id': '70:72:cf:d9:2c:f6',
            'mst_config_revision': '8'
            'no_instances': '2',
            'instance_vlan': {'1': ['1','2'], '2': ['3','4']}
        }
    """
    # Header block: config ID, revision, digest and instance count.
    mst_conf_re = (
        r'\s*MST\s*config\s*ID\s*:\s*(?P<mst_config_id>[^ ]+)\s*\n'
        r'\s*MST\s*config\s*revision\s*:'
        r'\s*(?P<mst_config_revision>[0-9]+)\s*\n'
        r'\s*MST\s*config\s*digest\s*:\s*(?P<mst_digest>[^ ]+)\s*\n'
        r'\s*Number\s*of\s*instances\s*:\s*(?P<no_instances>[0-9]+)\s*\n'
    )
    # Per-line instance-to-VLAN mapping ("<instance> <vlan,vlan,...>").
    instance_re = (
        r'(?P<instance>^[0-9]+)\s*(?P<vlan>.+)\s*'
    )
    # Known error banners: if present, the whole raw output is returned
    # under the 'error' key instead of being parsed.
    error = [
        r'No\s*record\s*found\.',
        r'\s*Spanning-tree\s*is\s*disabled'
    ]
    instance = {}
    result = {}
    for error_str in error:
        re_result = re.search(error_str, raw_result)
        if (re_result):
            result['error'] = str(raw_result)
            return result
    re_result = re.search(mst_conf_re, raw_result)
    # Header must match for a non-error output.
    assert re_result
    result = re_result.groupdict()
    # Collect every instance line into the instance -> VLAN-list mapping.
    for line in raw_result.splitlines():
        re_result = re.search(instance_re, line)
        if re_result:
            partial = re_result.groupdict()
            instance[partial['instance']] = partial['vlan'].split(',')
    result['instance_vlan'] = instance
    return result
2f07e13ff7f79e4c15e0fe5111cbc18fb638dae8
699,538
def do_logout(user):
    """Log the given user out of Google (no-op placeholder)."""
    return None
2d8b719c2f22de87bc2bdc6f0f6a27eb57ff4982
699,539
def read_sql_file(filename, encoding='utf-8') -> str:
    """Read a SQL file and return its contents as a single string with
    full-line comments removed.

    Only lines *starting* with '--' are dropped; inline trailing comments
    are preserved, and no statement splitting is performed.  (The previous
    docstring wrongly promised a list of statements and full comment
    removal.)

    :param filename: path of the SQL file
    :param encoding: text encoding used to read the file
    """
    with open(filename, encoding=encoding) as f:
        lines = f.read().split('\n')
    return '\n'.join(line for line in lines if not line.startswith('--'))
bd29c1080b2fd95d690b4696bf63c92932616964
699,540
def scenegraphLocationString(dataPacket):
    """Return a packet's scenegraph location as '::UUID:OUTPUT'."""
    uuid_part = str(dataPacket.sourceNode.uuid)
    return "::" + uuid_part + ":" + dataPacket.sourceOutputName
027a69984acac4454e27a149ad822864751a0983
699,541
def get_data_to_update_from_broker(file_type, database_columns, broker_table, fiscal_year, fy_start, fy_end, unique_identifier):
    """ Generates SQL script to pull data from broker and compare it with website table
    Creates Temporary table of the rows that differ between the two databases

    The SQL uses dblink to query the broker server, EXCEPTs the matching
    website rows, and indexes the temp table on the unique identifier.
    """
    # FABS rows are additionally filtered on is_active; other file types are not.
    is_active = 'is_active = TRUE and' if file_type == 'fabs' else ''
    columns = " ,".join(database_columns)
    # dblink requires an explicit column definition list; everything is text.
    columns_type = " ,".join(["{} text".format(column) for column in database_columns])
    # NOTE: doubled single quotes (''...'') are escapes inside the dblink
    # string literal; "TABlE" is a (harmless) case typo in the SQL keyword.
    sql_statement = """
    CREATE TEMPORARY TABlE {file_type}_transactions_to_update_{fiscal_year} AS
    SELECT * from dblink('broker_server','
        SELECT {unique_identifier}, {columns}
        FROM {broker_table}
        WHERE {is_active} action_date:: date >= ''{fy_start}'':: date
        AND action_date:: date <= ''{fy_end}'':: date;
    ') AS (
        {unique_identifier} text,
        {columns_type}
    )
    EXCEPT
    SELECT {unique_identifier}, {columns}
    FROM transaction_{file_type}
    WHERE action_date:: date >= '{fy_start}':: date
    AND action_date:: date <= '{fy_end}':: date;
    -- Adding index to table to improve speed
    CREATE INDEX {file_type}_unique_idx ON {file_type}_transactions_to_update_{fiscal_year}({unique_identifier});
    """.format(file_type=file_type, fiscal_year=fiscal_year, unique_identifier=unique_identifier, columns=columns, broker_table=broker_table, is_active=is_active, fy_start=fy_start, fy_end=fy_end, columns_type=columns_type)
    return sql_statement
675ccbce6e58e62568479c905a36b046e74e2399
699,542
import time
def get_epoch(year, month='01'):
    """Epoch seconds for midnight on the first day of year-month (local time)."""
    pattern = '%Y.%m.%d %H:%M:%S'
    stamp = str(year) + '.' + str(month) + '.01 00:00:00'
    parsed = time.strptime(stamp, pattern)
    return int(time.mktime(parsed))
4126b720dedb0c91ec18f5198740896035da07e7
699,543
def get_directive_type(directive):
    """Return the single key of a directive dict.

    Directives have the form {'<directive type>': <dict of stuff>},
    e.g. {'#requirement': {...}} -> '#requirement'.

    Raises ValueError unless the dict holds exactly one key.
    """
    keys = list(directive)
    if len(keys) != 1:
        raise ValueError('Expected directive dict to contain a single key: {}'.format(directive))
    return keys[0]
584c1f22fc120815e79c781fc7ace69940c0f651
699,544
import json
def get_notebook_cells(path):
    """Return the "cells" list of a Jupyter .ipynb notebook file."""
    with open(path, "r", encoding="utf-8") as fh:
        notebook = json.load(fh)
    return notebook["cells"]
a7f4c9b45b1c58bdcf049c0834199f1a36402fa0
699,546
def u8(x: int) -> bytes:
    """Encode an 8-bit unsigned integer as a single byte.

    :param x: value in [0, 255]
    :raises ValueError: when x is out of range (an ``assert`` was used
        before, but asserts are stripped under ``python -O``)
    """
    if not 0 <= x < 256:
        raise ValueError(f"u8 requires 0 <= x < 256, got {x}")
    return x.to_bytes(1, byteorder='little')
c23bb4b6ea6bb610da5c3ecd281129e0bcc40bb7
699,547
def ultimate_answer(question):
    """Answer the ultimate question.

    Returns '42' only for the canonical phrasing 'What is the meaning of
    Life, The Universe, Everything?'; any other question gets
    'That is not much of a question'.

    args:
        question (str): The question to be answered.
    returns:
        str
    """
    canonical = "What is the meaning of Life, The Universe, Everything?"
    return "42" if question == canonical else "That is not much of a question"
5fc1c51c3901d3005adc78468668317f86e54d5c
699,548
def last(s):
    """Return the words of *s* ordered alphabetically by their last character.

    Ordering is stable within each letter bucket; words whose final
    character is not a lowercase a-z letter are dropped (matching the
    original bucket scan).

    ex: "man i need a taxi up to ubud"
        ==> ["a", "need", "ubud", "i", "taxi", "man", "to", "up"]
    """
    words = s.split()
    ordered = []
    for code in range(ord('a'), ord('z') + 1):
        letter = chr(code)
        ordered.extend(word for word in words if word[-1] == letter)
    return ordered
ed1fc01420d1b89e6b52a54e7413e54fbc8ed691
699,549
def evaluate_poly(poly, x):
    """Evaluate a polynomial at x, where poly[i] is the coefficient of x**i.

    Example: poly = (0.0, 0.0, 5.0, 9.3, 7.0)   # f(x) = 7x^4 + 9.3x^3 + 5x^2
             evaluate_poly(poly, -13) == 180339.9

    poly: tuple of numbers, length > 0
    x: number
    returns: float
    """
    total = 0.0
    for power, coeff in enumerate(poly):
        total += coeff * x ** power
    return total
2705dbdaa937c564454078dc2cbc8dd4a9bd56e8
699,551
import random import os def combine(df_T, df_B, indices_T, indices_B, outfile, outdir="./data"): """Combines dataframes and saves as a file.""" list_TB = ( df_T.iloc[indices_T, 1].reset_index(drop=True) + df_B.iloc[indices_B, 1].reset_index(drop=True) ).to_list() list_BT = ( df_B.iloc[random.sample(indices_B, len(indices_B)), 1].reset_index(drop=True) + df_T.iloc[indices_T, 1].reset_index(drop=True) ).to_list() with open(os.path.join(outdir, outfile), "w") as f: f.write("\n".join(list_TB + list_BT)) print(len(list_TB + list_BT)) return list_TB + list_BT
2bc7631e0fc51891011cb55b0c85560aae00504c
699,552
def load_expected_whitelist(filename, logger):
    """
    **Purpose**
        Load the expected barcode whitelist and return it as a set.

    The previous version used bare open()/close(), leaking the file handle
    if reading raised; a context manager now guarantees closure.

    :param filename: path of the whitelist file (one barcode per line)
    :param logger: logger used to report how many barcodes were found
    :return: set of stripped lines
    """
    with open(filename, 'rt') as fh:
        expected_whitelist = {line.strip() for line in fh}
    logger.info('Found {0:,} expected barcodes'.format(len(expected_whitelist)))
    return expected_whitelist
097981e4d7e2c44931cdeebd935e6ba2b8f78e38
699,553
def split(string_to_split, separator):
    """Split string_to_split on separator and return the resulting list."""
    return string_to_split.split(separator)
c96d4ed6e3128617925c7d52c19d607cbfe1ece1
699,554
def _find_biggest_value(list): """ Get the intex of the largest value in list of values :return: -1 if list empty, otherwise index of biggest value """ if len(list) < 1: return -1 max = 0 for idx, val in enumerate(list): if val > list[max]: max = idx return max
e4c4267ac2bfe73ad3bdb11433935805f4859e95
699,555
import numpy
def haversine(coord1, coord2, to_radians=True, earth_radius=6371):
    """Great-circle distance between two (lat, lon) points.

    Slightly modified version of http://stackoverflow.com/a/29546836/2901002
    (see also https://stackoverflow.com/questions/43577086).

    :param to_radians: convert inputs from decimal degrees when True
    :param earth_radius: sphere radius (km by default)
    """
    lat1, lon1 = coord1
    lat2, lon2 = coord2
    if to_radians:
        lat1, lon1, lat2, lon2 = numpy.radians([lat1, lon1, lat2, lon2])
    half_dlat = (lat2 - lat1) / 2.0
    half_dlon = (lon2 - lon1) / 2.0
    h = numpy.sin(half_dlat) ** 2 + \
        numpy.cos(lat1) * numpy.cos(lat2) * numpy.sin(half_dlon) ** 2
    return earth_radius * 2 * numpy.arcsin(numpy.sqrt(h))
46d073421bed55e0ccd11f995dbcbfdaf5124720
699,556
def inline(s, fmt):
    """Wrap s (formatted through fmt) in backticks with zero-width-space guards."""
    body = fmt.format(s)
    return "`\u200B" + body + "\u200B`"
3f07a4f55e60c763a10956c9ac52314b6958eca8
699,557
from typing import Optional
def format_download_filename(content_disposition: Optional[str], download_url: str) -> str:
    """Derive a download file name from an RFC 6266 'Content-Disposition' header.

    Falls back to the last path segment of *download_url* when the header is
    missing or carries no filename parameter.

    Migrated off ``cgi.parse_header`` — the ``cgi`` module was deprecated in
    Python 3.11 and removed in 3.13; ``email.message`` is the documented
    stdlib replacement and also handles RFC 2231 encoded parameters.
    """
    from email.message import Message  # local import: tiny, stdlib-only helper
    header = Message()
    header['content-disposition'] = content_disposition or ''
    download_filename = header.get_filename()
    if download_filename:
        return download_filename
    return download_url.split('/')[-1]
0abd0d17db1b92afff1fc5163aadd6450dac7e1c
699,559
def _compute_cultural_score_building(building_geometry, historical_elements_gdf, historical_elements_gdf_sindex, score = None):
    """
    Compute the cultural (pragmatic) score for a single building; supports
    the function "pragmatic_meaning".

    Candidates are pre-filtered through the spatial index on the building's
    bounds, then narrowed to true geometric intersections.

    Parameters
    ----------
    building_geometry: Polygon
    historical_elements_gdf: GeoDataFrame of historical elements
    historical_elements_gdf_sindex: Rtree spatial index
    score: string, optional column of per-element scores

    Returns
    -------
    float -- intersection count when *score* is None, otherwise the sum of
    the intersecting elements' *score* values
    """
    candidate_idx = list(historical_elements_gdf_sindex.intersection(building_geometry.bounds))
    candidates = historical_elements_gdf.iloc[candidate_idx]
    matches = candidates[candidates.intersects(building_geometry)]

    if score is None:
        # Score is simply how many historical elements intersect.
        return len(matches)
    if len(matches) == 0:
        return 0
    # Otherwise accumulate the matched elements' score column.
    return matches[score].sum()
743be39a3c100f53787df8195298c43affac8b4d
699,561
def grayscale_surface(surf):
    """Convert a pygame surface to grayscale in place (luminosity method) and return it."""
    width, height = surf.get_size()
    for x in range(width):
        for y in range(height):
            red, green, blue, alpha = surf.get_at((x, y))
            # Standard luma weights; alpha is preserved untouched.
            luma = 0.3 * red + 0.59 * green + 0.11 * blue
            surf.set_at((x, y), (luma, luma, luma, alpha))
    return surf
139e625ed448de84111e933dd3155fcef930ac19
699,562
import os def _IsPlatformSupported(): """Checks that this platform and build system are supported. Args: opts: The options parsed from the command line. Returns: True if the platform and build system are supported. """ # Haven't tested the script out on any other platforms yet. supported = ['posix', 'nt'] return os.name in supported
9b4c5696d4c49298d688ddd0b893e24332b2bd0a
699,563
import socket
def find_port():
    """Return an unbound local port.

    Binds port 0 on 127.0.0.1 so the OS picks a free port, reads it back,
    and releases the socket before returning.
    """
    sock = socket.socket()
    try:
        sock.bind(("127.0.0.1", 0))
        return sock.getsockname()[1]
    finally:
        sock.close()
a3a535e1bed9b079654a0ac8b3c329d8e575537c
699,564
def index():
    """Index page: returns HTML documenting the available REST API endpoints."""
    # Static HTML describing the /inception/v4/* endpoints; keep this text
    # in sync with the actual routes it documents.
    return """ <div> <h1> Inception REST API </h1> <h3> The following API end points are valid </h3> <ul> <h4> Inception V4 </h4> <li> <code>/inception/v4/ping </code> - <br/> <b> Description : </b> checks availability of the service. returns "pong" with status 200 when it is available </li> <li> <code>/inception/v4/classify/image</code> - <br/> <table> <tr><th align="left"> Description </th><td> This is a classifier service that can classify images</td></tr> <tr><td></td> <td>Query Params : <br/> <code>topn </code>: type = int : top classes to get; default : 5 <br/> <code>min_confidence </code>: type = float : minimum confidence that a label should have to exist in topn; default : 0.015 <br/> <code>human </code>: type = boolean : human readable class names; default : true <br/> </td></tr> <tr><th align="left"> How to supply Image Content </th></tr> <tr><th align="left"> With HTTP GET : </th> <td> Include a query parameter <code>url </code> which is an http url of JPEG image <br/> Example: <code> curl "localhost:8764/inception/v4/classify/image?url=http://xyz.com/example.jpg"</code> </td></tr> <tr><th align="left"> With HTTP POST :</th><td> POST JPEG image content as binary data in request body. 
<br/> Example: <code> curl -X POST "localhost:8764/inception/v4/classify/image?topn=5&min_confidence=0.015&human=false" --data-binary @example.jpg </code> </td></tr> </table> </li> <li> <code>/inception/v4/classify/video</code> - <br/> <table> <tr><th align="left"> Description </th><td> This is a classifier service that can classify videos</td></tr> <tr><td></td> <td>Query Params : <br/> <code>topn </code>: type = int : top classes to get; default : 5 <br/> <code>min_confidence </code>: type = float : minimum confidence that a label should have to exist in topn; default : 0.015 <br/> <code>human </code>: type = boolean : human readable class names; default : true <br/> <code>mode </code>: options = <code>{"center", "interval", "fixed"}</code> : Modes of frame extraction; default : center <br/> &emsp; <code>"center"</code> - Just one frame in center. <br/> &emsp; <code>"interval"</code> - Extracts frames after fixed interval. <br/> &emsp; <code>"fixed"</code> - Extract fixed number of frames.<br/> <code>frame-interval </code>: type = int : Interval for frame extraction to be used with INTERVAL mode. If frame_interval=10 then every 10th frame will be extracted; default : 10 <br/> <code>num-frame </code>: type = int : Number of frames to be extracted from video while using FIXED model. If num_frame=10 then 10 frames equally distant from each other will be extracted; default : 10 <br/> </td></tr> <tr><th align="left"> How to supply Video Content </th></tr> <tr><th align="left"> With HTTP GET : </th> <td> Include a query parameter <code>url </code> which is path on file system <br/> Example: <code> curl "localhost:8764/inception/v4/classify/video?url=filesystem/path/to/video"</code><br/> </td></tr><br/> <tr><th align="left"> With HTTP POST :</th><td> POST video content as binary data in request body. If video can be decoded by OpenCV it should be fine. 
It's tested on mp4 and avi on mac <br/> Include a query parameter <code>ext </code>this extension is needed to tell OpenCV which decoder to use, default is ".mp4" </br> Example: <code> curl -X POST "localhost:8764/inception/v4/classify/video?topn=5&min_confidence=0.015&human=false" --data-binary @example.mp4 </code> </td></tr> </table> </li> <ul> </div> """
7037c89f27342724960097db6518f30003c27f17
699,565
import requests
def get_json_from_url(url):
    """GET *url* and return the decoded JSON body, raising on HTTP failure."""
    response = requests.get(url)
    response.raise_for_status()
    return response.json()
c88b36ff6d4911aaa6d4115b5c054fbab2f3051f
699,568
def is_param(arg):
    """True when arg is a ('params', ...) tuple marking a params payload."""
    if not isinstance(arg, tuple):
        return False
    return arg[0] == "params"
bedb4745f8fc7095e235be588ad53c28db9fb4d8
699,569
def evaluate_numeric_condition(target, reveal_response):
    """Evaluate a numeric condition embedded in reveal_response against target.

    :param target: the question's value being tested against
    :param reveal_response: string like '>=5', '<3.2', '==7', ...
    :return: boolean result of the comparison, or None when target is blank
        or reveal_response carries no recognised numeric condition
    """
    if target == '':
        # A blank answer cannot be compared numerically.
        return None
    # Two-character prefixes must be checked before their one-character forms.
    checks = (
        ('>=', lambda a, b: a >= b),
        ('<=', lambda a, b: a <= b),
        ('==', lambda a, b: a == b),
        ('<', lambda a, b: a < b),
        ('>', lambda a, b: a > b),
    )
    for prefix, compare in checks:
        if reveal_response.startswith(prefix):
            return compare(float(target), float(reveal_response[len(prefix):]))
    return None
1eeb170635e5fb0ac5fd379af885a7a0222fb722
699,570
def render_parse_tag():
    """Render a load-more-data button; supplies an empty template context."""
    return {}
71f2f8ffe7fc961b99acdc49ee4ab5d9d7cd1f91
699,571
def strip_string(string):
    """Return *string* with leading and trailing whitespace removed.

    :param string: the string to trim
    :return: the trimmed string
    """
    return string.strip()
d57cae7b255d08505f05a7b97016e25f7a570995
699,572
import hashlib
def checksum(filepath, hcnstrctr=hashlib.md5, enc="utf-8"):
    """Compute the checksum of the file at `filepath`.

    The file is read in *text* mode and re-encoded with *enc* before
    hashing (kept for backward compatibility — note this means newline
    translation applies and binary files will fail to decode).

    The previous version leaked the file handle; a context manager now
    guarantees closure.

    :param hcnstrctr: hash constructor, e.g. hashlib.md5 / hashlib.sha1
    :raises: OSError, IOError
    """
    with open(filepath) as f:
        return hcnstrctr(f.read().encode(enc)).hexdigest()
a0a6c576ef9f22de106834088f69cbbc2b3c4bd0
699,573
import importlib
def import_module(path):
    """Import and return an object given its dot-separated path.

    The final dotted component is the attribute; everything before it is
    the module path, e.g. 'os.path.join' -> the join function.
    """
    module_name, _, attr_name = str(path).rpartition('.')
    module = importlib.import_module(module_name)
    return getattr(module, attr_name)
c79bbad91058b07f8b7ffd791e3db4d9988f6f46
699,574
def increment(x):
    """Return the input incremented by one."""
    return x + 1
416451d93765ee148de9b69bd3e1af0e846d6fb5
699,575
def dict_to_rule(rule, clf_feature_dict):
    """
    Convert a list of condition dicts into an ' and '-joined rule string.

    Parameters:
        rule: list of dicts of schema
            [
                {
                    'feature': int,
                    'operator': str,
                    'pivot': float
                },
            ]
            (the docstring previously said 'value', but the code reads
            the 'pivot' key)
        clf_feature_dict: mapping from feature index to feature name
    """
    output = ''
    for condition in rule:
        output += '{} {} {} and '.format(
            clf_feature_dict[int(condition['feature'])],
            condition['operator'],
            condition['pivot']
        )
    # Drop the trailing ' and ' (5 characters).
    return output[:-5]
0f3cc79a86e62a6154d6d83ff4fb695cd181f0b8
699,576
def set_publish_time_series_args(args):
    """Build the payload dict used to publish a time-series.

    'private': False is set only when args.public_time_series is truthy;
    price / credits-per-prediction are added independently whenever given.
    """
    payload = {"private": False} if args.public_time_series else {}
    if args.model_price:
        payload.update(price=args.model_price)
    if args.cpp:
        payload.update(credits_per_prediction=args.cpp)
    return payload
90e3d362ba236752a18dd117b3bd9d23d2cefba1
699,577
from unittest.mock import patch
async def init_integration(hass, mock_config_entry):
    """Install a fake SMA config entry on *hass*, set it up, and return it.

    pysma.SMA.read is patched so no real device communication occurs.
    """
    mock_config_entry.add_to_hass(hass)
    with patch("pysma.SMA.read"):
        await hass.config_entries.async_setup(mock_config_entry.entry_id)
        await hass.async_block_till_done()
    return mock_config_entry
b4a99d12143be7e6b114fa06d1b32aa1de57489e
699,578
def flatten(list_like, recursive=True):
    """
    Flatten a list-like datastructure into a new list.

    :param recursive: when False, only one level of nesting is unwrapped
    """
    flat = []
    for item in list_like:
        if not isinstance(item, list):
            flat.append(item)
        elif recursive:
            flat.extend(flatten(item, recursive=True))
        else:
            flat.extend(item)
    return flat
ad343decfa9e8ae949af303fc3f1a52179464009
699,579
from pathlib import Path
import os
import subprocess
def bedtoolscoverage(gfile, outdir, sortedbam):
    """Compute per-region coverage with `bedtools genomecov`.

    parameters
    ----------
    gfile
        genome file built by preparebedtools() (currently unused by the command)
    outdir
        string, the path of the output directory
    sortedbam
        name of the sorted bam file

    returns
    ----------
    bg_file = the name of the bedgraph file
    """
    sample = Path(sortedbam).stem.split('.')[0]
    bg_file = os.path.join(outdir, sample + ".bg")
    try:
        command = f"bedtools genomecov -bga -ibam {sortedbam} > {bg_file}"
        subprocess.check_output(command, shell=True,
                                stderr=subprocess.DEVNULL)
    except subprocess.CalledProcessError:
        # Best-effort: a failed bedtools run still yields the target path.
        pass
    return bg_file
3225526419f19e337f4e5ee41f851b96a0aede28
699,580
def intents_to_string(intents, queryset=False):
    """Return str(sorted action names) extracted from intents.

    Args:
        intents: [{"action": "/application/json/view"}, ...]
                 OR [models.Intent] (if queryset=True)
    Returns:
        "['<intent.action>', ...]"
    """
    if queryset:
        actions = [intent.action for intent in intents]
    else:
        actions = [intent['action'] for intent in intents]
    return str(sorted(actions))
55e9350d2db63de474f46836e02ae32297e2c170
699,581
import re
def greek_to_english(string):
    """
    Converts all greek letters to the corresponding english letters.
    Useful for creating song slugs from greek song titles.
    """
    # Transliteration table: one entry per Greek letter, including accented
    # vowels, diaeresis forms and final sigma; capitals map to capitals.
    GREEK_MAP = { 'α':'a', 'β':'b', 'γ':'g', 'δ':'d', 'ε':'e', 'ζ':'z', 'η':'h', 'θ':'th', 'ι':'i', 'κ':'k', 'λ':'l', 'μ':'m', 'ν':'n', 'ξ':'ks', 'ο':'o', 'π':'p', 'ρ':'r', 'σ':'s', 'τ':'t', 'υ':'y', 'φ':'f', 'χ':'x', 'ψ':'ps', 'ω':'w', 'ά':'a', 'έ':'e', 'ί':'i', 'ό':'o', 'ύ':'y', 'ή':'h', 'ώ':'w', 'ς':'s', 'ϊ':'i', 'ΰ':'y', 'ϋ':'y', 'ΐ':'i', 'Α':'A', 'Β':'B', 'Γ':'G', 'Δ':'D', 'Ε':'E', 'Ζ':'Z', 'Η':'H', 'Θ':'Th', 'Ι':'I', 'Κ':'K', 'Λ':'L', 'Μ':'M', 'Ν':'N', 'Ξ':'Ks', 'Ο':'O', 'Π':'P', 'Ρ':'R', 'Σ':'S', 'Τ':'T', 'Υ':'Y', 'Φ':'F', 'Χ':'X', 'Ψ':'Ps', 'Ω':'W', 'Ά':'A', 'Έ':'E', 'Ί':'I', 'Ό':'O', 'Ύ':'Y', 'Ή':'H', 'Ώ':'W', 'Ϊ':'I', 'Ϋ':'Y' }
    s = "".join(GREEK_MAP.keys())
    result = ''
    # Tokenize into single Greek letters (mapped) or runs of anything else
    # (copied through unchanged).
    for piece in re.compile('[%s]|[^%s]+' % (s,s)).findall(string):
        if piece in GREEK_MAP:
            result += GREEK_MAP[piece]
        else:
            result += piece
    # Post-process the 'ου' digraph: it transliterates as 'ou', not 'oy'.
    return result.replace('oy', 'ou').replace('OY', 'OU').replace('Oy', 'Ou')
5c4752b1a1d08b0b37acc3f0e0c884d775eae637
699,582
def anum(self, num="", type_="", xhot="", yhot="", **kwargs):
    """Specifies the annotation number, type, and hot spot (GUI).

    APDL Command: /ANUM

    Parameters
    ----------
    num
        Annotation number (ANSYS assigns the lowest available number).
    type\_
        Annotation internal type number (1=text … 13=bitmap), or DELE to
        delete annotation NUM.
    xhot
        X hot spot (-1.0 < X < 2.0), used for menu button item delete.
    yhot
        Y hot spot (-1.0 < Y < 1.0), used for menu button item delete.

    Notes
    -----
    GUI-generated command; valid in any processor.
    """
    return self.run(f"/ANUM,{num},{type_},{xhot},{yhot}", **kwargs)
be3acd32deffcd31a3a83758b64d0d6950bbd791
699,583
def main():
    """Manages the application root: respond with an empty 404."""
    response = ('', 404)
    return response
cf1d676fc24d1db79c567d6d37d7d6660aaca977
699,584
def parse_params_arg(params_arg):
    """Parse a comma-separated ``key=value`` string into a dict.

    Parameters
    ----------
    params_arg : str
        e.g. ``"a=1,b=2"``.

    Returns
    -------
    dict
        Mapping of each key to its (string) value.

    Raises
    ------
    IndexError
        If an entry contains no ``=``.
    """
    params = dict()
    for arg in params_arg.split(','):
        # maxsplit=1 keeps any '=' inside the value intact; the previous
        # unbounded split silently truncated values such as "k=v=w" to "v".
        keyvalue = arg.split('=', 1)
        params[keyvalue[0]] = keyvalue[1]
    return params
2e5673b2bbe552cb751b1250161745d7913df1be
699,585
def nthUglyNumber(n: int) -> int:
    """Return the n-th ugly number (only prime factors 2, 3 and 5).

    Classic three-pointer dynamic programming: each pointer marks the
    smallest ugly number not yet multiplied by its prime.
    """
    ugly = [1]
    i2 = i3 = i5 = 0
    while len(ugly) < n:
        c2, c3, c5 = ugly[i2] * 2, ugly[i3] * 3, ugly[i5] * 5
        nxt = min(c2, c3, c5)
        ugly.append(nxt)
        # Advance every pointer that produced the chosen value, which
        # also deduplicates candidates like 6 = 2*3 = 3*2.
        i2 += c2 == nxt
        i3 += c3 == nxt
        i5 += c5 == nxt
    return ugly[-1]
a24806c1ff94ec1291c19bb1f1a8596e20c809b6
699,586
def livb_annotation():
    """Returns an annotation with species entities (LIVB) identified."""
    ents = [
        {"text": "mouse", "label": "LIVB", "start": 0, "end": 0},
        {"text": "human", "label": "LIVB", "start": 0, "end": 0},
    ]
    return {"ents": ents, "text": "mouse and human", "title": ""}
78de25feab620347e08c8bc74ecf467db3bb477a
699,587
from typing import Union
import os


def ssh_get_submission_cmd(
    filename: str,
    cmd_line_arguments: Union[str, None],
    job_arguments: dict,
    ssh_settings: dict,
):
    """Create shell script string to execute on remote SSH server.

    Args:
        filename: Script to run remotely (.py or .sh).
        cmd_line_arguments: Optional argument string appended to the call.
        job_arguments: Job configuration; may request a conda or virtualenv
            environment via ``use_conda_venv``/``use_venv_venv`` plus
            ``env_name``.
        ssh_settings: Must contain ``remote_dir``, the remote working dir.

    Returns:
        A shell command string that echoes the PID, then runs the script
        inside the requested environment and directory.

    Raises:
        ValueError: If the script extension is neither .py nor .sh.
    """
    if cmd_line_arguments is None:
        cmd_line_arguments = ""

    # Build the interpreter call for the submitted script.
    _, f_extension = os.path.splitext(filename)
    if f_extension == ".py":
        # BUG FIX: the script path was previously the literal "(unknown)";
        # interpolate the actual filename that was passed in.
        cmd = f"python {filename} {cmd_line_arguments}"
    elif f_extension == ".sh":
        cmd = f"bash {filename} {cmd_line_arguments}"
    else:
        raise ValueError(
            f"Script with {f_extension} cannot be handled"
            " by mle-toolbox. Only base .py, .sh experiments"
            " are so far implemented. Please open an issue."
        )

    # Environment activation: .get() treats a present-but-False flag the
    # same as an absent one (previously a False conda flag also masked a
    # True venv flag, which looked accidental).
    if job_arguments.get("use_conda_venv"):
        script_cmd = "echo $$; /bin/bash -c 'source $(conda info --base)/etc/profile.d/conda.sh && conda activate {} && cd {} && {}'".format(
            job_arguments["env_name"], ssh_settings["remote_dir"], cmd
        )
    elif job_arguments.get("use_venv_venv"):
        script_cmd = "echo $$; /bin/bash -c 'source {}/{}/bin/activate && cd {} && {}'".format(
            os.environ["WORKON_HOME"],
            job_arguments["env_name"],
            ssh_settings["remote_dir"],
            cmd,
        )
    else:
        script_cmd = "echo $$; /bin/bash -c 'cd {} && {}'".format(
            ssh_settings["remote_dir"], cmd
        )
    return script_cmd
7cac49ddabcc94fdfa1ee6d13384b12ba854f61d
699,588
def _to_int(parsed): """ Transforms the received parsed value into an integer. :param parsed: the parsed value :return: an integer created from the value """ if len(parsed) > 0: return int(parsed[0]) else: return None
1701648c7e42232c8be0c7451d19ffe43f322118
699,589
def get_model_string(model):
    """
    :param model: model
    :returns: <app_label>.<model_name> string representation for the model
    """
    meta = model._meta
    return f"{meta.app_label}.{meta.model_name}"
d54323680c4a15cc99bbbcea09cf319c571ff9cf
699,590
import torch


def get_sift_pooling_kernel(ksize=25):
    """Return a [ksize x ksize] triangular-weighted pooling kernel for SIFT.

    The weight of each cell is the outer product of 1-D triangle weights
    that peak at the kernel centre, normalised by (ksize/2)^2.
    """
    half = float(ksize) / 2.0
    centers = torch.arange(ksize).float() + 0.5
    weights = half - (centers - half).abs()
    return torch.ger(weights, weights) / (half ** 2)
4576e97eaab4eba86c9a68edd67c00a2ede0db3f
699,591
def check_overscan(xstart, xsize, total_prescan_pixels=24, total_science_pixels=4096):
    """Check image for bias columns.

    Parameters
    ----------
    xstart : int
        Starting column of the readout in detector coordinates.
    xsize : int
        Number of columns in the readout.
    total_prescan_pixels : int
        Total prescan pixels for a single amplifier (24 for WFC).
    total_science_pixels : int
        Total science pixels across a detector (4096 for WFC).

    Returns
    -------
    hasoverscan : bool
        Whether there are bias columns in the image.
    leading : int
        Bias columns on the A/C amplifiers side ("TRIMX1" in ``OSCNTAB``).
    trailing : int
        Bias columns on the B/D amplifiers side ("TRIMX2" in ``OSCNTAB``).
    """
    # NOTE: the overscan flag is tied to the range tests themselves, not
    # to the computed column counts (which can legitimately come out 0).
    has_leading = xstart < total_prescan_pixels
    has_trailing = (xstart + xsize) > total_science_pixels

    leading = abs(xstart - total_prescan_pixels) if has_leading else 0
    trailing = (abs(total_science_pixels - (xstart + xsize - total_prescan_pixels))
                if has_trailing else 0)

    return (has_leading or has_trailing), leading, trailing
934d7683a710e9fe88d488398a9ad9e769470460
699,592
def is_python(view):
    """Cosmetic sugar: True when the view scores as Python source at offset 0."""
    score = view.score_selector(0, 'source.python')
    return score > 0
18c6f14db5b24130e0fb04718e3d81ff7749a526
699,593
import re


def is_card(content, cards):
    """Return the card whose regex matches the start of ``content``.

    ``cards`` is an iterable of (card, regex_string, ...) entries; the
    first entry whose pattern matches wins.  Returns None when nothing
    matches.
    """
    for entry in cards:
        if re.match(entry[1], content) is not None:
            return entry[0]
    return None
6c07ea6d76a77b87288903584d295e4b26220f9c
699,594
def vec_reverse(a):
    """
    Reverses a vector

    Parameters
    ----------
    a: list[]
        A vector of scalar values

    Returns
    -------
    list[]
        The reversed vector (same sequence type as the input)
    """
    # Slicing preserves the input's sequence type (list, tuple, str, ...).
    reversed_vector = a[::-1]
    return reversed_vector
b22e13746de1fe8de60d774b1e20f5c5528507ff
699,595
def t04_ValueDNE(C, pks, crypto, server):
    """Checks that values not stored at the server return None."""
    alice = C("alice")
    # Each check contributes one point; score is the passing fraction.
    checks = [
        alice.download("a") is None,
        alice.download("b") is None,
        alice.download("c") is None,
    ]
    alice.upload("d", "e")
    checks.append(alice.download("e") is None)
    checks.append(alice.download("d") == "e")
    return sum(checks) / 5.0
af42da60a48d407ad16da6801e847417dbcc69e9
699,596
def temp(input_temp):
    """
    Formats input temperature with "+" or "-" sign

    :param input_temp: temperature value (anything float() accepts)
    :return: formatted temperature, e.g. "+5.0", "-3.0", "0.0"
    """
    value = float(input_temp)
    # Negative values already carry their '-' from str(); zero gets no sign.
    sign = '+' if value > 0 else ''
    return sign + str(round(value, 0))
9ce3be3ec362761c2a6aa43e4ae4191c7a471667
699,597
def get_cell_values(queries, subjects, key=len, attr=None):
    """Generates the values of cells in the binary matrix.

    For each query, collects every Hit (or the Hit's ``attr`` value when
    ``attr`` is given) across all subjects whose ``query`` matches, then
    reduces the collection with ``key``.  The defaults therefore count
    matching Hits; e.g. key=max, attr='identity' yields maximum identities.

    Parameters:
        queries (list): Names of query sequences.
        subjects (list): Subject objects to generate values for.
        key (callable): Callable that reduces a list to a single value.
        attr (str): Optional Hit attribute to feed into ``key``.
    """
    cells = []
    for query in queries:
        matches = []
        for subject in subjects:
            for hit in subject.hits:
                if hit.query == query:
                    matches.append(getattr(hit, attr) if attr else hit)
        cells.append(key(matches))
    return cells
4725a4c9f9dc291ff0e8595bb25f51c4612f747d
699,598
def and_(fst, snd):
    """Wraps the logical and function (short-circuit semantics preserved)."""
    if not fst:
        return fst
    return snd
c03f2cb9178aa2863e2da58e4fe2741f4a98ea79
699,599
def _partition(sequence, size, count): """Partition sequence into count subsequences of size length, and a remainder. Return (partitions, remainder), where partitions is a sequence of count subsequences of cardinality count, and apply(append, partitions) + remainder == sequence.""" partitions = [] for index in range(0, size * count, size): partitions.append(sequence[index:index + size]) return (partitions, sequence[size * count:])
e33e7eaa35e3c57f0decda50e863686f529d5afa
699,600
def get_style_sheet(
    bg: str = "", bg_lighter: str = "", text: str = "", text_button: str = ""
) -> str:
    """Return a Qt style sheet string built from the given colour values.

    :param bg: main background colour
    :param bg_lighter: lighter background used for combo-box items
    :param text: primary/highlight text colour
    :param text_button: resting colour for buttons, scrollbars and labels
    :return: the complete style sheet as one string
    """
    # One big f-string: only the four colour parameters vary, everything
    # else (fonts, paddings, per-widget rules) is fixed.
    return f"""QWidget {{
    background: {bg};
    color: {text};
    font-size: 24pt;
    font-weight: bold;
    font-family: "Inconsolata Bold";
}}
QPushButton, QComboBox {{
    background: transparent;
    font-size: 25pt;
    border-radius: 5;
    padding: 8px;
    text-align: left;
    color: {text_button};
}}
QPushButton::hover, QComboBox::hover, QPushButton::focus, QComboBox::focus {{
    background: transparent;
    color: {text};
    outline: none;
}}
QComboBox::down-arrow {{
    background: transparent;
}}
QComboBox::item {{
    background: {bg_lighter};
}}
QComboBox::item:selected {{
    font-weight: bold;
    color: {text};
}}
QLabel, QRadioButton, QScrollArea, QScrollBar::add-line:vertical,
QScrollBar::sub-line:vertical, #graphView {{
    background: transparent;
    border: none;
}}
QScrollBar {{
    background: {bg};
}}
QScrollBar::handle {{
    background: {text_button};
    border: none;
}}
QScrollBar::handle:pressed {{
    background: {text};
}}
#labelMainMenu, #labelTitle, #labelStatistics {{
    font-size: 50pt;
}}
#labelDaysAgo {{
    color: {text_button}
}}"""
1f760e240de493a34e4148f69ad64dc307b6dc99
699,601
import re


def parse_docket_number(docket_number):
    """
    Parse a Common Pleas docket number into its components.

    A docket number has the form "CP-46-CR-1234567-2019".  Returns a dict
    with keys court, county, docket_type, docket_index and year, or None
    when the string does not match.
    """
    pattern = re.compile(
        r"(?P<court>[A-Z]{2})-(?P<county>[0-9]{2})-"
        r"(?P<docket_type>[A-Z]{2})-(?P<docket_index>[0-9]{7})-"
        r"(?P<year>[0-9]{4})")
    match = pattern.match(docket_number)
    return match.groupdict() if match else None
8f9af7709f881cc82bb7d22b420a5361e085de70
699,602
def suffixes(word):
    """Returns the list of propper suffixes of a word

    :param word: the word
    :type word: str
    :rtype: list

    .. versionadded: 0.9.8"""
    result = []
    # Proper suffixes exclude the word itself, so start at index 1.
    for start in range(1, len(word)):
        result.append(word[start:])
    return result
551ea4d612a6c9f0ffe432701e3bde2c80899d85
699,603
def build_url(param_dict):
    """
    Builds the URL needed to query the University of Wyoming atmospheric
    sounding service (http://weather.uwyo.edu/upperair/sounding.html).

    Parameters
    ----------
    param_dict : dict
        Station number, year, month, and day/hour of the desired date and
        location (values must already be strings).

    Returns
    -------
    full_url : string
        Query URL with the desired date and location appended.
    """
    base_url = 'http://weather.uwyo.edu/cgi-bin/sounding?TYPE=TEXT%3ALIST'
    query = ''.join('&' + key + '=' + value for key, value in param_dict.items())
    return base_url + query
77825ae917650d1b832333920b6363d4dac7c36b
699,604
def smooth(values, factor):
    """smooth non zero values (by factor) towards 0th element."""
    smoothed = [0] * len(values)
    for src in reversed(range(len(values))):
        contribution = values[src]
        if contribution == 0:
            continue
        # Spread the value leftwards, decaying by `factor` each step,
        # stopping at index 0 or when the contribution drops below 1.
        dst = src
        while dst >= 0:
            smoothed[dst] += contribution
            contribution *= factor
            if abs(contribution) < 1:
                break
            dst -= 1
    return smoothed
055c4bbe5bd1e696c69ad1ff14beb72b4abbdd11
699,605
def in_month(ref_date):
    """
    which month contains a reference date
    :param ref_date: mx.DateTime reference date
    :rtype: range_string e.g. 2007-M12
    """
    # :4d mirrors the old "%4d" (space-padded width 4), :02d the "%02d".
    return f"{ref_date.year:4d}-M{ref_date.mon:02d}"
fe8126529521565e35f2012e359d7a9c57b773d1
699,606
def _precipitable_water(ea, pair): """Precipitable water in the atmosphere (Eq. D.3) Parameters ---------- ea : ee.Image Vapor pressure [kPa]. pair : ee.Image or ee.Number Air pressure [kPa]. Returns ------- ee.Image or ee.Number Precipitable water [mm]. Notes ----- w = pair * 0.14 * ea + 2.1 """ return ea.multiply(pair).multiply(0.14).add(2.1)
a56b3948167fb22bb49b6f84bd3980a2666dd345
699,607
def str2tuple(string: str):
    """Construct the tuple data from string for SecondQuantizedOp."""
    coefficient = 1
    return (string, coefficient)
f9b8fe56bf54d27c9aee6c7a3ec3ab797b08fcd0
699,608
import argparse


def set_args() -> argparse.Namespace:
    """Define and parse the command line arguments used for model training."""
    parser = argparse.ArgumentParser()
    # Hardware and configuration files
    parser.add_argument('--device', default='0', type=str, help='设置训练或测试时使用的显卡')
    parser.add_argument('--config_path', default='./config/config.json', type=str, help='模型参数配置信息')
    parser.add_argument('--vocab_path', default='./vocab/vocab.txt', type=str, help='词表,该词表为小词表,并增加了一些新的标记')
    # Data locations
    parser.add_argument('--train_file_path', default='./data_dir/train_data.json', type=str, help='新闻标题生成的训练数据')
    parser.add_argument('--test_file_path', default='./data_dir/test_data.json', type=str, help='新闻标题生成的测试数据')
    parser.add_argument('--pretrained_model_path', default=None, type=str, help='预训练的GPT2模型的路径')
    parser.add_argument('--data_dir', default='./data_dir', type=str, help='生成缓存数据的存放路径')
    # Optimization hyper-parameters
    parser.add_argument('--num_train_epochs', default=5, type=int, help='模型训练的轮数')
    parser.add_argument('--train_batch_size', default=16, type=int, help='训练时每个batch的大小')
    parser.add_argument('--test_batch_size', default=8, type=int, help='测试时每个batch的大小')
    parser.add_argument('--learning_rate', default=1e-4, type=float, help='模型训练时的学习率')
    parser.add_argument('--warmup_proportion', default=0.1, type=float, help='warm up概率,即训练总步长的百分之多少,进行warm up')
    parser.add_argument('--adam_epsilon', default=1e-8, type=float, help='Adam优化器的epsilon值')
    # Logging / evaluation cadence
    parser.add_argument('--logging_steps', default=20, type=int, help='保存训练日志的步数')
    parser.add_argument('--eval_steps', default=4000, type=int, help='训练时,多少步进行一次测试')
    parser.add_argument('--gradient_accumulation_steps', default=4, type=int, help='梯度积累')
    parser.add_argument('--max_grad_norm', default=1.0, type=float, help='')
    # Output, reproducibility and sequence-length limits
    parser.add_argument('--output_dir', default='output_dir/', type=str, help='模型输出路径')
    parser.add_argument('--seed', type=int, default=2020, help='随机种子')
    parser.add_argument('--max_len', type=int, default=512, help='输入模型的最大长度,要比config中n_ctx小')
    parser.add_argument('--title_max_len', type=int, default=32, help='生成标题的最大长度,要比max_len小')
    return parser.parse_args()
cea63034d513b8717236d68c1270147d6ed1cdff
699,609
def pack_bits( longbits ):
    """Crunch a 64-bit int (8 bool bytes) into a bitfield.

    Bit i of the result is the least-significant bit of byte i of the
    input; all other input bits are ignored (this loop form is equivalent
    to the original branch-free SWAR fold).
    """
    packed = 0
    for byte_index in range(8):
        packed |= ((longbits >> (8 * byte_index)) & 1) << byte_index
    return packed
78fdf9828b4f98c9bc44233ef5f95244eefb24fc
699,610
from pathlib import Path


def fullpath(path=""):
    """
    Path: Expand relative paths and tildes.
    """
    # An already-absolute expansion (e.g. from "~") overrides cwd in the
    # `/` operator, so absolute inputs pass through unchanged.
    expanded = Path(path).expanduser()
    return Path.cwd() / expanded
e7b7d1caa67a2a9988035713ec18347b5581406e
699,611
import io


def read_file(path):
    """Returns all the lines of a file at path as a List"""
    with io.open(path, mode="rt", encoding="utf-8") as handle:
        return handle.readlines()
8e220e0b90ded168a1d1d8d37b7451b7796b1ed5
699,612
import os


def fixture(filename: str) -> str:
    """Load fixture JSON data from disk.

    Looks up ``filename`` inside the ``fixtures/smartthings`` directory
    that sits next to this module and returns its UTF-8 text content.
    """
    fixtures_dir = os.path.join(os.path.dirname(__file__), "fixtures", "smartthings")
    with open(os.path.join(fixtures_dir, filename), "r", encoding="utf8") as fp:
        return fp.read()
cd8b938b20832e2f0dc41a1027734d1a753698cb
699,613
def target(func):
    """Decorator that passes through truthy results and collapses
    falsy-equal results to the canonical False.

    :param func: the callable to wrap
    :return: wrapper returning ``func``'s result, or False when that
        result is None or compares equal to False (note: this includes
        0 and 0.0, since ``0 == False`` in Python)
    """
    import functools

    @functools.wraps(func)  # preserve the wrapped function's metadata
    def target_func(*original_args, **original_kwargs):
        retV = func(*original_args, **original_kwargs)
        # Deliberate `== False` (not `is False`) to keep the historical
        # behaviour of mapping 0 / 0.0 to False as well.
        if retV is None or retV == False:
            return False
        return retV

    return target_func
0e77444acd4b3a2ba7efd49854a3c081a054d337
699,614
import turtle


def new_horse(image_file):
    """(str) -> turtle

    Create a new horse where <image_file> is a valid shapename
    Returns a turtle object (hidden, with its shape set to the image)
    """
    mount = turtle.Turtle()
    mount.hideturtle()
    mount.shape(image_file)
    return mount
8abd7ea09cfe3340c06250f430ee6f25b03f45aa
699,615
import re


def sort_simulations(df_ts, dyn_dend_order):
    """
    Sorts the simulations in the dataframe according to the order in the
    list dyn_dend_order
    """
    # Rank of each simulation id as produced by the clustering.
    rank = {name: dyn_dend_order.index(name) for name in dyn_dend_order}

    # Temporary sort keys: clustering rank, plus a zero-padded Ballesteros
    # id ("2x50" -> "20x50") so string sorting matches numeric helix order.
    df_ts['clust_order'] = df_ts['Id'].apply(lambda name: rank[name])
    df_ts['helixloop'] = df_ts['Position'].apply(
        lambda pos: re.sub(r'^(\d)x', r'\g<1>0x', pos))
    df_ts = df_ts.sort_values(["helixloop", 'clust_order'])

    # Remove the helper columns before handing the frame back.
    df_ts.drop(['helixloop', 'clust_order'], axis=1, inplace=True)
    return df_ts
68ae24234698803e9f5dd861d7f5255f845401c7
699,616
import csv


def load_penetration(scenario, path):
    """
    Load penetration forecast.

    Reads the CSV at ``path`` and returns {year: penetration} for rows
    whose scenario column matches the base name of ``scenario`` (the part
    before the first underscore).
    """
    base_scenario = scenario.split('_')[0]
    forecast = {}
    with open(path, 'r') as source:
        for row in csv.DictReader(source):
            if row['scenario'] == base_scenario:
                forecast[int(row['year'])] = float(row['penetration'])
    return forecast
23a9730e9d8ec524fe79eaf50c3adb2a1641d002
699,617
def get4Neighbors(img, i, j):
    """Get the 4 neighbours for the pixel analysed.

    Collects, in this order: up, left, up-left, up-right — i.e. the
    already-visited pixels when scanning in raster order.

    Parameters: img, image; i, row number; j, column number.
    Returns: neighbors, list of neighbor pixel values.
    """
    N, M = img.shape
    has_up = i - 1 >= 0
    has_left = j - 1 >= 0
    has_right = j + 1 < M

    neighbors = []
    if has_up:
        neighbors.append(img[i-1][j])
    if has_left:
        neighbors.append(img[i][j-1])
    if has_up and has_left:
        neighbors.append(img[i-1][j-1])
    if has_up and has_right:
        neighbors.append(img[i-1][j+1])
    return neighbors
e3765a34ad02b9cf8d1be19f6e0e03db20ea28df
699,618
import os


def default_log_path():
    """
    :return: The absolute path to the directory that contains Rally's log file.
    """
    home = os.path.expanduser("~")
    return os.path.join(home, ".rally", "logs")
28e51033d17de5a15079ab062c7169c51a65bf95
699,619