content: string (lengths 22–815k)
id: int64 (0–4.91M)
async def app_exception_handler(request, exc):
    """
    Error handler for AppException errors.

    Logs the AppException error detected and returns the appropriate
    message and details of the error.
    """
    logger.debug(exc)
    return JSONResponse(
        Response(success=False, error_code=422, message=str(exc)).dict()
    )
5,334,900
def test_websocket_worker(hook, start_proc): """Evaluates that you can do basic tensor operations using WebsocketServerWorker""" kwargs = {"id": "fed", "host": "localhost", "port": 8766, "hook": hook} process_remote_worker = start_proc(WebsocketServerWorker, kwargs) time.sleep(0.1) x = torch.ones(5) local_worker = WebsocketClientWorker(**kwargs) x = x.send(local_worker) y = x + x y = y.get() assert (y == torch.ones(5) * 2).all() del x local_worker.ws.shutdown() time.sleep(0.1) local_worker.remove_worker_from_local_worker_registry() process_remote_worker.terminate()
5,334,901
def test_Gaussian2D(): """ Test rotated elliptical Gaussian2D model. https://github.com/astropy/astropy/pull/2038 """ model = models.Gaussian2D(100, 1.7, 3.1, x_stddev=3.3, y_stddev=5.0, theta=np.pi/6.) y, x = np.mgrid[0:5, 0:5] g = model(x, y) g_ref = [[77.86787722, 79.8450774, 75.66323816, 66.26262987, 53.6289606], [86.01743889, 90.20335997, 87.419011, 78.29536082, 64.80568981], [90.11888627, 96.6492347, 95.79172412, 87.74139348, 74.27249818], [89.54601432, 98.21442091, 99.55228375, 93.25543692, 80.73169335], [84.38744554, 94.65710923, 98.12408063, 94.00369635, 83.22642276]] assert_allclose(g, g_ref, rtol=0, atol=1e-6)
5,334,902
def search_book(doc, request): """This function also runs in a separate process.""" pattern = _make_search_re_pattern(request) try: for n in range(request.from_page, request.to_page + 1): resultset = [] sect = doc[n].section.title for pos, snip in search(pattern, doc.get_page_content(n)): resultset.append( SearchResult(excerpt=snip, page=n, position=pos, section=sect) ) yield resultset finally: doc.close()
5,334,903
def db_query_map(db_or_el, query, func_match, func_not) -> tuple: """ Helper function to find elems from query and transform them, to generate 2 lists of matching/not-matching elements. """ expr = parse_query_expr(query) elems1, elems2 = [], [] for el in _db_or_elems(db_or_el): m = el_to_meta(el) ok = [func(m.get(prop), val) for prop, func, val in expr] if ok and all(ok): r = func_match(el) if r is not None: elems1.append(r) else: r = func_not(el) if r is not None: elems2.append(r) return elems1, elems2
5,334,904
def route(pattern, method = HTTP_METHOD.GET): """ Decorator to declare the routing rule of handler methods. """ def decorator(func): frm = inspect.stack()[1] class_name = frm[3] module_name = frm[0].f_back.f_globals["__name__"] full_class_name = module_name + '.' + class_name real_pattern = '^' + pattern + '$' add_handler(method, real_pattern, full_class_name, func) return asynchronous(func) return decorator
5,334,905
def div_q(a: ElementModPOrQorInt, b: ElementModPOrQorInt) -> ElementModQ:
    """Compute a/b mod q."""
    b = _get_mpz(b)
    inverse = invert(b, _get_mpz(get_small_prime()))
    return mult_q(a, inverse)
5,334,906
def donoho_gavish_threshold(shape, sigma):
    """
    Singular value threshold from (Gavish and Donoho, 2014).

    Parameters
    ----------
    shape: tuple (n_samples, n_features)
        Shape of the data matrix.

    sigma: float
        Estimate of the noise standard deviation.

    Output
    ------
    singular_value_threshold: float
    """
    n_samples, n_features = shape

    beta = n_features / n_samples
    mult = n_samples
    # TODO: is this what we want to do?
    if beta > 1:
        beta = 1 / beta
        mult = n_features

    if n_samples == n_features:
        lambd = 4 / np.sqrt(3)
    else:
        lambd = dg_threshold(beta)

    return lambd * np.sqrt(mult) * sigma
5,334,907
def main(args): """ Script to turn specific simulation data into Pandas Dataframes """ ## Reading all elements and converting to python dictionary param_dict = vars(args) ## Checking for correct input param_vals_test(param_dict) ## Adding extra variables param_dict = add_to_dict(param_dict) ## Program message Prog_msg = param_dict['Prog_msg'] ## ## Creating Folder Structure # proj_dict = directory_skeleton(param_dict, cwpaths.cookiecutter_paths(__file__)) proj_dict = directory_skeleton(param_dict, cwpaths.cookiecutter_paths('./')) ## ## Printing out project variables print('\n'+50*'='+'\n') for key, key_val in sorted(param_dict.items()): if key !='Prog_msg': print('{0} `{1}`: {2}'.format(Prog_msg, key, key_val)) print('\n'+50*'='+'\n') # # Extracting info from `simfile` sim_pd = simfile_data_extraction(param_dict, proj_dict)
5,334,908
def pyarrow_to_r_schema( obj: 'pyarrow.lib.Schema' ): """Create an R `arrow::Schema` object from a pyarrow Schema. This is sharing the C/C++ object between the two languages. The returned object depends on the active conversion rule in rpy2. By default it will be an `rpy2.robjects.Environment`. """ schema_ptr = rarrow.allocate_arrow_schema()[0] try: obj._export_to_c(int(schema_ptr)) r_schema = rarrow.ImportSchema(schema_ptr) finally: rarrow.delete_arrow_schema(schema_ptr) return r_schema
5,334,909
def plot_ovlp_stats(jobdir, nproc): """Plot 5' and 3' Overlap distributions""" log.info("Generating overlap plots") overlaps = get_overlaps(jobdir, nproc) ovlp_dict = {} for ovlp in overlaps: rid, length, fiveprime, threeprime = ovlp.split() ovlp_dict[rid] = fiveprime, threeprime fiveprime_ovlps = [int(v[0]) for k, v in ovlp_dict.items()] threeprime_ovlps = [int(v[1]) for k, v in ovlp_dict.items()] fig, axs = plt.subplots(2, 1) dataframe = pandas.DataFrame({'five_prime_ovlps': fiveprime_ovlps, 'three_prime_ovlps': threeprime_ovlps}) binsfive = dataframe['five_prime_ovlps'].max() + 1 binsthree = dataframe['three_prime_ovlps'].max() + 1 dataframe['five_prime_ovlps'].plot.hist( bins=binsfive, ax=axs[0], figsize=(5, 10)) axs[0].set_title('5\' overlaps') axs[0].set_xlim(0, 100) dataframe['three_prime_ovlps'].plot.hist( bins=binsthree, ax=axs[1], figsize=(5, 10)) axs[1].set_title('3\' overlaps') axs[1].set_xlim(0, 100) outfig = os.path.join('outfigs', 'overlap_distribution.png') plt.savefig(outfig) return dataframe
5,334,910
def validate_days(year, month, day):
    """validate no of days in given month and year

    >>> validate_days(2012, 8, 31)
    31
    >>> validate_days(2012, 8, 32)
    31
    """
    total_days = calendar.monthrange(year, month)
    return (total_days[1] if (day > total_days[1]) else day)
5,334,911
def copy_dir_tree(src, dst, verbose=False): """Copies directory structure under src to dst.""" if verbose: print('Copying dir-structure {} to {}'.format(src, dst)) for src_dir, sub_dirs, basenames in tf.io.gfile.walk(src): rel_dir = os.path.relpath(src_dir, src) dst_dir = os.path.join(dst, rel_dir) for sub_dir in sorted(sub_dirs): path = os.path.join(dst, rel_dir, sub_dir) tf.io.gfile.makedirs(path)
5,334,912
def test_get_recoveries_no_tags():
    """The get_recoveries() method of the report object should gracefully
    return None if no tags were associated with this report.
    (I'm not sure why there is a report if there are no tags.)
    """
    report = ReportFactory()
    tags = report.get_recoveries()
    assert len(tags) == len([])
5,334,913
def save_to_s3(bucket_name, file_name, data):
    """
    Saves data to a file in the bucket

    bucket_name -- The name of the bucket you're saving to
    file_name -- The name of the file
    data -- data to be saved
    """
    s3 = boto3.resource('s3')
    obj = s3.Object(bucket_name, file_name)
    resp = obj.put(Body=json.dumps(data))
    return resp
5,334,914
def linsearch_fun_BiCM_exp(xx, args): """Linsearch function for BiCM newton and quasinewton methods. This is the linesearch function in the exponential mode. The function returns the step's size, alpha. Alpha determines how much to move on the descending direction found by the algorithm. :param xx: Tuple of arguments to find alpha: solution, solution step, tuning parameter beta, initial alpha, function f :type xx: (numpy.ndarray, numpy.ndarray, float, float, func) :param args: Tuple, step function and arguments. :type args: (func, tuple) :return: Working alpha. :rtype: float """ x = xx[0] dx = xx[1] beta = xx[2] alfa = xx[3] f = xx[4] step_fun = args[0] arg_step_fun = args[1] i = 0 s_old = -step_fun(x, arg_step_fun) while ( sof.sufficient_decrease_condition( s_old, -step_fun(x + alfa * dx, arg_step_fun), alfa, f, dx ) is False and i < 50 ): alfa *= beta i += 1 return alfa
5,334,915
def getCountryName(countryID):
    """
    Pull out the country name from a country id.
    If there's no "name" property in the object, returns None.
    """
    try:
        countryObj = getCountry(countryID)
        return countryObj['name']
    except:
        pass
5,334,916
def _pic_download(url, type):
    """
    Download (decode and save) an image.
    :param url: base64-encoded image data
    :param type: name used for the saved .jpg file
    :return: path of the saved image
    """
    save_path = os.path.abspath('...') + '\\' + 'images'
    if not os.path.exists(save_path):
        os.mkdir(save_path)
    img_path = save_path + '\\' + '{}.jpg'.format(type)
    img_data = base64.b64decode(url)
    with open(img_path, 'wb') as f:
        f.write(img_data)
    return img_path
5,334,917
def run_search(args): """Run a search from a given RadVel setup file Args: args (ArgumentParser): command line arguments """ config_file = args.setupfn conf_base = os.path.basename(config_file).split('.')[0] P, post = radvel.utils.initialize_posterior(config_file) if args.mstar is None: try: args.mstar = (P.stellar['mstar'], P.stellar['mstar_err']) except (AttributeError, KeyError): pass else: args.mstar = [float(x) for x in args.mstar] #starname = P.starname + '_' + conf_base starname = conf_base data = P.data if args.known and P.nplanets > 0: ipost = copy.deepcopy(post) #post.params['dvdt'].vary = args.trend #if not args.trend: # post.params['dvdt'].value = 0.0 post = radvel.fitting.maxlike_fitting(post, verbose=True) else: post = None max_planets = args.maxplanets searcher = rvsearch.search.Search(data, starname=starname, min_per=args.minP, max_per=args.maxP, workers=args.num_cpus, post=post, trend=args.trend, verbose=args.verbose, mcmc=args.mcmc, mstar=args.mstar, max_planets=max_planets) searcher.run_search(outdir=args.output_dir)
5,334,918
def get_placeholder(default_tensor=None, shape=None, name=None):
    """Return a placeholder_with_default if default_tensor is given,
    otherwise a new placeholder is created and returned"""
    if default_tensor is not None:
        return default_tensor
    else:
        if shape is None:
            raise ValueError('One of default_tensor and shape must be given')
        return tf.placeholder(tf.float32, shape=shape, name=name)
5,334,919
def test_http_processor_propagate_error_records(sdc_builder, sdc_executor, http_client, one_request_per_batch): """ Test when the http processor stage has the config option "Records for remaining statuses" set. To test this we force the URL to be a not available so we get a 404 response from the mock http server. The output should be one record containing the "Error Response Body Field" with the error message from the mock server. We use the pipeline: dev_raw_data_source >> http_client_processor >> wiretap """ one_request_per_batch_option = {} if Version(sdc_builder.version) < Version("4.4.0"): if one_request_per_batch: pytest.skip("Test skipped because oneRequestPerBatch option is only available from SDC 4.4.0 version") else: one_request_per_batch_option = {"one_request_per_batch": one_request_per_batch} mock_path = get_random_string(string.ascii_letters, 10) fake_mock_path = get_random_string(string.ascii_letters, 10) raw_dict = dict(city='San Francisco') raw_data = json.dumps(raw_dict) record_output_field = 'result' http_mock = http_client.mock() try: http_mock.when( rule=f'GET /{mock_path}' ).reply( body="Example", status=200, times=FOREVER ) mock_uri = f'{http_mock.pretend_url}/{fake_mock_path}' builder = sdc_builder.get_pipeline_builder() dev_raw_data_source = builder.add_stage('Dev Raw Data Source') dev_raw_data_source.set_attributes(data_format='TEXT', raw_data=raw_data, stop_after_first_batch=True) http_client_processor = builder.add_stage('HTTP Client', type='processor') http_client_processor.set_attributes(data_format='JSON', default_request_content_type='application/text', headers=[{'key': 'content-length', 'value': f'{len(raw_data)}'}], http_method='GET', request_data="${record:value('/text')}", resource_url=mock_uri, output_field=f'/{record_output_field}', **one_request_per_batch_option) http_client_processor.records_for_remaining_statuses = True http_client_processor.error_response_body_field = 'errorField' wiretap = builder.add_wiretap() dev_raw_data_source >> http_client_processor >> wiretap.destination pipeline = builder.build(title='HTTP Lookup Processor pipeline Response Actions') sdc_executor.add_pipeline(pipeline) sdc_executor.start_pipeline(pipeline).wait_for_finished() assert len(wiretap.output_records) == 1 assert wiretap.output_records[0].field['result']['errorField'].value == 'No matching preset response' finally: logger.info("Deleting http mock") http_mock.delete_mock()
5,334,920
def test_make_map_plot_no_polygons(test_data_shape): """ Tests that the make_map_plot function can run on data that has no polygons. """ no_polygons = [ s for s in test_data_shape if not isinstance(s["shape"], Polygon) ] make_map_plot(no_polygons)
5,334,921
def get_targets(args): """ Gets the list of targets for cmake and kernel/build.sh :param args: The args variable generated by parse_parameters :return: A string of targets suitable for cmake or kernel/build.sh """ if args.targets: targets = args.targets elif args.full_toolchain: targets = "all" else: targets = "AArch64;ARM;BPF;Hexagon;Mips;PowerPC;RISCV;SystemZ;X86" return targets
5,334,922
def print_results( args: Any, processor: EYAMLProcessor, yaml_file: str, yaml_paths: List[Tuple[str, YAMLPath]], document_index: int ) -> None: """Dump search results to STDOUT with optional and dynamic formatting.""" in_expressions = len(args.search) print_file_path = not args.nofile print_expression = in_expressions > 1 and not args.noexpression print_yaml_path = not args.noyamlpath print_value = args.values buffers = [ ": " if print_file_path or print_expression and ( print_yaml_path or print_value ) else "", ": " if print_yaml_path and print_value else "", ] for entry in yaml_paths: expression, result = entry resline = "" if print_file_path: display_file_name = ("STDIN" if yaml_file.strip() == "-" else yaml_file) resline += "{}/{}".format(display_file_name, document_index) if print_expression: resline += "[{}]".format(expression) resline += buffers[0] if print_yaml_path: if args.noescape: use_flash = args.pathsep is PathSeperators.FSLASH seglines = [] join_mark = "/" if use_flash else "." path_prefix = "/" if use_flash else "" for (_, segment) in result.escaped: seglines.append(str(segment)) resline += "{}{}".format(path_prefix, join_mark.join(seglines)) else: resline += "{}".format(result) resline += buffers[1] if print_value: # These results can have only one match, but make sure lest the # output become messy. for node_coordinate in processor.get_nodes(result, mustexist=True): node = node_coordinate.node if isinstance(node, (dict, list, CommentedSet)): resline += "{}".format( json.dumps(Parsers.jsonify_yaml_data(node))) else: resline += "{}".format(str(node).replace("\n", r"\n")) break print(resline)
5,334,923
def test_view_translate_invalid_locale(client, resource_a, settings_debug): """ If the project is valid but the locale isn't, redirect home. """ # this doesnt seem to redirect as the comment suggests response = client.get( '/invalid-locale/%s/%s/' % (resource_a.project.slug, resource_a.path) ) assert response.status_code == 404
5,334,924
def modify_db_cluster(event, context): """ When we restore the database from a production snapshot, we don't know the passwords. So, modify the postgres password here so we can work with the database. :param event: :param context: :return: """ logger.info(event) rds_client.modify_db_cluster( DBClusterIdentifier=DB_CLUSTER_IDENTIFIER, ApplyImmediately=True, MasterUserPassword='Password123' )
5,334,925
def compPlayHand(hand, wordList, n): """ Allows the computer to play the given hand, following the same procedure as playHand, except instead of the user choosing a word, the computer chooses it. 1) The hand is displayed. 2) The computer chooses a word. 3) After every valid word: the word and the score for that word is displayed, the remaining letters in the hand are displayed, and the computer chooses another word. 4) The sum of the word scores is displayed when the hand finishes. 5) The hand finishes when the computer has exhausted its possible choices (i.e. compChooseWord returns None). hand: dictionary (string -> int) wordList: list (string) n: integer (HAND_SIZE; i.e., hand size required for additional points) """ handcopy = hand.copy() # Keep track of the total score score = 0 # As long as there are still letters left in the hand: while calculateHandlen(handcopy) > 0: # Display the hand print "Current Hand: ", displayHand(handcopy) """ #To pass the test, instead of displayHand(handcopy) for letter in handcopy.keys(): for j in range(handcopy[letter]): print letter, print "" """ # Ask user for input word = compChooseWord(handcopy, wordList, n) # If the input is a single period: if word == '.' or word == None: # End the game (break out of the loop) break # Otherwise (the input is not a single period): else: # If the word is not valid: if not isValidWord(word, handcopy, wordList): # Reject invalid word (print a message followed by a blank line) print "Invalid word, please try again." print # Otherwise (the word is valid): else: # Tell the user how many points the word earned, and the updated total score, in one line followed by a blank line score += getWordScore(word, n) print '"%s" earned %d points. Total: %d points' % (word, getWordScore(word, n), score) # Update the hand handcopy = updateHand(handcopy, word) # Game is over (user entered a '.' or ran out of letters), so tell user the total score print "Total score: %d points." % (score)
5,334,926
def _format_warning(message, category, filename, lineno, line=None): # noqa: U100, E501 """ Simple format for warnings issued by ProPlot. See the `internal warning call signature \ <https://docs.python.org/3/library/warnings.html#warnings.showwarning>`__ and the `default warning source code \ <https://github.com/python/cpython/blob/master/Lib/warnings.py>`__. """ return f'{filename}:{lineno}: ProPlotWarning: {message}\n'
5,334,927
def piocheCarte(liste_pioche, x):
    """
    This function returns x cards drawn from the draw pile.

    Args:
        x (int): Number of cards to draw.

    Returns:
        list: The x cards that were drawn.
    """
    liste_carte = []
    for i in range(x):
        # take the card from the top of the pile, then remove it
        liste_carte.append(liste_pioche[0])
        del liste_pioche[0]
    return liste_carte
5,334,928
def is_process_running(pid):
    """Returns true if a process with pid is running, false otherwise."""
    # from
    # http://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid
    try:
        os.kill(pid, 0)
    except OSError:
        return False
    else:
        return True
5,334,929
def cost_logistic(p, x, y): """ Sum of absolute deviations of obs and logistic function :math:`L/(1+exp(-k(x-x0)))` Parameters ---------- p : iterable of floats parameters (`len(p)=3`) - `p[0]` = L = Maximum of logistic function - `p[1]` = k = Steepness of logistic function - `p[2]` = x0 = Inflection point of logistic function x : float or array_like of floats independent variable y : float or array_like of floats dependent variable, observations Returns ------- float sum of absolute deviations """ return np.sum(np.abs(y-logistic_p(x, p)))
5,334,930
def get_price(token: str, sellAmount=1000000000000000000): """ get_price uses the 0x api to get the most accurate eth price for the token :param token: token ticker or token address :param buyToken: token to denominate price in, default is WETH :param sellAmount: token amount to sell in base unit, default is 1e18 :return: eth/bnb price per token for the specified amount to sell """ if curr_network == "bsc" or curr_network == "bsc-fork": endpoint = "https://bsc.api.0x.org/" buyToken = "WBNB" elif curr_network == "eth": endpoint = "https://api.0x.org/" buyToken = "WETH" else: raise ValueError("Unrecognized network") params = ( "swap/v1/quote?buyToken=" + buyToken + "&sellToken=" + token + "&sellAmount=" + str(sellAmount) ) r = requests.get(endpoint + params) data = r.json() if not data.get("guaranteedPrice"): console.log(data) raise ValueError("Price could not be fetched") return data["guaranteedPrice"]
5,334,931
def _change_matplotlib_colours(text_color=_TEXT_COLOUR, bg_colour=_BG_COLOUR): """Change matplotlib default colors for ALL graphs produced in current session. - 'text_colour' sets the colour of all text, as well as axes colours and axis tick mark colours. - 'bg_colour' changes the background and outside fill colour of the plot.""" matplotlib.rc('figure', facecolor=_BG_COLOUR) matplotlib.rc('savefig', facecolor=_BG_COLOUR, edgecolor=_TEXT_COLOUR) matplotlib.rc('axes', edgecolor=_TEXT_COLOUR, facecolor=_BG_COLOUR, labelcolor=_TEXT_COLOUR) matplotlib.rc('text', color=_TEXT_COLOUR) matplotlib.rc('grid', color=_TEXT_COLOUR) matplotlib.rc('xtick', color=_TEXT_COLOUR) matplotlib.rc('ytick', color=_TEXT_COLOUR)
5,334,932
async def test_ObserveHeadRacket():
    """Head-tap event listener demo.

    Listens for head-tap events; when the robot's head is tapped, the tap
    type is reported. When the head is double-tapped, stop listening and
    perform a dance.

    # ObserveHeadRacketResponse.type:
    #     class HeadRacketType(enum.Enum):
    #         SINGLE_CLICK = 1  # single tap
    #         LONG_PRESS = 2    # long press
    #         DOUBLE_CLICK = 3  # double tap
    """
    # Create the listener
    observer: ObserveHeadRacket = ObserveHeadRacket()

    # Event handler
    # ObserveHeadRacketResponse.type:
    #     @enum.unique
    #     class HeadRacketType(enum.Enum):
    #         SINGLE_CLICK = 1  # single tap
    #         LONG_PRESS = 2    # long press
    #         DOUBLE_CLICK = 3  # double tap
    def handler(msg: ObserveHeadRacketResponse):
        # Once an event is received, stop listening
        print("{0}".format(str(msg.type)))
        if msg.type == HeadRacketType.DOUBLE_CLICK.value:
            observer.stop()
            # Perform a dance
            asyncio.create_task(__dance())

    observer.set_handler(handler)
    # Start listening
    observer.start()
    await asyncio.sleep(0)
5,334,933
def inv_exportlog(): """Exports a csv file formatted for Profitek's inventory task list. """ date = datetime.today().strftime('%Y%m%d') scanner_terminal = escape(session["scanner_terminal"]) allscanners = escape(request.form.get('allscanners','yes')) if 'yes' in allscanners.lower(): #get absolutely everything invdict = __countAllBarcodes_inv() else: invdict = redis_client.hgetall(f'inventory_{scanner_terminal}') logging.debug(invdict) with open(f'/var/ldbinvoice/{date}_{scanner_terminal}_inventory_scan_log.txt', 'w') as f: for k,v in invdict.items(): line = f"{k},{v}" logging.info(line) f.write(f'{line}\n') return {'success': True}
5,334,934
def send_command(InstanceIds=None, Targets=None, DocumentName=None, DocumentHash=None, DocumentHashType=None, TimeoutSeconds=None, Comment=None, Parameters=None, OutputS3Region=None, OutputS3BucketName=None, OutputS3KeyPrefix=None, MaxConcurrency=None, MaxErrors=None, ServiceRoleArn=None, NotificationConfig=None): """ Executes commands on one or more remote instances. See also: AWS API Documentation :example: response = client.send_command( InstanceIds=[ 'string', ], Targets=[ { 'Key': 'string', 'Values': [ 'string', ] }, ], DocumentName='string', DocumentHash='string', DocumentHashType='Sha256'|'Sha1', TimeoutSeconds=123, Comment='string', Parameters={ 'string': [ 'string', ] }, OutputS3Region='string', OutputS3BucketName='string', OutputS3KeyPrefix='string', MaxConcurrency='string', MaxErrors='string', ServiceRoleArn='string', NotificationConfig={ 'NotificationArn': 'string', 'NotificationEvents': [ 'All'|'InProgress'|'Success'|'TimedOut'|'Cancelled'|'Failed', ], 'NotificationType': 'Command'|'Invocation' } ) :type InstanceIds: list :param InstanceIds: The instance IDs where the command should execute. You can specify a maximum of 50 IDs. If you prefer not to list individual instance IDs, you can instead send commands to a fleet of instances using the Targets parameter, which accepts EC2 tags. (string) -- :type Targets: list :param Targets: (Optional) An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call. For more information about how to use Targets, see Executing a Command Using Systems Manager Run Command . (dict) --An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call. Key (string) --User-defined criteria for sending commands that target instances that meet the criteria. Key can be tag:Amazon EC2 tagor InstanceIds. For more information about how to send commands that target instances using Key,Value parameters, see Executing a Command Using Systems Manager Run Command . Values (list) --User-defined criteria that maps to Key. For example, if you specified tag:ServerRole, you could specify value:WebServer to execute a command on instances that include Amazon EC2 tags of ServerRole,WebServer. For more information about how to send commands that target instances using Key,Value parameters, see Executing a Command Using Systems Manager Run Command . (string) -- :type DocumentName: string :param DocumentName: [REQUIRED] Required. The name of the Systems Manager document to execute. This can be a public document or a custom document. :type DocumentHash: string :param DocumentHash: The Sha256 or Sha1 hash created by the system when the document was created. Note Sha1 hashes have been deprecated. :type DocumentHashType: string :param DocumentHashType: Sha256 or Sha1. Note Sha1 hashes have been deprecated. :type TimeoutSeconds: integer :param TimeoutSeconds: If this time is reached and the command has not already started executing, it will not execute. :type Comment: string :param Comment: User-specified information about the command, such as a brief description of what the command should do. :type Parameters: dict :param Parameters: The required and optional parameters specified in the document being executed. 
(string) -- (list) -- (string) -- :type OutputS3Region: string :param OutputS3Region: (Optional) The region where the Amazon Simple Storage Service (Amazon S3) output bucket is located. The default value is the region where Run Command is being called. :type OutputS3BucketName: string :param OutputS3BucketName: The name of the S3 bucket where command execution responses should be stored. :type OutputS3KeyPrefix: string :param OutputS3KeyPrefix: The directory structure within the S3 bucket where the responses should be stored. :type MaxConcurrency: string :param MaxConcurrency: (Optional) The maximum number of instances that are allowed to execute the command at the same time. You can specify a number such as 10 or a percentage such as 10%. The default value is 50. For more information about how to use MaxConcurrency, see Executing a Command Using Systems Manager Run Command . :type MaxErrors: string :param MaxErrors: The maximum number of errors allowed without the command failing. When the command fails one more time beyond the value of MaxErrors, the systems stops sending the command to additional targets. You can specify a number like 10 or a percentage like 10%. The default value is 50. For more information about how to use MaxErrors, see Executing a Command Using Systems Manager Run Command . :type ServiceRoleArn: string :param ServiceRoleArn: The IAM role that Systems Manager uses to send notifications. :type NotificationConfig: dict :param NotificationConfig: Configurations for sending notifications. NotificationArn (string) --An Amazon Resource Name (ARN) for a Simple Notification Service (SNS) topic. Run Command pushes notifications about command status changes to this topic. NotificationEvents (list) --The different events for which you can receive notifications. These events include the following: All (events), InProgress, Success, TimedOut, Cancelled, Failed. To learn more about these events, see Setting Up Events and Notifications in the Amazon EC2 Systems Manager User Guide . (string) -- NotificationType (string) --Command: Receive notification when the status of a command changes. Invocation: For commands sent to multiple instances, receive notification on a per-instance basis when the status of a command changes. :rtype: dict :return: { 'Command': { 'CommandId': 'string', 'DocumentName': 'string', 'Comment': 'string', 'ExpiresAfter': datetime(2015, 1, 1), 'Parameters': { 'string': [ 'string', ] }, 'InstanceIds': [ 'string', ], 'Targets': [ { 'Key': 'string', 'Values': [ 'string', ] }, ], 'RequestedDateTime': datetime(2015, 1, 1), 'Status': 'Pending'|'InProgress'|'Success'|'Cancelled'|'Failed'|'TimedOut'|'Cancelling', 'StatusDetails': 'string', 'OutputS3Region': 'string', 'OutputS3BucketName': 'string', 'OutputS3KeyPrefix': 'string', 'MaxConcurrency': 'string', 'MaxErrors': 'string', 'TargetCount': 123, 'CompletedCount': 123, 'ErrorCount': 123, 'ServiceRole': 'string', 'NotificationConfig': { 'NotificationArn': 'string', 'NotificationEvents': [ 'All'|'InProgress'|'Success'|'TimedOut'|'Cancelled'|'Failed', ], 'NotificationType': 'Command'|'Invocation' } } } :returns: (string) -- (list) -- (string) -- """ pass
5,334,935
def test_extract(): """ Test extract packet information from raw bytes. """ # These raw bytes are from the ReadCentral packet built in test_build() raw_payload = bytearray(b'\x10\x00\x00\x00\x0c\x00\x10\x00\x0c\x00\x09\x00\x04\x00\x0a\x00\x0c\x00\x00\x00\x1c\x00' b'\x00\x00\x00\x0b\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00' b'\x06\x00\x04\x00\x08\x00\x00\x00\x03\x00\x40\x00') packet = Packet.extract(raw_payload) assert isinstance(packet, Packet) assert packet.controller_id == 0 assert packet.payload_type == Payload.Payload.ReadCentral assert packet.get('connection_handle') == 0x0040 assert packet.get('attribute_handle') == 0x0003 # Test with a wrong payload bytes (random) raw_payload2 = bytearray(b'\x10\x00\x00\x00\x0c\x00') with pytest.raises(Exception): Packet.extract(raw_payload2)
5,334,936
def _get_all_errors_if_unrecognized_properties(model: dict, props: list) -> iter: """Get error messages if the model has unrecognized properties.""" def get_error_if_property_is_unrecognized(key): if key not in props: return f"unrecognized field named '{key}' found in model '{model}'" return map(get_error_if_property_is_unrecognized, model.keys())
5,334,937
def clear_context(): """Helper to clear any thread local contexts.""" import talisker.sentry Context.clear() talisker.sentry.clear()
5,334,938
def no_holders(disk):
    """Return true if the disk has no holders."""
    holders = os.listdir('/sys/class/block/' + disk + '/holders/')
    return len(holders) == 0
5,334,939
def write_data(data) -> None:
    """
    write json data to the data file

    Args:
        data: json data
    """
    with open(data_file, 'w') as data_io:
        json.dump(data, data_io)
5,334,940
def main(fn): """Call fn with command line arguments. Used as a decorator. The main decorator marks the function that starts a program. For example, @main def my_run_function(): # function body Use this instead of the typical __name__ == "__main__" predicate. """ if inspect.stack()[1][0].f_locals['__name__'] == '__main__': args = sys.argv[1:] # Discard the script name from command line fn(*args) # Call the main function return fn
5,334,941
def rows(f, null_value=None, columns=None, comments=True, header=False): """ Parses a tsv file. :param f: the file handler :param null_value: value to interpret as a None :param columns: which columns to return :param comments: skip comments ? :param header: has the tsv header ? :return: selected columns for each tsv line """ if comments: line = skip_comments_and_empty(f) # Discard comments else: line = f.readline() if len(line) == 0: return if header: hdr = dict([(name, index) for index, name in enumerate(line.rstrip("\n").split("\t"))]) if columns is not None: cols = [0] * len(columns) for index, name_or_index in enumerate(columns): if isinstance(name_or_index, basestring): if name_or_index in hdr: cols[index] = hdr[name_or_index] else: cols[index] = name_or_index columns = cols else: if not line.startswith("#"): yield row(line, null_value, columns) for line in f: if not line.startswith("#"): yield row(line, null_value, columns)
5,334,942
def test_list_non_positive_integer_min_length_4_nistxml_sv_iv_list_non_positive_integer_min_length_5_2(mode, save_output, output_format): """ Type list/nonPositiveInteger is restricted by facet minLength with value 10. """ assert_bindings( schema="nistData/list/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-list-nonPositiveInteger-minLength-5.xsd", instance="nistData/list/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-list-nonPositiveInteger-minLength-5-2.xml", class_name="NistschemaSvIvListNonPositiveIntegerMinLength5", version="1.1", mode=mode, save_output=save_output, output_format=output_format, structure_style="filenames", )
5,334,943
def shellutil(device, facts): """ In the `facts` dictionary will set the following keys: * facts['hostname']: device hostname """ systime = device.rpc.get('Cisco-IOS-XR-shellutil-oper:system-time/uptime') uptime = _jsonpath(systime, 'data/system-time/uptime') facts['hostname'] = _jsonpath(uptime, 'host-name') ipdomain = device.rpc.get('Cisco-IOS-XR-ip-domain-oper:ip-domain') facts['domain'] = _jsonpath(ipdomain, 'data/ip-domain/vrfs/vrf/server/domain-name') facts['fqdn'] = '{host}.{domain}'.format( host=facts['hostname'], domain=facts['domain'] )
5,334,944
def get_get_single_endpoint_schema(class_name, id_field_where_type, response_schema): """ :param class_name: :param id_field_where_type: :param response_schema: """ return { "tags": [class_name], "description": f"Get a {class_name} model representation", "parameters": [ { "name": "id", "description": f"{class_name} identifier", "in": "path", "schema": {"type": "integer" if id_field_where_type == "int:" else "string"}, } ], "responses": { "200": {"description": f"{class_name} response model", "content": {"application/json": {"schema": response_schema}}}, "404": {"description": "Not found response model", "content": {"application/json": {"schema": not_found_swagger_schema}}}, "500": {"description": "Operation fail", "content": {"application/json": {"schema": error_swagger_schema}}}, }, }
5,334,945
def overlapping_template_matching( sequence, template_size: Optional[int] = None, blocksize: Optional[int] = None, matches_ceil: Optional[int] = None, ): """Overlapping matches to template per block is compared to expected result The sequence is split into blocks, where the number of overlapping patterns matches to the template in each block is found. This is referenced to the expected mean and variance in matches of a hypothetically truly random sequence. Parameters ---------- sequence : array-like with two distinct values Sequence containing 2 distinct elements template_size : ``int`` Size of the template to be generated blocksize : ``int`` Size of the blocks that partition the given sequence matches_ceil : ``int`` Group matches of this value and higher as one single tally Returns ------- result : ``OverlappingTemplateMatchingTestResult`` Dataclass that contains the test's statistic and p-value as well as other relevant information gathered. """ return _randtests.overlapping_template_matching( sequence, template_size=template_size, blocksize=blocksize, matches_ceil=matches_ceil, )
5,334,946
def _extract_options(config, options, *args):
    """Extract options values from a configparser, optparse pair.

    Options given on command line take precedence over options
    read in the configuration file.

    Args:
        config (dict): option values read from a config file through
            configparser
        options (optparse.Options): optparse 'options' object containing
            options values from the command line
        *args (str tuple): name of the options to extract
    """
    extract = {}
    for key in args:
        # skip options that are absent from the configuration file
        if key not in config:
            continue
        extract[key] = config[key]
        option = getattr(options, key, None)
        if option is not None:
            extract[key] = option
    return extract
5,334,947
def _calc_fans(shape):
    """
    :param shape: tuple with the shape (4D - for example: filters, depth, width, height)
    :return: (fan_in, fan_out)
    """
    if len(shape) == 2:
        # Fully connected layer (units, input)
        fan_in = shape[1]
        fan_out = shape[0]
    elif len(shape) in {3, 4, 5}:
        # Convolutional kernels
        k_size = np.prod(shape[2:])
        fan_in = k_size * shape[1]
        fan_out = k_size * shape[0]
    else:
        raise ValueError("Incompatible shape")

    return fan_in, fan_out
5,334,948
def train_collision(net: nn.Module, full_props: List[c.CollisionProp], args: Namespace) -> Tuple[int, float, int, float]: """ The almost completed skeleton of training Collision Avoidance/Detection networks using ART. :return: trained_epochs, train_time, certified, final accuracies """ logging.info(net) if args.reset_params: try: net.reset_params() except AttributeError: ''' This is possible when creating FFNN on the fly which doesn't have reset_params(). It's fine since such FFNN is using newly initialized weights. ''' pass props_dict = c.cluster_props(full_props) large_props = [ps[0] for ps in props_dict.values()] # pick the largest one for each safety margin base point large_props = AndProp(large_props[:args.n_props]) logging.info(f'Using {len(large_props.props)} largest properties.') v = Bisecter(args.dom, large_props) def run_abs(batch_abs_lb: Tensor, batch_abs_ub: Tensor, batch_abs_bitmap: Tensor) -> Tensor: """ Return the safety distances over abstract domain. """ batch_abs_ins = args.dom.Ele.by_intvl(batch_abs_lb, batch_abs_ub) batch_abs_outs = net(batch_abs_ins) return large_props.safe_dist(batch_abs_outs, batch_abs_bitmap) in_lb, in_ub = large_props.lbub(device) in_bitmap = large_props.bitmap(device) # already moved to GPU if necessary trainset = c.CollisionData.load(device) testset = trainset # there is only training set, following that in Ehlers 2017 start = timer() if args.no_abs or args.no_refine: curr_abs_lb, curr_abs_ub, curr_abs_bitmap = in_lb, in_ub, in_bitmap else: # refine it at the very beginning to save some steps in later epochs curr_abs_lb, curr_abs_ub, curr_abs_bitmap = v.split(in_lb, in_ub, in_bitmap, net, args.refine_top_k, # tiny_width=args.tiny_width, stop_on_k_all=args.start_abs_cnt) opti = Adam(net.parameters(), lr=args.lr) scheduler = args.scheduler_fn(opti) # could be None accuracies = [] # epoch 0: ratio best_metric = 1e9 if args.accu_bar else -1. 
best_params = None certified = False epoch = 0 while True: # first, evaluate current model logging.info(f'[{utils.time_since(start)}] After epoch {epoch}:') if not args.no_pts: logging.info(f'Loaded {trainset.real_len()} points for training.') if not args.no_abs: logging.info(f'Loaded {len(curr_abs_lb)} abstractions for training.') with torch.no_grad(): full_dists = run_abs(curr_abs_lb, curr_abs_ub, curr_abs_bitmap) worst_loss = full_dists.max() logging.info(f'min loss {full_dists.min()}, max loss {worst_loss}.') if worst_loss <= 0.: certified = True logging.info(f'All {len(curr_abs_lb)} abstractions certified.') else: _, worst_idx = full_dists.max(dim=0) logging.info(f'Max loss at LB: {curr_abs_lb[worst_idx]}, UB: {curr_abs_ub[worst_idx]}.') worst_props = large_props.props_of(curr_abs_bitmap[worst_idx]) logging.info(f'Max loss labels: {[p.larger_category for p in worst_props]}') accu = eval_test(net, testset) accuracies.append(accu) logging.info(f'Test set accuracy {accu}.') if args.accu_bar is None or args.no_abs: # pick the best accuracy model if accu > best_metric: best_metric = accu best_params = copy.deepcopy(net.state_dict()) else: if accu > args.accu_bar and worst_loss < best_metric: best_metric = worst_loss best_params = copy.deepcopy(net.state_dict()) # check termination if certified and epoch >= args.min_epochs: # all safe and sufficiently trained break if epoch >= args.max_epochs: break epoch += 1 certified = False # writting like this because ReduceLROnPlateau do not have get_lr() _param_lrs = [group['lr'] for group in opti.param_groups] curr_lr = sum(_param_lrs) / len(_param_lrs) logging.info(f'\n[{utils.time_since(start)}] Starting epoch {epoch} with lr = {curr_lr}:') absset = exp.AbsIns(curr_abs_lb, curr_abs_ub, curr_abs_bitmap) # dataset may have expanded, need to update claimed length to date if not args.no_pts: trainset.reset_claimed_len() if not args.no_abs: absset.reset_claimed_len() if (not args.no_pts) and (not args.no_abs): ''' Might simplify this to just using the amount of abstractions, is it unnecessarily complicated? ''' # need to enumerate both max_claimed_len = min(trainset.claimed_len, absset.claimed_len) # max_claimed_len = trainset.claimed_len trainset.claimed_len = max_claimed_len absset.claimed_len = max_claimed_len if not args.no_pts: # using drop_last may increase accuracy a bit, but decrease safety a bit? conc_loader = data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True) nbatches = len(conc_loader) conc_loader = iter(conc_loader) if not args.no_abs: # using drop_last may increase accuracy a bit, but decrease safety a bit? abs_loader = data.DataLoader(absset, batch_size=args.batch_size, shuffle=True, drop_last=True) nbatches = len(abs_loader) # doesn't matter rewriting len(conc_loader), they are the same abs_loader = iter(abs_loader) accu_total_loss = 0. safe_total_loss = 0. for i in range(nbatches): opti.zero_grad() batch_loss = 0. if not args.no_pts: batch_inputs, batch_labels = next(conc_loader) batch_outputs = net(batch_inputs) batch_loss += args.accuracy_loss(batch_outputs, batch_labels) accu_total_loss += batch_loss.item() if not args.no_abs: batch_abs_lb, batch_abs_ub, batch_abs_bitmap = next(abs_loader) batch_dists = run_abs(batch_abs_lb, batch_abs_ub, batch_abs_bitmap) safe_loss = batch_dists.mean() # L1, need to upgrade to batch_worsts to unlock loss other than L1 safe_loss *= args.safe_lambda safe_total_loss += safe_loss.item() batch_loss += safe_loss logging.debug(f'Epoch {epoch}: {i / nbatches * 100 :.2f}%. 
Batch loss {batch_loss.item()}') batch_loss.backward() nn.utils.clip_grad_norm_(net.parameters(), args.grad_clip) # doesn't seem to make a difference here.. opti.step() # inspect the trained weights after another epoch # meta.inspect_params(net.state_dict()) accu_total_loss /= nbatches safe_total_loss /= nbatches if scheduler is not None: scheduler.step(accu_total_loss + safe_total_loss) logging.info(f'[{utils.time_since(start)}] At epoch {epoch}: avg accuracy training loss {accu_total_loss}, ' + f'safe training loss {safe_total_loss}.') # Refine abstractions, note that restart from scratch may output much fewer abstractions thus imprecise. if (not args.no_refine) and len(curr_abs_lb) < args.max_abs_cnt: curr_abs_lb, curr_abs_ub, curr_abs_bitmap = v.split(curr_abs_lb, curr_abs_ub, curr_abs_bitmap, net, args.refine_top_k, # tiny_width=args.tiny_width, stop_on_k_new=args.refine_top_k) pass # summarize train_time = timer() - start if certified and args.n_props == 100: # the latest one is certified, use that final_accu = accuracies[-1] tot_certified = 500 else: # not yet having a certified model, thus pick the one with best accuracy so far and try certify it on all props if best_params is not None: logging.info(f'Post certify using best metric {best_metric}') net.load_state_dict(best_params) final_accu = eval_test(net, testset) tot_certified = 0 for i, (k, ps) in enumerate(props_dict.items()): assert len(ps) == 5 for j, p in enumerate(ps): tmp_v = Bisecter(args.dom, p) in_lb, in_ub = p.lbub(device) if tmp_v.try_certify(in_lb, in_ub, None, net, args.batch_size, timeout_sec=args.certify_timeout): tot_certified += (5 - j) logging.info(f'Certified prop based at {k} using {j}th eps, now {tot_certified}/{5*(i+1)}.') break pass serial_net = nn.Sequential(*[layer.export() for layer in net]) # save exported network in serialization torch.save(serial_net.cpu(), Path(RES_DIR, f'trained-{tot_certified}-{final_accu:.4f}-model.pt')) accuracies = [f'{v:.4f}' for v in accuracies] logging.info(f'Accuracy at every epoch: {accuracies}') logging.info(f'After {epoch} epochs / {utils.pp_time(train_time)}, ' + f'eventually the trained network got certified at {tot_certified} / 500 props, ' + f'with {final_accu:.4f} accuracy on test set.') return epoch, train_time, tot_certified, final_accu
5,334,949
def statRobustness(compromised, status): """produce data for robustness stats""" rob = {0:{"empty":0, "login based":0, "top 10 common":0, "company name":0}, 1:{"top 1000 common":0, "login extrapolation":0, "company context related":0, "4 char or less":0}, 2:{"top 1M common":0, "6 char or less":0, "2 charsets or less":0}, 3:{"present in attack wordlist":0, "present in locale attack wordlist":0, "leaked":0, "undetermined":0}} for acc in compromised: if status == 'all' or 'account_disabled' not in compromised[acc]["status"]: rob[compromised[acc]["robustness"]][compromised[acc]["reason"]] += 1 return rob
5,334,950
def test_create_datetime_index():
    """Tests ability to create an array of datetime objects from distinct
    arrays of input parameters"""
    arr = np.ones(4)
    dates = pytime.create_datetime_index(year=2012*arr, month=2*arr,
                                         day=28*arr, uts=np.arange(0, 4))
    assert dates[0] == pds.datetime(2012, 2, 28)
    assert dates[-1] == pds.datetime(2012, 2, 28, 0, 0, 3)
    assert len(dates) == 4
5,334,951
def ns_diff(newstr, oldstr):
    """ Calculate the diff. """
    if newstr == STATUS_NA:
        return STATUS_NA

    # if new is valid but old is not we should return new
    if oldstr == STATUS_NA:
        oldstr = '0'

    new, old = int(newstr), int(oldstr)
    return '{:,}'.format(max(0, new - old))
5,334,952
def get_crab(registry): """ Get the Crab Gateway :rtype: :class:`crabpy.gateway.crab.CrabGateway` # argument might be a config or a request """ # argument might be a config or a request regis = getattr(registry, 'registry', None) if regis is None: regis = registry return regis.queryUtility(ICrab)
5,334,953
def create_meal(): """Create a new meal. --- tags: - meals parameters: - in: body name: body schema: id: Meal properties: name: type: string description: the name of the meal description: type: string description: the description of the meal price: type: number format: float description: the cost of the meal scheduled_for: type: string format: date-time description: the date time that the meal is scheduled for responses: 201: description: Meal was successfully created schema: id: Meal 401: description: The user is not authenticated 422: description: The data failed validation 428: description: The current user has not added their address """ if current_user.location is None: raise PreconditionRequired(Errors.LOCATION_NOT_CREATED_YET) meal_data = MEAL_SCHEMA.load(request.json).data if 'tags' in meal_data: tags = meal_data.pop('tags') meal = Meal.create(location_id=current_user.location.id, **meal_data) meal.tags = tags else: meal = Meal.create(location_id=current_user.location.id, **meal_data) return jsonify(data=MEAL_SCHEMA.dump(meal).data, message=Success.MEAL_CREATED), 201
5,334,954
def buscaBinariaIterativa(alvo, array):
    """
    Returns the index of the array at which the target element is stored.

    Given the collection received as a parameter, identifies and returns
    the index at which the specified element is stored. If the element is
    not present in the collection, returns -1. Uses an iterative approach.

    Parameters
    ----------
    alvo : ?
        The element whose index is being searched for
    array : list
        The list in which the element's index must be identified

    Return
    ------
    index : int
        The index at which the target element is stored
    """
    min = 0
    max = len(array) - 1
    while (min <= max):
        mid = (min + max) // 2
        if (array[mid] == alvo):
            return mid
        else:
            if (array[mid] < alvo):
                min = mid + 1
            else:
                max = mid - 1
    return -1
5,334,955
async def test_percent_conv():
    """Test percentage conversion."""
    assert util.percent_conv(0.12) == 12.0
    assert util.percent_conv(0.123) == 12.3
5,334,956
def test_url_to_widget_info_regex(): """Test regex for parsing the source object name, source object id, widget name, mapped object name, mapped object id from URL.""" urls = [ ("https://grc-test.appspot.com/dashboard/", "dashboard", "", "", "", ""), ("https://grc-test.appspot.com/dashboard#data_asset_widget/", "dashboard", "", "data_asset_widget", "", ""), ("https://grc-test.appspot.com/data_assets/90#/clause/90/", "data_assets", 90, "info_widget", "clause", 90), ("https://grc-test.appspot.com/data_assets/90#/", "data_assets", 90, "info_widget", "", ""), ("https://grc-test.appspot.com/data_assets/90/", "data_assets", 90, "info_widget", "", ""), ("https://grc-test.appspot.com/data_assets/90#data_asset_widget/", "data_assets", 90, "data_asset_widget", "", ""), ("https://grc-test.appspot.com/data_assets/90#info_widget/", "data_assets", 90, "info_widget", "", ""), ("https://grc-test.appspot.com/data_assets/107/", "data_assets", 107, "info_widget", "", ""), ("https://grc-test.appspot.com/data_assets/107#task_group_widget/", "data_assets", 107, "task_group_widget", "", ""), (("https://grc-test.appspot.com/" "data_assets/107#info_widget/workflow/107/"), "data_assets", 107, "info_widget", "workflow", 107), ("https://grc-test.appspot.com/data_assets/107#/data_asset/107/", "data_assets", 107, "info_widget", "data_asset", 107), ] for (url, expected_source_object_name, expected_source_object_id, expected_widget_name, expected_mapped_object_name, expected_mapped_object_id) in urls: (source_object_name, source_object_id, widget_name, mapped_object_name, mapped_object_id) = re.search(regex.URL_WIDGET_INFO, url).groups() if source_object_id: source_object_id = int(source_object_id) if mapped_object_id: mapped_object_id = int(mapped_object_id) if widget_name == "" and source_object_name != "dashboard": widget_name = "info_widget" # if '#' in URL without name assert ( expected_source_object_name, expected_source_object_id, expected_widget_name, expected_mapped_object_name, expected_mapped_object_id) == ( source_object_name, source_object_id, widget_name, mapped_object_name, mapped_object_id)
5,334,957
def setHomePath(homePath):
    """Set the env variable HOME"""
    # Do some sanity/defensive stuff
    if homePath is None:
        raise Exception("homePath=None.")
    elif len(homePath) == 0:
        raise Exception("len(homePath)=0.")

    # We do some special stuff on windows
    if platform.system() == "Windows":
        os.environ["HOME"] = homePath
        if "USERPROFILE" in os.environ:
            os.environ["USERPROFILE"] = os.environ["HOME"]
        if "HOMEPATH" in os.environ:
            os.environ["HOMEPATH"] = os.environ["HOME"]
        if not os.path.isdir(os.environ["HOME"]):
            raise Exception(os.environ["HOME"] + " path not found.")
    else:
        # Not windows, set HOME env var
        os.environ["HOME"] = homePath
        if not os.path.isdir(os.environ["HOME"]):
            raise Exception(os.environ["HOME"] + " path not found.")
5,334,958
def sync_out_streams():
    """Just flush stdout and stderr to keep the streams in sync"""
    sys.stderr.flush()
    sys.stdout.flush()
5,334,959
def empiricalcdf(data, method='Hazen'): """Return the empirical cdf. Methods available: Hazen: (i-0.5)/N Weibull: i/(N+1) Chegodayev: (i-.3)/(N+.4) Cunnane: (i-.4)/(N+.2) Gringorten: (i-.44)/(N+.12) California: (i-1)/N Where i goes from 1 to N. """ i = np.argsort(np.argsort(data)) + 1. N = len(data) method = method.lower() if method == 'hazen': cdf = (i-0.5)/N elif method == 'weibull': cdf = i/(N+1.) elif method == 'california': cdf = (i-1.)/N elif method == 'chegodayev': cdf = (i-.3)/(N+.4) elif method == 'cunnane': cdf = (i-.4)/(N+.2) elif method == 'gringorten': cdf = (i-.44)/(N+.12) else: raise ValueError('Unknown method. Choose among Weibull, Hazen,' 'Chegodayev, Cunnane, Gringorten and California.') return cdf
5,334,960
def CCT_to_xy_Kang2002(CCT): """ Returns the *CIE XYZ* tristimulus values *CIE xy* chromaticity coordinates from given correlated colour temperature :math:`T_{cp}` using *Kang et al. (2002)* method. Parameters ---------- CCT : numeric or array_like Correlated colour temperature :math:`T_{cp}`. Returns ------- ndarray *CIE xy* chromaticity coordinates. Raises ------ ValueError If the correlated colour temperature is not in appropriate domain. References ---------- :cite:`Kang2002a` Examples -------- >>> CCT_to_xy_Kang2002(6504.38938305) # doctest: +ELLIPSIS array([ 0.313426 ..., 0.3235959...]) """ CCT = as_float_array(CCT) if np.any(CCT[np.asarray(np.logical_or(CCT < 1667, CCT > 25000))]): usage_warning(('Correlated colour temperature must be in domain ' '[1667, 25000], unpredictable results may occur!')) x = np.where( CCT <= 4000, -0.2661239 * 10 ** 9 / CCT ** 3 - 0.2343589 * 10 ** 6 / CCT ** 2 + 0.8776956 * 10 ** 3 / CCT + 0.179910, -3.0258469 * 10 ** 9 / CCT ** 3 + 2.1070379 * 10 ** 6 / CCT ** 2 + 0.2226347 * 10 ** 3 / CCT + 0.24039, ) cnd_l = [CCT <= 2222, np.logical_and(CCT > 2222, CCT <= 4000), CCT > 4000] i = -1.1063814 * x ** 3 - 1.34811020 * x ** 2 + 2.18555832 * x - 0.20219683 j = -0.9549476 * x ** 3 - 1.37418593 * x ** 2 + 2.09137015 * x - 0.16748867 k = 3.0817580 * x ** 3 - 5.8733867 * x ** 2 + 3.75112997 * x - 0.37001483 y = np.select(cnd_l, [i, j, k]) xy = tstack([x, y]) return xy
5,334,961
def tel_information(tel_number):
    """
    Check the number and return a dictionary containing its validation
    status and operator. If the number is not valid, it returns
    validation = 'False' and operator = 'None'.
    """
    validation = is_valid(tel_number)
    operator = tel_operator(tel_number)
    info_dict = {'validation': validation, 'operator': operator}
    return info_dict
5,334,962
def load_file(file_name: pathlib.Path) -> Dict[str, Any]: """ Load JSON or YAML file content into a dict. This is not intended to be the default load mechanism. It should only be used if a OSCAL object type is unknown but the context a user is in. """ content_type = FileContentType.to_content_type(file_name.suffix) with file_name.open('r', encoding=const.FILE_ENCODING) as f: if content_type == FileContentType.YAML: return yaml.load(f, yaml.FullLoader) elif content_type == FileContentType.JSON: return json.load(f)
5,334,963
def DocumentPlugin(): """ :return: document plugin class """ from tests.test_plugins.documentations_plugin import DocumentPlugin return DocumentPlugin
5,334,964
def read_in(file_index, normalized): """ Reads in a file and can toggle between normalized and original files :param file_index: patient number as string :param normalized: boolean that determines whether the files should be normalized or not :return: returns npy array of patient data across 4 leads """ if normalized == 1: data = np.load(os.path.join("Working_Data", "Normalized_Fixed_Dim_HBs_Idx" + file_index + ".npy")) else: data = np.load(os.path.join("Working_Data", "Fixed_Dim_HBs_Idx" + file_index + ".npy")) return data
5,334,965
def emitfuncs(): """emit functions to access node fields""" printf("int %Pop_label(NODEPTR_TYPE p) {\n" "%1%Passert(p, PANIC(\"NULL tree in %Pop_label\\n\"));\n" "%1return OP_LABEL(p);\n}\n\n"); printf("STATE_TYPE %Pstate_label(NODEPTR_TYPE p) {\n" "%1%Passert(p, PANIC(\"NULL tree in %Pstate_label\\n\"));\n" "%1return STATE_LABEL(p);\n}\n\n"); printf("NODEPTR_TYPE %Pchild(NODEPTR_TYPE p, int index) {\n" "%1%Passert(p, PANIC(\"NULL tree in %Pchild\\n\"));\n" "%1switch (index) {\n%1case 0:%1return LEFT_CHILD(p);\n" "%1case 1:%1return RIGHT_CHILD(p);\n%1}\n" "%1%Passert(0, PANIC(\"Bad index %%d in %Pchild\\n\", index));\n%1return 0;\n}\n\n")
5,334,966
def remove_extended(text): """ remove Chinese punctuation and Latin Supplement. https://en.wikipedia.org/wiki/Latin-1_Supplement_(Unicode_block) """ # latin supplement: \u00A0-\u00FF # notice: nbsp is removed here lsp_pattern = re.compile(r'[\x80-\xFF]') text = lsp_pattern.sub('', text) # chinese special character # chc_pattern = re.compile(r'[\r\t\n\.\!\/_,$%^*(+\"\')]|[+——()?【】“”!,。?、~@#¥%……&*()]') # text = chc_pattern.sub('',text) return text
5,334,967
def appointment_letter(request, tid): """Display the appointment letter.""" paf = get_object_or_404(Operation, pk=tid) return render( request, 'transaction/appointment_letter.html', {'paf': paf}, )
5,334,968
def sample(): """ Sample command """
5,334,969
def balance_targets(sentences: Iterable[Sentence], method: str = "downsample_o_cat", shuffle=True) \
        -> Iterable[Sentence]:
    """
    Oversamples and/or undersamples training sentences by a number of targets.
    This is useful for linear shallow classifiers, which are prone to simply
    overfitting the most-occurring category.
    See the source code for a documentation of resample methods logic

    :param shuffle: whether to shuffle the output
    :param sentences: sentences to resample
    :param method: resample method, one of {downsample_o_cat, downsample_o_pzk_cats, all_upsampled, remove_o_cat}
    :return: resampled, possibly shuffled input sentences
    """
    import random

    # take the second-top count from categories apart from "Other"
    targets = [s.label for s in sentences]
    second_top_count = sorted([sum([target == cat for target in targets]) for cat in set(targets) - {"O"}])[-2]

    if method == "downsample_o_cat":
        # downsample "other" category to second-most-occurring category count
        out_sentences = list((random.sample([s for s in sentences if s.label == "O"], second_top_count) +
                              [s for s in sentences if s.label != "O"]))
    elif method == "downsample_o_pzk_cats":
        # downsample "other" + "P_ZK" (experience description) category to third-most-occurring category count
        out_sentences = list((random.sample([s for s in sentences if s.label == "O"], second_top_count) +
                              [s for s in sentences if s.label != "O"]))
        out_sentences = list((random.sample([s for s in out_sentences if s.label == "P_ZK"], second_top_count) +
                              [s for s in out_sentences if s.label != "P_ZK"]))
    elif method == "all_upsampled":
        # upsample all categories to a count of most-occurring one (presumably "other" category)
        from itertools import chain
        out_sentences = list(chain(*[random.choices([s for s in sentences if s.label == cat], k=second_top_count)
                                     for cat in set(targets)]))
    elif method == "remove_o_cat":
        # completely remove sentences of "other" category
        out_sentences = [s for s in sentences if s.label != "O"]
    else:
        out_sentences = sentences

    if shuffle:
        # random shuffle output sentences
        random.shuffle(out_sentences)

    return out_sentences
5,334,970
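A minimal sketch of how balance_targets above could be exercised. Sentence here is only a stand-in for whatever sentence object the project uses; the resampling logic only reads a .label attribute, and the label names and counts below are invented:

from collections import Counter
from dataclasses import dataclass

@dataclass
class Sentence:                    # stand-in: balance_targets only reads .label
    text: str
    label: str

data = ([Sentence("other %d" % i, "O") for i in range(50)]
        + [Sentence("exp %d" % i, "P_ZK") for i in range(20)]
        + [Sentence("skill %d" % i, "SKILL") for i in range(10)])

balanced = balance_targets(data, method="downsample_o_cat")
print(Counter(s.label for s in balanced))   # "O" is downsampled to the second-highest non-"O" count (10 here)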
def create_subscription(post, user, sub_type=None, update=False):
    """
    Creates a subscription to a post and recomputes the subscription count on the root post.
    """
    subs = Subscription.objects.filter(post=post.root, user=user)
    sub = subs.first()

    default = Subscription.TYPE_MAP.get(user.profile.message_prefs, Subscription.LOCAL_MESSAGE)

    empty = sub_type is None
    # Get the current sub type from what's given or the existing sub
    sub_type = None if empty else sub_type

    # No type has been given so default
    sub_type = sub_type or default

    # Ensure the sub type is not set to something wrong
    if sub and update:
        # Update an existing subscription
        sub.type = sub_type
        sub.save()
    else:
        # Drop all existing subscriptions for the user by default.
        subs.delete()
        Subscription.objects.create(post=post.root, user=user, type=sub_type)

    # Recompute post subscription.
    subs_count = Subscription.objects.filter(post=post.root).exclude(type=Subscription.NO_MESSAGES).count()

    # Update root subscription counts.
    Post.objects.filter(pk=post.root.pk).update(subs_count=subs_count)
5,334,971
def discriminator_loss(real_output, fake_output, batch_size): """ Computes the discriminator loss after training with HR & fake images. :param real_output: Discriminator output of the real dataset (HR images). :param fake_output: Discriminator output of the fake dataset (SR images). :param batch_size: Batch size. :return: Discriminator loss. """ real_loss = tf.nn.compute_average_loss(cross_entropy(tf.ones_like(real_output), real_output), global_batch_size=batch_size) fake_loss = tf.nn.compute_average_loss(cross_entropy(tf.zeros_like(fake_output), fake_output), global_batch_size=batch_size) total_loss = real_loss + fake_loss return total_loss
5,334,972
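A sketch of calling discriminator_loss above outside a distribution strategy. cross_entropy is defined elsewhere in the original module; the definition below is only a plausible stand-in (a per-example binary cross-entropy), and the scores are made up:

import tensorflow as tf

cross_entropy = tf.keras.losses.BinaryCrossentropy(
    from_logits=False, reduction=tf.keras.losses.Reduction.NONE)

real_output = tf.constant([[0.9], [0.8]])   # discriminator scores for HR images
fake_output = tf.constant([[0.3], [0.1]])   # discriminator scores for SR images
loss = discriminator_loss(real_output, fake_output, batch_size=2)
print(float(loss))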
def NNx(time, IBI, ibimultiplier=1000, x=50):
    """
    Computes the Heart Rate Variability metrics NNx and pNNx.

    Args:
        time (pandas.DataFrame column or pandas series): time column (accepted for API consistency
            but not used in the computation)
        IBI (pandas.DataFrame column or pandas series): column with inter-beat intervals
        ibimultiplier (IntegerType): default = 1000; transforms IBI to milliseconds. If data is
            already in ms, set as 1
        x (IntegerType): default = 50; threshold in ms that differences between successive
            heartbeat intervals must exceed

    Returns:
        NNx (FloatType): the number of times successive heartbeat intervals differ by more than x ms
        pNNx (FloatType): NNx divided by the total number of NN (R-R) intervals, as a percentage
    """
    ibi = IBI * ibimultiplier

    differences = abs(np.diff(ibi))
    n = np.sum(differences > x)
    p = (n / len(differences)) * 100

    return (round(n * 10) / 10), (round(p * 10) / 10)
5,334,973
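A small worked example for NNx above with synthetic inter-beat intervals (values invented; numpy is assumed to be imported as np at module level):

import numpy as np

ibi = np.array([0.80, 0.82, 0.90, 0.88, 0.95, 0.94])   # IBIs in seconds; ibimultiplier converts to ms
t = np.cumsum(ibi)                                      # the time column is accepted but not used by the metric
nn50, pnn50 = NNx(t, ibi, ibimultiplier=1000, x=50)
print(nn50, pnn50)   # successive differences are 20, 80, 20, 70, 10 ms -> NN50 = 2.0, pNN50 = 40.0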
def datetimeobj_YmdHMS(value): """Convert timestamp string to a datetime object. Timestamps strings like '20130618120000' are able to be converted by this function. Args: value: A timestamp string in the format '%Y%m%d%H%M%S'. Returns: A datetime object. Raises: ValueError: If timestamp is invalid. Note: The timezone is assumed to be UTC/GMT. """ i = int(value) S = i M = S//100 H = M//100 d = H//100 m = d//100 Y = m//100 return datetime.datetime( Y % 10000, m % 100, d % 100, H % 100, M % 100, S % 100, tzinfo=TZ_GMT )
5,334,974
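A worked example of the digit-peeling arithmetic in datetimeobj_YmdHMS above. TZ_GMT is a module constant in the original code; the stand-in below assumes it is a UTC tzinfo:

import datetime

TZ_GMT = datetime.timezone.utc   # stand-in for the module's UTC tzinfo constant

print(datetimeobj_YmdHMS("20130618120000"))   # 2013-06-18 12:00:00+00:00
# int("20130618120000") % 100 yields the seconds, // 100 % 100 the minutes,
# and so on, peeling one two-digit field per division until only the year remains.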
def make_roc_curves(args): """ NAME make_roc_curves PURPOSE Given some collection pickles, this script produces the one roc plot that will be put someplace in the SW system paper. COMMENTS FLAGS -h Print this message INPUTS collection pickles colors for the lines line styles labels OUTPUTS roc png plot EXAMPLE BUGS - Code is not tested yet... AUTHORS This file is part of the Space Warps project, and is distributed under the MIT license by the Space Warps Science Team. http://spacewarps.org/ HISTORY 2013-07-01 started Davis (KIPAC) 2014-09-06 updated to only use collections. """ # ------------------------------------------------------------------ flags = {'output_directory': './', 'collections': [], 'labels': [], 'line_styles': [], 'colors': []} for arg in args: if arg in flags: flags[arg] = args[arg] else: print "make_roc_curves: unrecognized flag ",arg print flags # check that collections etc are equal length if len(flags['collections']) != len(flags['labels']): raise Exception('Collections and labels must be same length!') if len(flags['collections']) != len(flags['line_styles']): raise Exception('Collections and line_styles must be same length!') if len(flags['collections']) != len(flags['colors']): raise Exception('Collections and colors must be same length!') n_min = 0 output_directory = flags['output_directory'] collections = flags['collections'] fig, ax = plt.subplots(figsize=(10,8)) for i, collection_path in enumerate(collections): # ------------------------------------------------------------------ # Read in collection object: collection = swap.read_pickle(collection_path, 'collection') print "make_roc_curves: collection {0} subject numbers: {1}".format(i, len(collection.list())) # ------------------------------------------------------------------ # set up data for roc plots y_true = np.array([]) y_score = np.array([]) for ID in collection.list(): subject = collection.member[ID] if (subject.category == 'training'): n_assessment = len(subject.annotationhistory['ItWas']) if (n_assessment > n_min): truth = {'LENS': 1, 'NOT': 0}[subject.truth] y_true = np.append(y_true, truth) y_score = np.append(y_score, subject.mean_probability) fpr, tpr, threshold = roc_curve(y_true, y_score) color = flags['colors'][i] label = flags['labels'][i] line_style = flags['line_styles'][i] ax.plot(fpr, tpr, color, label=label, linestyle=line_style, linewidth=3) ax.set_xlim(0, 0.1) ax.set_ylim(0.8, 1) ax.set_xlabel('False Positive Rate') ax.set_ylabel('True Positive Rate') plt.legend(loc='lower right') pngfile = output_directory + 'roc_curve.png' plt.savefig(pngfile) print "make_roc_curves: roc curve saved to "+pngfile # ------------------------------------------------------------------ print "make_roc_curves: all done!" return
5,334,975
def drawBoundingBoxes(ax, x, y, z, w, l, h, r, col='b', linewidth=2):
    """
    Draws bounding box lines to the given axis
    Params:
        (x, y, z): center point coordinates of an object
        (w, l, h): width, length and height of the bounding box / object
        r: rotation in radians
    """
    # Do this, because we have center point
    l = l / 2.0
    w = w / 2.0

    # Calculate corner locations with rotation
    x1 = x + (w * math.cos(r) + l * math.sin(r))
    y1 = y + (-w * math.sin(r) + l * math.cos(r))

    x2 = x + (-w * math.cos(r) + l * math.sin(r))
    y2 = y + (w * math.sin(r) + l * math.cos(r))

    x3 = x + (-w * math.cos(r) - l * math.sin(r))
    y3 = y + (w * math.sin(r) - l * math.cos(r))

    x4 = x + (w * math.cos(r) - l * math.sin(r))
    y4 = y + (-w * math.sin(r) - l * math.cos(r))

    # Bottom rectangle
    ax.plot3D([x3, x4], [y3, y4], [z, z], col, linewidth=linewidth, alpha=0.8)
    ax.plot3D([x2, x1], [y2, y1], [z, z], col, linewidth=linewidth, alpha=0.8)
    ax.plot3D([x3, x2], [y3, y2], [z, z], col, linewidth=linewidth, alpha=0.8)
    ax.plot3D([x4, x1], [y4, y1], [z, z], col, linewidth=linewidth, alpha=0.8)

    # Top rectangle
    ax.plot3D([x3, x4], [y3, y4], [z+h, z+h], col, linewidth=linewidth, alpha=0.8)
    ax.plot3D([x2, x1], [y2, y1], [z+h, z+h], col, linewidth=linewidth, alpha=0.8)
    ax.plot3D([x3, x2], [y3, y2], [z+h, z+h], col, linewidth=linewidth, alpha=0.8)
    ax.plot3D([x4, x1], [y4, y1], [z+h, z+h], col, linewidth=linewidth, alpha=0.8)

    # Vertical lines
    ax.plot3D([x1, x1], [y1, y1], [z, z+h], col, linewidth=linewidth, alpha=0.8)
    ax.plot3D([x2, x2], [y2, y2], [z, z+h], col, linewidth=linewidth, alpha=0.8)
    ax.plot3D([x3, x3], [y3, y3], [z, z+h], col, linewidth=linewidth, alpha=0.8)
    ax.plot3D([x4, x4], [y4, y4], [z, z+h], col, linewidth=linewidth, alpha=0.8)
5,334,976
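A usage sketch for drawBoundingBoxes above; the box pose is invented, and math/matplotlib are assumed to be available at module level:

import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the 3d projection on older matplotlib

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# A made-up object: centred at (2, 1, 0), 1.5 m wide, 4 m long, 1.6 m high, rotated 30 degrees
drawBoundingBoxes(ax, x=2.0, y=1.0, z=0.0, w=1.5, l=4.0, h=1.6, r=math.radians(30), col='r')
plt.show()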
def home(): """List devices.""" devices = Device.query.all() return render_template('devices/home.html', devices=devices)
5,334,977
def translate_value(document_field, form_value): """ Given a document_field and a form_value this will translate the value to the correct result for mongo to use. """ value = form_value if isinstance(document_field, ReferenceField): value = document_field.document_type.objects.get(id=form_value) if form_value else None return value
5,334,978
def error_404(error): """Custom 404 Error Page""" return render_template("error.html", error=error), 404
5,334,979
def sum_num(n1, n2):
    """
    Get the sum of two numbers
    :param n1: first number
    :param n2: second number
    :return: sum of n1 and n2
    """
    return n1 + n2
5,334,980
def listb(containerclient):
    """
    Lists the blobs contained in a container by printing their names.
    Takes a container client as a parameter.
    """
    blob_list = containerclient.list_blobs()
    for blob in blob_list:
        print(blob.name)
5,334,981
def intensity_slice_volume(kernel_code, image_variables, g_variables, blockdim, bound_box, vol_dim, voxel_size, poses, out_points=False): """ Function that slices an intensity volume with fan shaped sections section defined by poses of a curvilinear array :param kernel_code: CUDA C++ kernel code to compile :param image_variables: image dimensioning variable list :param g_variables: All preallocated GPU variables as described in the preallocation function. A list with the following indexes: 0 - fan positions in 2D 1 - fan positions in 3D 2 - intensities mapped in fan positions 3 - the target intensity volume 4 - the output images in image space 5 - the 2D fan mask outline :param blockdim: block dimensions for CUDA kernels :param bound_box: bounding box of target volume :param vol_dim: 3D intensity volume dimensions :param voxel_size: voxel_size of the volume :param poses: input set of poses :param out_points: bool to get fan positions or not :return: positions in 3D, stack of resulting images """ # First, compile kernel code with SourceModule cuda_modules = SourceModule(kernel_code) # Get image variables from input fan_parameters = image_variables[0] slice_dim = image_variables[1] image_dim = image_variables[2] pixel_size = image_variables[3] # Define voxel size for intersection of intensity volume voxel_size = voxel_size.astype(np.float32) # Get size of one image, useful to get array of images im_size = image_dim[0] * image_dim[1] # Get block and grid dimensions as int blockdim_x = int(blockdim[0]) blockdim_y = int(blockdim[1]) griddim_x = int(slice_dim[0] / blockdim_x) griddim_y = int(slice_dim[1] / blockdim_y) image_num = int(slice_dim[2]) # Convert poses to 1D array to be input in a kernel pose_array = np.zeros((1, 9 * image_num)).astype(np.float32) # And an array to offset fan position per image plane offset_array = np.zeros((1, 3 * image_num)).astype(np.float32) for p_ind in range(image_num): pose = poses[:, 4 * p_ind:4 * (p_ind + 1)] # Allocate the pose pose_array[0, 9 * p_ind:9 * (p_ind + 1)] = \ np.hstack((pose[0, 0:2], pose[0, 3], pose[1, 0:2], pose[1, 3], pose[2, 0:2], pose[2, 3])) # Allocate the offset offset_array[0, 3 * p_ind:3 * (p_ind + 1)] = pose[0:3, 1] # 1-Run position computation kernel, acts on index 0 and 1 of # the gpu variables, get kernel transform_kernel = cuda_modules.get_function("transform") # Then run it transform_kernel(g_variables[1], g_variables[0], drv.In(pose_array), drv.In(offset_array), drv.In(fan_parameters), np.int32(image_num), block=(blockdim_x, blockdim_y, 3), grid=(griddim_x, griddim_y, image_num)) # Collect the output to a CPU array positions_3d = np.empty((1, np.prod(slice_dim) * 3), dtype=np.float32) # In case points are to be used or visualised (with out_points as True) if out_points is True: g_variables[1].get(positions_3d) positions_3d = positions_3d.reshape([3, np.prod(slice_dim)]).T # 2-Next step, run slicing kernel, where intensity values are # placed in the positions. 
    # Define volume dimensions
    intensity_volume_dims = np.hstack((bound_box[0, :], vol_dim[0],
                                       vol_dim[1], vol_dim[2])).astype(np.float32)
    # Call kernel from file
    slice_kernel = cuda_modules.get_function('weighted_slice')
    slice_kernel(g_variables[2],
                 g_variables[1],
                 g_variables[3],
                 drv.In(intensity_volume_dims),
                 drv.In(voxel_size),
                 drv.In(slice_dim),
                 block=(blockdim_x, blockdim_y, 1),
                 grid=(griddim_x, griddim_y, image_num))

    # 3-Map pixels to fan like image
    # Define bounds of image output in 2d coordinates as float
    image_bounding_box = np.array([-image_dim[0] * pixel_size[0] / 2 * 1000,
                                   0, image_dim[0], image_dim[1]]).astype(np.float32)

    # Allocate output images, the intensity image as a float, and the
    # fan outline as an int. These must be in CPU.
    intensity_images = np.empty((1, np.prod(image_dim)), dtype=np.float32)
    masks = np.empty((1, np.prod(image_dim)), dtype=np.int32)

    # Call kernel from file
    map_kernel = cuda_modules.get_function('intensity_map_back')
    # Then run it, multiplying coordinate values by 1000 in order
    # to avoid sampling errors
    map_kernel(g_variables[4],
               g_variables[5],
               g_variables[2],
               g_variables[0] * 1000,
               drv.In(slice_dim),
               drv.In(image_bounding_box),
               drv.In(pixel_size * 1000),
               block=(blockdim_x, blockdim_y, 1),
               grid=(griddim_x, griddim_y, image_num))

    # Create a volume with generated images
    intensity_image_array = np.zeros((image_dim[1], image_dim[0],
                                      image_dim[2])).astype(np.float32)

    # Gather the results
    g_variables[4].get(intensity_images)
    g_variables[4].fill(0)
    g_variables[5].get(masks)
    g_variables[5].fill(0)

    for plane in range(image_num):
        # Get image and reshape it
        current_image = intensity_images[0, im_size * plane:im_size * (plane + 1)]
        # Get masks that weight values
        current_mask = masks[0, im_size * plane:im_size * (plane + 1)]
        # Normalise by the number of points added to the image output, using the
        # occurrences output by the mask, ignoring divide errors
        with np.errstate(divide='ignore'):
            current_image = np.divide(current_image, current_mask)

        current_image = current_image.reshape(image_dim[0], image_dim[1]).T
        # Scale intensities, by setting nan values to minimum
        nan_indexes = np.where(np.isnan(current_image))
        current_image[nan_indexes] = np.nanmin(current_image)
        # Allocate to output
        intensity_image_array[:, :, plane] = current_image

    # Output a stack of images, where each z-slice has a plane,
    # and the corresponding 3D positions
    return positions_3d, intensity_image_array
5,334,982
def mean_filter(img, kernel_size): """take mean value in the neighbourhood of center pixel. """ return cv2.blur(img, ksize=kernel_size)
5,334,983
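Note that cv2.blur expects ksize as a (width, height) tuple, so kernel_size should be passed as a tuple. A minimal sketch with a random placeholder image:

import numpy as np
import cv2

img = (np.random.rand(64, 64) * 255).astype(np.uint8)   # placeholder image
smoothed = mean_filter(img, kernel_size=(5, 5))          # 5x5 box filter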
def main(): """Return the module instance.""" return AnsibleModule( argument_spec=dict( data=dict(default=None), path=dict(default=None, type=str), file=dict(default=None, type=str), ) )
5,334,984
def load_RegNetwork_interactions( root_dir: Optional[Path] = None, ) -> pd.DataFrame: """ Loads RegNetwork interaction datafile. Downloads the file first if not already present. """ file = _download_RegNetwork(root_dir) return pd.read_csv( file, delimiter="\t", header=None, names=["g1", "id1", "g2", "id2"] )
5,334,985
def hxlspec_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr): """ Run hxlspec with command-line arguments. Args: args (list): a list of command-line arguments stdin (io.IOBase): alternative standard input (mainly for testing) stdout (io.IOBase): alternative standard output (mainly for testing) stderr (io.IOBase): alternative standard error (mainly for testing) """ def get_json (url_or_filename): if not url_or_filename: return json.load(stdin) if re.match(r'^(?:https?|s?ftp)://', url_or_filename.lower()): headers = make_headers(args) response = requests.get(url_or_filename, verify=(not args.ignore_certs), headers=headers) response.raise_for_status() return response.json() else: with open(url_or_filename, "r") as input: return json.load(input) parser = make_args('Process a HXL JSON spec') args = parser.parse_args(args) do_common_args(args) spec = get_json(args.infile) source = hxl.io.from_spec(spec, allow_local_ok=True) with make_output(args, stdout) as output: hxl.io.write_hxl(output.output, source, show_tags=not args.strip_tags)
5,334,986
def search_restaurant(house_list, filter_by_distance=True, search_range=3, sort=True, top_k=100, save_csv=False,
                      offline_save=True):
    """Scrapes restaurant information from opentable.com and calculates a restaurant score for each house

    :param house_list: All of the house information in a list
    :param filter_by_distance: Whether to filter restaurants by their distance to each house
    :param search_range: The distance range for filtering by distance, IN MILES
    :param sort: For each restaurant dataframe, whether to sort the restaurants by their scores
    :param top_k: For each restaurant dataframe, only return the restaurants with the highest k scores
    :param save_csv: Whether to save each restaurant dataframe as a csv file
    :param offline_save: Whether to save the restaurant score of each house in a text file for offline mode
    """
    # house_list is the list generated by match_house function
    scores = []
    house_names = []
    rest_dfs = []

    csv_path = r"restaurant_csv"
    check_path(csv_path)

    time.sleep(0.05)
    house_gen = tqdm(house_list, desc="Searching for restaurants", file=sys.stdout)
    for house in house_gen:
        house_latitude, house_longitude = house[1], house[2]
        house_name = house[0]
        csv_name = "%s_restaurants.csv" % house_name
        save_path = csv_path + "/" + csv_name
        rest_score, rest_df = generate_restaurant_score(house_latitude, house_longitude, filter_by_distance,
                                                        search_range, sort, top_k, save_csv, save_path)
        scores.append(rest_score)
        house_names.append(house_name)
        rest_dfs.append(rest_df)

    scores = np.array(scores)
    scores = normalize(scores)

    # The restaurant part of score for each house with house name is in `rest_scores`
    rest_scores = list(zip(house_names, list(scores)))

    offline_path = "restaurant_score_offline.txt"
    if offline_save:
        # writelines() does not append newlines, so add them explicitly to keep one record per line
        write_content = ["%s %f\n" % (house_name, score) for house_name, score in rest_scores]
        with open(offline_path, "w") as file:
            file.writelines(write_content)

    return rest_scores, rest_dfs
5,334,987
def get_model_config(model, dataset): """Map model name to model network configuration.""" if 'cifar10' == dataset.name: return get_cifar10_model_config(model) if model == 'vgg11': mc = vgg_model.Vgg11Model() elif model == 'vgg16': mc = vgg_model.Vgg16Model() elif model == 'vgg19': mc = vgg_model.Vgg19Model() elif model == 'lenet': mc = lenet_model.Lenet5Model() elif model == 'googlenet': mc = googlenet_model.GooglenetModel() elif model == 'overfeat': mc = overfeat_model.OverfeatModel() elif model == 'alexnet': mc = alexnet_model.AlexnetModel() elif model == 'trivial': mc = trivial_model.TrivialModel() elif model == 'inception3': mc = inception_model.Inceptionv3Model() elif model == 'inception4': mc = inception_model.Inceptionv4Model() elif model == 'resnet50' or model == 'resnet50_v2': mc = resnet_model.ResnetModel(model, (3, 4, 6, 3)) elif model == 'resnet101' or model == 'resnet101_v2': mc = resnet_model.ResnetModel(model, (3, 4, 23, 3)) elif model == 'resnet152' or model == 'resnet152_v2': mc = resnet_model.ResnetModel(model, (3, 8, 36, 3)) else: raise KeyError('Invalid model name \'%s\' for dataset \'%s\'' % (model, dataset.name)) return mc
5,334,988
def flacwrite(x, fs, bits, flacfile, normalize=False, compress=True):
    """
    Write the signal to a temporary WAV file first, then invoke the flac
    command-line tool to convert it.
    """
    open(flacfile, 'w').close()  # lock file
    wavfile = chext(flacfile, "wav")
    wavwrite(x, fs, bits, wavfile, normalize, compress)
    command = [FLACPATH, "--delete-input-file", wavfile, "-f", "-o", flacfile]
    # print(command)
    ret = perform(command)
    print(ret)
    return
5,334,989
def dev(session: nox.Session) -> None: """ Sets up a python development environment for the project. This session will: - Create a python virtualenv for the session - Install the `virtualenv` cli tool into this environment - Use `virtualenv` to create a global project virtual environment - Invoke the python interpreter from the global project environment to install the project and all it's development dependencies. """ session.install("virtualenv") # VENV_DIR here is a pathlib.Path location of the project virtualenv # e.g. .venv session.run("virtualenv", os.fsdecode(VENV_DIR), silent=True) python = os.fsdecode(VENV_DIR.joinpath("bin/python")) # Use the venv's interpreter to install the project along with # all it's dev dependencies, this ensures it's installed in the right way session.run(python, "-m", "pip", "install", "-e", ".[dev,test,doc]", external=True)
5,334,990
def utc_now():
    """Return the current UTC timestamp as integer seconds since the epoch."""
    # strftime("%s") is platform-dependent and applies the local timezone,
    # so convert the UTC time tuple explicitly instead.
    import calendar
    now = datetime.datetime.utcnow()
    return calendar.timegm(now.timetuple())
5,334,991
def rrc_filter(alpha, length, osFactor, plot=False): """ Generates the impulse response of a root raised cosine filter. Args: alpha (float): Filter roll-off factor. length (int): Number of symbols to use in the filter. osFactor (int): Oversampling factor (number of samples per symbol). plot (bool): Enable or disable plotting of filter impulse response. Returns: (NumPy array): Filter coefficients for use in np.convolve. """ if alpha < 0 or alpha > 1.0: raise error.WfmBuilderError('Invalid \'alpha\' chosen. Use something between 0.1 and 1.') filterOrder = length * osFactor # Make GOOD and sure that filterOrder is an integer value filterOrder = round(filterOrder) if filterOrder % 2: raise error.WfmBuilderError('Must use an even number of filter taps.') delay = filterOrder / 2 t = np.arange(-delay, delay) / osFactor # Calculate the impulse response without warning about the inevitable divide by zero operations # I promise we will deal with those down the road with np.errstate(divide='ignore', invalid='ignore'): h = -4 * alpha / osFactor * (np.cos((1 + alpha) * np.pi * t) + np.sin((1 - alpha) * np.pi * t) / (4 * alpha * t)) / (np.pi * ((4 * alpha * t) ** 2 - 1)) # Find middle point of filter and manually populate the value # np.where returns a list of indices where the argument condition is True in an array. Nice. idx0 = np.where(t == 0) h[idx0] = -1 / (np.pi * osFactor) * (np.pi * (alpha - 1) - 4 * alpha) # Define machine precision used to check for near-zero values for small-number arithmetic eps = np.finfo(float).eps # Find locations of divide by zero points divZero = abs(abs(4 * alpha * t) - 1) # np.where returns a list of indices where the argument condition is True. Nice. idx1 = np.where(divZero < np.sqrt(eps)) # Manually populate divide by zero points h[idx1] = 1 / (2 * np.pi * osFactor) * (np.pi * (alpha + 1) * np.sin(np.pi * (alpha + 1) / (4 * alpha)) - 4 * alpha * np.sin(np.pi * (alpha - 1) / (4 * alpha)) + np.pi * (alpha - 1) * np.cos(np.pi * (alpha - 1) / (4 * alpha))) # Normalize filter energy to 1 h = h / np.sqrt(np.sum(h ** 2)) if plot: plt.plot(t, h) plt.title('Filter Impulse Response') plt.ylabel('h(t)') plt.xlabel('t') plt.show() return h
5,334,992
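A sketch of pulse-shaping with the coefficients returned by rrc_filter above; the QPSK symbol stream is invented for illustration, and length * osFactor must come out even as the function requires:

import numpy as np

taps = rrc_filter(alpha=0.35, length=8, osFactor=4)      # 32 taps
bits_i = 2 * np.random.randint(0, 2, 256) - 1
bits_q = 2 * np.random.randint(0, 2, 256) - 1
symbols = bits_i + 1j * bits_q                            # random QPSK symbols
upsampled = np.zeros(len(symbols) * 4, dtype=complex)
upsampled[::4] = symbols                                  # insert zeros between symbols (osFactor = 4)
shaped = np.convolve(upsampled, taps)                     # root-raised-cosine shaped waveform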
def test_unknown_metadata_arg(): """Unrecognized metadata options should result in an error""" search = fixture_dir('listing', 'valid-json') result = npc.commands.listing.make_list(search, fmt='md', metadata='asdf') assert not result.success
5,334,993
def get_available_services(project_dir: str): """Get standard services bundled with stakkr.""" services_dir = file_utils.get_dir('static') + '/services/' conf_files = _get_services_from_dir(services_dir) services = dict() for conf_file in conf_files: services[conf_file[:-4]] = services_dir + conf_file services = _add_local_services(project_dir, services) return services
5,334,994
def build_insert(table, to_insert): """ Build an insert request. Parameters ---------- table : str Table where query will be directed. to_insert: iterable The list of columns where the values will be inserted. Returns ------- str Built query. """ sql_q = 'INSERT INTO \"' + table + '\" (' sql_q += ', '.join('{0}'.format(w) for w in to_insert) sql_q += ') VALUES (' sql_q += ', '.join(':{0}'.format(w) for w in to_insert) sql_q += ')' return sql_q
5,334,995
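build_insert produces a statement with ":column" named placeholders; for example (table and column names invented):

query = build_insert("users", ["name", "email"])
print(query)   # INSERT INTO "users" (name, email) VALUES (:name, :email)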
def get_reference_data(fname): """ Load JSON reference data. :param fname: Filename without extension. :type fname: str """ base_dir = Path(__file__).resolve().parent fpath = base_dir.joinpath('reference', 'data', fname + '.json') with fpath.open() as f: return json.load(f)
5,334,996
def _is_l10n_ch_isr_issuer(account_ref, currency_code):
    """ Returns True if the string account_ref is a valid ISR issuer.
    An ISR issuer is a postal account number that starts with 01 (CHF) or 03 (EUR).
    """
    if (account_ref or '').startswith(ISR_SUBSCRIPTION_CODE[currency_code]):
        return _is_l10n_ch_postal(account_ref)
    return False
5,334,997
def little_endian_uint32(i): """Return the 32 bit unsigned integer little-endian representation of i""" s = struct.pack('<I', i) return struct.unpack('=I', s)[0]
5,334,998
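A worked example for little_endian_uint32 above; the result depends on the host byte order because the value is packed as little-endian bytes and re-read in native order:

print(hex(little_endian_uint32(0x12345678)))
# 0x12345678 on a little-endian host, 0x78563412 on a big-endian one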
def evaluate_scores(scores_ID, scores_OOD):
    """Calculates classification performance (ROC AUC, FPR@TPR95) based on lists of scores.

    Returns:
        roc_auc, fpr95
    """
    labels_in = np.ones(scores_ID.shape)
    labels_out = np.zeros(scores_OOD.shape)
    y = np.concatenate([labels_in, labels_out])
    score = np.concatenate([scores_ID, scores_OOD])
    fpr, tpr, _ = roc_curve(y, score)
    roc_auc = auc(fpr, tpr)
    ii = np.where(tpr > 0.95)[0][0]
    return roc_auc, fpr[ii]
5,334,999
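A quick synthetic check of evaluate_scores above; the score distributions are made up, and roc_curve/auc come from sklearn.metrics as in the original module:

import numpy as np
from sklearn.metrics import roc_curve, auc

rng = np.random.default_rng(0)
scores_id = rng.normal(loc=1.0, scale=1.0, size=1000)    # in-distribution scores (higher = more ID-like)
scores_ood = rng.normal(loc=-1.0, scale=1.0, size=1000)  # out-of-distribution scores
roc_auc, fpr95 = evaluate_scores(scores_id, scores_ood)
print("ROC AUC = %.3f, FPR@TPR95 = %.3f" % (roc_auc, fpr95))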