content
stringlengths
22
815k
id
int64
0
4.91M
def vector(*components):
    """A single vector in any coordinate basis, as a numpy array."""
    return N.array(components)
5,329,400
def arcmin_to_deg(arcmin: float) -> float:
    """Convert an angle from arcminutes to degrees (60 arcmin = 1 degree)."""
    ARCMIN_PER_DEGREE = 60
    return arcmin / ARCMIN_PER_DEGREE
5,329,401
def soft_embedding_lookup(embedding, soft_ids):
    """Transforms soft ids (e.g., probability distribution over ids) into
    embeddings, by mixing the embedding vectors with the soft weights.

    Args:
        embedding: A Tensor of shape `[num_classes] + embedding-dim`
            containing the embedding vectors. Embedding can have
            dimensionality > 1, i.e., :attr:`embedding` can be of shape
            `[num_classes, emb_dim_1, emb_dim_2, ...]`
        soft_ids: A Tensor of weights (probabilities) used to mix the
            embedding vectors.

    Returns:
        A Tensor of shape `shape(soft_ids)[:-1] + shape(embedding)[1:]`.
        For example, if `shape(soft_ids) = [batch_size, max_time, vocab_size]`
        and `shape(embedding) = [vocab_size, emb_dim]`, then the return
        tensor has shape `[batch_size, max_time, emb_dim]`.

    Example::

        decoder_outputs, ... = decoder(...)
        soft_seq_emb = soft_embedding_lookup(
            embedding, tf.nn.softmax(decoder_outputs.logits))
    """
    # Contract the last axis of the weights against the class axis of the
    # embedding table.
    weights = tf.to_float(soft_ids)
    return tf.tensordot(weights, embedding, [-1, 0])
5,329,402
def test_user3_biosamples_access(user3_token):
    """Make sure user3 has access to open1, open2, registered3, controlled4,
    and controlled6"""
    response = helper_get_katsu_response(user3_token, f"{KATSU_URL}/api/biosamples")
    assert response.status_code == 200
    payload = response.json()
    assert payload["count"] == 5
    biosamples_ids = {biosample["id"] for biosample in payload["results"]}
    for expected_id in ("open1", "open2", "registered3", "controlled4", "controlled6"):
        assert expected_id in biosamples_ids
5,329,403
def param_11(i):
    """Returns parametrized Exp11Gate."""
    # `i` is forwarded directly as the gate's half_turns parameter.
    return Exp11Gate(half_turns=i)
5,329,404
def resolve_link(db: Redis[bytes], address: hash_t) -> hash_t:
    """Resolve any link recursively."""
    # Follow the "links_to" pointer stored for this address, if any.
    link = db.get(join(ARTEFACTS, address, "links_to"))
    if link is None:
        return address
    return resolve_link(db, hash_t(link.decode()))
5,329,405
def image_ppg(ppg_np):
    """
    Input:
        ppg_np: numpy array of PPG features
    Return:
        ax: matplotlib axes (canvas) info
        im: image (AxesImage) info
    """
    deps = ppg.DependenciesPPG()
    # Reduce to monophone PPGs and transpose so time runs along the x axis.
    monophone = ppg.reduce_ppg_dim(Matrix(ppg_np), deps.monophone_trans)
    monophone = monophone.numpy().T
    fig, ax = plt.subplots(figsize=(10, 6))
    im = ax.imshow(monophone, aspect="auto", origin="lower",
                   interpolation='none')
    return ax, im
5,329,406
def read_and_parse(filenames):
    """Read all apache log files (possibly gzipped) and yield each parsed
    bitsteam download event.

    Missing files are skipped with a message on stderr. Lines without a
    bitstream ID, or that the parser rejects, are silently skipped.
    """
    log_parser = create_log_parser()
    # Hoist the loop-invariant regex lookup out of the per-line loop.
    bitstream_re = bitstreamRE[getServerAPI()]
    for filename in filenames:
        print("parsing '{0}'".format(filename))
        if not os.path.exists(filename):
            print("failed to open '{0}': file do not exist !".format(filename),
                  file=sys.stderr)
            continue
        # Transparent gzip support based on file extension.
        if os.path.splitext(filename)[1] == '.gz':
            fp = gzip.open(filename, 'rt')
        else:
            fp = open(filename)
        # FIX: the original never closed fp, leaking one file handle per
        # input file; `with` guarantees closure even if the consumer stops
        # iterating.
        with fp:
            for line in fp:
                if not bitstream_re.search(line):
                    # if no bitstream ID, don't go any further
                    continue
                try:
                    p = log_parser(line)
                except apache_log_parser.LineDoesntMatchException:
                    print("failed to parse '{0}'".format(line), file=sys.stderr)
                    continue
                yield p
5,329,407
def create_homography_calibrator_launch(filename):
    """Creates launch file for homography calibrators"""
    template_name = 'homography_calibrator_launch.xml'
    machine_file = mct_utilities.file_tools.machine_launch_file
    params_file = mct_utilities.file_tools.homography_calibrator_params_file

    # Pair every rectified-image topic with its camera namespace
    # (first four path components of the topic).
    launch_list = []
    for topic in mct_introspection.find_camera_image_topics(transport='image_rect'):
        print(topic)
        namespace = '/'.join(topic.split('/')[:4])
        launch_list.append((namespace, topic))

    # Render the jinja2 template and write the xml launch file.
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir))
    xml_str = env.get_template(template_name).render(
        machine_file=machine_file,
        params_file=params_file,
        launch_list=launch_list,
    )
    with open(filename, 'w') as f:
        f.write(xml_str)
5,329,408
def main():
    """
    Fetch and write out HTML files around property values.

    Use requests.Session to keep a connection open to the domain and get a
    performance benefit, as per the documentation here:
    http://docs.python-requests.org/en/master/user/advanced/

    In case there is a poor connection or the server is slow to response,
    catch all request errors for a URI up to a configured number of attempts,
    waiting between attempts and finally raising the error if all attempts
    failed. This was implemented because ReadTimeout errors were causing the
    script to abort. Once request is complete, handle anything other than a
    HTTP success as an error, then skip to the next URI.

    @return: None

    @throws: AssertionError
    """
    today = datetime.date.today()
    session = requests.Session()
    # Counters reported in the finally block below.
    processed = skipped = errors = 0
    with open(config.METADATA_CSV_PATH) as f_in:
        reader = csv.DictReader(f_in)
        try:
            for row in reader:
                # Output filename encodes the area identity plus today's date.
                out_name = "{area_type}|{parent_name}|{name}|{area_id}|{date}"\
                    ".html".format(
                        area_type=row['area_type'],
                        parent_name=row['parent_name'],
                        name=row['name'],
                        area_id=row['area_id'],
                        date=str(today)
                    )
                out_path = os.path.join(config.HTML_OUT_DIR, out_name)
                # For suburbs, only fetch those which match configured
                # provinces.
                if (row['area_type'] == 'suburb'
                        and row['parent_name'] not in config.SUBURB_DETAIL_REQUIRED):
                    continue
                if config.SKIP_EXISTING and os.path.exists(out_path):
                    if config.SHOW_SKIPPED:
                        print("Skipping: {parent} | {name}".format(
                            name=row['name'], parent=row['parent_name']
                        ))
                    skipped += 1
                else:
                    # NOTE(review): this literal was reconstructed across a
                    # mangled line break in the source dump; confirm the
                    # original trailing whitespace/newline.
                    print("Processing: {parent} | {name} ... ".format(
                        name=row['name'], parent=row['parent_name']
                    ))
                    # Retry the request up to REQUEST_ATTEMPTS times; the
                    # final failure is re-raised.
                    for attempt in range(config.REQUEST_ATTEMPTS):
                        try:
                            resp = session.get(
                                row['uri'],
                                timeout=config.REQUEST_TIMEOUT,
                                headers=config.REQUEST_HEADERS
                            )
                            break
                        except requests.RequestException as e:
                            print("Failed attempt #{}".format(attempt+1))
                            if attempt + 1 == config.REQUEST_ATTEMPTS:
                                raise
                            else:
                                wait = config.REQUEST_ATTEMPT_WAIT
                                print(" sleeping {}s".format(wait))
                                time.sleep(wait)
                    if resp.status_code == 200:
                        with open(out_path, 'w') as f_out:
                            f_out.writelines(resp.text)
                        # Wait between requests, to avoid being possibly
                        # blocked by the server for doing requests too
                        # frequently.
                        time.sleep(config.REQUEST_SPACING)
                        processed += 1
                    else:
                        error = dict(
                            code=resp.status_code,
                            reason=resp.reason,
                            uri=row['uri']
                        )
                        print("Error: {code} {reason} {uri}".format(**error))
                        errors += 1
        finally:
            # Summary is printed even when an exception aborts the loop.
            print("\nProcessed: {}".format(processed))
            print("Skipped: {}".format(skipped))
            print("Errors: {}".format(errors))
5,329,409
def hook_plan_building():
    """When builing the plan we should not call directly specific methods
    from CrypTen and as such we return here some "dummy" responses only to
    build the plan.

    Each hooked crypten method is replaced by a stub returning an empty
    cryptensor; the original is preserved under a ``native_`` prefix.
    """
    dummy = lambda *args, **kwargs: crypten.cryptensor(th.zeros([]))
    for method_name in methods_to_hook:
        original = getattr(crypten, method_name)
        setattr(crypten, f"native_{method_name}", original)
        setattr(crypten, method_name, dummy)
5,329,410
def print_list_all_apps():
    """This function print list of all installed packages or error message
    if an error occurred

    :returns: None
    """
    apps, message, failed = get_list_all_apps()
    if failed:
        # print_error_and_exit may not return; the explicit return is a
        # safety net.
        print_error_and_exit(message)
        return
    print_message('\n'.join(apps))
5,329,411
def create_inputs():
    """
    Create inputs for `test_plot_spectra_for_qa_single_frame`.

    The raw files will be downloaded and saved inside the path stored in the
    `$DRAGONS_TEST/raw_inputs` directory. Processed files will be stored
    inside a new folder called "dragons_test_inputs". The sub-directory
    structure should reflect the one returned by the `path_to_inputs`
    fixture.
    """
    # Heavyweight imports are local so this helper only pays the cost when
    # inputs actually need to be (re)built.
    import glob
    import os

    from geminidr.gmos.primitives_gmos_longslit import GMOSLongslit
    from gempy.utils import logutils
    from recipe_system.reduction.coreReduce import Reduce
    from recipe_system.utils.reduce_utils import normalize_ucals

    # Work inside the test-inputs tree; restore cwd at the end.
    cwd = os.getcwd()
    path = f"./dragons_test_inputs/geminidr/core/{__file__.split('.')[0]}/"
    os.makedirs(path, exist_ok=True)
    os.chdir(path)
    os.makedirs("inputs/", exist_ok=True)

    for raw_list, bias_list, quartz_list, arc_list in single_aperture_data:
        # Skip datasets whose extracted inputs already exist.
        if all([os.path.exists(f"inputs/{s.split('.')[0]}_extracted.fits")
                for s in raw_list]):
            print("Skipping already created input.")
            continue

        raw_paths = [download_from_archive(f) for f in raw_list]
        bias_paths = [download_from_archive(f) for f in bias_list]
        quartz_paths = [download_from_archive(f) for f in quartz_list]
        arc_paths = [download_from_archive(f) for f in arc_list]

        # `cals` accumulates "processed_<kind>:<file>" strings consumed by
        # normalize_ucals for the later reductions.
        cals = []
        raw_ads = [astrodata.open(p) for p in raw_paths]
        data_label = raw_ads[0].data_label()
        print('Current working directory:\n {:s}'.format(os.getcwd()))

        # Optional master bias.
        if len(bias_paths):
            logutils.config(file_name='log_bias_{}.txt'.format(data_label))
            r = Reduce()
            r.files.extend(bias_paths)
            r.runr()
            master_bias = r.output_filenames.pop()
            cals.append(f"processed_bias:{master_bias}")
            del r
        else:
            master_bias = None

        # Optional master flat from the quartz lamp.
        if len(quartz_paths):
            logutils.config(file_name='log_quartz_{}.txt'.format(data_label))
            r = Reduce()
            r.files.extend(quartz_paths)
            r.ucals = normalize_ucals(cals)
            r.runr()
            master_quartz = r.output_filenames.pop()
            cals.append(f"processed_flat:{master_quartz}")
            del r
        else:
            master_quartz = None

        # Master arc (always built).
        logutils.config(file_name='log_arc_{}.txt'.format(data_label))
        r = Reduce()
        r.files.extend(arc_paths)
        r.ucals = normalize_ucals(cals)
        r.runr()
        master_arc = r.output_filenames.pop()

        # Skip the correction step entirely when the calibration is absent.
        do_cal_bias = 'skip' if master_bias is None else 'procmode'
        do_cal_flat = 'skip' if master_quartz is None else 'procmode'

        # Full longslit reduction of the science frames up to linearized
        # extracted spectra.
        logutils.config(file_name='log_{}.txt'.format(data_label))
        p = GMOSLongslit(raw_ads)
        p.prepare()
        p.addDQ(static_bpm=None)
        p.addVAR(read_noise=True)
        p.overscanCorrect()
        p.biasCorrect(do_cal=do_cal_bias, bias=master_bias)
        p.ADUToElectrons()
        p.addVAR(poisson_noise=True)
        p.flatCorrect(do_cal=do_cal_flat, flat=master_quartz)
        p.QECorrect(arc=master_arc)
        p.distortionCorrect(arc=master_arc)
        p.findApertures(max_apertures=3)
        p.skyCorrectFromSlit()
        p.traceApertures()
        p.extractSpectra()
        p.linearizeSpectra()

        # Remove intermediate calibration products from the work directory.
        [os.remove(s) for s in glob.glob("*_arc.fits")]
        [os.remove(s) for s in glob.glob("*_bias.fits")]
        [os.remove(s) for s in glob.glob("*_flat.fits")]
        [os.remove(s) for s in glob.glob("*_mosaic.fits")]

        os.chdir("inputs/")
        print("\n\n Writing processed files for tests into:\n"
              " {:s}\n\n".format(os.getcwd()))
        _ = p.writeOutputs()
        os.chdir("../")

    os.chdir(cwd)
5,329,412
def test_add_updated():
    """Test the function that add updated repo to list"""
    ctx = invoker.ctx()
    repo_name = 'repo123'
    cli_support.add_updated(ctx, repo_name)
    assert repo_name in ctx.obj['updated']
5,329,413
def is_hex_value(val):
    """
    Helper function that returns True if the provided value is an integer
    in hexadecimal format, False otherwise.

    FIX: the original only caught ValueError, so non-string input such as
    None or an int raised TypeError instead of returning False.
    """
    try:
        int(val, 16)
    except (TypeError, ValueError):
        return False
    return True
5,329,414
def create_cluster(*, cluster_name: str) -> Optional[Operation]:
    """Create a dataproc cluster """
    client = dataproc.ClusterControllerClient(
        client_options={"api_endpoint": dataproc_api_endpoint}
    )
    # One n1-standard-2 master and two n1-standard-2 workers.
    cluster_spec = {
        "project_id": project_id,
        "cluster_name": cluster_name,
        "config": {
            "config_bucket": config_bucket,
            "temp_bucket": temp_bucket,
            "master_config": {"num_instances": 1, "machine_type_uri": "n1-standard-2"},
            "worker_config": {"num_instances": 2, "machine_type_uri": "n1-standard-2"},
        },
    }
    logger.info("cluster: %s is creating now", cluster_name)
    request = {"project_id": project_id, "region": region, "cluster": cluster_spec}
    operation = client.create_cluster(request=request)
    logger.info("cluster: %s is created successfully", cluster_name)
    return operation
5,329,415
def gc_cache(seq: str) -> Cache:
    """Return the GC ratio of each range, between i and j, in the sequence

    Args:
        seq: The sequence whose tm we're querying

    Returns:
        Cache: A cache for GC ratio lookup
    """
    n = len(seq)
    # n x n matrix initialised to infinity (lower-left triangle stays inf).
    arr_gc = [[math.inf] * n for _ in range(n)]

    # Diagonal: 1.0 where the base is G/C, else 0.0.
    for i in range(n):
        if i == n - 1:
            # hackish: the last base copies the previous diagonal entry
            arr_gc[i][i] = arr_gc[i - 1][i - 1]
            continue
        arr_gc[i][i] = 1.0 if seq[i] in "GC" else 0.0
        if i == n - 2 and not arr_gc[i][i]:
            # don't ignore last pair
            arr_gc[i][i] = 1.0 if seq[i + 1] in "GC" else 0.0

    # Accumulate GC counts across the upper-right triangle.
    for i in range(n):
        for j in range(i + 1, n):
            arr_gc[i][j] = arr_gc[i][j - 1] + arr_gc[j][j]

    # Convert counts to ratios, rounded to one decimal place.
    for i in range(n):
        for j in range(i, n):
            arr_gc[i][j] = round(arr_gc[i][j] / (j - i + 1), 1)

    return arr_gc
5,329,416
def ParseVariableName(variable_name, args):
    """Parse a variable name or URL, and return a resource.

    Args:
      variable_name: The variable name.
      args: CLI arguments, possibly containing a config name.

    Returns:
      The parsed resource.
    """
    collection = 'runtimeconfig.projects.configs.variables'
    return _ParseMultipartName(variable_name, args, collection, 'variablesId')
5,329,417
def test_handle_success_request_success(provider_base_config, order_with_products):
    """Test request helper changes the order status to confirmed

    Also check it returns a success url with order number"""
    params = {
        'RESPA_UI_RETURN_URL': 'http%3A%2F%2F127.0.0.1%3A8000%2Fv1',
        'AUTHCODE': '905EDAC01C9E6921250C21BE23CDC53633A4D66BE7241A3B5DA1D2372234D462',
        'RETURN_CODE': '0',
        'ORDER_NUMBER': 'abc123',
        'SETTLED': '1',
    }
    request = RequestFactory().get('/payments/success/', params)
    payment_provider = create_bambora_provider(provider_base_config, request, UI_RETURN_URL)
    returned = payment_provider.handle_success_request()
    order_after = Order.objects.get(order_number=params['ORDER_NUMBER'])
    assert order_after.state == Order.CONFIRMED
    assert isinstance(returned, HttpResponse)
    assert 'payment_status=success' in returned.url
    assert 'reservation_id={}'.format(order_after.reservation.id) in returned.url
5,329,418
def index(request):
    """Home page"""
    # Render the static landing page template.
    return render(request, 'read_only_site/index.html')
5,329,419
def test_shorthand_inversion():
    """
    Test that the Matplotlib subtraction shorthand for composing and
    inverting transformations works.
    """
    def make_wcs(ctype, crval):
        # Two projections sharing the same pixel grid but different
        # sky centres.
        w = WCS(naxis=2)
        w.wcs.ctype = ctype
        w.wcs.crpix = [256.0, 256.0]
        w.wcs.cdelt = [-0.05, 0.05]
        w.wcs.crval = crval
        return w

    w1 = make_wcs(['RA---TAN', 'DEC--TAN'], [120.0, -19.0])
    w2 = make_wcs(['RA---SIN', 'DEC--SIN'], [235.0, +23.7])

    t1 = WCSWorld2PixelTransform(w1)
    t2 = WCSWorld2PixelTransform(w2)

    assert t1 - t2 == t1 + t2.inverted()
    assert t1 - t2 != t2.inverted() + t1
    assert t1 - t1 == IdentityTransform()
5,329,420
def parse_calculation_strings_OLD(args):
    """Form the calculation strings into numpy arrays.

    Each entry of ``args.calculations`` is a "<foreground>/<background>"
    string where each side is a run of single-digit characters, with 'x'
    acting as a placeholder for 0 (e.g. "1x2/345" -> [1,0,2] and [3,4,5]).
    ``args.calculations`` is replaced in place by the parsed
    (foreground, background) array pairs.

    Args:
        args: parsed CLI arguments with a ``calculations`` attribute.

    Returns:
        None; ``args.calculations`` is mutated in place.
    """
    def _digits(spec):
        # Each character is one value; 'x' stands in for 0.
        # FIX: np.fromstring(..., sep=",") is deprecated — build directly.
        return np.array([float(c) for c in spec.replace("x", "0")])

    calculations = []
    for calculation in args.calculations:
        parts = calculation.split("/")
        calculations.append((_digits(parts[0]), _digits(parts[1])))
    args.calculations = calculations
    return None
5,329,421
def plot_inflections():
    """Plot each record's sensor measurements with vertical lines at the
    detected inflection times, one subplot per search range, saving one PNG
    per (record, sensor).

    NOTE(review): reconstructed from a single-line dump — the exact nesting
    of the trailing plotting/saving statements should be confirmed against
    the original source.
    """
    study_list = retrieve_ref('study_list')
    sensor_list = retrieve_ref('sensor_list')
    segment_list = retrieve_ref('segment_list')
    searchRange = retrieve_ref('searchRange')
    for study in study_list:
        # NOTE(review): this outer `sensor` loop is shadowed by the inner
        # one below and its value is never used — possibly a leftover.
        for sensor in sensor_list:
            format_type = 'clean'
            clean_path = os.path.join(study, 'formatted', format_type)
            recordNames = os.listdir(clean_path)
            for sensor in sensor_list:
                for record in recordNames:
                    # One subplot per search range, two columns wide.
                    row_num, col_num, plot_num = len(searchRange), 2, 0
                    row_width_mulp, col_width_mulp = 7, 5
                    plot_width, plot_height = col_num*row_width_mulp, row_num*col_width_mulp
                    plt.figure(figsize=(plot_width, plot_height))
                    # NOTE: `range` here shadows the builtin within this loop.
                    for range in searchRange:
                        plot_num += 1
                        plt.subplot(row_num, col_num, plot_num)
                        format_type = 'clean'
                        segment = 'All'
                        path = [study, 'analyzed', 'inflections', 'all_times', str(range), record, segment]
                        path = build_path(path)
                        file = os.path.join(path, sensor + ".csv")
                        if os.path.isfile(file):
                            # Scatter the raw measurements against time.
                            source = os.path.join(study, 'formatted', format_type, record, segment, sensor + '.csv')
                            print('source = ' + source)
                            df = pd.read_csv(source)
                            for colName in df.columns:
                                if 'timeMinutes' in colName:
                                    timeMinutes = list(df[colName])
                                if 'meas' in colName:
                                    measList = list(df[colName])
                                    measMin = min(measList)
                                    measMax = max(measList)
                                    plt.scatter(timeMinutes, measList, label = str(colName))
                            # Keep only rows flagged as inflections.
                            df = pd.read_csv(file)
                            for colName in df.columns:
                                if 'inflection' in colName:
                                    df = df.drop(df[(df[colName] != 'Yes')].index)
                            timeInflections = list(df['timeMinutes'])
                            # Draw a vertical line spanning the measured
                            # range at each inflection time.
                            for time in timeInflections:
                                xx = np.linspace( time, time, 100)
                                yy = np.linspace( measMin, measMax, 100)
                                plt.plot(xx, yy, color=[0,.9,.6])
                            plt.xlabel('time Unix')
                            sensor_unit = retrieve_sensor_unit(sensor)
                            plt.ylabel(sensor + ' ' + sensor_unit )
                            # plt.legend(bbox_to_anchor=(1, 0.5, 0.3, 0.2), loc='upper left')
                            plt.title('Record = ' + str(record) + ' Range = ' + str(range) + ' seconds')
                            path = [study, 'plotted', 'inflection', 'each_record', record]
                            path = build_path(path)
                            file = os.path.join(path, sensor + ".png")
                            plt.savefig(file, bbox_inches='tight')
                            print('inflection plot saved ' + file)
5,329,422
def fixture_hdf5_scalar(request):
    """fixture_hdf5_scalar

    Builds a temporary HDF5 file holding one scalar dataset per dtype and
    returns (filename, loader function, expected values) for the test body.
    """
    import h5py  # pylint: disable=import-outside-toplevel
    tmp_path = tempfile.mkdtemp()
    filename = os.path.join(tmp_path, "test.h5")
    # One scalar dataset per supported dtype.
    with h5py.File(filename, 'w') as f:
        f.create_dataset('int8', data=np.int8(123))
        f.create_dataset('int16', data=np.int16(123))
        f.create_dataset('int32', data=np.int32(123))
        f.create_dataset('int64', data=np.int64(123))
        f.create_dataset('float32', data=np.float32(1.23))
        f.create_dataset('float64', data=np.float64(1.23))
        f.create_dataset('complex64', data=np.complex64(12+3j))
        f.create_dataset('complex128', data=np.complex128(12+3j))
        f.create_dataset('string', data=np.dtype('<S5').type("D123D"))
    args = filename

    def func(args):
        """func"""
        # Read each scalar dataset back through tfio.
        i8 = tfio.IOTensor.from_hdf5(args)('/int8')
        i16 = tfio.IOTensor.from_hdf5(args)('/int16')
        i32 = tfio.IOTensor.from_hdf5(args)('/int32')
        i64 = tfio.IOTensor.from_hdf5(args)('/int64')
        f32 = tfio.IOTensor.from_hdf5(args)('/float32')
        f64 = tfio.IOTensor.from_hdf5(args)('/float64')
        c64 = tfio.IOTensor.from_hdf5(args)('/complex64')
        c128 = tfio.IOTensor.from_hdf5(args)('/complex128')
        ss = tfio.IOTensor.from_hdf5(args)('/string')
        return [i8, i16, i32, i64, f32, f64, c64, c128, ss]

    # Expected values, in the same order func returns them.
    expected = [
        np.int8(123),
        np.int16(123),
        np.int32(123),
        np.int64(123),
        np.float32(1.23),
        np.float64(1.23),
        np.complex64(12+3j),
        np.complex128(12+3j),
        np.dtype('<S5').type("D123D"),
    ]

    def fin():
        # Remove the temporary HDF5 file after the test finishes.
        shutil.rmtree(tmp_path)
    request.addfinalizer(fin)

    return args, func, expected
5,329,423
def find_border(edge_list):
    """find_border(edge_list)

    Find the borders of a hexagonal graph

    Input
    -----
    edge_list : array
        List of edges of the graph

    Returns
    -------
    border_set : set
        Set of vertices of the border
    """
    G = nx.Graph([(edge_list[i, 0], edge_list[i, 1]) for i in range(len(edge_list))])
    # Vertex ids and how many edges each appears in (= its degree).
    vertices, counts = np.unique(np.reshape(edge_list, 2 * len(edge_list)),
                                 return_counts=True)
    # list of vertex of degree 2 — always on the border
    sec_edge_list = vertices[np.argwhere(counts == 2)]
    # list of vertex of degree 3 — on the border iff adjacent to a degree-2
    # vertex
    three_edge_list = vertices[np.argwhere(counts == 3)]
    sec = np.reshape(sec_edge_list, newshape=(len(sec_edge_list)))
    border_set = set(sec)
    # FIX: removed dead `inner_set` variable and redundant `[:]` tuple copy
    # from the original.
    for elem in three_edge_list:
        for neigh in G[elem[0]].keys():
            if len(G[neigh]) == 2:
                border_set.add(elem[0])
    return border_set
5,329,424
def get_all_files(credentials: Credentials, email: str) -> Set['DriveResult']:
    """Get all files shared with the specified email in the current
    half-year (January-June or July-December of the current year)"""
    # Create drive service with provided credentials.
    service = build('drive', 'v3', credentials=credentials, cache_discovery=False)
    date = datetime.date.today()
    collected = []
    page_token = None
    while True:
        # Fetch the next page; token becomes None on the final page.
        metadata, page_token = request_files(service, page_token, email, date)
        collected = collected + metadata
        print('\r{} files processed'.format(len(collected)), end='')
        if page_token is None:
            break
    return {
        DriveResult(
            student_email=entry['owners'][0]['emailAddress'],
            file_name=entry['name'],
            create_time=entry['createdTime'],
            url=entry['webViewLink'],
        )
        for entry in collected
    }
5,329,425
def _split_kwargs(model, kwargs, lookups=False, with_fields=False):
    """
    Split kwargs into fields which are safe to pass to create, and m2m tag
    fields, creating SingleTagFields as required.

    If lookups is True, TagFields with tagulous-specific lookups will also
    be matched, and the returned tag_fields will be a dict of tuples in the
    format ``(val, lookup)``

    The only tagulous-specific lookup is __exact

    For internal use only - likely to change significantly in future versions

    Returns a tuple of safe_fields, singletag_fields, tag_fields

    If with_fields is True, a fourth argument will be returned - a dict to
    look up Field objects from their names
    """
    safe_fields = {}
    singletag_fields = {}
    tag_fields = {}
    # Maps field name -> Field object for everything resolved below.
    field_lookup = {}
    for field_name, val in kwargs.items():
        # Check for lookup
        if lookups and "__" in field_name:
            orig_field_name = field_name
            field_name, lookup = field_name.split("__", 1)
            # Only one known lookup
            if lookup == "exact":
                try:
                    field = model._meta.get_field(field_name)
                except FieldDoesNotExist:
                    # Unknown - pass it on untouched
                    pass
                else:
                    if isinstance(field, TagField):
                        # Store for later
                        tag_fields[field_name] = (val, lookup)
                        field_lookup[field_name] = field
                        continue
            # Irrelevant lookup - no need to take special actions
            safe_fields[orig_field_name] = val
            continue
        # No lookup
        # Try to look up the field
        try:
            field = model._meta.get_field(field_name)
        except FieldDoesNotExist:
            # Assume it's something clever and pass it through untouched
            # If it's invalid, an error will be raised later anyway
            safe_fields[field_name] = val
            # Next field
            continue
        field_lookup[field_name] = field
        # Take special measures depending on field type
        if isinstance(field, SingleTagField):
            singletag_fields[field_name] = val
        elif isinstance(field, TagField):
            # Store for later
            if lookups:
                # Normalise to (value, lookup) tuples when lookups requested
                tag_fields[field_name] = (val, None)
            else:
                tag_fields[field_name] = val
        else:
            safe_fields[field_name] = val
    if with_fields:
        return safe_fields, singletag_fields, tag_fields, field_lookup
    return safe_fields, singletag_fields, tag_fields
5,329,426
def sliceResultToBytes(sr):
    """Copies a FLSliceResult to a Python bytes object, then releases the
    FLSliceResult.

    Returns None if the slice has a null buffer.
    """
    # `== None` (not `is None`) is deliberate: a cffi NULL pointer compares
    # equal to None but is not the None object.
    if sr.buf == None:
        return None
    # FIX: copy the buffer BEFORE releasing it. The original called
    # FLSliceResult_Release first and then read the freed memory
    # (use-after-free), contradicting its own docstring.
    b = bytes(ffi.buffer(sr.buf, sr.size))
    lib.FLSliceResult_Release(sr)
    return b
5,329,427
def cycle_dual(G, cycles, avg_fun=None):
    """
    Returns dual graph of cycle intersections, where each edge is defined
    as one cycle intersection of the original graph and each node is a
    cycle in the original graph.

    The general idea of this algorithm is:

    * Find all cycles which share edges by an efficient dictionary operation
    * Those edges which border on exactly two cycles are connected

    The result is a possibly disconnected version of the dual graph which
    can be further processed.

    The naive algorithm is O(n_cycles^2) whereas this improved algorithm is
    better than O(n_cycles) in the average case.

    ``avg_fun(values, weights)`` aggregates the conductivities of the edges
    shared by two cycles; defaults to the weighted average.
    """
    # FIX: `avg_fun == None` -> identity check; also removed commented-out
    # dead alternatives for the conductivity aggregation.
    if avg_fun is None:
        avg_fun = lambda c, w: average(c, weights=w)

    dual = nx.Graph()
    neighbor_cycles = find_neighbor_cycles(G, cycles)

    # Construct dual graph
    for ns in neighbor_cycles:
        # Add one dual node per cycle, positioned at the cycle's
        # center of mass.
        for c, n in ((cycles[n], n) for n in ns):
            dual.add_node(n, x=c.com[0], y=c.com[1], cycle=c,
                          external=False, cycle_area=c.area())

        # Edges bordering exactly two cycles become dual edges.
        if len(ns) == 2:
            a, b = ns
            c_a = cycles[a]
            c_b = cycles[b]
            sect = c_a.intersection(c_b)

            wts = [G[u][v]['weight'] for u, v in sect]
            conds = [G[u][v]['conductivity'] for u, v in sect]

            wt = sum(wts)
            cond = avg_fun(conds, wts)

            dual.add_edge(a, b, weight=wt, conductivity=cond,
                          intersection=sect)

    return dual
5,329,428
def main():
    """Entry point"""
    # Refuse to clobber local modifications.
    if check_for_unstaged_changes(TARGET_FILE):
        print("ERROR: You seem to have unstaged changes to %s that would be overwritten." % (TARGET_FILE))
        print("Please clean, commit, or stash them before running this script.")
        return 1

    target_dir = path.dirname(TARGET_FILE)
    if not path.exists(target_dir):
        os.makedirs(target_dir)
    shutil.copyfile(ORIGIN_FILE, TARGET_FILE)
    print("Bootstrapping optdata is complete.")

    # Tell the user where each tool/arch combination expects its files.
    for tool in TOOL_LIST:
        for arch in ARCH_LIST:
            optdata_dir = get_optdata_dir(tool, arch)
            print(" * Copy %s %s files into: %s" % (arch, tool, optdata_dir))

    print("NOTE: Make sure to add 'skiprestoreoptdata' as a switch on the build command line!")
    return 0
5,329,429
def resource_teardown():
    """Remove test artifacts: the experiments folder and the dataset's
    internals folder."""
    dataset = Dataset('tests/test_data/test_dataset.csv')
    for folder in ("tests/experiments", dataset._internals_folder_path):
        if os.path.exists(folder):
            shutil.rmtree(folder)
5,329,430
def log_results(url):
    """
    Generate static result metadata, which is rendered in the Kubeflow
    Pipelines UI.
    Refer to
    https://elyra.readthedocs.io/en/latest/recipes/visualizing-output-in-the-kfp-ui.html
    for details.
    """
    # A single inline-markdown output entry pointing at the archive URL.
    markdown_output = {
        'storage': 'inline',
        'source': '# Data archive URL: {}'.format(url),
        'type': 'markdown',
    }
    # Save metadata to the well-known file name KFP looks for.
    with open('mlpipeline-ui-metadata.json', 'w') as f:
        json.dump({'outputs': [markdown_output]}, f)
5,329,431
def get_timebucketedlog_reader(log, event_store):
    """Construct a TimebucketedlogReader for the given log and event store.

    :rtype: TimebucketedlogReader
    """
    return TimebucketedlogReader(log=log, event_store=event_store)
5,329,432
def get_database_name(url):
    """Return a database name in a URL.

    Example::

        >>> get_database_name('http://foobar.com:5984/testdb')
        'testdb'

    :param str url: The URL to parse.
    :rtype: str
    """
    # The database name is the last path component of the URL.
    name = compat.urlparse(url).path.strip("/").split("/")[-1]
    if "%" in name:
        # Already percent-encoded: avoid re-encoding the name.
        return name
    return encode_uri_component(name)
5,329,433
def get_tags():
    """Return the tags relevant to the current user.

    We want tags associated with the user, so we fetch all of the user's
    posts and collect every tag attached to them.
    :return: JSON array of unique tag names
    """
    result_tags = []

    # Gather the tag names from every post of the given user.
    def append_tag(user_posts):
        tmp = []
        for post in user_posts:
            for tag in post.tags.all():
                tmp.append(tag.tag_name)
        return tmp

    # If a user is logged in, use that user's posts ...
    if g.get('current_user', None):
        user_posts_ = g.current_user.posts.all()
        result_tags.extend(append_tag(user_posts_))
    # ... otherwise fall back to the default user (id 1).
    else:
        user = User.query.get(1)
        result_tags.extend(append_tag(user.posts.all()))
    # Deduplicate before returning.
    result_tags = list(set(result_tags))
    return jsonify(result_tags)
5,329,434
def get_selinux_modules():
    """
    Read all custom SELinux policy modules from the system

    Returns 3-tuple (modules, retain_rpms, install_rpms)
    where "modules" is a list of "SELinuxModule" objects,
    "retain_rpms" is a list of RPMs that should be retained
    during the upgrade and "install_rpms" is a list of RPMs
    that should be installed during the upgrade
    """
    modules = list_selinux_modules()
    semodule_list = []
    # list of rpms containing policy modules to be installed on RHEL 8
    retain_rpms = []
    install_rpms = []

    # modules need to be extracted into cil files
    # cd to /tmp/selinux and save working directory so that we can return there

    # clear working directory
    rmtree(WORKING_DIRECTORY, ignore_errors=True)

    try:
        wd = os.getcwd()
        os.mkdir(WORKING_DIRECTORY)
        os.chdir(WORKING_DIRECTORY)
    except OSError:
        api.current_logger().warning("Failed to access working directory! Aborting.")
        return ([], [], [])

    for (name, priority) in modules:
        if priority == "200":
            # Module on priority 200 was installed by an RPM
            # Request $name-selinux to be installed on RHEL8
            retain_rpms.append(name + "-selinux")
            continue
        if priority == "100":
            # module from selinux-policy-* package - skipping
            continue
        # extract custom module and save it to SELinuxModule object
        module_file = name + ".cil"
        try:
            run(['semodule', '-c', '-X', priority, '-E', name])
            # check if the module contains invalid types and remove them if so
            removed = check_module(module_file)

            # get content of the module
            try:
                with open(module_file, 'r') as cil_file:
                    module_content = cil_file.read()
            except OSError as e:
                api.current_logger().warning("Error reading %s.cil : %s", name, str(e))
                continue

            semodule_list.append(SELinuxModule(
                name=name,
                priority=int(priority),
                content=module_content,
                removed=removed
            )
            )
        except CalledProcessError:
            api.current_logger().warning("Module %s could not be extracted!", name)
            continue
        # rename the cil module file so that it does not clash
        # with the same module on different priority
        try:
            os.rename(module_file, "{}_{}".format(name, priority))
        except OSError:
            api.current_logger().warning("Failed to rename module file %s to include priority.", name)

    # this is necessary for check if container-selinux needs to be installed
    try:
        run(['semanage', 'export', '-f', 'semanage'])
    except CalledProcessError:
        pass

    # Check if modules contain any type, attribute, or boolean contained in container-selinux and install it if so
    # This is necessary since container policy module is part of selinux-policy-targeted in RHEL 7 (but not in RHEL 8)
    try:
        # grep exits non-zero when nothing matches, raising CalledProcessError
        run(['grep', '-w', '-r', '-E', "|".join(CONTAINER_TYPES)], split=False)
        # Request "container-selinux" to be installed since container types where used in local customizations
        # and container-selinux policy was removed from selinux-policy-* packages
        install_rpms.append("container-selinux")
    except CalledProcessError:
        # expected, ignore exception
        pass

    # Return to the original directory and clean up the scratch space.
    try:
        os.chdir(wd)
    except OSError:
        pass
    rmtree(WORKING_DIRECTORY, ignore_errors=True)

    return (semodule_list, list(set(retain_rpms)), list(set(install_rpms)))
5,329,435
def a_star_search(graph, start, goal):
    """Runs an A* search on the specified graph to find a path from the
    ''start'' node to the ''goal'' node.
    Returns a list of nodes specifying a minimal path between the two nodes.
    If no path exists (disconnected components), returns an empty list.
    """
    known_nodes = graph.get_all_node_ids()
    # Validate endpoints before searching (start checked first, as before).
    for node in (start, goal):
        if node not in known_nodes:
            raise NonexistentNodeError(node)

    came_from, cost_so_far, goal_reached = _a_star_search_internal(graph, start, goal)
    if not goal_reached:
        return []
    path = reconstruct_path(came_from, start, goal)
    path.reverse()
    return path
5,329,436
def output_format_option(default: OutputFormat = OutputFormat.TREE):
    """
    A ``click.option`` for specifying a format to use when outputting data.

    Args:
        default (:class:`~ape.cli.choices.OutputFormat`): Defaults to ``TREE`` format.
    """
    def _to_output_format(ctx, param, value):
        # Normalize the raw CLI string into an OutputFormat member.
        return OutputFormat(value.upper())

    return click.option(
        "--format",
        "output_format",
        type=output_format_choice(),
        default=default.value,
        callback=_to_output_format,
    )
5,329,437
def compute_errors(u_e, u):
    """Compute various measures of the error u - u_e, where u is a finite
    element Function and u_e is an Expression.

    Adapted from https://fenicsproject.org/pub/tutorial/html/._ftut1020.html
    """
    print('u_e',u_e.ufl_element().degree())
    # Get function space
    V = u.function_space()

    # Explicit computation of L2 norm
    error = (u - u_e)**2*dl.dx
    E1 = np.sqrt(abs(dl.assemble(error)))

    # Explicit interpolation of u_e onto the same space as u
    u_e_ = dl.interpolate(u_e, V)
    error = (u - u_e_)**2*dl.dx
    E2 = np.sqrt(abs(dl.assemble(error)))

    # Explicit interpolation of u_e to higher-order elements.
    # u will also be interpolated to the space Ve before integration
    Ve = dl.FunctionSpace(V.mesh(), 'P', 5)
    u_e_ = dl.interpolate(u_e, Ve)
    # NOTE(review): this integrand uses u_e rather than the interpolated
    # u_e_ just computed, so E3 duplicates E1 and does not match its
    # dictionary label below — looks like u_e_ was intended; confirm
    # against the FEniCS tutorial this was adapted from.
    error = (u - u_e)**2*dl.dx
    E3 = np.sqrt(abs(dl.assemble(error)))

    # Infinity norm based on nodal values
    u_e_ = dl.interpolate(u_e, V)
    E4 = abs(u_e_.vector().get_local() - u.vector().get_local()).max()

    # L2 norm
    E5 = dl.errornorm(u_e, u, norm_type='L2', degree_rise=3)

    # H1 seminorm
    E6 = dl.errornorm(u_e, u, norm_type='H10', degree_rise=3)

    # Collect error measures in a dictionary with self-explanatory keys
    errors = {'u - u_e': E1,
              'u - interpolate(u_e, V)': E2,
              'interpolate(u, Ve) - interpolate(u_e, Ve)': E3,
              'infinity norm (of dofs)': E4,
              'L2 norm': E5,
              'H10 seminorm': E6}

    return errors
5,329,438
def save_reg(reg_list, data_name):
    """ Save the regression results for premiums and claims data, respectively.

    Rounds the coefficient entries (indices 0, 2, 5, 7) to 6 decimals and the
    R-squared entries (indices 4, 9) to 3 decimals, then splits the 10-element
    list into a premiums half and a claims half and writes each to a CSV in
    OUT_TABLES.

    Args:
        reg_list (list): a list consisting of the results of all regressions
            (exactly 10 elements, ordered a, ta, m, tm, Rsp, b, tb, n, tn, Rsq)
        data_name (str): name of tables (see **wscript** file)
    """
    # Coefficient estimates: keep 6 decimal places.
    for idx in [0, 2, 5, 7]:
        reg_list[idx] = round(reg_list[idx], 6)
    # R-squared values: keep 3 decimal places.
    for idx in [4, 9]:
        reg_list[idx] = round(reg_list[idx], 3)
    result_df = pd.DataFrame(reg_list, columns=['results']).T
    result_df.columns = ['a', 'ta', 'm', 'tm', 'Rsp', 'b', 'tb', 'n', 'tn', 'Rsq']
    result_df.index.name = 'type'
    # Split at column 5: first half is the premiums regression, second the claims one.
    dfs = np.split(result_df, [5], axis=1)
    reg_p = dfs[0].T.rename(columns={'results': 'Premiums Data'}).T
    reg_q = dfs[1].T.rename(columns={'results': 'Claims Data'}).T
    reg_p.to_csv(ppj('OUT_TABLES', '{}_prem_reg.csv'.format(data_name)),
                 index=True, sep=',')
    reg_q.to_csv(ppj('OUT_TABLES', '{}_clam_reg.csv'.format(data_name)),
                 index=True, sep=',')
5,329,439
def list_to_str(input_list, delimiter=","):
    """
    Concatenate the elements of a list into a single string.

    Elements that are already strings are used as-is; everything else is
    converted with ``repr``.

    Parameters
    ----------
    input_list : list
        List with elements to be joined.
    delimiter : String, optional, default ','.
        The separator used between elements.

    Returns
    -------
    String
        The elements of ``input_list`` joined by ``delimiter``.
    """
    pieces = (item if isinstance(item, str) else repr(item) for item in input_list)
    return delimiter.join(pieces)
5,329,440
def learner_loop(create_env_fn,
                 create_agent_fn,
                 create_optimizer_fn,
                 config: learner_config.TrainingConfig,
                 settings: utils.MultiHostSettings,
                 action_distribution_config=None):
  """Main learner loop.

  Builds a Learner from the supplied factories, prepares it, and runs the
  training loop (blocking call).

  Args:
    create_env_fn: Callable that must return a newly created environment. The
      callable takes the task ID as argument - an arbitrary task ID of 0 will
      be passed by the learner. The returned environment should follow GYM's
      API. It is only used for infering tensor shapes. This environment will
      not be used to generate experience.
    create_agent_fn: Function that must create a new tf.Module with the neural
      network that outputs actions and new agent state given the environment
      observations and previous agent state. See dmlab.agents.ImpalaDeep for
      an example. The factory function takes as input the environment action
      and observation spaces and a parametric distribution over actions.
    create_optimizer_fn: Function that takes the final iteration as argument
      and must return a tf.keras.optimizers.Optimizer and a
      tf.keras.optimizers.schedules.LearningRateSchedule.
    config: Training config.
    settings: Settings for training and inference strategies. You can set this
      to avoid re-initialization of the TPU system. If not set, we use the
      settings as returned by utils.init_learner.
    action_distribution_config: configuration for ParametricDistribution over
      actions; the actual distribution also depends on the action spec
      retrieved from the environment. If None (and actions are continuous),
      uses the setting parametric_distribution.continuous_action_config().
  """
  learner = Learner(
      create_env_fn,
      create_agent_fn,
      create_optimizer_fn,
      settings=settings,
      config=config,
      action_distribution_config=action_distribution_config)
  learner.prepare_for_run()
  learner.run_training()
5,329,441
def pytest_runtest_setup(item):
    """Set the number of openmp threads based on the number of workers
    xdist is using to prevent oversubscription.

    Parameters
    ----------
    item : pytest item
        item to be processed
    """
    try:
        xdist_worker_count = int(os.environ['PYTEST_XDIST_WORKER_COUNT'])
    except KeyError:
        # raises when pytest-xdist is not installed: nothing to rebalance.
        return
    openmp_threads = _openmp_effective_n_threads()
    # Divide the available OpenMP threads evenly between xdist workers,
    # keeping at least one thread per worker.
    threads_per_worker = max(openmp_threads // xdist_worker_count, 1)
    threadpool_limits(threads_per_worker, user_api='openmp')
5,329,442
def load_data_and_labels(file):
    """
    Loads data from taobao crawler files, uses jieba to split each comment
    into words and generates numeric labels.

    Reads ``<file>.json`` (one JSON object per line with 'comment' and
    'label' keys) and writes the tokenised sentences and labels to
    ``<file>_text.pkl`` and ``<file>_label.pkl`` as pickle files.
    """
    # Fixed mapping from category name to numeric class label.
    mapping = {u'书包': 0, u'T恤': 1, u'阔腿裤': 2, u'运动鞋': 3}
    sentences = []
    labels = []
    filename = file + '.json'
    for line in codecs.open(filename, 'rb', encoding='utf8'):
        item = json.loads(line)
        text = item['comment']
        label = mapping[item['label']]
        sentence = list(jieba.cut(text))
        sentences.append(sentence)
        labels.append(label)
    out_label = file + '_label.pkl'
    out_text = file + '_text.pkl'
    # Pickle streams are binary: files must be opened in 'wb' mode
    # (text mode 'w' raises TypeError under Python 3). Context managers
    # also ensure the handles are closed.
    with open(out_text, 'wb') as f:
        pickle.dump(sentences, f)
    with open(out_label, 'wb') as f:
        pickle.dump(labels, f)
5,329,443
def strategy_supports_no_merge_call():
  """Returns if the current `Strategy` can operate in pure replica context.

  True when no strategy is active, or when the active strategy has opted out
  of merge calls (checked via the private `_use_merge_call` extended API).
  """
  if not distribution_strategy_context.has_strategy():
    # No strategy at all: trivially safe to run in replica context.
    return True
  strategy = distribution_strategy_context.get_strategy()
  return not strategy.extended._use_merge_call()  # pylint: disable=protected-access
5,329,444
def process_recursively(subtree, key_name, new_value, skip_if=None):
    """
    Processes value with given key in the subtree.
    If new_value is None, removes key from tree, otherwise replaces old
    value with the new one. If skip_if is specified, it should be
    function(value) that returns True if this specific value should not be
    touched. Continues recursively for internal subtrees.
    """
    # Snapshot the keys so deletions during iteration are safe.
    for name in list(subtree):
        child = subtree[name]
        if name == key_name:
            if skip_if is not None and skip_if(child):
                continue
            if new_value is None:
                subtree.pop(name)
            else:
                subtree[name] = new_value
        elif isinstance(child, dict):
            process_recursively(child, key_name, new_value, skip_if=skip_if)
5,329,445
def is_group(obj):
    """Returns true if the object is a h5py-like group (including files,
    which act as the root group)."""
    return get_h5py_kind(obj) in ("file", "group")
5,329,446
def recovery_data():
    """
    This function recovers data from Data Base, if server was temporarily disabled.

    Rebuilds the in-memory ``communications`` mapping from the Contact table
    (both directions of each pair) and re-registers every user whose status
    is 0 via ``add_users``.
    """
    global communications
    s = session()
    for i in s.query(Contact).all():
        # NOTE(review): assumes both users referenced by a Contact row still
        # exist; a missing row would make first/second None — confirm.
        first = s.query(User).filter(User.id == i.userID).first()
        second = s.query(User).filter(User.id == i.userToID).first()
        # Register the conversation in both directions.
        communications[i.userID] = {
            "UserTo": second.id,
            "UserName": second.username,
            "like": second.like,
        }
        communications[i.userToID] = {
            "UserTo": first.id,
            "UserName": first.username,
            "like": first.like,
        }
    # Re-add users that were waiting (status == 0) when the server stopped.
    for i in s.query(User).filter(User.status == 0).all():
        add_users(user_chat_id=i.id, username=i.username)
    s.close()
5,329,447
def cli():
    """Run DropSeq Data Analysis.

    Entry-point group; subcommands are registered elsewhere, so the body is
    intentionally empty.
    """
    pass
5,329,448
def monotonic():
    """ Example 2.2.

    Plots four example functions side by side on [-1, 1]: a line, a shifted
    reciprocal, a piecewise-constant decreasing curve, and a cubic flattened
    around zero. Axes spines are moved to pass through the origin.
    """
    # Make the figure twice as wide and half as tall as the default.
    size = mpl.rcParams['figure.figsize']
    size[0] *= 2
    size[1] *= 0.5
    fig, axs = plt.subplots(1, 4, figsize=size)
    x = np.linspace(-1, 1, 1000)
    ys = []
    ys.append(x * 0.7 + 0.3)
    ys.append(1/2/(x + 2) - 0.3)
    # Decreasing line clipped to a constant for x > 0.
    _y = - x - 0.5
    _y[x > 0] = -0.5
    ys.append(_y)
    # Cubic-like curve forced flat (zero) on (-0.4, 0.4).
    _y = (x**2 - 0.16)*x
    _y[(-0.4 < x) & (x < 0.4)] = 0
    ys.append(_y)
    for ax, y in zip(axs, ys):
        # Draw the axes through the origin and hide the top/right spines.
        ax.spines['left'].set_position('zero')
        ax.spines['bottom'].set_position('zero')
        ax.spines['right'].set_color('none')
        ax.spines['top'].set_color('none')
        ax.plot(x, y, 'r')
5,329,449
def test_app_access_with_app(test_app):
    """Test that Extension.app returns the provided app."""
    extension = Extension(test_app)
    # The app passed at construction time should be exposed unchanged.
    assert extension.app == test_app
5,329,450
def analyze_page(page_url):
    """ Analyzes the content at page_url and returns a list of the highest
    weighted words/phrases and their weights.

    Fetches the page, tokenises the direct text of each non-ignored tag,
    lemmatises words, and accumulates weighted counts for single words and
    2- to 5-word phrases. Words appearing in the URL get an extra boost.
    Returns the top items (phrases that are substrings of other top phrases
    are dropped), or None if the page could not be fetched.
    """
    html = fetch_html(page_url)
    if not html:
        return
    soup = BeautifulSoup(html, "html.parser")
    word_counts = {}
    url_words = words_in_url(page_url)
    stop_words = get_stop_words('english')
    words_to_add = ['like', '...']
    stop_words = stop_words + words_to_add
    ignore_tags = ["script", "img", "meta", "style"]  # html tags to ignore
    weights = {'title': 15, 'div': .5, 'a': .3, 'span': .5, "link": .2, 'url': 22, \
        'two' : 3, 'three': 3, 'four': 5, 'five': 5}  # adjust weights here
    lemma = WordNetLemmatizer()
    for tag in soup.find_all():
        if tag.name not in ignore_tags:
            words = tag.find(text=True, recursive=False)
            # with bs4, recursive = False means we will not be double counting tags
            if words:
                words = words.split()
                words = [w for w in words if w not in stop_words]  # remove common stop words
                words = [w for w in words if len(w) > 1]  # ignore single character words
                for index, word in enumerate(words):
                    word_lower = lemma.lemmatize(word.lower())  # lemmatize/stem words
                    multiplier = 1
                    if tag.name in weights:  # assign weight based on HTML tag
                        multiplier = weights[tag.name]
                    if word_lower in word_counts:
                        word_counts[word_lower] = word_counts[word_lower] + (1 * multiplier)
                    else:
                        word_counts[word_lower] = 1 * multiplier
                    # NOTE(review): for each n-gram below, the first sighting is
                    # initialised with 1 * multiplier while later sightings add
                    # weights['two'..'five'] * multiplier — confirm the asymmetry
                    # is intentional.
                    if index < (len(words) - 1):  # two word phrase
                        two_word = word_lower + ' ' + lemma.lemmatize((words[index + 1]).lower()).strip()
                        two_word = two_word.strip()
                        if two_word != word_lower:
                            if two_word in word_counts:
                                word_counts[two_word] = word_counts[two_word] + (weights['two'] * multiplier)
                            else:
                                word_counts[two_word] = 1 * multiplier
                    if index < (len(words) - 2):  # three word phrase
                        two_word = word_lower + ' ' + lemma.lemmatize((words[index + 1]).lower()).strip() \
                            + ' ' + lemma.lemmatize((words[index + 2]).lower()).strip()
                        two_word = two_word.strip()
                        if two_word != word_lower:
                            if two_word in word_counts:
                                word_counts[two_word] = word_counts[two_word] + (weights['three'] * multiplier)
                            else:
                                word_counts[two_word] = 1 * multiplier
                    if index < (len(words) - 3):  # four word phrase
                        two_word = word_lower + ' ' + lemma.lemmatize((words[index + 1]).lower()).strip() \
                            + ' ' + lemma.lemmatize((words[index + 2]).lower()).strip() \
                            + ' ' + lemma.lemmatize((words[index + 3]).lower()).strip()
                        two_word = two_word.strip()
                        if two_word != word_lower:
                            if two_word in word_counts:
                                word_counts[two_word] = word_counts[two_word] + (weights['four'] * multiplier)
                            else:
                                word_counts[two_word] = 1 * multiplier
                    if index < (len(words) - 4):  # five word phrase
                        two_word = word_lower + ' ' + lemma.lemmatize((words[index + 1]).lower()).strip() \
                            + ' ' + lemma.lemmatize((words[index + 2]).lower()).strip() \
                            + ' ' + lemma.lemmatize((words[index + 3]).lower()).strip() \
                            + ' ' + lemma.lemmatize((words[index + 4]).lower()).strip()
                        two_word = two_word.strip()
                        if two_word != word_lower:
                            if two_word in word_counts:
                                word_counts[two_word] = word_counts[two_word] + (weights['five'] * multiplier)
                            else:
                                word_counts[two_word] = 1 * multiplier
    for word in url_words:  # add weight for words in the url string
        if word in word_counts:
            word_counts[word] = word_counts[word] + weights['url']

    def determine(x, top_25):
        """ Helper function for removing phrases that are substrings of other phrases """
        if len(x[0].split()) > 1:
            # print(x[0])
            for i in top_25:
                if x[0] in i[0] and x[0] != i[0]:
                    return False
        return True

    top_25 = list(reversed(sorted(word_counts.items(), key=lambda x: x[1])[-25:]))  # grab highest 25 weighted items
    final_list = [x for x in top_25 if determine(x, top_25)]  # remove phrases that are substrings of other phrases
    return final_list
5,329,451
def load_config_from_paths(config_paths: Iterable[str], strict: bool = False) -> List[dict]:
    """
    Load configuration from paths containing \*.yml and \*.json files.

    As noted in README.config, .json will take precedence over .yml files.

    :param config_paths: Path to \*.yml and \*.json config files.
    :param strict: Set to true to error if the file is not found.
    :return: A list of configs in increasing order of precedence.
    """
    # sorted() is stable and False < True, so .json paths end up after the
    # .yml paths — giving .json higher precedence.
    ordered = sorted(config_paths, key=lambda p: p.endswith(".json"))
    return [load_config_from_file(p, strict) for p in ordered]
5,329,452
def submit_simulation(sim_dir, job_file):
    """Submit a LAMMPS simulation to the Slurm scheduler.

    Runs ``sbatch job_file`` with ``sim_dir`` as the working directory.
    The return code of sbatch is not checked.
    """
    # (removed a dead trailing `pass` statement)
    subprocess.run(['sbatch', job_file], cwd=sim_dir)
5,329,453
def sort_flats(flats_unsorted: List[arimage.ARImage]):
    """
    Sort flat images into a dictionary with "filter" as the key.

    Returns None when no flats are supplied; otherwise a dict mapping each
    filter name to the list of flats taken with that filter.
    """
    # Idiomatic emptiness check (was: `if bool(flats_unsorted) == False`).
    if not flats_unsorted:
        return None
    flats = {}
    logger.info("Sorting flat images by filter")
    for flat in flats_unsorted:
        fl = flat.filter
        if fl not in flats:
            # Found a flat with a new filter: start a new bucket.
            logger.info("Found a flat with filter=" + fl)
            flats[fl] = []
        flats[fl].append(flat)
    return flats
5,329,454
def run_in_parallel(function, list_of_kwargs_to_function, num_workers):
  """Run a function on a list of kwargs in parallel with ThreadPoolExecutor.

  Adapted from code by mlbileschi.

  Args:
    function: a function.
    list_of_kwargs_to_function: list of dictionary from string to argument
      value. These will be passed into `function` as kwargs.
    num_workers: int.

  Returns:
    list of return values from function, in submission order.

  Raises:
    ValueError: if num_workers < 1, or the first exception raised by any job.
  """
  if num_workers < 1:
    raise ValueError(
        'Number of workers must be greater than 0. Was {}'.format(num_workers))

  with concurrent.futures.ThreadPoolExecutor(num_workers) as pool:
    logging.info(
        'Adding %d jobs to process pool to run in %d parallel '
        'threads.', len(list_of_kwargs_to_function), num_workers)
    pending = [pool.submit(function, **job)
               for job in list_of_kwargs_to_function]
    # Surface the first failure as soon as it completes.
    for done in concurrent.futures.as_completed(pending):
      if done.exception():
        raise done.exception()
    # Collect results in the order the jobs were submitted.
    return [job.result() for job in pending]
5,329,455
def upload(host, key, path):
    """
    Upload one file at a time.

    Args:
        host: base URL of the server.
        key: API key appended to the upload endpoint.
        path: (directory, filename) pair; the process working directory is
            changed to the directory before posting the file.

    Returns:
        The 'id' field of the server's JSON response.

    Raises:
        requests.HTTPError: if the upload request fails.
    """
    url = urljoin(host, 'api/files?key=' + key)
    os.chdir(path[0])
    # Context manager ensures the file handle is closed even if the
    # request raises (the original leaked the handle).
    with open(path[1], 'rb') as f:
        r = requests.post(url, files={"File": f})
    r.raise_for_status()
    return r.json()['id']
5,329,456
def show_project(project_id):
    """return a single project formatted according to Swagger spec

    Looks up the project with at least hidden-level access; returns the
    project-not-found error response when the id is unknown.
    """
    try:
        project = annif.project.get_project(
            project_id, min_access=Access.hidden)
    except ValueError:
        # get_project signals an unknown id with ValueError.
        return project_not_found_error(project_id)
    return project.dump()
5,329,457
def pollard_rho(n: int, e: int, seed: int = 2) -> int:
    """Pollard's rho attack on an RSA public key.

    Factors the modulus, derives Euler's totient and returns the private
    exponent d matching the public exponent e.

    n - modulus of the public key
    e - public exponent of the public key
    seed - starting value for the cycle-detection sequence; bumped on failure
    """
    a, b = seed, seed
    p = 1
    while (p == 1):
        # Floyd cycle detection: `a` advances one step, `b` two steps.
        a = (pow(a, 2) + 1) % n
        # Reduce modulo n after EVERY step — the original skipped the
        # reduction on the first of b's two steps, letting the integer
        # grow unboundedly before the second squaring.
        b = (pow(b, 2) + 1) % n
        b = (pow(b, 2) + 1) % n
        p = gcd(abs(a - b) % n, n)
    if p == n:
        # Cycle closed without revealing a factor: retry with another seed.
        return pollard_rho(n, e, seed + 1)
    q = n // p
    phi = (p - 1) * (q - 1)
    d = find_inverse(e, phi)
    return d
5,329,458
def _url_from_string(url): """ Generate actual tile url from tile provider definition or template url. """ if "tileX" in url and "tileY" in url: warnings.warn( "The url format using 'tileX', 'tileY', 'tileZ' as placeholders " "is deprecated. Please use '{x}', '{y}', '{z}' instead.", FutureWarning, ) url = ( url.replace("tileX", "{x}").replace("tileY", "{y}").replace("tileZ", "{z}") ) return {"url": url}
5,329,459
def display_es(
    data: pd.DataFrame,
    ticker: str = "",
    use_mean: bool = False,
    distribution: str = "normal",
    percentile: float = 0.999,
    portfolio: bool = False,
):
    """Displays expected shortfall

    Computes model and historical ES at the 90/95/99/`percentile` levels and
    prints them as a rich table titled with the ticker and distribution name.

    Parameters
    ----------
    data: pd.DataFrame
        stock dataframe
    use_mean:
        if one should use the stocks mean return
    ticker: str
        ticker of the stock
    distribution: str
        choose distribution to use: logistic, laplace, normal
    percentile: int
        es percentile
    portfolio: bool
        If the data is a portfolio
    """
    es_list, hist_es_list = qa_model.get_es(
        data, use_mean, distribution, percentile, portfolio
    )
    str_hist_label = "Historical ES:"
    # Pick the column header / title prefix for the chosen distribution.
    if distribution == "laplace":
        str_es_label = "Laplace ES:"
        str_title = "Laplace "
    elif distribution == "student_t":
        str_es_label = "Student-t ES"
        str_title = "Student-t "
    elif distribution == "logistic":
        str_es_label = "Logistic ES"
        str_title = "Logistic "
    else:
        str_es_label = "ES:"
        str_title = ""
    if ticker != "":
        ticker += " "
    data_dictionary = {str_es_label: es_list, str_hist_label: hist_es_list}
    data = pd.DataFrame(
        data_dictionary, index=["90.0%", "95.0%", "99.0%", f"{percentile*100}%"]
    )
    print_rich_table(
        data,
        show_index=True,
        headers=list(data.columns),
        title=f"[bold]{ticker}{str_title}Expected Shortfall[/bold]",
        floatfmt=".4f",
    )
    console.print("")
5,329,460
def get_reverse_dns(ip_address: str) -> str:
    """Does a reverse DNS lookup and returns the primary hostname, or an
    empty string when the lookup fails or the input is invalid."""
    try:
        host = socket.gethostbyaddr(ip_address)[0]
    except (socket.herror, socket.gaierror, TypeError, IndexError):
        return ""
    return host if host else ""
5,329,461
def update_sca():
    """
    根据SCA数据库,更新SCA记录信息
    (Refresh SCA dependency records — package name, vulnerability count and
    severity level — from the SCA database, in batches.)
    :return:
    """
    logger.info(f'SCA离线检测开始')
    try:
        assets = Asset.objects.all()
        if assets.values('id').count() == 0:
            logger.info('dependency is empty')
            return
        step = 20
        start = 0
        while True:
            # NOTE(review): this slices [start:(start+1)*step], i.e. windows
            # [0:20], [1:40], [2:60]… — overlapping and re-processing rows.
            # Likely intended [start*step:(start+1)*step]; confirm.
            asset_steps = assets[start:(start + 1) * step]
            if len(asset_steps) == 0:
                break
            for asset in asset_steps:
                update_fields = list()
                signature = asset.signature_value
                maven_model = ScaMavenDb.objects.filter(sha_1=signature).values('aql').first()
                if maven_model is not None and asset.package_name != maven_model['aql']:
                    logger.info('update dependency name')
                    asset.package_name = maven_model['aql']
                    update_fields.append('package_name')
                aids = ScaMavenArtifact.objects.filter(signature=signature).values("aid")
                vul_count = len(aids)
                levels = ScaVulDb.objects.filter(id__in=aids).values('vul_level')
                level = 'info'
                if len(levels) > 0:
                    levels = [_['vul_level'] for _ in levels]
                    # NOTE(review): the second `elif 'high' in levels` branch
                    # duplicates the first and is unreachable — perhaps
                    # 'critical' was intended. Confirm against the schema.
                    if 'high' in levels:
                        level = 'high'
                    elif 'high' in levels:
                        level = 'high'
                    elif 'medium' in levels:
                        level = 'medium'
                    elif 'low' in levels:
                        level = 'low'
                    else:
                        level = 'info'
                new_level = IastVulLevel.objects.get(name=level)
                if asset.level != new_level:
                    asset.level = IastVulLevel.objects.get(name=level)
                    update_fields.append('level')
                if asset.vul_count != vul_count:
                    asset.vul_count = vul_count
                    update_fields.append('vul_count')
                if len(update_fields) > 0:
                    logger.info(f'update dependency fields: {update_fields}')
                    asset.save(update_fields=update_fields)
            start = start + 1
        logger.info('SCA离线检测完成')
    except Exception as e:
        logger.error(f'SCA离线检测出错,错误原因:{e}')
5,329,462
def test_1D_180to180_to_0to360(da_1D):
    """Tests that a 1D -180to180 grid converts to 0to360."""
    data = da_1D(degreesEast=False)
    converted = convert_lon(data)
    lonmin = converted.lon.min()
    lonmax = converted.lon.max()
    # Checks that it was appropriately converted, not going below 0 or above 360.
    assert (lonmin >= 0) & (lonmax <= 360)
    # Checks that data isn't changed (only coordinates should move).
    assert np.allclose(data.mean(), converted.mean())
5,329,463
def abs_path(file_path):
    """
    Return the absolute path obtained by resolving ``file_path`` relative to
    the directory containing this module. Needed to access other files within
    aide_gui when initialized by aide.

    Parameters
    ----------
    file_path: String
        The relative file path from the file that calls this function.
    """
    module_dir = dirname(abspath(__file__))
    return join(module_dir, file_path)
5,329,464
def function(x: np.ndarray) -> float:
    """The ellipse function is x0^2 + 2 * x1^2 + 3 * x2^2 + ..."""
    coefficients = np.arange(1, 1 + len(x))
    return np.sum(coefficients * np.square(x))
5,329,465
def get_permission(certificate_authority_arn: Optional[str] = None, principal: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPermissionResult: """ Permission set on private certificate authority :param str certificate_authority_arn: The Amazon Resource Name (ARN) of the Private Certificate Authority that grants the permission. :param str principal: The AWS service or identity that receives the permission. At this time, the only valid principal is acm.amazonaws.com. """ __args__ = dict() __args__['certificateAuthorityArn'] = certificate_authority_arn __args__['principal'] = principal if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('aws-native:acmpca:getPermission', __args__, opts=opts, typ=GetPermissionResult).value return AwaitableGetPermissionResult(
5,329,466
def replace_with_encoded_bits(one_hot_matrix, enum_val, add_value, last_col_index):
    """
    Generate encoded bits for a categorical data value using one hot encoding.

    :param one_hot_matrix: matrix representing the encoding of categorical data value to 1-hot encoding
    :param enum_val: categorical data value, could be np.nan
    :param add_value: set to 1 if a reference value is needed in addition to 1-hot encoding
    :param last_col_index: index into encoding for np.nan if exists
    :return: vector representing the encoded values for a enum value
    """
    # np.nan maps to the dedicated last row; everything else is offset by add_value.
    row = last_col_index if np.isnan(enum_val) else int(enum_val - add_value)
    return one_hot_matrix[row]
5,329,467
def cosine_similarity(n_co_elements, n_first_element, n_second_element):
    """
    Description
        A function which returns the cosine similarity between two elements,
        or 0 when either element is empty (division by zero).

    Arguments
        :param n_co_elements: Number of co-elements.
        :type n_co_elements: int
        :param n_first_element: Size of the first element.
        :type n_first_element: int
        :param n_second_element: Size of the second element
        :type n_second_element: int
    """
    denominator = sqrt(n_first_element) * sqrt(n_second_element)
    try:
        return n_co_elements / denominator
    except ZeroDivisionError:
        return 0
5,329,468
def AddForwardEulerDynamicsConstraint(mp, A, B, x, u, xnext, dt):
    """
    Add a dynamics constraint to the given Drake mathematical program mp,
    representing the forward Euler dynamics:

        xnext = x + (A*x + B*u)*dt,

    where x, u, and xnext are symbolic variables.

    Returns the binding created by mp.AddLinearEqualityConstraint.
    """
    n = A.shape[0]
    # Rewritten as (I + A*dt)*x + (B*dt)*u - xnext = 0.
    Aeq = np.hstack([
        (np.eye(n)+A*dt),
        B*dt,
        -np.eye(n)
    ])
    beq = np.zeros((n,1))
    # Stack the decision variables in the same order as the columns of Aeq.
    xeq = np.hstack([
        x,
        u,
        xnext])[np.newaxis].T
    return mp.AddLinearEqualityConstraint(Aeq,beq,xeq)
5,329,469
def test_variant_flexiblerollout_stickiness_100_customfield_112(unleash_client): """ Feature.flexible.rollout.custom.stickiness_100 and customField=112 yields yellow """ # Set up API responses.add(responses.POST, URL + REGISTER_URL, json={}, status=202) responses.add(responses.GET, URL + FEATURES_URL, json=json.loads(MOCK_JSON), status=200) responses.add(responses.POST, URL + METRICS_URL, json={}, status=202) # Tests unleash_client.initialize_client() context = { 'customField': "112" } expected_result = { "name": "yellow", "payload": { "type": "string", "value": "val1" }, "enabled": True } actual_result = unleash_client.get_variant("Feature.flexible.rollout.custom.stickiness_100", context) assert actual_result == expected_result
5,329,470
async def light_pure_rgb_msg_fixture(hass):
    """Return a mock MQTT msg with a pure rgb light actuator message.

    Loads the ozw/light_pure_rgb.json fixture off the event loop and wraps
    it in an encoded MQTTMessage.
    """
    light_json = json.loads(
        await hass.async_add_executor_job(load_fixture, "ozw/light_pure_rgb.json")
    )
    message = MQTTMessage(topic=light_json["topic"], payload=light_json["payload"])
    message.encode()
    return message
5,329,471
def make_header_names_thesaurus(header_names_thesaurus_file=HEADER_NAMES_THESAURUS_FILE):
    """
    Get a dict mapping ideal domain-specific phrases to list of alternates.

    Parameters
    ----------
    header_names_thesaurus_file : str
        Filepath.

    Returns
    -------
    Dict of {'ideal phrase': ['alt_phrase0', 'alt_phrase1', ...]}.
    """
    # 'rbU' is not a valid mode under Python 3 and csv.reader needs text
    # input anyway; open in text mode with newline='' as the csv module
    # requires, inside a context manager so the handle is always closed.
    with open(header_names_thesaurus_file, 'r', newline='') as f:
        f.readline()  # skip headers
        csvreader = csv.reader(f)
        header_names_thesaurus = {}
        for row in csvreader:
            if not row:
                # guard against blank lines (would raise IndexError below)
                continue
            header_primary_name = row[0]
            header_names_thesaurus[header_primary_name] = [
                x.lower().rstrip() for x in row if x]
    return header_names_thesaurus
5,329,472
def split_train_valid_test(adata_here, training_proportion=0.6,
                           validation_proportion=0.2, test_proportion=0.2,
                           rng=None, copy_adata=False):
    """Split cells into training, validation and test.

    Shuffles the cell order (seeded with 77 when no rng is given) and assigns
    'train'/'valid'/'test' labels by the given proportions. Returns a pandas
    Series of labels indexed like ``adata_here.obs_names``.
    """
    assert training_proportion <= 1.0
    assert validation_proportion <= 1.0
    assert test_proportion <= 1.0
    assert (training_proportion + validation_proportion + test_proportion) <= 1.0

    num_examples = adata_here.n_obs
    # Deterministic shuffle unless a caller-supplied rng is provided.
    if rng == None:
        idx_shuff = np.random.RandomState(seed=77).permutation(range(num_examples))
    else:
        idx_shuff = rng.permutation(range(num_examples))

    training_threshold = int(num_examples * training_proportion)
    validation_threshold = int(num_examples * (training_proportion + validation_proportion))

    training = range(training_threshold)
    validation = range(training_threshold, min(validation_threshold, num_examples))
    test = range(validation_threshold, num_examples)

    # make obs with train, validation, test
    train_test_df = pd.DataFrame({'cell': adata_here.obs_names,
                                  'train_valid_test': 'train'}, index=adata_here.obs_names)
    # Reorder rows by the shuffled index, then assign labels positionally.
    train_test_df = train_test_df.iloc[idx_shuff, :]
    train_test_df.iloc[training, 1] = 'train'
    train_test_df.iloc[validation, 1] = 'valid'
    train_test_df.iloc[test, 1] = 'test'

    print('splitting', train_test_df.loc[adata_here.obs_names, 'train_valid_test'].value_counts())

    return(train_test_df.loc[adata_here.obs_names, 'train_valid_test'])
5,329,473
def get_stock_ledger_entries(previous_sle, operator=None,
        order="desc", limit=None, for_update=False, debug=False, check_serial_no=True):
    """get stock ledger entries filtered by specific posting datetime conditions

    Builds and runs a SQL query over `tabStock Ledger Entry` for the item in
    ``previous_sle``, comparing timestamp(posting_date, posting_time) with the
    given ``operator``. Optional filters: warehouse (exact or raw condition)
    and serial_no (matched as an exact value or as a line inside the
    newline-separated serial_no field).
    """
    conditions = " and timestamp(posting_date, posting_time) {0} timestamp(%(posting_date)s, %(posting_time)s)".format(operator)
    if previous_sle.get("warehouse"):
        conditions += " and warehouse = %(warehouse)s"
    elif previous_sle.get("warehouse_condition"):
        # Caller-supplied raw SQL fragment.
        conditions += " and " + previous_sle.get("warehouse_condition")

    if check_serial_no and previous_sle.get("serial_no"):
        # conditions += " and serial_no like {}".format(frappe.db.escape('%{0}%'.format(previous_sle.get("serial_no"))))
        serial_no = previous_sle.get("serial_no")
        # Match the serial as a whole line of the newline-separated field:
        # alone, first line, last line, or in the middle.
        conditions += (""" and
            (
                serial_no = {0}
                or serial_no like {1}
                or serial_no like {2}
                or serial_no like {3}
            )
        """).format(frappe.db.escape(serial_no), frappe.db.escape('{}\n%'.format(serial_no)),
            frappe.db.escape('%\n{}'.format(serial_no)), frappe.db.escape('%\n{}\n%'.format(serial_no)))

    if not previous_sle.get("posting_date"):
        previous_sle["posting_date"] = "1900-01-01"
    if not previous_sle.get("posting_time"):
        previous_sle["posting_time"] = "00:00"

    if operator in (">", "<=") and previous_sle.get("name"):
        # Exclude the reference entry itself from its own neighbourhood query.
        conditions += " and name!=%(name)s"

    return frappe.db.sql("""
        select *, timestamp(posting_date, posting_time) as "timestamp"
        from `tabStock Ledger Entry`
        where item_code = %%(item_code)s
        and is_cancelled = 0
        %(conditions)s
        order by timestamp(posting_date, posting_time) %(order)s, creation %(order)s
        %(limit)s %(for_update)s""" % {
            "conditions": conditions,
            "limit": limit or "",
            "for_update": for_update and "for update" or "",
            "order": order
        }, previous_sle, as_dict=1, debug=debug)
5,329,474
def test_license(client):
    """
    GIVEN a user who wants to visit the license page
    WHEN he access the page
    THEN assert the right page is sent
    """
    response = client.get("/license/")
    assert response.status_code == 200
    # The view must render the dedicated license template.
    assertTemplateUsed(response, "core/license.html")
5,329,475
def init_ext_scorer(language_model_path, vocab_list, beam_alpha=5, beam_beta=1):
    """Initialize the external scorer.

    :param beam_alpha: Parameter associated with language model.
    :type beam_alpha: float
    :param beam_beta: Parameter associated with word count.
    :type beam_beta: float
    :param language_model_path: Filepath for language model. If it is
                                empty, the external scorer will be set to
                                None, and the decoding method will be pure
                                beam search without scorer.
    :type language_model_path: basestring|None
    :param vocab_list: List of tokens in the vocabulary, for decoding.
    :type vocab_list: list
    :return: The initialized Scorer, or None when no language model path is
             given.
    """
    if language_model_path != '':
        _ext_scorer = Scorer(beam_alpha, beam_beta,
                             language_model_path, vocab_list)
        lm_char_based = _ext_scorer.is_character_based()
        lm_max_order = _ext_scorer.get_max_order()
        lm_dict_size = _ext_scorer.get_dict_size()
    else:
        _ext_scorer = None
    # Fix: the original computed the scorer but never returned it, so callers
    # always received None.
    return _ext_scorer
5,329,476
def index(request): """ Main index. Editor view. """ # Render editor body = render_to_string('editor.html', {}) data = { 'body': body } # Render page layout return render(request, 'index.html', data)
5,329,477
def isUsdExt(ext):
    """ Check if the given extension is an expected USD file extension.

    A leading dot, if present, is ignored.

    :Parameters:
        ext : `str`
    :Returns:
        If the file extension is a valid USD extension
    :Rtype:
        `bool`
    """
    normalized = ext.lstrip('.')
    return normalized in USD_EXTS
5,329,478
def _get_dflt_lexicon(a_pos, a_neg):
    """Generate default lexicon by putting in it terms from seed set.

    @param a_pos - set of positive terms
    @param a_neg - set of negative terms

    @return list(3-tuple) - list of seed set terms with uniform scores and
      polarities (positive terms first, then negative terms)

    """
    lexicon = [(term, POSITIVE, 1.) for term in a_pos]
    lexicon.extend((term, NEGATIVE, -1.) for term in a_neg)
    return lexicon
5,329,479
def process_dst_overwrite_args(src, dst=None, overwrite=True, src_to_dst_func=None):
    """
    Normalise a (src, dst) pair, deriving dst from src when absent, and —
    when overwrite is not allowed — check whether the destination exists.

    Raises EnvironmentError if overwrite is False and dst already exists.
    Returns the (absolute src, dst) pair.
    """
    src = os.path.abspath(src)
    if dst is None:
        dst = src_to_dst_func(src)
    if not overwrite and os.path.exists(dst):
        raise EnvironmentError("output path '%s' already exists.." % dst)
    return src, dst
5,329,480
def loft(*args, **kwargs):
    """
    This command computes a skinned (lofted) surface passing through a number
    of NURBS curves.

    Stub signature only — the real implementation is provided by Maya at
    runtime.

    Returns:
        `string[]` Object name and node name
    """
    pass
5,329,481
def rct(target_t : Tensor, source_t : Tensor,
        target_mask_t : Tensor = None, source_mask_t : Tensor = None,
        mask_cutoff = 0.5) -> Tensor:
    """
    Transfer color using rct method.

        arguments

            target_t    Tensor( [N]CHW ) C==3 (BGR)     float16|32

            source_t    Tensor( [N]CHW ) C==3 (BGR)     float16|32

            target_mask_t(None)     Tensor( [N]CHW ) C==1|3  float16|32

            target_source_t(None)   Tensor( [N]CHW ) C==1|3  float16|32

            mask_cutoff(0.5)    only pixels whose mask value is >= cutoff
                                contribute to the colour statistics

    reference: Color Transfer between Images https://www.cs.tau.ac.il/~turkel/imagepapers/ColorTransfer.pdf
    """
    if target_t.ndim != source_t.ndim:
        raise ValueError('target_t.ndim != source_t.ndim')
    # Channel / spatial axes differ for CHW vs NCHW layouts.
    if target_t.ndim == 3:
        ch_axis = 0
        spatial_axes = (1,2)
    else:
        ch_axis = 1
        spatial_axes = (2,3)

    # Work in LAB space: L mean/variance matching leaves hue separable.
    target_t = cvt_color(target_t, 'BGR', 'LAB', ch_axis=ch_axis)
    source_t = cvt_color(source_t, 'BGR', 'LAB', ch_axis=ch_axis)

    # Statistics are computed only over masked-in pixels (mask >= cutoff).
    target_stat_t = target_t
    if target_mask_t is not None:
        target_stat_t = any_wise('O = I0*(I1 >= I2)', target_stat_t, target_mask_t, np.float32(mask_cutoff) )

    source_stat_t = source_t
    if source_mask_t is not None:
        source_stat_t = any_wise('O = I0*(I1 >= I2)', source_stat_t, source_mask_t, np.float32(mask_cutoff) )

    target_stat_mean_t, target_stat_var_t = moments(target_stat_t, axes=spatial_axes)
    source_stat_mean_t, source_stat_var_t = moments(source_stat_t, axes=spatial_axes)

    # Per-channel mean/std matching, clamped to LAB's valid ranges
    # (L in [0, 100], a/b in [-127, 127]).
    target_t = any_wise(f"""
O_0 = clamp( (I0_0 - I1_0) * sqrt(I2_0) / sqrt(I3_0) + I4_0, 0.0, 100.0);
O_1 = clamp( (I0_1 - I1_1) * sqrt(I2_1) / sqrt(I3_1) + I4_1, -127.0, 127.0);
O_2 = clamp( (I0_2 - I1_2) * sqrt(I2_2) / sqrt(I3_2) + I4_2, -127.0, 127.0);
""", target_t, target_stat_mean_t, source_stat_var_t, target_stat_var_t, source_stat_mean_t, dim_wise_axis=ch_axis)

    return cvt_color(target_t, 'LAB', 'BGR', ch_axis=ch_axis)
5,329,482
def random_policy(num_actions):
    """
    Returns a policy where all actions have equal probabilities, i.e., an
    uniform distribution over `num_actions` actions.
    """
    return np.full((num_actions,), 1 / num_actions)
5,329,483
def create_branch_switches(net):
    """
    Changes bus-bus switches with auxiliary buses into bus-branch switches and
    drops all auxiliary buses.

    Mutates ``net`` in place:
      * ``net.switch`` rows that point at auxiliary buses are rewired to
        reference the branch element (trafo 't' / line 'l') directly,
      * auxiliary buses are removed from ``net.bus``, ``net.res_bus`` and
        ``net.bus_geodata``.

    NOTE(review): the chained ``df["col"].loc[mask] = ...`` assignments work
    on the pandas version this was written for, but are the classic
    chained-assignment pattern that newer pandas may not apply to the original
    frame — verify against the project's pinned pandas version.
    """
    # initialize DataFrame to store the indices of auxiliary buses ("aux_buses"), the switch indices
    # the auxiliary buses are connected to ("idx_switch"), the bus indices which are connected to
    # auxiliary buses via the switches("connected_buses"), the element type the auxiliary buses are
    # connected to ("et") and the element the auxiliary buses are connected to ("element")
    aux_bus_df = pd.DataFrame([], columns=["idx_switch", "aux_buses", "connected_buses", "et",
                                           "element"])
    # determine the bus indices of all auxiliary buses
    all_aux_buses = net.bus.index[net.bus.type == "auxiliary"]
    # determine the switch indices which are connected to auxiliary buses
    aux_bus_df["idx_switch"] = net.switch.index[net.switch.element.isin(all_aux_buses)]
    # determine the auxiliary bus indices of the switches
    aux_bus_df["aux_buses"] = net.switch.element.loc[aux_bus_df["idx_switch"]].values
    # determine the indices of the buses which are connected to auxiliary buses via switches
    aux_bus_df["connected_buses"] = net.switch.bus.loc[aux_bus_df["idx_switch"]].values
    # determine the element types and element indices which are connected to auxiliary buses
    for branch, bus_types in zip(["trafo", "line"], [["hv_bus", "lv_bus"], ["from_bus", "to_bus"]]):
        for bus_type in bus_types:
            current_branch_bus_type_buses = net[branch][bus_type].astype(int)
            # mask of aux buses that sit at this branch/bus-type side
            aux_buses_are_cbbtb = aux_bus_df["aux_buses"].isin(current_branch_bus_type_buses)
            current_branch_bus_types_aux_buses = aux_bus_df["aux_buses"][aux_buses_are_cbbtb].values
            aux_bus_df["element"].loc[aux_buses_are_cbbtb] = current_branch_bus_type_buses.index[
                idx_in_2nd_array(current_branch_bus_types_aux_buses,
                                 current_branch_bus_type_buses.values)]  # requirement: only one
                                                                        # switch per aux bus
            # 't' for trafo, 'l' for line (first character of the branch name)
            aux_bus_df["et"].loc[aux_buses_are_cbbtb] = branch[0]
            # replace auxiliary buses in line and trafo tables
            net[branch][bus_type].loc[aux_bus_df["element"].loc[aux_buses_are_cbbtb]] = aux_bus_df[
                "connected_buses"].loc[aux_buses_are_cbbtb].values
    # any NaN left means an aux bus could not be matched to a branch element
    if pd.isnull(aux_bus_df).any().any():
        logger.error("Auxiliary bus replacement fails.")
    # replace auxiliary buses in switch table by branch elements
    net.switch["et"].loc[aux_bus_df["idx_switch"]] = aux_bus_df["et"].values
    net.switch["element"].loc[aux_bus_df["idx_switch"]] = np.array(
        aux_bus_df["element"].values, dtype=int)
    # drop all auxiliary buses
    net.bus.drop(aux_bus_df["aux_buses"], inplace=True)
    idx_in_res_bus = aux_bus_df["aux_buses"][aux_bus_df["aux_buses"].isin(net.res_bus.index)]
    net.res_bus.drop(idx_in_res_bus, inplace=True)
    idx_in_bus_geodata = aux_bus_df["aux_buses"][aux_bus_df["aux_buses"].isin(
        net.bus_geodata.index)]
    net.bus_geodata.drop(idx_in_bus_geodata, inplace=True)
5,329,484
def _check_no_miscalled_stubs(all_calls: Sequence[BaseSpyCall]) -> None:
    """Ensure every call matches a rehearsal, if the spy has rehearsals.

    The history of each spy (grouped by ``spy_id``) is walked in order.  A
    real call counts as "miscalled" when the spy is stubbed (has at least one
    earlier ``WhenRehearsal``) but the call matches neither an earlier stub
    nor a later ``VerifyRehearsal``.  Batches of miscalled calls are reported
    via ``MiscalledStubWarning`` either at the end of the spy's history or
    when the next stub rehearsal is encountered.
    """
    # Group the flat call list into per-spy histories, preserving call order.
    all_calls_by_id: Dict[int, List[BaseSpyCall]] = {}

    for call in all_calls:
        spy_id = call.spy_id
        spy_calls = all_calls_by_id.get(spy_id, [])
        all_calls_by_id[spy_id] = spy_calls + [call]

    for spy_id, spy_calls in all_calls_by_id.items():
        # Calls seen so far that matched no stub and no future verify.
        unmatched: List[SpyCall] = []

        for index, call in enumerate(spy_calls):
            # Stubs configured before this point in the history.
            past_stubs = [
                wr for wr in spy_calls[0:index] if isinstance(wr, WhenRehearsal)
            ]
            # Earlier stubs this call satisfies.
            matched_past_stubs = [wr for wr in past_stubs if match_call(call, wr)]
            # Later verifies that cover this call (so it is not a miscall).
            matched_future_verifies = [
                vr
                for vr in spy_calls[index + 1 :]
                if isinstance(vr, VerifyRehearsal) and match_call(call, vr)
            ]

            if (
                isinstance(call, SpyCall)
                and len(past_stubs) > 0
                and len(matched_past_stubs) == 0
                and len(matched_future_verifies) == 0
            ):
                unmatched = unmatched + [call]

            # Flush the pending batch at the end of the history...
            if index == len(spy_calls) - 1:
                warn(MiscalledStubWarning(calls=unmatched, rehearsals=past_stubs))
            # ...or when a new stub rehearsal starts a fresh stubbing context.
            elif isinstance(call, WhenRehearsal) and len(unmatched) > 0:
                warn(MiscalledStubWarning(calls=unmatched, rehearsals=past_stubs))
                unmatched = []
5,329,485
def find_object(func, name, *args, **kwargs):
    """Locate an object by name or identifier

    This function will use the `name` argument to attempt to locate an
    object.  It will first attempt to find the object by identifier and
    if that fails, it will attempt to find the object by name.

    Since object names are non-unique values in the Pureport API, this
    function will return the first value it finds in the case of multiple
    objects.

    If the requested object can not be found, this function will raise an
    exception.

    :param func: Callable that returns the iterable of candidate objects;
        invoked as ``func(*args, **kwargs)``
    :type func: callable

    :param name: The name or identifier of the object to locate
    :type name: str

    :returns: An instance of the object found
    :rtype: `pureport.models.Model`

    :raises: `pureport.exceptions.PureportError`
    """
    objects = func(*args, **kwargs)

    match = None
    name_matches = list()

    for item in objects:
        # An exact id match wins immediately.
        if name == item.id:
            match = item
            break
        elif name == item.name:
            name_matches.append(item)
    else:
        # Loop finished without an id match; fail only if there were no
        # name matches either.
        if not name_matches:
            raise PureportError("could not locate object `{}`".format(name))

    if match is None:
        # Names are non-unique: take the first name match found.
        match = first(name_matches)

    return match
5,329,486
def load_inputs(mod, switch_data, inputs_dir):
    """
    Import battery data from a .dat file.

    TODO: change this to allow multiple storage technologies.
    """
    battery_file = os.path.join(inputs_dir, 'batteries.dat')
    switch_data.load(filename=battery_file)
5,329,487
def mkdir(path):
    """Create a directory at ``path``, provided its parent directory exists.

    Returns a human-readable status message for either outcome.
    """
    path = abspath(path, fse.get_working().get_full_path())
    parent_path, dir_name = os.path.split(path)

    parent = fse.find_dir(parent_path)
    if not parent:
        return f'{parent_path} does not exist.'

    fse.create(name=dir_name, parent=parent, depth=parent.depth + 1,
               is_directory=True)
    return f'{path} created.'
5,329,488
def _get_cognitive_services_client() -> ImageSearchClient:
    """Build the cognitive services client used to run image searches.

    The subscription key and endpoint are read from the COGNITIVE_KEY and
    COGNITIVE_ENDPOINT environment variables.  Configure them in the
    function's app settings, or in local.settings.json when running locally.

    Returns
    -------
    client: ImageSearchClient
        Cognitive service client
    """
    key = os.environ.get('COGNITIVE_KEY')
    endpoint = os.environ.get('COGNITIVE_ENDPOINT')

    credentials = CognitiveServicesCredentials(key)
    return ImageSearchClient(endpoint=endpoint, credentials=credentials)
5,329,489
def getZeroPadding(path):
    """Get original zero padding, so it can be re-added.

    Inspects the first visible file in ``path`` and returns the length of the
    numeric substring of its stem (its zero-padding width).

    Returns 0 when the directory contains no visible files (the original
    implementation raised IndexError on ``files[0]`` in that case).
    """
    files = listVisibleFiles(path)
    if not files:
        # No visible files -> no padding to preserve.
        return 0
    stem = os.path.splitext(files[0])[0]
    return len(getNumSubString(stem))
5,329,490
def weld_segments(gdf_line_net, gdf_line_gen, gdf_line_houses,
                  debug_plotting=False):
    """Weld continuous line segments together and cut loose ends.

    Repeatedly applies the internal helper ``_weld_segments`` until the line
    count stops decreasing.  Lines that only connect to one other line are
    merged into a single MultiLine object; points that connect to generators
    and houses are not simplified, and loose ends are shortened where
    possible.

    Parameters
    ----------
    gdf_line_net : GeoDataFrame
        Potential pipe network.
    gdf_line_gen : GeoDataFrame
        Generators that need to be connected.
    gdf_line_houses : GeoDataFrame
        Houses that need to be connected.
    debug_plotting : bool, optional
        Plot the selection process.

    Returns
    -------
    GeoDataFrame
        Simplified potential pipe network.
    """
    previous = gdf_line_net
    simplified = _weld_segments(gdf_line_net, gdf_line_gen, gdf_line_houses,
                                debug_plotting)

    # Keep simplifying until a pass no longer removes any lines.
    while len(simplified) < len(previous):
        logging.info('Welding lines... reduced from {} to {} lines'.format(
            len(previous), len(simplified)))
        previous = simplified
        simplified = _weld_segments(simplified, gdf_line_gen, gdf_line_houses,
                                    debug_plotting)

    return simplified
5,329,491
def CreateHSpline(points, multiple=False):
    """Construct an H-spline from a sequence of interpolation points.

    Args:
        points (IEnumerable<Point3d>): Points to interpolate
        multiple (bool): issue a batched request, one call per point sequence
    """
    url = "rhino/geometry/nurbscurve/createhspline-point3darray"
    args = [points]
    if multiple:
        url += "?multiple=true"
        args = [[item] for item in points]
    raw = Util.ComputeFetch(url, args)
    return Util.DecodeToCommonObject(raw)
5,329,492
def SetRoleStageIfAlpha(role):
  """Default the role's stage to Alpha when no stage is set.

  Args:
    role: A protorpc.Message of type Role; mutated in place.
  """
  if role.stage is not None:
    return
  role.stage = StageTypeFromString('alpha')
5,329,493
def get_instance_embedding_loss(embedding,
                                instance_loss_type,
                                instance_labels,
                                crop_area,
                                crop_min_height,
                                num_samples=10,
                                similarity_strategy='dotproduct',
                                loss_strategy='softmax'):
  """Returns the instance embedding loss based on instance_loss_type.

  Args:
    embedding: A tf.float32 tensor of size [height, width, dims] or
               [batch_size, height, width, dims].
    instance_loss_type: A string containing the type of the embedding loss.
    instance_labels: A tf.int32 tensor of size [height, width] or
                     [batch_size, height, width] containing instance ids.
                     Assumed values in target start from 0 and cover 0 to N-1.
    crop_area: Area of the crop window. Only used in some cases of
               embedding loss.
    crop_min_height: Minimum height of the crop window. Only used in some
                     cases of embedding loss.
    num_samples: Number of samples. Only used in some cases of embedding loss.
    similarity_strategy: Defines the method for computing similarity between
                         embedding vectors. Possible values are 'dotproduct'
                         and 'distance'.
    loss_strategy: Defines the type of loss including 'softmax' or 'sigmoid'.

  Returns:
    Instance embedding loss.

  Raises:
    ValueError: If instance loss type is not known.
  """
  # Handling the case where there is a batch size: recurse per example and
  # average the per-example losses.
  embedding_shape = embedding.get_shape().as_list()
  if len(embedding_shape) == 4:
    num_batches = embedding_shape[0]
    losses = []
    embedding_list = tf.unstack(embedding)
    instance_label_list = tf.unstack(instance_labels)
    for i in range(num_batches):
      embedding_i = embedding_list[i]
      instance_labels_i = instance_label_list[i]
      loss = get_instance_embedding_loss(embedding_i,
                                         instance_loss_type,
                                         instance_labels_i,
                                         crop_area,
                                         crop_min_height,
                                         num_samples,
                                         similarity_strategy,
                                         loss_strategy)
      losses.append(loss)
    return tf.reduce_mean(tf.stack(losses))
  # Single-example case: dispatch on the requested loss type.
  if instance_loss_type == 'npair':
    return instance_embedding_npair_loss(
        embedding=embedding,
        instance_labels=instance_labels,
        crop_min_height=crop_min_height,
        crop_area=crop_area,
        similarity_strategy=similarity_strategy,
        loss_strategy=loss_strategy)
  elif instance_loss_type == 'npair_r_c':
    return instance_embedding_npair_random_center_loss(
        embedding=embedding,
        instance_labels=instance_labels,
        similarity_strategy=similarity_strategy,
        loss_strategy=loss_strategy)
  elif instance_loss_type == 'npair_r_c_r_s':
    return instance_embedding_npair_random_center_random_sample_loss(
        embedding=embedding,
        instance_labels=instance_labels,
        num_samples=num_samples,
        similarity_strategy=similarity_strategy,
        loss_strategy=loss_strategy)
  elif instance_loss_type == 'npair_r_s':
    return instance_embedding_npair_random_sample_loss(
        embedding=embedding,
        instance_labels=instance_labels,
        num_samples=num_samples,
        similarity_strategy=similarity_strategy,
        loss_strategy=loss_strategy)
  elif instance_loss_type == 'iou':
    return instance_embedding_iou_loss(
        embedding=embedding,
        instance_labels=instance_labels,
        num_samples=num_samples,
        similarity_strategy=similarity_strategy)
  else:
    raise ValueError('Instance loss type is not known')
5,329,494
def generate_ordered_map_to_left_streamed(left: Field, right: Field,
                                          l_result: Field, r_result: Field,
                                          invalid: Union[np.int32, np.int64],
                                          chunksize: Optional[int] = 1 << 20,
                                          rdtype=np.int32):
    """
    This function performs the most generic type of left to right mapping
    calculation in which both key fields can have repeated key values. At its
    heart, the function generates a mapping from left to right that can then
    be used to map data in the right space to data in the left space. Note
    that this can also be used to generate the inverse mapping by simply
    flipping left and right collections.

    As the Fields ``left`` and ``right`` can contain arbitrarily long
    sequences of data, the data is streamed through the algorithm in a series
    of chunks. Similarly, the resulting map is written to a buffer that is
    written to the ``result`` field in chunks.

    This streamed function makes a sequence of calls to a corresponding
    _partial function that does the heavy lifting. Inside the _partial
    function, a finite state machine (FSM) iterates over the data, performing
    the mapping. The _partial function call exits whenever any of the chunks
    (``left_``, ``right_`` or ``result_``) that it is passed become
    exhausted.

    Please take a look at the documentation for the partial function to
    understand the finite state machine parameters and the role that the
    various parameters play.

    We have to make some adjustments to the finite state machine between
    calls to _partial:
     * if the call used all the ``left_`` data, add the size of that data
       chunk to ``i_off``
     * if the call used all of the ``right_`` data, add the size of that data
       chunk to ``j_off``
     * write the accumulated ``result_`` data to the ``result`` field, and
       reset ``r`` to 0
    """
    # the collection of variables that make up the finite state machine for
    # the calls to partial:
    #   i_off/j_off - absolute offsets of the current left/right chunks
    #   i/j         - positions within the current left/right chunks
    #   r           - write position in the result buffers
    #   ii/jj/ii_max/jj_max/inner - inner-loop state carried across calls
    i_off, j_off, i, j, r, ii, jj, ii_max, jj_max, inner = 0, 0, 0, 0, 0, 0, 0, -1, -1, False
    l_result_ = np.zeros(chunksize, dtype=rdtype)
    r_result_ = np.zeros(chunksize, dtype=rdtype)
    l_chunk, left_, i_max, i_off, i = first_trimmed_chunk(left, chunksize)
    r_chunk, right_, j_max, j_off, j = first_trimmed_chunk(right, chunksize)

    # main loop: run the partial FSM while both sides still have data
    while i + i_off < len(left) and j + j_off < len(right):
        i, j, r, ii, jj, ii_max, jj_max, inner = \
            generate_ordered_map_to_left_partial(left_, i_max, right_, j_max,
                                                 l_result_, r_result_, invalid,
                                                 i_off, j_off, i, j, r,
                                                 ii, jj, ii_max, jj_max, inner)

        # update the left chunk if necessary
        if i_off + i < len(left) and i >= l_chunk[1] - l_chunk[0]:
            l_chunk, left_, i_max, i_off, i = next_trimmed_chunk(left, l_chunk, chunksize)

        # update the right chunk if necessary
        if j_off + j < len(right) and j >= r_chunk[1] - r_chunk[0]:
            r_chunk, right_, j_max, j_off, j = next_trimmed_chunk(right, r_chunk, chunksize)

        # write the result buffer
        if r > 0:
            l_result.data.write_part(l_result_[:r])
            r_result.data.write_part(r_result_[:r])
            r = 0

    # right side is exhausted: the remaining left entries map 1:1 to
    # themselves with ``invalid`` on the right
    while i + i_off < len(left):
        i, r = generate_ordered_map_to_left_remaining(i_max, l_result_, r_result_,
                                                      i_off, i, r, invalid)

        # update which part of left we are writing for; note we don't need to
        # fetch the data itself as we are mapping left on a 1:1 basis for the
        # rest of its length
        l_chunk = next_chunk(l_chunk[1], len(left), chunksize)
        i_max = l_chunk[1] - l_chunk[0]
        i_off = l_chunk[0]
        i = 0

        # write the result buffer
        if r > 0:
            l_result.data.write_part(l_result_[:r])
            r_result.data.write_part(r_result_[:r])
            r = 0

    # finalize the output fields
    l_result.data.complete()
    r_result.data.complete()
5,329,495
def str_to_array(value):
    """
    Check if value can be parsed to a tuple or an array.
    Because Spark can handle tuples we will try to transform tuples to arrays

    Non-ascii characters are stripped from ``value`` before parsing.

    :param value: string to inspect
    :return: True when the string parses (via ``ast.literal_eval``) to a
        list or tuple; False otherwise.  The original implementation fell
        through and returned None on the negative paths — False is returned
        explicitly now, which is equally falsy for all callers.
    """
    try:
        ascii_text = (value.encode('ascii', 'ignore')).decode("utf-8")
        return isinstance(literal_eval(ascii_text), (list, tuple))
    except (ValueError, SyntaxError):
        # Not a parseable Python literal at all.
        return False
5,329,496
def get_current_func_name():
    """Return the name of the function that invoked this helper.

    Works on Python >= 2.7 by reading the caller's stack frame record.
    """
    caller_record = inspect.stack()[1]
    return caller_record[3]
5,329,497
def test_pseudocolor(debug, axes, tmpdir, visualize_test_data): """Test for PlantCV.""" # Create a tmp directory cache_dir = tmpdir.mkdir("cache") params.debug_outdir = cache_dir # Input image img = cv2.imread(visualize_test_data.small_bin_img, -1) r, c = img.shape # generate "bad" pixels mask_bad = np.zeros((r, c), dtype=np.uint8) mask_bad[0:1, 0:1] = 255 # Debug mode params.debug = debug pseudo_img = pseudocolor(gray_img=img, mask=None, title="Pseudocolored image", axes=axes, bad_mask=mask_bad) # Assert the output is a matplotlib figure assert isinstance(pseudo_img, Figure)
5,329,498
def getParmNames(parmsDef):
    """Return a sorted list of parm names in a model parm definition.

    parmsDef: list of tuples, each tuple is (list of parms, time constraint).
              Each parm entry is itself a tuple whose first item is the parm
              name.  Call with modelDict[modelname]['Parms'].

    Returns: sorted list of string parameter names.

    Here's an example of how to remove unused parms from Fcst, this can
    run in localConfig:

        parmsToRemove=[]
        for p in getParmNames(modelDict['Fcst']):
            pl=p.lower()
            for t in ['period','swell','wave','surf', 'surge']:
                if t in pl:
                    parmsToRemove.append(p)
                    break
        removeParms(modelDict,'Fcst',parmsToRemove)
    """
    names = []
    for parm_list, _time_constraint in parmsDef:
        # first item of each parm definition tuple is the parm name
        names.extend(parm_def[0] for parm_def in parm_list)
    return sorted(names)
5,329,499