content
stringlengths
22
815k
id
int64
0
4.91M
def saveCameraTXT(path, camera):
    """
    Save a Camera view to a simple text format.

    *Arguments*:
     - path = the path to save the file.
     - camera = a camera object containing the data to save.
    """
    # create parent directories if they do not exist yet
    makeDirs(path)

    # assemble all output lines first, then write them in one go
    lines = [
        '%.3f,%.3f,%.3f #camera position\n' % (camera.pos[0], camera.pos[1], camera.pos[2]),
        '%.3f,%.3f,%.3f #camera ori\n' % (camera.ori[0], camera.ori[1], camera.ori[2]),
        '%s #project type\n' % camera.proj,
        '%.3f #vertical field of view (deg)\n' % camera.fov,
        '%d,%d #dims\n' % (camera.dims[0], camera.dims[1]),
    ]
    # panoramic projections additionally store the angular pitch
    if 'pano' in camera.proj.lower():
        lines.append('%.3f # angular pitch (x)\n' % camera.step)

    with open(path, 'w') as f:
        f.writelines(lines)
31,800
def save_graphlet_document(gexf_fh, gidx, num_graphlets, samplesize, cooccurence_corpus):
    """Saves the induced graphlet patterns into dataset folder, it is only
    used in conjunction with graphlet_corpus()

    Parameters
    ----------
    gexf_fh : str
        path to gexf file of graph
    gidx : int
        integer id of the graph, typically matches number in gexf_fh, if using
        TU Dortmund benchmark datasets (currently unused here; kept for
        interface compatibility)
    num_graphlets : int
        size of graphlet patterns induced
    samplesize : int
        number of graphlet patterns sampled from graph
    cooccurence_corpus : list
        list of graphlet patterns induced under cooccurrence rules, in this
        case all graphlets immediately adjacent to an induced graphlet pattern.

    Returns
    -------
    None : None
        The decomposition algorithm will induce graphlet patterns for graphs
        recording the dataset/"global" vocabulary of patterns within a
        dictionary. The graph and its associated patterns (by IDs given
        through our hash function) are saved into a <graphid>.wldr<depth>
        file which contains a line delimited list of all the substructure
        pattern ids.
    """
    # output path encodes the graphlet size and sample size alongside the graph file
    open_fname = gexf_fh + ".graphlet" + "_ng_" + str(num_graphlets) + "_ss_" + str(samplesize)
    with open(open_fname, 'w') as fh:
        # one line per neighbourhood: space-separated pattern ids
        for graphlet_neighbourhood in cooccurence_corpus:
            sentence = str.join(" ", map(str, graphlet_neighbourhood))
            print(sentence, file=fh)
31,801
def frechet_distance(real, fake, eps=1e-6):
    """Frechet distance between Gaussians fitted to two sample sets.
    Lower score is better.

    Args:
        real: array of shape (n, ...) with samples from the reference set.
        fake: array of shape (n, ...) with samples from the generated set.
        eps: regularization added to the covariance diagonals when the
            product of the covariances is numerically singular.
            (Fix: `eps` was previously referenced but never defined,
            raising NameError on the singular path.)

    Returns:
        float: the Frechet distance.

    Raises:
        ValueError: if the matrix square root has a significant imaginary
            component.
    """
    n = real.shape[0]
    # Fit a Gaussian (mean + covariance of flattened samples) to each set.
    mu1, sigma1 = np.mean(real, axis=0), np.cov(real.reshape(n, -1), rowvar=False)
    mu2, sigma2 = np.mean(fake, axis=0), np.cov(fake.reshape(n, -1), rowvar=False)

    diff = mu1 - mu2

    covmean, _ = scipy.linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        # Product was singular: regularize both covariances and retry.
        msg = (
            "fid calculation produces singular product; "
            "adding %s to diagonal of cov estimates"
        ) % eps
        print(msg)
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = scipy.linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))

    # Numerical error might give slight imaginary component
    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
            m = np.max(np.abs(covmean.imag))
            raise ValueError("Imaginary component {}".format(m))
        covmean = covmean.real
    assert np.isfinite(covmean).all() and not np.iscomplexobj(covmean)
    tr_covmean = np.trace(covmean)

    frechet_dist = diff.dot(diff)
    frechet_dist += np.trace(sigma1) + np.trace(sigma2)
    frechet_dist -= 2 * tr_covmean
    return frechet_dist
31,802
def get_tenants(zuul_url):
    """Fetch list of tenant names"""
    # A whitelabel deployment reports a fixed tenant in /info and does
    # not expose the multi-tenant listing endpoint.
    info = requests.get("%s/info" % zuul_url).json()
    if info.get('tenant', None) is not None:
        raise RuntimeError("Need multitenant api")
    tenants = requests.get("%s/tenants" % zuul_url).json()
    return [tenant["name"] for tenant in tenants]
31,803
def ant():
    """Configuration for MuJoCo's ant task."""
    # NOTE(review): relies on the locals().update(...) trick to pull the
    # shared defaults into this function before `return locals()`; this is
    # CPython-frame-dependent behavior — confirm it works as intended here.
    locals().update(default())
    # Environment
    env = 'Ant-v2'
    max_length = 1000
    steps = 2e7  # 20M
    return locals()
31,804
def docker_run(task, image, pull_image=True, entrypoint=None, container_args=None,
               volumes=None, remove_container=True, **kwargs):
    """
    This task runs a docker container. For details on how to use this task, see the
    :ref:`docker-run` guide.

    :param task: The bound task reference.
    :type task: :py:class:`girder_worker.task.Task`
    :param image: The docker image identifier.
    :type image: str
    :param pull_image: Whether to explicitly pull the image prior to running the container.
    :type pull_image: bool
    :param entrypoint: Alternative entrypoint to use when running the container.
    :type entrypoint: str
    :param container_args: Arguments to pass to the container.
    :type container_args: list
    :param volumes: Volumes to expose to the container.
    :type volumes: dict
    :param remove_container: Whether to delete the container after the task is done.
    :type remove_container: bool
    :return: Fulfilled result hooks.
    :rtype: list
    """
    # Thin public wrapper: all work (and any extra **kwargs) is delegated to
    # the private _docker_run implementation defined elsewhere.
    return _docker_run(
        task, image, pull_image, entrypoint, container_args, volumes,
        remove_container, **kwargs)
31,805
def test_constants_are_floats():
    """Check that known color constants dont have integers in them"""
    from distinctipy import colorsets

    def _check(palette):
        # every colour must be an (r, g, b) triple of pure floats
        for r, g, b in palette:
            assert isinstance(r, float)
            assert isinstance(g, float)
            assert isinstance(b, float)

    for name in colorsets.list_colorsets():
        _check(colorsets.get_colors(name))

    _check(distinctipy.CORNERS)
    _check(distinctipy.POINTS_OF_INTEREST)
31,806
def get_accuracy(pred, target):
    """Classification accuracy (%) of ``pred`` against ``target``.

    When ``pred`` has more than one dimension it is treated as per-class
    scores and reduced to class indices via argmax over dim 1.
    """
    if len(pred.size()) > 1:
        pred = pred.max(1)[1]
    correct = float((pred == target).sum())
    return round(correct / float(pred.numel()) * 100, 3)
31,807
def ifourier_transform(F, dt, n):
    """Inverse transform: real IFFT of ``F`` (length ``n``), fft-shifted
    and scaled by 1/dt.

    See Also
    --------
    fourier_transform
    """
    signal = numpy.fft.irfft(F, n=n)
    return (1.0 / dt) * numpy.fft.fftshift(signal)
31,808
def preprocess_labels(labels, encoder=None, categorical=True):
    """Encode labels with values among 0 and `n-classes-1`.

    Args:
        labels: iterable of raw label values.
        encoder: optional pre-fitted LabelEncoder; a new one is fitted on
            ``labels`` when None.
        categorical: when True, additionally one-hot encode the integers.

    Returns:
        tuple: (encoded labels, the encoder used)
    """
    # Test identity against None rather than truthiness so a supplied
    # encoder instance is never discarded because it evaluates falsy.
    if encoder is None:
        encoder = LabelEncoder()
        encoder.fit(labels)
    y = encoder.transform(labels).astype(np.int32)
    if categorical:
        y = np_utils.to_categorical(y)
    return y, encoder
31,809
async def test_spot_user_info():
    """
    POST - fetch user (account) information.
    Rate limit: 6 requests / 2 seconds.

    api_key  String  required  the user's apiKey
    sign     String  required  signature of the request parameters

    :return:
        is_ok: True/False
        status_code: 200
        response:
            result:{'info': {'funds': {'free': {'1st': '0.9985',   # available balance
                                                'aac': '0',},
                                       'freezed': {'1st': '0',     # frozen balance
                                                   'aac': '0',}}},
                    'result': True}
    """
    # Live API call against the exchange; result is only pretty-printed.
    data = await okex_spot_user_info(API_KEY, SECRET_KEY, )
    pprint(data)
31,810
async def test_detect_up(session, mocker):
    """Test the detection logic, still up"""
    # Configure remote parameters; intervals are presumably microseconds —
    # the sleep below divides by 1e6 (TODO confirm against aiobfd.session).
    session.required_min_rx_interval = 4000
    session.remote_detect_mult = 3
    session.remote_min_tx_interval = 2000
    session.last_rx_packet_time = time.time()
    session.state = aiobfd.session.STATE_UP
    # Let real time advance by one interval before patching asyncio.sleep.
    await asyncio.sleep(((4000))/1000000)
    mocker.patch.object(asyncio, 'sleep', new_callable=AsyncMock)
    # ErrorAfter(1) lets the detection loop run once, then raises
    # CallableExhausted so detect_async_failure() terminates.
    asyncio.sleep.side_effect = ErrorAfter(1)
    mocker.patch('aiobfd.session.log')
    with pytest.raises(CallableExhausted):
        await session.detect_async_failure()
    # Session is still up: no failure may have been logged.
    aiobfd.session.log.critical.assert_not_called()
    aiobfd.session.log.info.assert_not_called()
31,811
def import_minimal_log(path, parameters=None, variant=DEFAULT_VARIANT_LOG):
    """
    Import a Parquet file (as a minimal log with only the essential columns)

    Parameters
    -------------
    path
        Path of the file to import
    parameters
        Parameters of the algorithm, possible values:
            Parameters.COLUMNS -> columns to import from the Parquet file
    variant
        Variant of the algorithm, possible values:
            - Variants.PYARROW

    Returns
    -------------
    df
        Pandas dataframe
    """
    if parameters is None:
        parameters = {}
    # NOTE(review): COLUMNS is unconditionally overwritten here, so any
    # caller-supplied Parameters.COLUMNS value is ignored (and the caller's
    # dict is mutated) — confirm this is intended for the "minimal" import.
    parameters[COLUMNS] = [constants.CASE_CONCEPT_NAME, xes.DEFAULT_NAME_KEY, xes.DEFAULT_TIMESTAMP_KEY]
    return exec_utils.get_variant(variant).import_log(path, parameters=parameters)
31,812
async def test_app(test_client, loop):
    """Check availability of the feature flags collection endpoint."""
    app = await build_application()
    client = await test_client(app)
    resp = await client.get('/api/v1/features/')
    assert resp.status == 200
    text = await resp.text()
    # The endpoint identifies itself with this fixed JSON body.
    assert '{"content": "feature-flags-api"}' == text
31,813
def get_input_assign(input_signal, input_value):
    """Build the assignation statement that drives ``input_signal`` with
    the constant ``input_value`` (using the signal's own precision)."""
    constant = Constant(input_value, precision=input_signal.get_precision())
    return ReferenceAssign(input_signal, constant)
31,814
def remove_client_id_from_open_id_connect_provider(OpenIDConnectProviderArn=None, ClientID=None):
    """
    Removes the specified client ID (also known as audience) from the list of client IDs registered for the specified IAM OpenID Connect (OIDC) provider resource object.
    This action is idempotent; it does not fail or return an error if you try to remove a client ID that does not exist.
    See also: AWS API Documentation


    :example: response = client.remove_client_id_from_open_id_connect_provider(
        OpenIDConnectProviderArn='string',
        ClientID='string'
    )


    :type OpenIDConnectProviderArn: string
    :param OpenIDConnectProviderArn: [REQUIRED]
            The Amazon Resource Name (ARN) of the IAM OIDC provider resource to remove the client ID from. You can get a list of OIDC provider ARNs by using the ListOpenIDConnectProviders action.
            For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference .


    :type ClientID: string
    :param ClientID: [REQUIRED]
            The client ID (also known as audience) to remove from the IAM OIDC provider resource. For more information about client IDs, see CreateOpenIDConnectProvider .


    """
    # Documentation stub generated from the AWS API reference; the body is
    # intentionally empty — the real call is made elsewhere.
    pass
31,815
def prune_repos(region: str = None, registry_prefix: str = None, repo: str = None,
                current_tag: str = None, all_tags: str = None):
    """
    Delete obsolete images from an ECR repository.

    Every tag present in ECR that is neither in ``all_tags`` nor equal to
    ``current_tag`` is batch-deleted.
    (Fix: previous docstring described pulling an image — copy-pasted from a
    different function.)

    :param region: AWS region of the registry
    :param registry_prefix: unused here; kept for interface compatibility
    :param repo: ECR repository name
    :param current_tag: tag of the image currently in use (always kept)
    :param all_tags: JSON document listing recently used tags
    :return: True
    """
    # Get the tags from the all_tags JSON
    all_tags_list = get_tags_from_all_tags(all_tags)

    # Add the current_tag to the recent (local) tags. Just to be safe
    recent_tags = all_tags_list + [current_tag]

    # Get the tags for the repo from ECR
    ecr_tags = get_tags_from_ecr(region, repo)

    # Get all the tags in the registry that are *not* the ones we want
    bad_tags = [tag for tag in ecr_tags if tag not in recent_tags]

    # Delete the obsolete images (output of the CLI call is not needed)
    for tag in bad_tags:
        get_stdout('''{AWS} ecr batch-delete-image
                      --region {region}
                      --repository-name {repo}
                      --image-ids imageTag={tag}'''
                   .format(AWS=AWS, region=region, repo=repo, tag=tag))
    return True
31,816
def convert_size_bytes_to_gb(size_in_bytes):
    """Convert a byte count to gigabytes (module constant ``GB``).

    :rtype: float
    """
    size = float(size_in_bytes)
    return size / GB
31,817
def merge_swab(survey_df, swab_df):
    """
    Process for matching and merging survey and swab result data.
    Should be executed after merge with blood test result data.
    """
    # Match survey rows to swab lab results on the barcode column; a second
    # dataframe of unmatched/none records is returned alongside.
    survey_antibody_swab_df, none_record_df = execute_merge_specific_swabs(
        survey_df=survey_df,
        labs_df=swab_df,
        barcode_column_name="swab_sample_barcode",
        visit_date_column_name="visit_datetime",
        received_date_column_name="pcr_result_recorded_datetime",
        void_value="Void",
    )
    # Drop intermediate flag/count columns produced by the merge step.
    survey_antibody_swab_df = survey_antibody_swab_df.drop(
        "abs_offset_diff_vs_visit_hr_swab",
        "count_barcode_swab",
        "count_barcode_voyager",
        "diff_vs_visit_hr_swab",
        "pcr_flag",
        "time_order_flag",
        "time_difference_flag",
    )
    # Split merged data into accepted records, lab residuals, and failed
    # records; lab_columns_list = every swab column except the barcode.
    df_all_iqvia, df_lab_residuals, df_failed_records = merge_process_filtering(
        df=survey_antibody_swab_df,
        none_record_df=none_record_df,
        merge_type="swab",
        barcode_column_name="swab_sample_barcode",
        lab_columns_list=[column for column in swab_df.columns if column != "swab_sample_barcode"],
    )
    return df_all_iqvia, df_lab_residuals, df_failed_records
31,818
def confirm_space(environ, start_response):
    """
    Confirm a spaces exists. If it does, raise 204. If not,
    raise 404.
    """
    store = environ['tiddlyweb.store']
    space_name = environ['wsgiorg.routing_args'][1]['space_name']
    try:
        # A space exists iff both its public and private recipes can be
        # fetched from the store.
        space = Space(space_name)
        store.get(Recipe(space.public_recipe()))
        store.get(Recipe(space.private_recipe()))
    except NoRecipeError:
        raise HTTP404('%s does not exist' % space_name)
    # Existence confirmed: respond 204 with an empty body.
    start_response('204 No Content', [])
    return ['']
31,819
def find_ppp_device_status(address=None, username=None):
    """Find device status node based on address and/or username.

    This is currently only used by the web UI.  For the web UI this is the
    best guess for identifying the device related to a forced web forward;
    which allows the web UI to default username for user login, for instance.
    """
    # Predicate: device's PPP address equals the requested address
    # (vacuously false when no address was given).
    def _f1(d):
        return (address is not None) and (d.getS(ns.pppAddress, rdf.IPv4Address) == address)

    # Predicate: device's username equals the requested username.
    def _f2(d):
        return (username is not None) and (d.getS(ns.username, rdf.String) == username)

    # There may be multiple matching devices in corner cases, e.g. two devices
    # in RDF with the same IP address.  License monitor reconcile process should
    # eliminate these discrepancies eventually but here we may still encounter
    # them from time to time.
    #
    # If there are multiple matching entries, we take the newest one and assume
    # that is the desired one.  If the entries have a different username, this
    # may match to the wrong user.  This is not critical: the web UI does not
    # allow the user to make any user-related changes until the user has logged
    # in (providing his password).  This function only provides the default value
    # for login username.
    #
    # So: return device with latest startTime (newest connection), or first in
    # list if no startTime is found.  [filter_ppp_device_statuses_single does this.]

    return filter_ppp_device_statuses_single([_f1, _f2])
31,820
def smibbler(search, plot): """ Print info fetched from omdbapi.com """ # generate search string and fetch data query = search.replace(' ', '+') request = 'http://www.omdbapi.com/?t={0}&y=&plot=short&r=json'.format(query) res = urllib2.urlopen(request) data = json.load(res) movie = Movie.Load_JSON(data, plot) if movie is None: click.echo('Movie not found') else: click.echo(movie)
31,821
def get_L_dash_prm_bath_OS_90(house_insulation_type, floor_bath_insulation):
    """Perimeter length (m) of the outside-air-facing earthen-floor section
    of the bathroom, in the direction 90 degrees clockwise from the main
    opening direction.

    Args:
        house_insulation_type (str): 'floor-insulated dwelling' or
            'foundation-insulated dwelling' (the actual values are Japanese
            string constants used throughout this module)
        floor_bath_insulation (str): 'floor-insulated dwelling',
            'foundation-insulated dwelling', or 'bathroom floor and
            foundation not facing outside air etc.'

    Returns:
        float: the perimeter length (m) described above
    """
    # 38 selects this specific entry of table 3 — presumably the row
    # index; confirm against get_table_3.
    return get_table_3(38, house_insulation_type, floor_bath_insulation)
31,822
def init():
    """Authorize twitter app using tweepy library"""
    # fail fast if a required credential is missing from the environment
    if not os.environ.get("consumer_key"):
        raise RuntimeError("consumer_key not set")
    if not os.environ.get("consumer_secret"):
        raise RuntimeError("consumer_secret not set")

    handler = tweepy.OAuthHandler(os.environ.get("consumer_key"),
                                  os.environ.get("consumer_secret"))
    handler.set_access_token(os.environ.get("access_token"),
                             os.environ.get("access_token_secret"))
    return tweepy.API(handler)
31,823
def generate_books(request, form):
    """
    Returns a list of books.

    :param request: the incoming HTTP request (not used directly here).
    :param form: a validated form; its cleaned 'part' field selects the books.
    :return: HttpResponse containing the JSON-encoded list of books.
    """
    list_of_books = Book.generate_existing_books(form.cleaned_data['part'])
    return HttpResponse(json.dumps(list_of_books), content_type='application/json')
31,824
def window_reverse_4d(windows, window_size, H_q, W_q, H_s, W_s):
    """
    Args:
        windows: (num_windows*B, window_size, window_size, window_size, window_size, C)
        window_size (int): size of window
        H_q (int): Height of query image
        W_q (int): Width of query image
        H_s (int): Height of support image
        W_s (int): Width of support image

    Returns:
        x: (B, H_q, W_q, H_s, W_s, C)
    """
    # Number of windows along each spatial axis; rearrange interleaves
    # (#windows, window_size) pairs back into the full spatial dims,
    # e.g. H_q_full = (H_q // window_size) * window_size.
    kwargs = {
        'H_q': H_q // window_size,
        'W_q': W_q // window_size,
        'H_s': H_s // window_size,
        'W_s': W_s // window_size
    }
    x = rearrange(windows,
                  '(B H_q W_q H_s W_s) W_1 W_2 W_3 W_4 C -> B (H_q W_1) (W_q W_2) (H_s W_3) (W_s W_4) C',
                  **kwargs)
    return x
31,825
def parse_arguments():
    """
    Function to parse command line arguements from the user

    Returns
    -------
    opts : dict
        command line arguements from the user
    """
    parser = argparse.ArgumentParser(
        description='Divides pdb info files for parallelization')
    # program arguments
    parser.add_argument('-f', '--in-file', type=str, required=True,
                        help='PDB info file to divide')
    parser.add_argument('-n', '--num-splits', default=1000, type=int,
                        help='Number of splits to perform (Default: 1000)')
    parser.add_argument('-m', '--mut-file', type=str, required=True,
                        help='File containing mutation information')
    parser.add_argument('--split-dir', default="../data/split_pdbs/", type=str,
                        help='Output directory for split PDB info files')
    return vars(parser.parse_args())
31,826
def numeric(symbols, negative, value):
    """Implement the algorithm for `type: numeric`.

    Render ``value`` in the positional numeral system whose digits are
    ``symbols`` (base = len(symbols)).  ``negative`` is a (prefix, suffix)
    pair wrapped around negative values.
    (Fix: removed the redundant second ``value = abs(value)`` — the value
    is already made positive on the negative branch and is never negative
    otherwise.)
    """
    if value == 0:
        return symbols[0]
    is_negative = value < 0
    if is_negative:
        value = abs(value)
        prefix, suffix = negative
        # digits are collected least-significant first, so the suffix goes in first
        reversed_parts = [suffix]
    else:
        reversed_parts = []
    length = len(symbols)
    while value != 0:
        reversed_parts.append(symbols[value % length])
        value //= length
    if is_negative:
        reversed_parts.append(prefix)
    return ''.join(reversed(reversed_parts))
31,827
def register_extension(id, extension):
    """
    Registers an image extension.  This function should not be
    used in application code.

    :param id: An image format identifier.
    :param extension: An extension used for this format.
    """
    # extensions are stored lower-case, format ids upper-case
    key = extension.lower()
    EXTENSION[key] = id.upper()
31,828
def test__get():
    """ test automol.par.typ
        test automol.par.spin
        test automol.par.radrad
    """
    # high-spin hydrogen abstraction: not radical-radical, not ISC
    assert automol.par.typ(RCLASS1) == (
        ReactionClass.Typ.HYDROGEN_ABSTRACTION)
    assert automol.par.spin(RCLASS1) == (
        ReactionClass.HIGHSPIN)
    assert not automol.par.radrad(RCLASS1)
    assert not automol.par.isc(RCLASS1)

    # low-spin radical-radical substitution
    assert automol.par.typ(RCLASS6) == (
        ReactionClass.Typ.SUBSTITUTION)
    assert automol.par.spin(RCLASS6) == (
        ReactionClass.LOWSPIN)
    assert automol.par.radrad(RCLASS6)
    assert not automol.par.isc(RCLASS1)

    # addition with no spin label, flagged as intersystem crossing
    assert automol.par.typ(RCLASS9) == (
        ReactionClass.Typ.ADDITION)
    assert automol.par.spin(RCLASS9) is None
    assert not automol.par.radrad(RCLASS9)
    assert automol.par.isc(RCLASS9)
31,829
def my_function_lower(sdk):
    """
    Example of a custom function.  Gets sites object jdout to str, call .lower(), and print.

    :param sdk: Authenticated CloudGenix SDK Constructor.
    :return: No Return
    """
    # jdout renders the sites API response as a JSON string, which is then
    # lower-cased before printing.
    print(jdout(sdk.get.sites()).lower())
    return
31,830
def fetch_MNI6():
    """
    Expected templates:

    tpl-MNI152NLin6Asym/tpl-MNI152NLin6Asym_res-01_T1w.nii.gz
    tpl-MNI152NLin6Asym/tpl-MNI152NLin6Asym_res-02_T1w.nii.gz
    tpl-MNI152NLin6Asym/tpl-MNI152NLin6Asym_res-01_desc-brain_mask.nii.gz
    tpl-MNI152NLin6Asym/tpl-MNI152NLin6Asym_res-02_desc-brain_mask.nii.gz
    tpl-MNI152NLin6Asym/tpl-MNI152NLin6Asym_res-02_atlas-HCP_dseg.nii.gz
    """
    template = "MNI152NLin6Asym"

    # T1w head images and brain masks at 1mm and 2mm resolution
    tf.get(template, resolution=(1, 2), desc=None, suffix="T1w")
    tf.get(template, resolution=(1, 2), desc="brain", suffix="mask")
    # CIFTI
    tf.get(template, resolution=2, atlas="HCP", suffix="dseg")
31,831
def test_ct_d018_ct_d018_v(mode, save_output, output_format):
    """
    TEST :Syntax Checking for top level complexType Declaration : simpleContent,
    content of restriction and content of maxInclusive
    """
    # Round-trip the schema/instance pair through the binding generator and
    # assert the generated bindings validate the instance.
    assert_bindings(
        schema="msData/complexType/ctD018.xsd",
        instance="msData/complexType/ctD018.xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
31,832
def scale_value_dict(dct: Dict[str, float], problem: InnerProblem):
    """Scale a value dictionary.

    Each value is scaled with the scale declared by the inner-problem
    parameter whose id matches the dictionary key.
    """
    scaled_dct = {}
    for key, val in dct.items():
        # look up the parameter object for this id to obtain its scale
        x = problem.get_for_id(key)
        scaled_dct[key] = scale_value(val, x.scale)
    return scaled_dct
31,833
def get_and_log_environment():
    """Grab and log environment to use when executing command lines.

    The shell environment is saved into a file at an appropriate place in the
    Dockerfile.

    Returns:
        environ (dict) the shell environment variables
    """
    environment_file = FWV0 / "gear_environ.json"
    log.debug("Grabbing environment from %s", environment_file)

    with open(environment_file, "r") as fp:
        environ = json.load(fp)

    # Add environment to log if debugging
    pairs = "".join(key + "=" + val + " " for key, val in environ.items())
    log.debug("Environment: " + pairs)

    return environ
31,834
def green(string: str) -> str:
    """Wrap *string* in ANSI escape codes so it renders green.

    Args:
        string (str): Input string

    Returns:
        str: Green string
    """
    # \033[92m starts bright green; \033[0m resets attributes
    return f"\033[92m{string}\033[0m"
31,835
def test_cuboid_object_vs_lib():
    """
    includes a test of the input copy problem
    """
    a = 1
    mag = np.array([(10, 20, 30)])
    dim = np.array([(a, a, a)])
    pos = np.array([(2 * a, 2 * a, 2 * a)])
    # Fields computed through the low-level core functions ...
    B0 = magpy.core.magnet_cuboid_field("B", pos, mag, dim)
    H0 = magpy.core.magnet_cuboid_field("H", pos, mag, dim)

    # ... must match the object-oriented interface for the same magnet.
    src = magpy.magnet.Cuboid(mag[0], dim[0])
    B1 = src.getB(pos)
    H1 = src.getH(pos)

    np.testing.assert_allclose(B0[0], B1)
    np.testing.assert_allclose(H0[0], H1)
31,836
def from_6x6_to_21x1(T):
    """Flatten a symmetric 6x6 matrix into a 21x1 vector.

    Off-diagonal entries are scaled by sqrt(2) so that the Euclidean norm
    of the vector equals the Frobenius-norm contribution of the unique
    entries of the matrix.
    """
    s = np.sqrt(2)
    entries = [
        T[0, 0], T[1, 1], T[2, 2],
        s * T[1, 2], s * T[0, 2], s * T[0, 1],
        s * T[0, 3], s * T[0, 4], s * T[0, 5],
        s * T[1, 3], s * T[1, 4], s * T[1, 5],
        s * T[2, 3], s * T[2, 4], s * T[2, 5],
        T[3, 3], T[4, 4], T[5, 5],
        s * T[3, 4], s * T[4, 5], s * T[5, 3],
    ]
    return np.array(entries).reshape(21, 1)
31,837
def test_missing_manifest_package_fields(demo_manifest, key):
    """Test that exceptions are raised when a manifest is missing required
    fields in the 'packages.packages.afw' object.
    """
    yaml = ruamel.yaml.YAML()
    manifest = yaml.load(demo_manifest)
    # Remove one required field (parametrized via `key`) and expect
    # validation to reject the mutated manifest.
    del manifest['packages']['afw'][key]

    with pytest.raises(ValidationError):
        Manifest.validate(manifest)
31,838
def test_observations_query_raw(patch_get):
    """
    test querying raw
    """
    result = gemini.Observations.query_raw('GMOS-N', 'BIAS', progid='GN-CAL20191122')
    # The raw query must come back as a non-empty astropy Table.
    assert isinstance(result, Table)
    assert len(result) > 0
31,839
def main():
    """Collect metrics from a Lotus daemon and a Lotus miner over their
    JSON-RPC APIs and print them to stdout in Prometheus exposition format.

    Reads and updates the module globals START_TIME, MINER_URL, MINER_TOKEN,
    DAEMON_URL and DAEMON_TOKEN (endpoints/tokens are auto-discovered from
    ~/.lotus and ~/.lotusminer when unset).  On a fatal API failure the
    function prints `lotus_scrape_execution_succeed 0` and exits.
    """
    global START_TIME, MINER_URL, MINER_TOKEN, DAEMON_URL, DAEMON_TOKEN

    # Start execution time measurement
    START_TIME = time.time()

    # SET API IP PORT AND AUTH
    if MINER_URL == '':
        miner_config = toml.load(str(Path.home()) + "/.lotusminer/config.toml")
        miner_api_ip = "127.0.0.1"
        miner_api_port = "2345"
        # try to read configuration file to identify miner url
        if "API" in miner_config.keys():
            if "ListenAddress" in miner_config["API"].keys():
                miner_api = miner_config["API"]["ListenAddress"].split("/")
                miner_api_ip = miner_api[2].replace("0.0.0.0", "127.0.0.1")
                miner_api_port = miner_api[4]
        MINER_URL = "http://" + miner_api_ip + ":" + miner_api_port + "/rpc/v0"
    if DAEMON_URL == '':
        daemon_config = toml.load(str(Path.home()) + "/.lotus/config.toml")
        daemon_api_ip = "127.0.0.1"
        daemon_api_port = "1234"
        # try to read configuration file to identify daemon url
        if "API" in daemon_config.keys():
            if "ListenAddress" in daemon_config["API"].keys():
                daemon_api = daemon_config["API"]["ListenAddress"].split("/")
                daemon_api_ip = daemon_api[2].replace("0.0.0.0", "127.0.0.1")
                daemon_api_port = daemon_api[4]
        DAEMON_URL = "http://" + daemon_api_ip + ":" + daemon_api_port + "/rpc/v0"
    if MINER_TOKEN == '':
        with open(str(Path.home()) + "/.lotusminer/token", "r") as text_file:
            MINER_TOKEN = text_file.read()
    if DAEMON_TOKEN == '':
        with open(str(Path.home()) + "/.lotus/token", "r") as text_file:
            DAEMON_TOKEN = text_file.read()

    #################################################################################
    # MAIN
    #################################################################################

    # SCRAPE METRIC DEFINITION
    print("# HELP lotus_scrape_execution_succeed return 1 if lotus-farcaster execution was successfully")
    print("# TYPE lotus_scrape_execution_succeed gauge")

    # LOCAL TIME METRIC
    print("# HELP lotus_local_time time on the node machine when last execution start in epoch")
    print("# TYPE lotus_local_time gauge")
    print(f'lotus_local_time {{ }} { int(time.time()) }')

    # RETRIEVE MINER ID
    actoraddress = miner_get_json("ActorAddress", [])
    miner_id = actoraddress['result']

    # RETRIEVE TIPSET + CHAINHEAD
    chainhead = daemon_get_json("ChainHead", [])
    tipsetkey = chainhead["result"]["Cids"]
    # XXX small hack trying to speedup the script
    empty_tipsetkey = []
    print("# HELP lotus_chain_height return current height")
    print("# TYPE lotus_chain_height counter")
    print(f'lotus_chain_height {{ miner_id="{miner_id}" }} {chainhead["result"]["Height"]}')
    checkpoint("ChainHead")

    # GENERATE CHAIN SYNC STATUS
    print("# HELP lotus_chain_sync_diff return daemon sync height diff with chainhead for each daemon worker")
    print("# TYPE lotus_chain_sync_diff gauge")
    print("# HELP lotus_chain_sync_status return daemon sync status with chainhead for each daemon worker")
    print("# TYPE lotus_chain_sync_status gauge")
    sync_status = daemon_get_json("SyncState", [])
    for worker in sync_status["result"]["ActiveSyncs"]:
        try:
            diff_height = worker["Target"]["Height"] - worker["Base"]["Height"]
        except Exception:
            # Target/Base may be missing while a sync worker is idle
            diff_height = -1
        print(f'lotus_chain_sync_diff {{ miner_id="{ miner_id }", worker_id="{ sync_status["result"]["ActiveSyncs"].index(worker) }" }} { diff_height }')
        print(f'lotus_chain_sync_status {{ miner_id="{ miner_id }", worker_id="{ sync_status["result"]["ActiveSyncs"].index(worker) }" }} { worker["Stage"] }')
    checkpoint("ChainSync")

    # GENERATE MINER INFO
    miner_version = miner_get_json("Version", [])
    checkpoint("Miner")

    # RETRIEVE MAIN ADDRESSES
    daemon_stats = daemon_get_json("StateMinerInfo", [miner_id, empty_tipsetkey])
    miner_owner = daemon_stats["result"]["Owner"]
    miner_owner_addr = daemon_get_json("StateAccountKey", [miner_owner, empty_tipsetkey])["result"]
    miner_worker = daemon_stats["result"]["Worker"]
    miner_worker_addr = daemon_get_json("StateAccountKey", [miner_worker, empty_tipsetkey])["result"]
    try:
        miner_control0 = daemon_stats["result"]["ControlAddresses"][0]
    except:
        # no control address configured: fall back to the worker address
        miner_control0 = miner_worker
    miner_control0_addr = daemon_get_json("StateAccountKey", [miner_control0, empty_tipsetkey])["result"]
    print("# HELP lotus_miner_info lotus miner information like adress version etc")
    print("# TYPE lotus_miner_info gauge")
    print("# HELP lotus_miner_info_sector_size lotus miner sector size")
    print("# TYPE lotus_miner_info_sector_size gauge")
    print(f'lotus_miner_info {{ miner_id = "{miner_id}", version="{ miner_version["result"]["Version"] }", owner="{ miner_owner }", owner_addr="{ miner_owner_addr }", worker="{ miner_worker }", worker_addr="{ miner_worker_addr }", control0="{ miner_control0 }", control0_addr="{ miner_control0_addr }" }} 1')
    print(f'lotus_miner_info_sector_size {{ miner_id = "{miner_id}" }} { daemon_stats["result"]["SectorSize"] }')
    checkpoint("StateMinerInfo")

    # GENERATE DAEMON INFO
    daemon_network = daemon_get_json("StateNetworkName", [])
    daemon_network_version = daemon_get_json("StateNetworkVersion", [empty_tipsetkey])
    daemon_version = daemon_get_json("Version", [])
    print("# HELP lotus_info lotus daemon information like adress version, value is set to network version number")
    print("# TYPE lotus_info gauge")
    print(f'lotus_info {{ miner_id="{miner_id}", version="{ daemon_version["result"]["Version"] }", network="{ daemon_network["result"] }"}} { daemon_network_version["result"]}')
    checkpoint("Daemon")

    # GENERATE WALLET + LOCKED FUNDS BALANCES
    walletlist = daemon_get_json("WalletList", [])
    print("# HELP lotus_wallet_balance return wallet balance")
    print("# TYPE lotus_wallet_balance gauge")
    for addr in walletlist["result"]:
        balance = daemon_get_json("WalletBalance", [addr])
        short = addr[0:5] + "..." + addr[-5:]
        print(f'lotus_wallet_balance {{ miner_id="{miner_id}", address="{ addr }", short="{ short }" }} { int(balance["result"])/1000000000000000000 }')

    # Add miner balance :
    miner_balance_available = daemon_get_json("StateMinerAvailableBalance", [miner_id, empty_tipsetkey])
    print(f'lotus_wallet_balance {{ miner_id="{miner_id}", address="{ miner_id }", short="{ miner_id }" }} { int(miner_balance_available["result"])/1000000000000000000 }')

    # Retrieve locked funds balance
    locked_funds = daemon_get_json("StateReadState", [miner_id, empty_tipsetkey])
    print("# HELP lotus_wallet_locked_balance return miner wallet locked funds")
    print("# TYPE lotus_wallet_locked_balance gauge")
    for i in ["PreCommitDeposits", "LockedFunds", "FeeDebt", "InitialPledge"]:
        print(f'lotus_wallet_locked_balance {{ miner_id="{miner_id}", address="{ miner_id }", locked_type ="{ i }" }} { int(locked_funds["result"]["State"][i])/1000000000000000000 }')
    checkpoint("Balances")

    # GENERATE POWER
    powerlist = daemon_get_json("StateMinerPower", [miner_id, empty_tipsetkey])
    print("# HELP lotus_power return miner power")
    print("# TYPE lotus_power gauge")
    for minerpower in powerlist["result"]["MinerPower"]:
        print(f'lotus_power {{ miner_id="{miner_id}", scope="miner", power_type="{ minerpower }" }} { powerlist["result"]["MinerPower"][minerpower] }')
    for totalpower in powerlist["result"]["TotalPower"]:
        print(f'lotus_power {{ miner_id="{miner_id}", scope="network", power_type="{ totalpower }" }} { powerlist["result"]["TotalPower"][totalpower] }')

    # Mining eligibility
    print("# HELP lotus_power_mining_eligibility return miner mining eligibility")
    print("# TYPE lotus_power_mining_eligibility gauge")
    base_info = daemon_get_json("MinerGetBaseInfo", [miner_id, chainhead["result"]["Height"], tipsetkey])

    if base_info["result"] is None:
        # Fatal: without base info the rest of the scrape is meaningless
        print(f'ERROR MinerGetBaseInfo return no result', file=sys.stderr)
        print(f'KNOWN_REASON your miner reports wrong info to the chain and thats pretty bad (not just for the dashboard)', file=sys.stderr)
        print(f'SOLUTION restart your miner and node', file=sys.stderr)
        print('lotus_scrape_execution_succeed { } 0')
        sys.exit(0)

    if base_info["result"]["EligibleForMining"]:
        eligibility = 1
    else:
        eligibility = 0
    print(f'lotus_power_mining_eligibility {{ miner_id="{miner_id}" }} { eligibility }')
    checkpoint("Power")

    # GENERATE MPOOL
    mpoolpending = daemon_get_json("MpoolPending", [empty_tipsetkey])
    print("# HELP lotus_mpool_total return number of message pending in mpool")
    print("# TYPE lotus_mpool_total gauge")
    print("# HELP lotus_mpool_local_total return total number in mpool comming from local adresses")
    # NOTE(review): TYPE name below says lotus_power_local_total while the HELP
    # above says lotus_mpool_local_total — looks like a typo in the metric
    # declaration; confirm before changing (it is part of the exposition output).
    print("# TYPE lotus_power_local_total gauge")
    print("# HELP lotus_mpool_local_message local message details")
    print("# TYPE lotus_mpool_local_message gauge")
    mpool_total = 0
    mpool_local_total = 0
    for message in mpoolpending["result"]:
        mpool_total += 1
        frm = message["Message"]["From"]
        if frm in walletlist["result"]:
            mpool_local_total += 1
            if frm == miner_owner_addr:
                display_addr = "owner"
            elif frm == miner_worker_addr:
                display_addr = "worker"
            elif frm == miner_control0_addr:
                display_addr = "control0"
            elif frm != miner_id:
                display_addr = frm[0:5] + "..." + frm[-5:]
            # NOTE(review): when frm == miner_id none of the branches above set
            # display_addr — this relies on a previous loop iteration's value
            # (or raises NameError on the first hit); verify intended behavior.
            print(f'lotus_mpool_local_message {{ miner_id="{miner_id}", from="{ display_addr }", to="{ message["Message"]["To"] }", nonce="{ message["Message"]["Nonce"] }", value="{ message["Message"]["Value"] }", gaslimit="{ message["Message"]["GasLimit"] }", gasfeecap="{ message["Message"]["GasFeeCap"] }", gaspremium="{ message["Message"]["GasPremium"] }", method="{ message["Message"]["Method"] }" }} 1')

    print(f'lotus_mpool_total {{ miner_id="{miner_id}" }} { mpool_total }')
    print(f'lotus_mpool_local_total {{ miner_id="{miner_id}" }} { mpool_local_total }')
    checkpoint("MPool")

    # GENERATE NET_PEERS
    daemon_netpeers = daemon_get_json("NetPeers", [])
    print("# HELP lotus_netpeers_total return number netpeers")
    print("# TYPE lotus_netpeers_total gauge")
    print(f'lotus_netpeers_total {{ miner_id="{miner_id}" }} { len(daemon_netpeers["result"]) }')

    miner_netpeers = miner_get_json("NetPeers", [])
    print("# HELP lotus_miner_netpeers_total return number netpeers")
    print("# TYPE lotus_miner_netpeers_total gauge")
    print(f'lotus_miner_netpeers_total {{ miner_id="{miner_id}" }} { len(miner_netpeers["result"]) }')
    checkpoint("NetPeers")

    # GENERATE NETSTATS
    # XXX check the quality of these stats: `lotus net`, the API and Grafana
    # all report different numbers
    print("# HELP lotus_net_protocol_in return input net per protocol")
    print("# TYPE lotus_net_protocol_in counter")
    print("# HELP lotus_net_protocol_out return output per protocol net")
    print("# TYPE lotus_net_protocol_out counter")
    protocols_list = daemon_get_json("NetBandwidthStatsByProtocol", [])
    for protocol in protocols_list["result"]:
        print(f'lotus_net_protocol_in {{ miner_id="{miner_id}", protocol="{ protocol }" }} { protocols_list["result"][protocol]["TotalIn"] }')
        print(f'lotus_net_protocol_out {{ miner_id="{miner_id}", protocol="{ protocol }" }} { protocols_list["result"][protocol]["TotalOut"] }')

    print("# HELP lotus_miner_net_protocol_in return input net per protocol")
    print("# TYPE lotus_miner_net_protocol_in counter")
    print("# HELP lotus_miner_net_protocol_out return output per protocol net")
    print("# TYPE lotus_miner_net_protocol_out counter")
    protocols_list = miner_get_json("NetBandwidthStatsByProtocol", [])
    for protocol in protocols_list["result"]:
        print(f'lotus_miner_net_protocol_in {{ miner_id="{miner_id}", protocol="{ protocol }" }} { protocols_list["result"][protocol]["TotalIn"] }')
        print(f'lotus_miner_net_protocol_out {{ miner_id="{miner_id}", protocol="{ protocol }" }} { protocols_list["result"][protocol]["TotalOut"] }')

    print("# HELP lotus_net_total_in return input net")
    print("# TYPE lotus_net_total_in counter")
    print("# HELP lotus_net_total_out return output net")
    print("# TYPE lotus_net_total_out counter")
    net_list = daemon_get_json("NetBandwidthStats", [])
    print(f'lotus_net_total_in {{ miner_id="{miner_id}" }} { net_list["result"]["TotalIn"] }')
    print(f'lotus_net_total_out {{ miner_id="{miner_id}" }} { net_list["result"]["TotalOut"] }')

    print("# HELP lotus_miner_net_total_in return input net")
    print("# TYPE lotus_miner_net_total_in counter")
    print("# HELP lotus_miner_net_total_out return output net")
    print("# TYPE lotus_miner_net_total_out counter")
    net_list = miner_get_json("NetBandwidthStats", [])
    print(f'lotus_miner_net_total_in {{ miner_id="{miner_id}" }} { net_list["result"]["TotalIn"] }')
    print(f'lotus_miner_net_total_out {{ miner_id="{miner_id}" }} { net_list["result"]["TotalOut"] }')
    checkpoint("NetBandwidth")

    # GENERATE WORKER INFOS
    workerstats = miner_get_json("WorkerStats", [])
    # XXX 1.2.1 introduce a new worker_id format. Later we should delete it, its a useless info.
    #print("# HELP lotus_miner_worker_id All lotus worker information prfer to use workername than workerid which is changing at each restart")
    #print("# TYPE lotus_miner_worker_id gauge")
    print("# HELP lotus_miner_worker_mem_physical_used worker minimal memory used")
    print("# TYPE lotus_miner_worker_mem_physical_used gauge")
    print("# HELP lotus_miner_worker_mem_vmem_used worker maximum memory used")
    print("# TYPE lotus_miner_worker_mem_vmem_used gauge")
    print("# HELP lotus_miner_worker_mem_reserved worker memory reserved by lotus")
    print("# TYPE lotus_miner_worker_mem_reserved gauge")
    print("# HELP lotus_miner_worker_gpu_used is the GPU used by lotus")
    print("# TYPE lotus_miner_worker_gpu_used gauge")
    print("# HELP lotus_miner_worker_cpu_used number of CPU used by lotus")
    print("# TYPE lotus_miner_worker_cpu_used gauge")
    print("# HELP lotus_miner_worker_cpu number of CPU")
    print("# TYPE lotus_miner_worker_cpu gauge")
    print("# HELP lotus_miner_worker_gpu number of GPU")
    print("# TYPE lotus_miner_worker_gpu gauge")
    print("# HELP lotus_miner_worker_mem_physical server RAM")
    print("# TYPE lotus_miner_worker_mem_physical gauge")
    print("# HELP lotus_miner_worker_mem_swap server SWAP")
    print("# TYPE lotus_miner_worker_mem_swap gauge")
    for val in workerstats["result"].items():
        # val is a (worker_id, stats) pair; only the stats dict is used
        val = val[1]
        info = val["Info"]
        worker_host = info["Hostname"]
        mem_physical = info["Resources"]["MemPhysical"]
        mem_swap = info["Resources"]["MemSwap"]
        mem_reserved = info["Resources"]["MemReserved"]
        cpus = info["Resources"]["CPUs"]
        gpus = len(info["Resources"]["GPUs"])
        mem_used_min = val["MemUsedMin"]
        mem_used_max = val["MemUsedMax"]
        if val["GpuUsed"]:
            gpu_used = 1
        else:
            gpu_used = 0
        cpu_used = val["CpuUse"]
        print(f'lotus_miner_worker_cpu {{ miner_id="{miner_id}", worker_host="{worker_host}" }} { cpus }')
        print(f'lotus_miner_worker_gpu {{ miner_id="{miner_id}", worker_host="{worker_host}" }} { gpus }')
        print(f'lotus_miner_worker_mem_physical {{ miner_id="{miner_id}", worker_host="{worker_host}" }} { mem_physical }')
        print(f'lotus_miner_worker_mem_swap {{ miner_id="{miner_id}", worker_host="{worker_host}" }} { mem_swap }')
        print(f'lotus_miner_worker_mem_physical_used {{ miner_id="{miner_id}", worker_host="{worker_host}" }} {mem_used_min}')
        print(f'lotus_miner_worker_mem_vmem_used {{ miner_id="{miner_id}", worker_host="{worker_host}" }} {mem_used_max}')
        print(f'lotus_miner_worker_mem_reserved {{ miner_id="{miner_id}", worker_host="{worker_host}" }} {mem_reserved}')
        print(f'lotus_miner_worker_gpu_used {{ miner_id="{miner_id}", worker_host="{worker_host}" }} {gpu_used}')
        print(f'lotus_miner_worker_cpu_used {{ miner_id="{miner_id}", worker_host="{worker_host}" }} {cpu_used}')
    checkpoint("Workers")

    # GENERATE JOB INFOS
    workerjobs = miner_get_json("WorkerJobs", [])
    print("# HELP lotus_miner_worker_job status of each individual job running on the workers. Value is the duration")
    print("# TYPE lotus_miner_worker_job gauge")
    for (wrk, job_list) in workerjobs["result"].items():
        for job in job_list:
            job_id = job['ID']['ID']
            sector = str(job['Sector']['Number'])
            try:
                worker_host = workerstats["result"][wrk]["Info"]["Hostname"]
            except:
                # sometimes WorkerJobs returns an invalid worker_id like
                # 0000-000000-0000... ; in that case report "unknown"
                worker_host = "unknown"
            task = str(job['Task'])
            job_start_time = str(job['Start'])
            # drop the trailing timezone character before parsing
            job_start_time = job_start_time[:-1]
            run_wait = str(job['RunWait'])
            job_start_epoch = time.mktime(time.strptime(job_start_time[:19], '%Y-%m-%dT%H:%M:%S'))
            print(f'lotus_miner_worker_job {{ miner_id="{miner_id}", job_id="{job_id}", worker_host="{ worker_host }", task="{task}", sector_id="{sector}", job_start_time="{job_start_time}", run_wait="{run_wait}" }} { START_TIME - job_start_epoch }')
    checkpoint("Jobs")

    # GENERATE JOB SCHEDDIAG
    scheddiag = miner_get_json("SealingSchedDiag", [True])
    if scheddiag["result"]["SchedInfo"]["Requests"]:
        for req in scheddiag["result"]["SchedInfo"]["Requests"]:
            sector = req["Sector"]["Number"]
            task = req["TaskType"]
            print(f'lotus_miner_worker_job {{ miner_id="{miner_id}", job_id="", worker="", task="{task}", sector_id="{sector}", start="", run_wait="99" }} 0')
    checkpoint("SchedDiag")

    # GENERATE SECTORS
    print("# HELP lotus_miner_sector_state sector state")
    print("# TYPE lotus_miner_sector_state gauge")
    print("# HELP lotus_miner_sector_event contains important event of the sector life")
    print("# TYPE lotus_miner_sector_event gauge")
    print("# HELP lotus_miner_sector_sealing_deals_info contains information related to deals that are not in Proving and Removed state.")
    print("# TYPE lotus_miner_sector_sealing_deals_info gauge")
    sector_list = miner_get_json("SectorsList", [])
    #sectors_counters = {}
    # remove duplicate (bug)
    unique_sector_list = set(sector_list["result"])
    for sector in unique_sector_list:
        detail = miner_get_json("SectorsStatus", [sector, False])
        # count non-zero deal IDs only
        deals = len(detail["result"]["Deals"])-detail["result"]["Deals"].count(0)
        creation_date = detail["result"]["Log"][0]["Timestamp"]
        packed_date = ""
        finalized_date = ""
        verified_weight = detail["result"]["VerifiedDealWeight"]
        # NOTE(review): loop variable `log` shadows any module-level `log`
        # name for the rest of this function
        for log in range(len(detail["result"]["Log"])):
            if detail["result"]["Log"][log]["Kind"] == "event;sealing.SectorPacked":
                packed_date = detail["result"]["Log"][log]["Timestamp"]
            if detail["result"]["Log"][log]["Kind"] == "event;sealing.SectorFinalized":
                finalized_date = detail["result"]["Log"][log]["Timestamp"]
        if detail["result"]["Log"][0]["Kind"] == "event;sealing.SectorStartCC":
            pledged = 1
        else:
            pledged = 0
        print(f'lotus_miner_sector_state {{ miner_id="{miner_id}", sector_id="{ sector }", state="{ detail["result"]["State"] }", pledged="{ pledged }", deals="{ deals }", verified_weight="{ verified_weight }" }} 1')
        if packed_date != "":
            print(f'lotus_miner_sector_event {{ miner_id="{miner_id}", sector_id="{ sector }", event_type="packed" }} { packed_date }')
        if creation_date != "":
            print(f'lotus_miner_sector_event {{ miner_id="{miner_id}", sector_id="{ sector }", event_type="creation" }} { creation_date }')
        if finalized_date != "":
            print(f'lotus_miner_sector_event {{ miner_id="{miner_id}", sector_id="{ sector }", event_type="finalized" }} { finalized_date }')
        if detail["result"]["State"] not in ["Proving", "Removed"]:
            for deal in detail["result"]["Deals"]:
                if deal != 0:
                    try:
                        deal_info = daemon_get_json("StateMarketStorageDeal", [deal, empty_tipsetkey])
                    except:
                        # deal lookup can fail; report placeholder values
                        deal_is_verified = "unknown"
                        deal_size = "unknown"
                        deal_slash_epoch = "unknown"
                        deal_price_per_epoch = "unknown"
                        deal_provider_collateral = "unknown"
                        deal_client_collateral = "unknown"
                        deal_start_epoch = "unknown"
                        deal_end_epoch = "unknown"
                    else:
                        deal_is_verified = deal_info["result"]["Proposal"]["VerifiedDeal"]
                        deal_size = deal_info["result"]["Proposal"]["PieceSize"]
                        deal_slash_epoch = deal_info["result"]["State"]["SlashEpoch"]
                        deal_price_per_epoch = deal_info["result"]["Proposal"]["StoragePricePerEpoch"]
                        deal_provider_collateral = deal_info["result"]["Proposal"]["ProviderCollateral"]
                        deal_client_collateral = deal_info["result"]["Proposal"]["ClientCollateral"]
                        deal_start_epoch = deal_info["result"]["Proposal"]["StartEpoch"]
                        deal_end_epoch = deal_info["result"]["Proposal"]["EndEpoch"]
                    print(f'lotus_miner_sector_sealing_deals_size {{ miner_id="{miner_id}", sector_id="{ sector }", deal_id="{ deal }", deal_is_verified="{ deal_is_verified }", deal_slash_epoch="{ deal_slash_epoch }", deal_price_per_epoch="{ deal_price_per_epoch }",deal_provider_collateral="{ deal_provider_collateral }", deal_client_collateral="{ deal_client_collateral }", deal_size="{ deal_size }", deal_start_epoch="{ deal_start_epoch }", deal_end_epoch="{ deal_end_epoch }" }} 1')

    # GENERATE DEADLINES
    proven_partitions = daemon_get_json("StateMinerDeadlines", [miner_id, empty_tipsetkey])
    deadlines = daemon_get_json("StateMinerProvingDeadline", [miner_id, empty_tipsetkey])
    dl_epoch = deadlines["result"]["CurrentEpoch"]
    dl_index = deadlines["result"]["Index"]
    dl_open = deadlines["result"]["Open"]
    dl_numbers = deadlines["result"]["WPoStPeriodDeadlines"]
    dl_window = deadlines["result"]["WPoStChallengeWindow"]
    print("# HELP lotus_miner_deadline_info deadlines and WPoSt informations")
    print("# TYPE lotus_miner_deadline_info gauge")
    print(f'lotus_miner_deadline_info {{ miner_id="{miner_id}", current_idx="{ dl_index }", current_epoch="{ dl_epoch }",current_open_epoch="{ dl_open }", wpost_period_deadlines="{ dl_numbers }", wpost_challenge_window="{ dl_window }" }} 1')
    print("# HELP lotus_miner_deadline_active_start remaining time before deadline start")
    print("# TYPE lotus_miner_deadline_active_start gauge")
    print("# HELP lotus_miner_deadline_active_sectors_all number of sectors in the deadline")
    print("# TYPE lotus_miner_deadline_active_sectors_all gauge")
    print("# HELP lotus_miner_deadline_active_sectors_recovering number of sectors in recovering state")
    print("# TYPE lotus_miner_deadline_active_sectors_recovering gauge")
    print("# HELP lotus_miner_deadline_active_sectors_faulty number of faulty sectors")
    print("# TYPE lotus_miner_deadline_active_sectors_faulty gauge")
    print("# HELP lotus_miner_deadline_active_sectors_live number of live sectors")
    print("# TYPE lotus_miner_deadline_active_sectors_live gauge")
    print("# HELP lotus_miner_deadline_active_sectors_active number of active sectors")
    print("# TYPE lotus_miner_deadline_active_sectors_active gauge")
    print("# HELP lotus_miner_deadline_active_partitions number of partitions in the deadline")
    print("# TYPE lotus_miner_deadline_active_partitions gauge")
    print("# HELP lotus_miner_deadline_active_partitions_proven number of partitions already proven for the deadline")
    print("# TYPE lotus_miner_deadline_active_partitions_proven gauge")
    for c_dl in range(dl_numbers):
        # walk the deadlines starting from the currently-open one
        idx = (dl_index + c_dl) % dl_numbers
        opened = dl_open + dl_window * c_dl
        partitions = daemon_get_json("StateMinerPartitions", [miner_id, idx, empty_tipsetkey])
        if partitions["result"]:
            faulty = 0
            recovering = 0
            alls = 0
            active = 0
            live = 0
            count = len(partitions["result"])
            proven = bitfield_count(proven_partitions["result"][idx]["PostSubmissions"])
            for partition in partitions["result"]:
                faulty += bitfield_count(partition["FaultySectors"])
                recovering += bitfield_count(partition["RecoveringSectors"])
                active += bitfield_count(partition["ActiveSectors"])
                live += bitfield_count(partition["LiveSectors"])
                # NOTE(review): `=` (not `+=`) means alls only reflects the
                # LAST partition of the deadline — confirm whether this is
                # intended or should accumulate like the other counters.
                alls = bitfield_count(partition["AllSectors"])
            # one epoch = 30 seconds, hence (epochs remaining) * 30
            print(f'lotus_miner_deadline_active_start {{ miner_id="{miner_id}", index="{ idx }" }} { (opened - dl_epoch) * 30 }')
            print(f'lotus_miner_deadline_active_partitions_proven {{ miner_id="{miner_id}", index="{ idx }" }} { proven }')
            print(f'lotus_miner_deadline_active_partitions {{ miner_id="{miner_id}", index="{ idx }" }} { count }')
            print(f'lotus_miner_deadline_active_sectors_all {{ miner_id="{miner_id}", index="{ idx }" }} { alls }')
            print(f'lotus_miner_deadline_active_sectors_recovering {{ miner_id="{miner_id}", index="{ idx }" }} { recovering }')
            print(f'lotus_miner_deadline_active_sectors_faulty {{ miner_id="{miner_id}", index="{ idx }" }} { faulty }')
            print(f'lotus_miner_deadline_active_sectors_active {{ miner_id="{miner_id}", index="{ idx }" }} { active }')
            print(f'lotus_miner_deadline_active_sectors_live {{ miner_id="{miner_id}", index="{ idx }" }} { live }')
    checkpoint("Deadlines")

    # GENERATE STORAGE INFO
    print("# HELP lotus_miner_storage_info get storage info state")
    print("# TYPE lotus_miner_storage_info gauge")
    print("# HELP lotus_miner_storage_capacity get storage total capacity")
    print("# TYPE lotus_miner_storage_capacity gauge")
    print("# HELP lotus_miner_storage_available get storage available capacity")
    print("# TYPE lotus_miner_storage_available gauge")
    print("# HELP lotus_miner_storage_reserved get storage reserved capacity")
    print("# TYPE lotus_miner_storage_reserved gauge")
    storage_list = miner_get_json("StorageList", [])
    storage_local_list = miner_get_json("StorageLocal", [])
    for storage in storage_list["result"].keys():
        storage_info = miner_get_json("StorageInfo", [storage])
        if storage in storage_local_list["result"].keys():
            storage_path = storage_local_list["result"][storage]
        else:
            storage_path = ''
        storage_id = storage_info["result"]["ID"]
        storage_url = urlparse(storage_info["result"]["URLs"][0])
        storage_host_ip = storage_url.hostname
        try:
            # reverse-DNS the storage host; fall back to the raw IP
            storate_host_name = socket.gethostbyaddr(storage_host_ip)[0]
        except Exception:
            storate_host_name = storage_host_ip
        storage_host_port = storage_url.port
        storage_weight = storage_info["result"]["Weight"]
        storage_can_seal = storage_info["result"]["CanSeal"]
        storage_can_store = storage_info["result"]["CanStore"]
        try:
            storage_stat = miner_get_json("StorageStat", [storage])
        except:
            # storage may be offline; report zeroed capacity
            storage_capacity = 0
            storage_available = 0
            storage_reserved = 0
        else:
            storage_capacity = storage_stat["result"]["Capacity"]
            storage_available = storage_stat["result"]["Available"]
            storage_reserved = storage_stat["result"]["Reserved"]
        print(f'lotus_miner_storage_info {{ miner_id="{miner_id}", storage_id="{ storage_id }", storage_url="{ storage_info["result"]["URLs"][0] }", storage_host_name="{ storate_host_name }", storage_host_ip="{ storage_host_ip }", storage_host_port="{ storage_host_port }", weight="{ storage_weight }", can_seal="{ storage_can_seal }", can_store="{ storage_can_store }", path="{ storage_path }" }} 1')
        print(f'lotus_miner_storage_capacity {{ miner_id="{miner_id}", storage_id="{ storage_id }" }} { storage_capacity }')
        print(f'lotus_miner_storage_available {{ miner_id="{miner_id}", storage_id="{ storage_id }" }} { storage_available }')
        print(f'lotus_miner_storage_reserved {{ miner_id="{miner_id}", storage_id="{ storage_id }" }} { storage_reserved }')
    checkpoint("Storage")

    # GENERATE SCRAPE TIME
    print(f'lotus_scrape_duration_seconds {{ collector="All" }} {time.time() - START_TIME}')
    print('lotus_scrape_execution_succeed { } 1')

    # XXX TODO
    # GENERATE STORAGE MARKET
    #print(miner_get_json("MarketGetAsk",[]))
    #print(miner_get_json("DealsConsiderOnlineStorageDeals",[]))
    #print(miner_get_json("DealsConsiderOfflineStorageDeals",[]))
    #print(miner_get_json("DealsConsiderOnlineRetrievalDeals",[]))
    #print(miner_get_json("DealsConsiderOfflineRetrievalDeals",[]))
    #print(miner_get_json("SectorGetSealDelay",[]))
    #print(miner_get_json("MarketListDeals",[]))
    # GENERATE RETRIEVAL MARKET
    #print(miner_get_json("MarketGetRetrievalAsk",[]))
    #print(miner_get_json("MarketListRetrievalDeals",[]))
    # GENERATE DATA TRANSFERS
    #print(miner_get_json("MarketListDataTransfers",[]))
    # XXX also add sector errors
    #print(daemon_get_json("StateMinerFaults",[miner_id,empty_tipsetkey]))
    # Gas price
    # XXX Display SectorGetSealDelay / XXX Display sealing Sectors and details
    # Winning blocks
    # WaitDeal (list of deals/ Since / SealWaitingTime)
    # XXX WindowPoSt does not impact the machine-resource state; look into changing that
    # Manage a metric: service UP/DOWN
    # XXX FIND THE LIST OF FAULTY SECTORS
    # XXX what does the retry field in SectorStatus correspond to
31,840
def post_add_skit_reply():
    """ proxies an add-skit-reply request for the authenticated user """
    email = is_authed(request)
    if not email:
        return BADUSER
    if not csrf_check(request):
        return BADUSER
    # same as args, form data is also immutable — replace with a mutable copy
    request.form = dict(request.form)
    request.form['email'] = email
    return create_response(proxy(RUBY, request))
31,841
def register(request):
    """Register new account.

    Returns a (success, error_message, token) triple.
    """
    # Reject requests that carry no store URL at all.
    if (not request.playstore_url and not request.appstore_url
            and not request.winstore_url and not request.default_url):
        return False, 'Insufficient information to register.', None

    # Derive a short token from the current timestamp (microsecond precision).
    now = datetime.datetime.now()
    token_int = int(now.strftime('%Y%m%d%H%M%S%f'))
    token = short_url.encode_url(token_int)

    account = models.Accounts(
        playstore_url=request.playstore_url,
        appstore_url=request.appstore_url,
        winstore_url=request.winstore_url,
        default_url=request.default_url,
        title=request.title,
        banner=request.banner,
        description=request.description,
        token=token
    )
    account.put()
    return True, None, token
31,842
def predictRegions(regionName,
                   regionType='country',
                   target='confirmed',
                   predictionsPercentiles=PREDICTIONS_PERCENTILES,
                   siteData=SITE_DATA,
                   priorLogCarryingCapacity=PRIOR_LOG_CARRYING_CAPACITY,
                   priorMidPoint=PRIOR_MID_POINT,
                   priorGrowthRate=PRIOR_GROWTH_RATE,
                   priorSigma=PRIOR_SIGMA,
                   logGrowthModel=None,
                   databasePath=DATABASE_PATH,
                   nLimitRegions=None,
                   **kwargs
                   ):
    """Generate forecasts for regions

    Fits the logistic growth model per region and dumps the mean prediction
    and confidence intervals as JSON under `siteData`.

    Parameters
    ----------
    regionName: A country key of Cryostation, or 'all'
    target: 'confirmed' or 'deaths'
    predictionsPercentiles: The posterior percentiles to compute
    siteData: The directory for output data
    regionType: 'country' or 'stateUS'
    priorLogCarryingCapacity
    priorMidPoint
    priorGrowthRate
    priorSigma
    logGrowthModel: A compiled pystan model; built on the fly when None
    databasePath: Path to virustrack.db
    nLimitRegions: Maximum number of regions to train in alphabetical order
    kwargs: Optional named arguments for covidvu.predictLogisticGrowth

    Returns
    -------
    JSON dump of mean prediction and confidence intervals
    """
    if logGrowthModel is None:
        # Compiling the Stan model is slow, hence the message
        print('Building model. This may take a few moments...')
        logGrowthModel = buildLogisticModel(priorLogCarryingCapacity= priorLogCarryingCapacity,
                                            priorMidPoint=priorMidPoint,
                                            priorGrowthRate=priorGrowthRate,
                                            priorSigma=priorSigma,
                                            )
        print('Done.')
    else:
        assert isinstance(logGrowthModel, StanModel)
    if regionName == 'all':
        if regionType == 'country':
            with Cryostation(databasePath) as cs:
                countries = cs.allCountryNames()
            for i, country in enumerate(countries):
                print(f'Training {country}')
                # NOTE(review): this branch stops after nLimitRegions regions
                # (i > nLimitRegions-1) while the stateUS branch below uses
                # i > nLimitRegions — off-by-one inconsistency, confirm intent.
                if nLimitRegions:
                    if i > nLimitRegions-1:
                        break
                prediction = predictLogisticGrowth(logGrowthModel,
                                                   country,
                                                   regionType=regionType,
                                                   predictionsPercentiles=predictionsPercentiles,
                                                   target=target,
                                                   **kwargs
                                                   )
                _dumpRegionPrediction(prediction,
                                      siteData,
                                      predictionsPercentiles,
                                      meanFilename=PREDICTION_MEAN_JSON_FILENAME_WORLD,
                                      confIntFilename=PREDICTION_CI_JSON_FILENAME_WORLD,
                                      )
                print('Done.')
        elif regionType == 'stateUS':
            with Cryostation(databasePath) as cs:
                statesUS = cs.allProvincesOf('US')
            for i, state in enumerate(statesUS):
                if nLimitRegions:
                    if i > nLimitRegions:
                        break
                print(f'Training {state}')
                prediction = predictLogisticGrowth(logGrowthModel,
                                                   state,
                                                   regionType=regionType,
                                                   predictionsPercentiles=predictionsPercentiles,
                                                   target=target,
                                                   **kwargs
                                                   )
                _dumpRegionPrediction(prediction,
                                      siteData,
                                      predictionsPercentiles,
                                      meanFilename=PREDICTION_MEAN_JSON_FILENAME_US,
                                      confIntFilename=PREDICTION_CI_JSON_FILENAME_US,
                                      )
                print('Done.')
        else:
            raise ValueError(f'regionType = {regionType} not understood')
    else:
        # Single-region training path
        print(f'Training {regionName}')
        prediction = predictLogisticGrowth(logGrowthModel,
                                           regionName,
                                           regionType=regionType,
                                           predictionsPercentiles=predictionsPercentiles,
                                           target=target,
                                           **kwargs,
                                           )
        if regionType == 'country':
            _dumpRegionPrediction(prediction,
                                  siteData,
                                  predictionsPercentiles,
                                  meanFilename=PREDICTION_MEAN_JSON_FILENAME_WORLD,
                                  confIntFilename=PREDICTION_CI_JSON_FILENAME_WORLD,
                                  )
        elif regionType == 'stateUS':
            _dumpRegionPrediction(prediction,
                                  siteData,
                                  predictionsPercentiles,
                                  meanFilename=PREDICTION_MEAN_JSON_FILENAME_US,
                                  confIntFilename=PREDICTION_CI_JSON_FILENAME_US,
                                  )
        print('Done.')
31,843
def stringify_parsed_email(parsed):
    """
    Render a parsed email tuple as a single address string.

    A 2-tuple (name, address) becomes "name <address>"; anything else
    yields just the first element.
    """
    name = parsed[0]
    if len(parsed) != 2:
        return name
    return f"{name} <{parsed[1]}>"
31,844
def standard_simplex_vol(sz: int):
    """Return the volume of the sz-dimensional standard simplex."""
    volume = cm_matrix_det_ns(np.identity(sz, dtype=DTYPE))
    # An infinite determinant means the computation overflowed.
    if math.inf == volume:
        raise ValueError(f'Cannot compute volume of standard {sz}-simplex')
    return volume
31,845
def all_saveable_objects(scope=None):
    """
    Copied private function in TF source.
    This is what tf.train.Saver saves if var_list=None is passed.

    :param scope: optional (regex) name scope used to filter both
        collections.
    :return: list of all global variables plus all explicitly registered
        saveable objects in the default graph.
    """
    # Concatenate the two graph collections a Saver would pick up by default.
    return (tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope) +
            tf.get_collection(tf.GraphKeys.SAVEABLE_OBJECTS, scope))
31,846
def base_to_str(base):
    """Converts 0,1,2,3 to A,C,G,T.

    Args:
        base: integer base code in the range 0-3.

    Returns:
        The corresponding nucleotide letter.

    Raises:
        RuntimeError: if ``base`` is not one of 0, 1, 2, 3.
    """
    # A mapping is clearer and O(1) compared with the previous if-chain;
    # equal keys (e.g. 1 and 1.0) behave identically to the == comparisons.
    try:
        return {0: 'A', 1: 'C', 2: 'G', 3: 'T'}[base]
    except KeyError:
        raise RuntimeError('Bad base: %d' % base)
31,847
def parse_args(args):
    """Parse CLI arguments for a flight search.

    :param list args: argument list (e.g. from sys.argv[1:] or a test)
    :return: argparse.Namespace with the parsed values
    """
    parser = argparse.ArgumentParser()

    req_group = parser.add_argument_group()
    req_group.add_argument("-d", "--date", help="Date of Flight",
                           required=True)
    # "from" clashes with the Python keyword, so the value is stored
    # under dest="origin" instead.
    req_group.add_argument("-fr", "--from", help="IATA code of Departure",
                           dest="origin", required=True)
    req_group.add_argument("-t", "--to", help="IATA code of Destination",
                           required=True)

    trip_group = parser.add_mutually_exclusive_group()
    trip_group.add_argument("-o", "--one-way", action="store_const",
                            help="Oneway ticket",
                            dest="days_in_destination", const="oneway")
    trip_group.add_argument("-r", "--return", action="store",
                            help="Round ticket followed by number of days in destination",
                            dest="days_in_destination")
    trip_group.set_defaults(days_in_destination='oneway')

    sort_group = parser.add_mutually_exclusive_group()
    sort_group.add_argument("-c", "--cheapest", action="store_const",
                            help="Book cheapest flight",
                            dest="sort", const="price")
    sort_group.add_argument("-fa", "--fastest", action="store_const",
                            help="Book fastest flight",
                            dest="sort", const="duration")
    sort_group.set_defaults(sort='price')

    parser.add_argument("-b", "--bags", help="Number of checked-in baggages",
                        default='0')
    parser.add_argument("-v", "--verbose", help="sets verbose output",
                        action='store_true')
    return parser.parse_args(args)
31,848
def attr(accessing_obj, accessed_obj, *args, **kwargs): """ Usage: attr(attrname) attr(attrname, value) attr(attrname, value, compare=type) where compare's type is one of (eq,gt,lt,ge,le,ne) and signifies how the value should be compared with one on accessing_obj (so compare=gt means the accessing_obj must have a value greater than the one given). Searches attributes *and* properties stored on the checking object. The first form works like a flag - if the attribute/property exists on the object, the value is checked for True/False. The second form also requires that the value of the attribute/property matches. Note that all retrieved values will be converted to strings before doing the comparison. """ # deal with arguments if not args: return False attrname = args[0].strip() value = None if len(args) > 1: value = args[1].strip() compare = 'eq' if kwargs: compare = kwargs.get('compare', 'eq') def valcompare(val1, val2, typ='eq'): "compare based on type" try: return CF_MAPPING.get(typ, 'default')(val1, val2) except Exception: # this might happen if we try to compare two things that cannot be compared return False # first, look for normal properties on the object trying to gain access if hasattr(accessing_obj, attrname): if value: return valcompare(str(getattr(accessing_obj, attrname)), value, compare) return bool(getattr(accessing_obj, attrname)) # will return Fail on False value etc # check attributes, if they exist if (hasattr(accessing_obj, 'has_attribute') and accessing_obj.has_attribute(attrname)): if value: return (hasattr(accessing_obj, 'get_attribute') and valcompare(accessing_obj.get_attribute(attrname), value, compare)) return bool(accessing_obj.get_attribute(attrname)) # fails on False/None values return False
31,849
def group_connected(polygon_map, mask=None): """Group all connected nodes.""" # Wrap :c:`group_connected()` from ``polygon_map.c``. polygon_map = mask_polygon_map(polygon_map, mask) queue = Queue(len(polygon_map) + 1) group_ids = np.full(len(polygon_map), -1, np.intp, order="C") groups_count: int groups_count = slug.dll.group_connected(ptr(polygon_map), ptr(polygon_map.ctypes.shape), ptr(group_ids), queue._raw._ptr) return group_ids, groups_count
31,850
def pause_refreshing() -> None:
    """
    Pauses refreshing to speed up drawing.

    :rtype: None
    """
    # tracer(0, 0): turn off automatic screen updates (and the update
    # delay), so many drawing operations can run without per-step redraws.
    turtle.tracer(0, 0)
31,851
def test_hash_fuction_returns_additative_value_lowercase(ht_fixture):
    """The hash of a lowercase word equals the sum of its character codes."""
    word = 'word'
    expected = sum(ord(char) for char in word)
    assert ht_fixture._hash('word') == expected
31,852
def dictmask(data, mask, missing_keep=False):
    """Filter the ``data`` dictionary according to ``mask``.

    Mask semantics per key:
      * key absent from ``mask``: dropped unless ``missing_keep`` is True;
      * ``None`` / ``False``: dropped;
      * ``True`` (or the data value is None): kept as-is;
      * dict: applied recursively to a dict value;
      * single-item list: applied to every element of a list value.

    Args:
        data: source dictionary.
        mask: mask dictionary describing which keys to keep.
        missing_keep: keep keys absent from ``mask``.

    Returns:
        A new dictionary containing only the keys allowed by ``mask``.

    Raises:
        ValueError: on invalid argument types, a list mask without exactly
            one item, or mismatched value/mask types.
    """
    if not isinstance(data, dict):
        raise ValueError("First argument with data should be dictionary")
    if not isinstance(mask, dict):
        raise ValueError("Second argument with mask should be dictionary")
    if not isinstance(missing_keep, bool):
        raise ValueError("Argument missing_keep should be bool type")
    res = {}
    for k, v in data.items():
        if k not in mask:
            if missing_keep is True:
                res[k] = v
            continue
        if mask[k] is None or mask[k] is False:
            continue
        if mask[k] is True or data[k] is None:
            res[k] = v
            continue
        if isinstance(data[k], dict) and isinstance(mask[k], dict):
            # Bug fix: propagate missing_keep into nested dicts — previously
            # the recursion silently reset it to the default (False), which
            # was inconsistent with the list branch below.
            res[k] = dictmask(data[k], mask[k], missing_keep)
            continue
        if isinstance(data[k], list) and isinstance(mask[k], list):
            if len(mask[k]) != 1:
                raise ValueError("Mask inside list should have only one item")
            res[k] = [
                dictmask(item, mask[k][0], missing_keep) for item in data[k]
            ]
        else:
            raise ValueError(
                f"Cannot proceed key {k} with values of different types:"
                f"{type(data[k])}, {type(mask[k])}"
            )
    return res
31,853
def connect(file=None, port=8100, counter_max=5000): """Open libreoffice and enable conection with Calc. Args: file (str or pathlib.Path, optional): Filepath. If None, a new Calc instance will be opened. port (int, optional): port for connection. counter_max (int, optional): Max number of tentatives to establish a connection. Returns: Calc object. Examples: Open new instance of Calc: >>> calcObject = calc.connect() Adds one sheet ('Sheet2') at position 1: >>> calcObject.insert_sheets_new_by_name('Sheet2', 1) Add multiple sheets ('Sheet3' and 'Sheet4) at position 2: >>> calcObject.insert_multisheets_new_by_name(['Sheet3', 'Sheet4'], 2) >>> # Get number of sheets >>> print(calcObject.get_sheets_count()) 4 >>> # Remove sheets >>> calcObject.remove_sheets_by_name('Sheet3') >>> # get sheet data >>> sheet1 = calcObject.get_sheet_by_name('Sheet1') >>> sheet2 = calcObject.get_sheet_by_index(0) """ # open libreoffice libreoffice = subprocess.Popen([f"soffice --nodefault --accept='socket,host=localhost,port={port};urp;'"], shell=True, close_fds=True) # connect to libreoffice connected = False counter = 0 while connected == False: time.sleep(0.5) try: context = connect(Socket('localhost', f'{port}')) connected = True except: counter += 1 if counter == counter_max: raise ConnectionError('Cannot establish connection, maybe try increasing counter_max value.') pass if file is None: return Calc(context) else: file = Path(file) return Calc(context, convert_path_to_url(str(file)))
31,854
def stub_config():
    """Build a standardized Configuration object and return it.

    The returned object is NOT installed as the active configuration
    returned by dallinger.config.get_config().
    """
    from dallinger.config import Configuration
    from dallinger.config import default_keys

    # Fake but complete settings covering every registered key.
    fake_settings = {
        u'ad_group': u'Test ad group',
        u'approve_requirement': 95,
        u'assign_qualifications': True,
        u'auto_recruit': True,
        u'aws_access_key_id': u'fake aws key',
        u'aws_secret_access_key': u'fake aws secret',
        u'aws_region': u'us-east-1',
        u'base_payment': 0.01,
        u'base_port': 5000,
        u'browser_exclude_rule': u'MSIE, mobile, tablet',
        u'clock_on': True,
        u'contact_email_on_error': u'error_contact@test.com',
        u'dallinger_email_address': u'test@example.com',
        u'database_size': u'standard-0',
        u'redis_size': u'premium-0',
        u'database_url': u'postgresql://postgres@localhost/dallinger',
        u'description': u'fake HIT description',
        u'duration': 1.0,
        u'dyno_type': u'free',
        u'heroku_auth_token': u'heroku secret',
        u'heroku_team': u'',
        u'host': u'0.0.0.0',
        u'id': u'some experiment uid',
        u'keywords': u'kw1, kw2, kw3',
        u'lifetime': 1,
        u'logfile': u'-',
        u'loglevel': 0,
        u'mode': u'debug',
        u'notification_url': u'https://url-of-notification-route',
        u'num_dynos_web': 1,
        u'num_dynos_worker': 1,
        u'organization_name': u'Monsters University',
        u'sentry': True,
        u'smtp_host': u'smtp.fakehost.com:587',
        u'smtp_username': u'fake email username',
        u'smtp_password': u'fake email password',
        u'threads': u'1',
        u'title': u'fake experiment title',
        u'us_only': True,
        u'webdriver_type': u'phantomjs',
        u'whimsical': True,
    }

    config = Configuration()
    for key_spec in default_keys:
        config.register(*key_spec)
    config.extend(dict(fake_settings))
    config.ready = True
    return config
31,855
def clean_local_folder():
    """Cleans the livestock/local folder on the C drive.

    Creates the folder if it does not exist yet; otherwise removes every
    file inside it.
    """
    if not os.path.isdir(local_path):
        # Nothing to clean yet - just create the folder.
        os.mkdir(local_path)
        return
    for entry in os.listdir(local_path):
        os.remove(local_path + '/' + entry)
31,856
def example_empty_filter():
    """Demonstrate empty-value filtering on the /empty endpoint."""
    response = client.get("/empty", data={
        "params": "wudong@eastwu.cn",
        "must": "must"
    })
    print(response.data)

    # When "params" is omitted, the default value is filled in.
    response = client.get("/empty", data={
        "must": "must",
    })
    print(response.data)

    # Omitting a value that must not be empty triggers an error response.
    response = client.get("/empty", data={
        "params": "ss"
    })
    print(response.data)
31,857
def terminate_execution(execution: Execution, reason: Union[None, str] = None) -> None:
    """Terminate an execution.

    Stops every service belonging to the execution, then marks the
    execution itself as terminated with the given reason.
    """
    for svc in execution.services:
        terminate_service(svc)
    execution.set_terminated(reason)
31,858
def _prepare_policy_input(
    observations, vocab_size, observation_space, action_space
):
    """Prepares policy input based on a sequence of observations.

    When ``vocab_size`` is set, observations (and placeholder actions) are
    serialized into a token sequence; otherwise observations are passed
    through unchanged.
    """
    if vocab_size is not None:
        (batch_size, n_timesteps) = observations.shape[:2]
        serialization_kwargs = init_serialization(
            vocab_size, observation_space, action_space, n_timesteps
        )
        # Dummy all-zero actions: only observations are known at this point,
        # but the serializer expects an action stream of length T-1.
        actions = np.zeros(
            (batch_size, n_timesteps - 1) + action_space.shape,
            dtype=action_space.dtype,
        )
        reward_mask = np.ones((batch_size, n_timesteps - 1), dtype=np.int32)
        (policy_input, _) = serialization_utils.serialize_observations_and_actions(
            observations=observations,
            actions=actions,
            mask=reward_mask,
            **serialization_kwargs
        )
        return policy_input
    else:
        # Raw mode: the policy consumes observations directly.
        return observations
31,859
def RGB2raw(R, G, B):
    """Interleave R, G and B channel planes into a Bayer raw mosaic.

    Each input is an (h, w) array; the output is (2h, 2w) with R on the
    even/even sites, B on the odd/odd sites and G on the two remaining
    sites of every 2x2 cell (RGGB layout).
    """
    rows, cols = R.shape
    mosaic = np.empty(shape=(2 * rows, 2 * cols), dtype=R.dtype)
    mosaic[0::2, 0::2] = R
    mosaic[0::2, 1::2] = G
    mosaic[1::2, 0::2] = G
    mosaic[1::2, 1::2] = B
    return mosaic
31,860
def get_algs_from_ciphersuite_name(ciphersuite_name):
    """
    Return the 3-tuple made of the Key Exchange Algorithm class, the Cipher
    class and the HMAC class, through the parsing of the ciphersuite name.

    (In practice a 5-tuple ``(kx_alg, cipher_alg, hmac_alg, hash_alg,
    tls1_3)`` is returned; the last element flags a TLS 1.3 suite.)
    """
    tls1_3 = False
    if ciphersuite_name.startswith("TLS"):
        s = ciphersuite_name[4:]
        if s.endswith("CCM") or s.endswith("CCM_8"):
            # CCM suites carry no hash name; SHA256 is implied.
            kx_name, s = s.split("_WITH_")
            kx_alg = _tls_kx_algs.get(kx_name)
            hash_alg = _tls_hash_algs.get("SHA256")
            cipher_alg = _tls_cipher_algs.get(s)
            hmac_alg = None
        else:
            if "WITH" in s:
                kx_name, s = s.split("_WITH_")
                kx_alg = _tls_kx_algs.get(kx_name)
            else:
                # TLS 1.3 suite names carry no key-exchange component.
                tls1_3 = True
                kx_alg = _tls_kx_algs.get("TLS13")
            hash_name = s.split('_')[-1]
            hash_alg = _tls_hash_algs.get(hash_name)
            cipher_name = s[:-(len(hash_name) + 1)]
            if tls1_3:
                cipher_name += "_TLS13"
            cipher_alg = _tls_cipher_algs.get(cipher_name)
            hmac_alg = None
            if cipher_alg is not None and cipher_alg.type != "aead":
                hmac_name = "HMAC-%s" % hash_name
                hmac_alg = _tls_hmac_algs.get(hmac_name)
    elif ciphersuite_name.startswith("SSL"):
        s = ciphersuite_name[7:]
        kx_alg = _tls_kx_algs.get("SSLv2")
        cipher_name, hash_name = s.split("_WITH_")
        # Bug fix: the previous code used cipher_name.rstrip("_EXPORT40");
        # str.rstrip() strips a *character set*, not a suffix, and could
        # mangle names whose tail characters fall in that set. Remove the
        # literal suffix instead.
        export_suffix = "_EXPORT40"
        is_export = cipher_name.endswith(export_suffix)
        if is_export:
            cipher_name = cipher_name[:-len(export_suffix)]
        cipher_alg = _tls_cipher_algs.get(cipher_name)
        kx_alg.export = is_export
        hmac_alg = _tls_hmac_algs.get("HMAC-NULL")
        hash_alg = _tls_hash_algs.get(hash_name)
    return kx_alg, cipher_alg, hmac_alg, hash_alg, tls1_3
31,861
def remove_object_entry(vmfObject, key, value):
    """Removes a particular object entry from the given VMF object.

    If the entry under ``key`` is a list, ``value`` is removed from it and
    a remaining singleton list is flattened to the bare item; otherwise the
    whole key is deleted.
    """
    entry = vmfObject[key]
    try:
        entry.remove(value)
    except AttributeError:
        # No .remove() method: the entry IS the last value. Drop the key.
        assert isinstance(entry, (dict, str))
        del vmfObject[key]
    else:
        if len(entry) == 1:
            # A single remaining item no longer needs a wrapping list;
            # refer to the object directly instead.
            vmfObject[key] = entry[0]
31,862
def centroid(window):
    """Centroid interpolation for sub pixel shift.

    Computes the (row, column) sub-pixel offsets from the centre column
    (``window[:, 1]``) and the centre row (``window[1]``) of a 3x3 window.
    """
    def interpolate(triplet):
        # Offset of the intensity centroid from the centre sample.
        return (triplet[2] - triplet[0]) / (triplet[0] + triplet[1] + triplet[2])

    return interpolate(window[:, 1]), interpolate(window[1])
31,863
def eval_fasterrcnn(config, dataset_path, ckpt_path, anno_path, target_device):
    """FasterRcnn evaluation.

    Runs inference over the evaluation dataset and computes COCO bbox
    metrics on the collected detections.

    Args:
        config: experiment configuration (test batch size, dataset name,
            number of classes, ...).
        dataset_path: path to the evaluation dataset.
        ckpt_path: checkpoint/model file used to build the inference session.
        anno_path: COCO json annotation file, or a txt file converted on the
            fly for non-coco datasets.
        target_device: device the inference session runs on.

    Raises:
        RuntimeError: if ``ckpt_path`` does not point to an existing file.
    """
    if not os.path.isfile(ckpt_path):
        raise RuntimeError(f"CheckPoint file {ckpt_path} is not valid.")
    ds = create_fasterrcnn_dataset(config, dataset_path,
                                   batch_size=config.test_batch_size,
                                   is_training=False)
    session, input_names = create_session(ckpt_path, target_device)
    eval_iter = 0
    total = ds.get_dataset_size()
    outputs = []
    if config.dataset != "coco":
        # Build a COCO object by hand from txt annotations so the same
        # evaluation pipeline can be reused for non-coco datasets.
        dataset_coco = COCO()
        dataset_coco.dataset, dataset_coco.anns, dataset_coco.cats, dataset_coco.imgs = {}, {}, {}, {}
        dataset_coco.imgToAnns, dataset_coco.catToImgs = defaultdict(list), defaultdict(list)
        dataset_coco.dataset = parse_json_annos_from_txt(anno_path, config)
        dataset_coco.createIndex()
    else:
        dataset_coco = COCO(anno_path)
    print("\n========================================\n")
    print("total images num: ", total)
    print("Processing, please wait a moment.")
    # Keep at most this many detections per image.
    max_num = 128
    for data in ds.create_dict_iterator(num_epochs=1):
        eval_iter = eval_iter + 1
        start = time.time()
        input_data = [data[i].asnumpy()
                      for i in ('image', 'image_shape', 'box', 'label', 'valid_num')]
        output = session.run(None, dict(zip(input_names, input_data)))
        end = time.time()
        print(f"Iter {eval_iter} cost time {end - start}")
        # output
        all_bbox, all_label, all_mask = output
        for j in range(config.test_batch_size):
            all_bbox_squee = np.squeeze(all_bbox[j, :, :])
            all_label_squee = np.squeeze(all_label[j, :, :])
            all_mask_squee = np.squeeze(all_mask[j, :, :])
            # Keep only the detections flagged valid by the mask.
            all_bboxes_tmp_mask = all_bbox_squee[all_mask_squee, :]
            all_labels_tmp_mask = all_label_squee[all_mask_squee]
            if all_bboxes_tmp_mask.shape[0] > max_num:
                # Sort by descending last bbox column (presumably the
                # confidence score - confirm) and truncate to max_num.
                inds = np.argsort(-all_bboxes_tmp_mask[:, -1])
                inds = inds[:max_num]
                all_bboxes_tmp_mask = all_bboxes_tmp_mask[inds]
                all_labels_tmp_mask = all_labels_tmp_mask[inds]
            outputs_tmp = bbox2result_1image(all_bboxes_tmp_mask,
                                             all_labels_tmp_mask,
                                             config.num_classes)
            outputs.append(outputs_tmp)
    eval_types = ["bbox"]
    result_files = results2json(dataset_coco, outputs, "./results.pkl")
    coco_eval(config, result_files, eval_types, dataset_coco,
              single_result=True, plot_detect_result=True)
31,864
def _format_warning(message, category, filename, lineno, line=None): """ Replacement for warnings.formatwarning that disables the echoing of the 'line' parameter. """ return "{}:{}: {}: {}\n".format(filename, lineno, category.__name__, message)
31,865
def precision_at_k(predictions: List[int], targets: List[int], k: int = 10) -> float:
    """Computes `Precision@k` from the given predictions and targets sets.

    Duplicates are ignored: precision is the fraction of the (unique)
    top-k predictions that appear among the targets.

    Args:
        predictions: ranked list of predicted item ids (best first).
        targets: list of relevant item ids.
        k: number of leading predictions to consider.

    Returns:
        Precision@k in [0, 1]; 0.0 when there are no predictions to score
        (previously this raised ZeroDivisionError).
    """
    predictions_set = set(predictions[:k])
    if not predictions_set:
        # Empty prediction window -- nothing can be correct.
        return 0.0
    targets_set = set(targets)
    return len(targets_set & predictions_set) / float(len(predictions_set))
31,866
def assign_metrics(nb_graph: nx.DiGraph, pipeline_metrics: dict): """Assign pipeline metrics to specific pipeline steps. This assignment follows a similar logic to the detection of `out` dependencies. Starting from a temporary step - child of all the leaf nodes, all the nodes in the pipelines are traversed in reversed topological order. When a step shows one of the metrics as part of its code, then that metric is assigned to the step. Args: nb_graph: nx DiGraph with pipeline code blocks pipeline_metrics (dict): a dict of pipeline metrics where the key is the KFP sanitized name and the value the name of the original variable. """ # create a temporary step at the end of the pipeline to simplify the # iteration from the leaf steps tmp_step = "_tmp" leaf_steps = graphutils.get_leaf_nodes(nb_graph) if not leaf_steps: return [nb_graph.add_edge(node, tmp_step) for node in leaf_steps] # pipeline_metrics is a dict having sanitized variable names as keys and # the corresponding variable names as values. Here we need to refer to # the sanitized names using the python variables. # XXX: We could change parse_metrics_print_statements() to return the # XXX: reverse dictionary, but that would require changing either # XXX: rpc.nb.get_pipeline_metrics() or change in the JupyterLab Extension # XXX: parsing of the RPC result rev_pipeline_metrics = {v: k for k, v in pipeline_metrics.items()} metrics_left = set(rev_pipeline_metrics.keys()) for anc in graphutils.get_ordered_ancestors(nb_graph, tmp_step): if not metrics_left: break anc_data = nb_graph.nodes(data=True)[anc] anc_source = '\n'.join(anc_data['source']) # get all the marshal candidates from father's source and intersect # with the metrics that have not been matched yet marshal_candidates = kale_ast.get_marshal_candidates(anc_source) assigned_metrics = metrics_left.intersection(marshal_candidates) # Remove the metrics that have already been assigned. 
metrics_left.difference_update(assigned_metrics) # Generate code to produce the metrics artifact in the current step if assigned_metrics: code = METRICS_TEMPLATE % (" " + ",\n ".join( ['"%s": %s' % (rev_pipeline_metrics[x], x) for x in sorted(assigned_metrics)])) anc_data['source'].append(code) # need to have a `metrics` flag set to true in order to set the # metrics output artifact in the pipeline template nx.set_node_attributes(nb_graph, {anc: {'metrics': True}}) nb_graph.remove_node(tmp_step)
31,867
def test_film_metadata_added() -> None:
    """Tests film metadata is still added if one of them is an empty string"""
    source_path = "test_abc.tiff"
    item = Item(source_path)
    item.collection = Collection("Collection")
    # physical_film_condition deliberately empty: it must be skipped,
    # while the other fields are still written.
    metadata = {"film": "123", "film_sequence_no": "234",
                "physical_film_condition": "", "format": "23 cm x 23 cm"}
    metadata_loader_imagery_historic = MetadataLoaderImageryHistoric()
    metadata_loader_imagery_historic.add_film_metadata(item, asset_metadata=metadata)
    assert StacExtensions.film.value in item.stac_extensions
    assert item.properties["film:id"] == "123"
    # sequence number is expected as an int, coerced from the "234" string
    assert item.properties["film:negative_sequence"] == 234
    # the empty-string condition must not be stored at all
    assert "film:physical_condition" not in item.properties.keys()
    assert item.properties["film:physical_size"] == "23 cm x 23 cm"
31,868
def wordify_open(p, word_chars): """Prepend the word start markers.""" return r"(?<![{0}]){1}".format(word_chars, p)
31,869
def Mt_times_M(M):
    """Compute M^t @ M.

    Args:
        M: (batched) matrix M as a tf.Tensor or supported linear operator.

    Returns:
        tf.Tensor: solution of M^t @ M

    Raises:
        TypeError: if M is not a supported tensor/operator type.
    """
    if isinstance(M, tf.Tensor):
        operator = tf.linalg.LinearOperatorFullMatrix(M)
        return operator.matmul(M, adjoint=True)
    if isinstance(M, (tf.linalg.LinearOperatorFullMatrix,
                      tf.linalg.LinearOperatorLowerTriangular)):
        return M.matmul(M.to_dense(), adjoint=True)
    if is_diagonal_linop(M):
        # For a diagonal operator M^t M equals M M^t.
        return diagonal_M_times_Mt(M)
    raise TypeError("cannot compute M_times_Mt, invalid type")
31,870
def update_external_wires():
    """Remove the wires included into the wrapper from the external wire list."""
    # Intentionally a no-op placeholder: the pruning logic is not implemented yet.
31,871
def make_values(ints: typing.Iterable[int]):
    """Make datasets.

    Builds three labelled variants of the same integers: the raw iterable,
    namedtuple wrappers and plain-class wrappers.
    """
    named = [IntNamedTuple(value) for value in ints]
    boxed = [IntObject(value) for value in ints]
    return [('int', ints), ('namedtuple', named), ('class', boxed)]
31,872
def attach_trans_dict(model, objs): """Put all translations from all non-deferred translated fields from objs into a translations dict on each instance.""" # Get the ids of all the translations we need to fetch. try: deferred_fields = objs[0].get_deferred_fields() except IndexError: return fields = [ field for field in model._meta.translated_fields if field.attname not in deferred_fields ] ids = [ getattr(obj, field.attname) for field in fields for obj in objs if getattr(obj, field.attname, None) is not None ] if ids: # Get translations in a dict, ids will be the keys. It's important to # consume the result of sorted_groupby, which is an iterator. qs = Translation.objects.filter(id__in=ids, localized_string__isnull=False) else: qs = [] all_translations = { field_id: sorted(list(translations), key=lambda t: t.locale) for field_id, translations in sorted_groupby(qs, lambda t: t.id) } def get_locale_and_string(translation, new_class): """Convert the translation to new_class (making PurifiedTranslations and LinkifiedTranslations work) and return locale / string tuple.""" converted_translation = new_class() converted_translation.__dict__ = translation.__dict__ return (converted_translation.locale.lower(), str(converted_translation)) # Build and attach translations for each field on each object. for obj in objs: if not obj: continue obj.translations = collections.defaultdict(list) for field in fields: t_id = getattr(obj, field.attname, None) field_translations = all_translations.get(t_id, None) if not t_id or field_translations is None: continue obj.translations[t_id] = [ get_locale_and_string(t, field.remote_field.model) for t in field_translations ]
31,873
def test_base_probe_export_no_results(caplog):
    """Should log a warning if the probe has no results."""
    probe = BaseProbe()
    with caplog.at_level(logging.WARNING):
        probe.export()
    # Exactly one warning, and it mentions the missing results.
    assert len(caplog.records) == 1
    for record in caplog.records:
        assert record.levelname == "WARNING"
        assert "no results available" in record.message
31,874
def list_known_protobufs():
    """Returns the list of known protobuf model IDs."""
    # Iterating the registry yields its keys (the model IDs).
    return list(proto_data_structure)
31,875
def hash_value(*args):
    """
    hash_value(NodeConstHandle t) -> std::size_t
    hash_value(BufferConstHandle t) -> std::size_t
    hash_value(FileConstHandle t) -> std::size_t
    """
    # SWIG-generated overload dispatcher: forwards to the native RMF
    # binding, which selects the C++ overload from the argument type.
    return _RMF.hash_value(*args)
31,876
def test_properties(mqtt_client: MockedMQTT):
    """Test properties of 360 Heurist."""
    device = Dyson360Heurist(SERIAL, CREDENTIAL)
    device.connect(HOST)
    # Initial state as reported by the mocked MQTT status payload.
    assert device.current_power_mode == VacuumHeuristPowerMode.QUIET
    assert device.default_power_mode == VacuumHeuristPowerMode.HIGH
    assert device.current_cleaning_mode == CleaningMode.ZONE_CONFIGURED
    assert device.default_cleaning_mode == CleaningMode.GLOBAL
    assert device.is_bin_full is False
    new_status = {
        "currentVacuumPowerMode": "2",
        "defaultVacuumPowerMode": "3",
        "currentCleaningMode": "global",
        "defaultCleaningMode": "zoneConfigured",
        "faults": {
            "AIRWAYS": {"active": True, "description": "1.0.-1"},
        },
    }
    # Push a state-change message and verify every property tracks it,
    # including the bin-full flag derived from the AIRWAYS fault.
    mqtt_client.state_change(new_status)
    assert device.current_power_mode == VacuumHeuristPowerMode.HIGH
    assert device.default_power_mode == VacuumHeuristPowerMode.MAX
    assert device.current_cleaning_mode == CleaningMode.GLOBAL
    assert device.default_cleaning_mode == CleaningMode.ZONE_CONFIGURED
    assert device.is_bin_full is True
31,877
def read_examples(input_file):
    """Read a list of `InputExample`s from an input file.

    Each line holds either a single sentence (text_a only) or a sentence
    pair separated by ``" ||| "`` (text_a ||| text_b). Examples are given
    sequential unique ids starting at 0.
    """
    examples = []
    unique_id = 0
    # with tf.gfile.GFile(input_file, "r") as reader:
    with open(input_file, "r") as reader:
        while True:
            line = tokenization.convert_to_unicode(reader.readline())
            if not line:
                # Empty string (not "\n") means end of file.
                break
            line = line.strip()
            text_a = None
            text_b = None
            # " ||| " splits a sentence pair into its two segments.
            m = re.match(r"^(.*) \|\|\| (.*)$", line)
            if m is None:
                text_a = line
            else:
                text_a = m.group(1)
                text_b = m.group(2)
            examples.append(
                InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))
            unique_id += 1
    return examples
31,878
def get_finance_sentiment_dataset(split: str = 'sentences_allagree') -> list:
    """Load the financial_phrasebank dataset from the Hugging Face hub.

    See https://huggingface.co/datasets/financial_phrasebank. There is no
    train/validation/test split: the dataset ships in four configurations
    depending on the annotator-agreement level. The default keeps only
    sentences on which all annotators agree.
    """
    from datasets import load_dataset
    corpus = load_dataset("financial_phrasebank", split)
    return corpus['train']
31,879
def process_guam_rasters(
        input_raster_dir="D:/NFWF_PhaseIII/Guam/FOR CREST/raw_rasters_from_mxd",
        target_raster_dir="D:/NFWF_PhaseIII/Guam/FOR CREST/for_upload"):
    """Process rasters for Guam to be uploaded to CREST.

    Requirements:
        - target data type
        - target nodata value
        - lzw compression

    Args:
        input_raster_dir: folder containing the source ``.tif`` rasters.
            Defaults to the original hard-coded Guam location, so existing
            zero-argument callers are unaffected.
        target_raster_dir: folder the processed rasters are written to;
            created if missing.
    """
    # exist_ok replaces the previous isdir()/makedirs() guard pair.
    os.makedirs(target_raster_dir, exist_ok=True)
    # listdir() already returns bare names; no basename() needed.
    tif_names = [
        name for name in os.listdir(input_raster_dir)
        if name.endswith('.tif')]
    for name in tif_names:
        print('processing raster: {}'.format(name))
        reclassify_nodata(
            os.path.join(input_raster_dir, name),
            os.path.join(target_raster_dir, name))
31,880
def list_databases():
    """
    List tick databases and associated aggregate databases.

    Returns
    -------
    dict
        dict of {tick_db: [agg_dbs]}
    """
    response = houston.get("/realtime/databases")
    # Surface HTTP errors (with any JSON error payload) before parsing.
    houston.raise_for_status_with_json(response)
    return response.json()
31,881
def price2bean(outdir):
    """Export price history in beancount format.

    Writes one ``<EXCHANGE><CODE>.bean`` file per stock/fund/bond asset
    under ``outdir``, with one dated ``price`` directive per history record.
    """
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    for deal in Deal.select(Deal.asset).distinct():
        asset = deal.asset
        # Only market-priced assets get price files.
        if asset.category not in ('stock', 'fund', 'bond'):
            continue
        code, suffix = asset.zs_code.split('.')
        name = f'{suffix}{code}'
        with open(os.path.join(outdir, f'{name}.bean'), 'w') as fout:
            for record in asset.history.order_by(AssetMarketHistory.date):
                # 'OF' (open-ended fund) quotes by net asset value,
                # everything else by closing price.
                if suffix == 'OF':
                    price = record.nav
                else:
                    price = record.close_price
                print(f'{record.date} price {name} {price:0.4f} CNY', file=fout)
31,882
def nice_range(bounds):
    """
    Given a range, return an enclosing range accurate to two digits.

    Degenerate or reversed ranges are returned unchanged.
    """
    span = bounds[1] - bounds[0]
    if span <= 0:
        return bounds
    # Quantum is one order of magnitude below the span's leading digit,
    # i.e. two significant digits of resolution.
    quantum = 10 ** (floor(log10(span)) - 1)
    lo = floor(bounds[0] / quantum) * quantum
    hi = ceil(bounds[1] / quantum) * quantum
    return lo, hi
31,883
def connect_to_portal(config):
    """
    The portal/metadata schema is completely optional.

    Opens a DB connection only when a portal schema is configured;
    implicitly returns None otherwise.
    """
    if config.portal_schema:
        return aws.connect_to_db(**config.rds_config, schema=config.portal_schema)
31,884
def get_words_for_source():
    """ Gets JSON to populate words for source """
    # Source label comes from the ?source= query parameter of the
    # current Flask request (None when the parameter is absent).
    source_label = request.args.get("source")
    source = create_self_summary_words(source_label)
    return json.dumps(source)
31,885
def raster(event_times_list):
    """
    Creates a raster plot on the current axes.

    Parameters
    ----------
    event_times_list : iterable
        a list of event time iterables (one per trial)

    Returns
    -------
    ax : an axis containing the raster plot
    """
    ax = plt.gca()
    for row, spike_times in enumerate(event_times_list):
        # One row of black vertical ticks per trial, centred on row + 1.
        plt.vlines(spike_times, row + .5, row + 1.5, color='k')
    plt.ylim(.5, len(event_times_list) + .5)
    return ax
31,886
def for_v():
    """
    *'s printed in the shape of v
    """
    for row in range(5):
        cells = ['*' if row == col or row + col == 8 else ' '
                 for col in range(9)]
        # Every cell is followed by a single space (like print(ch, end=' '))
        # and each row ends with a newline.
        print(''.join(cell + ' ' for cell in cells))
31,887
def FindOneDocument(queryDocument, database='NLP', collection="Annotations", host='localhost', port='27017'):
    """
    This method returns the first document in the backing store that matches
    the criteria specified in queryDocument.

    :param queryDocument: [dict] A pymongo document used to query the MongoDB instance.
    :param database: [string] The name of the MongoDB database instance that holds "collection". Defaults to NLP.
    :param collection: [string] The name of the collection that stores the document instances. Defaults to "Annotations".
    :param host: [string] The host IP address, defaults to localhost.
    :param port: [string] The port on which the MongoDB server is listening. Defaults to 27017.
    :return: [object | None] A single Document object if the query matches any documents, otherwise None.
    """
    # A fresh client is opened and closed on every call.
    client = MongoClient('mongodb://%s:%s/' % (host, port))
    collection = client[database][collection]
    mongoDoc = collection.find_one(queryDocument)
    client.close()
    # find_one() returns None on no match; the constructor is still called
    # with it — presumably it handles None, confirm against its definition.
    return constructAnnotationDocument(mongoDoc)
31,888
def user_info(context, **kwargs):
    """Render info about the currently authorized user, or links to log in
    and register for anonymous visitors.

    Usage::

        {% user_info %}

    :param context: template context (must contain ``request``)
    :param kwargs: HTML attributes for the wrapping tag
    :return: context dict for the inclusion template
    """
    current_request = context['request']
    return {'user': current_request.user, 'data': kwargs}
31,889
def test_agent_proxy_wait_running(nsproxy, timeout):
    """
    Using `wait_for_running` on a proxy after initialization should block
    until the agent is running or time out.
    """
    AgentProcess('agent').start()
    # Get "offline" proxy
    agent = Proxy('agent')
    time0 = time.time()
    # Start running the agent only after half the timeout has elapsed,
    # so wait_for_running is forced to actually block.
    Timer(abs(timeout) / 2, agent.run).start()
    proxy = Proxy('agent').wait_for_running(timeout=timeout)
    elapsed = time.time() - time0
    assert proxy.ping() == 'pong'
    # It must have blocked at least the half-timeout delay...
    assert elapsed >= abs(timeout) / 2
    # ...but returned before the full timeout expired.
    assert elapsed <= abs(timeout)
31,890
def transactions(request):
    """Transaction list view.

    Builds a filtered, paginated list of wallet transactions visible to the
    requesting user (via their character and corporation API keys) and
    renders the transactions template. Filtering criteria come from GET
    parameters parsed by parse_filters; timing of each phase is recorded
    with a TimerThing and dumped when settings.DEBUG is on.
    """
    tt = TimerThing('transactions')

    # Get profile
    profile = request.user.profile

    # Characters the user holds valid account/character keys for.
    characters = Character.objects.filter(
        apikeys__user=request.user,
        apikeys__valid=True,
        apikeys__key_type__in=[APIKey.ACCOUNT_TYPE, APIKey.CHARACTER_TYPE]
    ).distinct()
    character_ids = [c.id for c in characters]

    # Corporations the user can see asset lists for.
    corporation_ids = Corporation.get_ids_with_access(request.user, APIKey.CORP_ASSET_LIST_MASK)
    corporations = Corporation.objects.filter(
        pk__in=corporation_ids
    )

    tt.add_time('init')

    # Get a QuerySet of transactions by this user: personal transactions
    # (no corp wallet) for their characters, plus any corp-wallet
    # transactions for accessible corporations.
    transaction_ids = Transaction.objects.filter(
        (
            Q(character__in=character_ids)
            &
            Q(corp_wallet__isnull=True)
        )
        |
        Q(corp_wallet__corporation__in=corporation_ids)
    )
    transaction_ids = transaction_ids.order_by('-date')

    # Get a QuerySet of transactions IDs by this user
    # characters = list(Character.objects.filter(apikeys__user=request.user.id).values_list('id', flat=True))
    # transaction_ids = Transaction.objects.filter(character_id__in=characters)
    # transaction_ids = transaction_ids.order_by('-date')

    # Get only the ids, at this point joining the rest is unnecessary
    transaction_ids = transaction_ids.values_list('pk', flat=True)

    tt.add_time('transaction ids')

    # Parse and apply filters. Each filter group is a list of
    # (comparison, value) pairs OR-ed together via q_reduce_or.
    filters = parse_filters(request, FILTER_EXPECTED)

    if 'char' in filters:
        qs = []
        for fc, fv in filters['char']:
            if fc == 'eq':
                qs.append(Q(character=fv))
            elif fc == 'ne':
                qs.append(~Q(character=fv))
        transaction_ids = transaction_ids.filter(reduce(q_reduce_or, qs))

    if 'corp' in filters:
        qs = []
        for fc, fv in filters['corp']:
            if fc == 'eq':
                qs.append(Q(corp_wallet__corporation=fv))
            elif fc == 'ne':
                qs.append(~Q(corp_wallet__corporation=fv))
        transaction_ids = transaction_ids.filter(reduce(q_reduce_or, qs))

    # Client is a special case that requires some extra queries: resolve
    # the client name filter to character/corporation ids first, then
    # match against the transaction's counterparty columns.
    if 'client' in filters:
        qs = []
        for fc, fv in filters['client']:
            if fc == 'eq':
                qs.append(Q(name=fv))
            elif fc == 'ne':
                qs.append(~Q(name=fv))
            elif fc == 'in':
                qs.append(Q(name__icontains=fv))
        qs_reduced = reduce(q_reduce_or, qs)

        char_ids = list(Character.objects.filter(qs_reduced).values_list('id', flat=True))
        corp_ids = list(Corporation.objects.filter(qs_reduced).values_list('id', flat=True))

        transaction_ids = transaction_ids.filter(
            Q(other_char_id__in=char_ids)
            |
            Q(other_corp_id__in=corp_ids)
        )

    if 'date' in filters:
        qs = []
        for fc, fv in filters['date']:
            # 'eq' matches a single day; 'bt' matches an inclusive
            # start,end range. Unparseable dates are silently skipped.
            if fc == 'eq':
                try:
                    start = datetime.datetime.strptime(fv, '%Y-%m-%d')
                    end = datetime.datetime.strptime('%s 23:59:59' % (fv), '%Y-%m-%d %H:%M:%S')
                    qs.append(Q(date__range=(start, end)))
                except ValueError:
                    pass
            elif fc == 'bt':
                parts = fv.split(',')
                if len(parts) == 2:
                    try:
                        start = datetime.datetime.strptime(parts[0], '%Y-%m-%d')
                        end = datetime.datetime.strptime('%s 23:59:59' % (parts[1]), '%Y-%m-%d %H:%M:%S')
                        if start < end:
                            qs.append(Q(date__range=(start, end)))
                    except ValueError:
                        pass
        if qs:
            transaction_ids = transaction_ids.filter(reduce(q_reduce_or, qs))

    if 'item' in filters:
        qs = []
        for fc, fv in filters['item']:
            if fc == 'eq':
                qs.append(Q(item__name=fv))
            elif fc == 'ne':
                qs.append(~Q(item__name=fv))
            elif fc == 'in':
                qs.append(Q(item__name__icontains=fv))
        transaction_ids = transaction_ids.filter(reduce(q_reduce_or, qs))

    # Total-price filter: negative filter values refer to buys (money
    # out), positive values to sells (money in), hence the sign juggling.
    if 'total' in filters:
        qs = []
        for fc, fv in filters['total']:
            if fc == 'eq':
                if fv < 0:
                    qs.append(Q(buy_transaction=True, total_price=abs(fv)))
                else:
                    qs.append(Q(buy_transaction=False, total_price=fv))
            elif fc == 'ne':
                qs.append(~Q(total_price=fv))
            elif fc == 'gt':
                if fv > 0:
                    qs.append(Q(buy_transaction=False, total_price__gt=fv))
                else:
                    qs.append(
                        Q(buy_transaction=False, total_price__gt=abs(fv))
                        |
                        Q(buy_transaction=True, total_price__lt=abs(fv))
                    )
            elif fc == 'gte':
                if fv >= 0:
                    qs.append(Q(buy_transaction=False, total_price__gte=fv))
                else:
                    qs.append(
                        Q(buy_transaction=False, total_price__gte=abs(fv))
                        |
                        Q(buy_transaction=True, total_price__lte=abs(fv))
                    )
            elif fc == 'lt':
                if fv > 0:
                    qs.append(
                        Q(buy_transaction=False, total_price__lt=fv)
                        |
                        Q(buy_transaction=True, total_price__gt=0)
                    )
                else:
                    qs.append(Q(buy_transaction=True, total_price__gt=abs(fv)))
            elif fc == 'lte':
                if fv >= 0:
                    qs.append(
                        Q(buy_transaction=False, total_price__lte=fv)
                        |
                        Q(buy_transaction=True, total_price__gte=0)
                    )
                else:
                    qs.append(Q(buy_transaction=True, total_price__gte=abs(fv)))
        transaction_ids = transaction_ids.filter(reduce(q_reduce_or, qs))

    tt.add_time('filters')

    # Create a new paginator
    paginator = Paginator(transaction_ids, profile.entries_per_page)

    # If page request is out of range, deliver last page of results
    try:
        paginated = paginator.page(request.GET.get('page'))
    except PageNotAnInteger:
        # Page is not an integer, use first page
        paginated = paginator.page(1)
    except EmptyPage:
        # Page is out of range, deliver last page
        paginated = paginator.page(paginator.num_pages)

    tt.add_time('paginator')

    # Do page number things: build short prev/next page-number lists for
    # the pagination widget.
    hp = paginated.has_previous()
    hn = paginated.has_next()

    prev = []
    next = []
    if hp:
        # prev and next, use 1 of each
        if hn:
            prev.append(paginated.previous_page_number())
            next.append(paginated.next_page_number())
        # no next, add up to 2 previous links
        else:
            for i in range(paginated.number - 1, 0, -1)[:2]:
                prev.insert(0, i)
    else:
        # no prev, add up to 2 next links
        for i in range(paginated.number + 1, paginator.num_pages)[:2]:
            next.append(i)

    # Build the transaction queryset now to avoid nasty subqueries
    transactions = Transaction.objects.filter(pk__in=paginated)
    transactions = transactions.select_related('corp_wallet__corporation', 'item', 'station', 'character', 'other_char', 'other_corp')
    transactions = transactions.order_by('-date')
    transactions = list(transactions)

    tt.add_time('transactions')

    # Build filter links, urgh
    for transaction in transactions:
        transaction.z_client_filter = build_filter(filters, 'client', 'eq', transaction.other_char or transaction.other_corp)
        transaction.z_item_filter = build_filter(filters, 'item', 'eq', transaction.item.name)

    tt.add_time('build links')

    # Ready template things
    values = {
        'chars': characters,
        'corps': corporations,
    }

    tt.add_time('template bits')

    # Render template
    out = render_page(
        'thing/transactions.html',
        {
            'json_data': _json_data(characters, corporations, filters),
            'transactions': transactions,
            'show_item_icons': request.user.profile.show_item_icons,
            'paginated': paginated,
            'next': next,
            'prev': prev,
            'values': values,
        },
        request,
        character_ids,
        corporation_ids,
    )

    tt.add_time('template')
    if settings.DEBUG:
        tt.finished()

    return out
31,891
def to_bytes(val): """Takes a text message and return a tuple """ if val is NoResponse: return val val = val.replace('\\r', '\r').replace('\\n', '\n') return val.encode()
31,892
def trainer(input_hdf5=None,
            input_trainset=None,
            output_name=None,
            input_dimention=(6000, 3),
            gmlp_blocks=5,
            gmlp_dim=32,
            seq_len = 375,
            activation = 'relu',
            drop_rate=0.1,
            shuffle=True,
            label_type='gaussian',
            normalization_mode='max',
            augmentation=True,
            add_event_r=0.6,
            shift_event_r=0.99,
            add_noise_r=0.3,
            drop_channel_r=0.5,
            add_gap_r=0.2,
            coda_ratio=1.4,
            scale_amplitude_r=None,
            pre_emphasis=False,
            loss_weights=[0.05, 0.40, 0.55],
            loss_types=F.binary_cross_entropy_with_logits,
            train_valid_test_split=[0.80, 0.20],
            mode='generator',
            batch_size=200,
            epochs=200,
            monitor='val_loss',
            patience=3):

    """
    Generate a model and train it.

    Parameters
    ----------
    input_hdf5: str, default=None
        Path to an hdf5 file containing only one class of data with NumPy
        arrays containing 3 component waveforms each 1 min long.

    input_trainset: str, default=None
        Path to a file with one column (trace_name) listing the name of
        all datasets in the hdf5 file.

    output_name: str, default=None
        Output directory.

    input_dimention: tuple, default=(6000, 3)
        (number of samples, number of channels) of each input waveform.

    gmlp_blocks: int, default=5
        The number of gMLP blocks in the model.

    gmlp_dim: int, default=32
        Embedding dimension of the gMLP blocks.

    seq_len: int, default=375
        Sequence length used by the gMLP model.

    activation: str, default='relu'
        Activation function used in the hidden layers.

    drop_rate: float, default=0.1
        Dropout value.

    shuffle: bool, default=True
        To shuffle the list prior to the training.

    label_type: str, default='gaussian'
        Labeling type. 'gaussian', 'triangle', or 'box'.

    normalization_mode: str, default='max'
        Mode of normalization for data preprocessing, 'max': maximum
        amplitude among three components, 'std', standard deviation.

    augmentation: bool, default=True
        If True, data will be augmented simultaneously during the training.

    add_event_r: float, default=0.6
        Rate of augmentation for adding a secondary event randomly into
        the empty part of a trace.

    shift_event_r: float, default=0.99
        Rate of augmentation for randomly shifting the event within a trace.

    add_noise_r: float, default=0.3
        Rate of augmentation for adding Gaussian noise with different SNR
        into a trace.

    drop_channel_r: float, default=0.5
        Rate of augmentation for randomly dropping one of the channels.

    add_gap_r: float, default=0.2
        Add an interval with zeros into the waveform representing filled gaps.

    coda_ratio: float, default=1.4
        % of S-P time to extend event/coda envelope past S pick.

    scale_amplitude_r: float, default=None
        Rate of augmentation for randomly scaling the trace.

    pre_emphasis: bool, default=False
        If True, waveforms will be pre-emphasized.

    loss_weights: list, default=[0.05, 0.40, 0.55]
        Loss weights for detection, P picking, and S picking respectively.

    loss_types: callable, default=F.binary_cross_entropy_with_logits
        Loss function applied to detection, P picking, and S picking.

    train_valid_test_split: list, default=[0.80, 0.20]
        Percentage of data split into the training and validation sets
        respectively.

    mode: str, default='generator'
        Mode of running. 'generator', or 'preload'.

    batch_size: int, default=200
        Batch size.

    epochs: int, default=200
        The number of epochs.

    monitor: str, default='val_loss'
        The measure used for monitoring.

    patience: int, default=3
        The number of epochs without any improvement in the monitoring
        measure to automatically stop the training.

    Returns
    --------
    output_name/models/output_name_.h5: This is where all good models will be saved.

    output_name/final_model.h5: This is the full model for the last epoch.

    output_name/model_weights.h5: These are the weights for the last model.

    output_name/history.npy: Training history.

    output_name/X_report.txt: A summary of the parameters used for prediction and performance.

    output_name/test.npy: A number list containing the trace names for the test set.

    output_name/X_learning_curve_f1.png: The learning curve of Fi-scores.

    output_name/X_learning_curve_loss.png: The learning curve of loss.

    Notes
    --------
    'generator' mode is memory efficient and more suitable for machines
    with fast disks. 'pre_load' mode is faster but requires more memory
    and it comes with only box labeling.
    """

    # Collect every argument into a single dict so helpers (_make_dir,
    # _split, _build_model) can consume one config object.
    args = {
        "input_hdf5": input_hdf5,
        "input_trainset": input_trainset,
        "output_name": output_name,
        "input_dimention": input_dimention,
        "gmlp_blocks": gmlp_blocks,
        "gmlp_dim": gmlp_dim,
        "seq_len": seq_len,
        "activation": activation,
        "drop_rate": drop_rate,
        "shuffle": shuffle,
        "label_type": label_type,
        "normalization_mode": normalization_mode,
        "augmentation": augmentation,
        "add_event_r": add_event_r,
        "shift_event_r": shift_event_r,
        "add_noise_r": add_noise_r,
        "add_gap_r": add_gap_r,
        "coda_ratio": coda_ratio,
        "drop_channel_r": drop_channel_r,
        "scale_amplitude_r": scale_amplitude_r,
        "pre_emphasis": pre_emphasis,
        "loss_weights": loss_weights,
        "loss_types": loss_types,
        "train_valid_test_split": train_valid_test_split,
        "mode": mode,
        "batch_size": batch_size,
        "epochs": epochs,
        "monitor": monitor,
        "patience": patience
    }

    save_dir, save_models = _make_dir(args['output_name'])
    training, validation = _split(args, save_dir)
    # print(training,validation)
    model = _build_model(args)
    # model.cuda()
    print(model)
    start_training = time.time()

    if args['mode'] == 'generator':
        # Keyword arguments forwarded to DataGenerator for the training set.
        params_training = {'file_name': str(args['input_hdf5']),
                           'dim': args['input_dimention'][0],
                           'batch_size': args['batch_size'],
                           'n_channels': args['input_dimention'][-1],
                           'shuffle': args['shuffle'],
                           'norm_mode': args['normalization_mode'],
                           'label_type': args['label_type'],
                           'augmentation': args['augmentation'],
                           'add_event_r': args['add_event_r'],
                           'add_gap_r': args['add_gap_r'],
                           'coda_ratio': args['coda_ratio'],
                           'shift_event_r': args['shift_event_r'],
                           'add_noise_r': args['add_noise_r'],
                           # NOTE(review): key is spelled 'drop_channe_r' --
                           # presumably matching DataGenerator's parameter
                           # name; confirm before renaming.
                           'drop_channe_r': args['drop_channel_r'],
                           'scale_amplitude_r': args['scale_amplitude_r'],
                           'pre_emphasis': args['pre_emphasis']}

        # Validation uses the same source data but no shuffling/augmentation.
        params_validation = {'file_name': str(args['input_hdf5']),
                             'dim': args['input_dimention'][0],
                             'batch_size': args['batch_size'],
                             'n_channels': args['input_dimention'][-1],
                             'shuffle': False,
                             'norm_mode': args['normalization_mode'],
                             'label_type': args['label_type'],
                             'augmentation': False,
                             'coda_ratio': args['coda_ratio']}

        training_generator = DataGenerator(training, **params_training)
        validation_generator = DataGenerator(validation, **params_validation)

        # Keep the 3 best checkpoints by the monitored metric, plus the last.
        checkpoint_callback = ModelCheckpoint(monitor=monitor, dirpath=save_models, save_top_k=3, verbose=True, save_last=True)
        early_stopping = EarlyStopping(monitor=monitor, patience=args['patience'])  # patience=3
        tb_logger = pl_loggers.TensorBoardLogger(save_dir)
        # Single-GPU mixed-precision training; sanity validation disabled.
        trainer = pl.Trainer(precision=16, gpus=1, callbacks=[early_stopping, checkpoint_callback], check_val_every_n_epoch=1, profiler="simple", num_sanity_val_steps=0, logger=tb_logger)
        # batch_size=1 here because DataGenerator already yields full batches.
        train_loader = DataLoader(training_generator, batch_size=1, num_workers=8, pin_memory=True, prefetch_factor=5)
        # print(next(train_loader))
        val_loader = DataLoader(validation_generator, batch_size=1, num_workers=8, pin_memory=True, prefetch_factor=5)
        # print(next(train_loader))
        print('Started training in generator mode ...')
        trainer.fit(model, train_dataloaders=train_loader, val_dataloaders=val_loader)

    end_training = time.time()
    print('Finished Training')
31,893
def get_standard_container_list() -> List[str]:
    """get list of standard container names

    Returns:
        List[str]: names
    """
    # NOTE(review): the body is empty, so this implicitly returns None,
    # which contradicts the List[str] annotation. Either this is an
    # unfinished stub or it is overridden elsewhere -- TODO confirm intent
    # (returning [] would at least honor the annotation).
31,894
def unread_ratio_reload(self):
    """
    Refresh the unread/showable counters and the "Books Left" label.

    Rationale: this relies purely on backend data, so Metis reload and
    Secretary reload must have happened before this method is called.
    """
    books = self.Metis.collection.values()
    self.unread = len({book for book in books if self.Metis.is_available(book)})
    self.population = len({book for book in books if self.Metis.is_showable(book)})
    self.lbl_unread.config(text=f'Books Left: {self.unread} / {self.population}')
31,895
def compare_sfs(id, era, xvar='pt', dms=None, gms=None, wps=None, tag="", verb=0):
    """Compare old (ROOT) vs. new (JSON) SFs.

    Prints, for each working point and genmatch value, a row of SFs from
    the legacy TauIDSFTool next to the corresponding values from the new
    correctionlib JSON, binned in pt, dm, or eta depending on *xvar*.
    """
    header(f"Compare {id} SF tools")
    if xvar == 'eta':
        jname = f"data/tau/new/tau_sf_eta_{id}_{era}{tag}.json"
    else:
        jname = f"data/tau/new/tau_sf_pt-dm_{id}_{era}{tag}.json"
    cset = loadeval(jname, rename=id, verb=verb)  # wrap to create C++ object that can be evaluated
    # BUG FIX: the assertion message was missing the f prefix, so {id}
    # was printed literally instead of being interpolated.
    assert id in cset, f"Did not find ID {id} in cset!"
    newtool = cset[id]
    dms = dms or [-1, 0, 1, 2, 5, 10, 11]
    gms = gms or [0, 1, 2, 3, 4, 5, 6, 7]
    wps = wps or ['Medium', 'Tight']
    pts = [10., 21., 26., 31., 36., 41., 501., 750., 999., 2000.]
    etas = [-2.0, -1.0, 0.0, 1.1, 2.0, 2.5, 3.0]
    #print(f">>>\n>>> {id}: {newtool.description}")
    #print(f">>> inputs: {newtool.input}")
    # Select the x-axis bins and build the table header accordingly.
    if 'dm' in xvar:
        xbins = dms
        head = "%3s"%("gm")+" ".join(" %-18d"%(d) for d in dms)+" "
    elif 'eta' in xvar:
        xbins = etas
        head = "%3s"%("gm")+" ".join(" %-18.1f"%(e) for e in etas)+" "
    else:
        xbins = pts
        head = "%3s"%("gm")+" ".join(" %-18.1f"%(p) for p in pts)+" "
    for wp in wps:
        print(f">>>\n>>> WP={wp}")
        oldtool = TauIDSFTool(era, id, wp=wp, dm=(xvar=='dm'), path=oldata, verbose=True)
        # Pick the legacy getter that matches the chosen x variable.
        if 'dm' in xvar:
            oldmeth = lambda a: oldtool.getSFvsDM(*a)
        elif 'eta' in xvar:
            oldmeth = lambda a: oldtool.getSFvsEta(*a)
        else:
            oldmeth = lambda a: oldtool.getSFvsPT(*a)
        print(f">>> \033[4m{head}\033[0m")
        for gm in gms:
            row1 = ">>> %3d"%(gm)
            row2 = ">>> "+3*" "
            for x in xbins:
                # Build the argument tuples for old (args1) and new (args2)
                # tools; largs2 holds the trailing arguments (after syst).
                if 'pt' in xvar:
                    args1 = (x, gm, 'All')
                    args2 = (x, -1, gm, wp)  # (pt,dm,gm,wp,syst,flag)
                    largs2 = (xvar,)  # last arguments (after syst)
                elif 'dm' in xvar:
                    args1 = (45., x, gm, 'All')
                    args2 = (45., x, gm, wp)  # (pt,dm,gm,wp,syst,flag)
                    largs2 = (xvar,)  # last arguments (after syst)
                else:  # eta ?
                    args1 = (x, gm, 'All')
                    args2 = (x, gm, wp)  # (eta,gm,wp,syst)
                    largs2 = tuple()  # last arguments (after syst)
                str1, str2 = eval2str(oldmeth, newtool, args1, args2, largs2)
                row1 += str1
                row2 += str2
            print(row1)
            print(row2)
    print(">>>")
31,896
def get_vrfs(table) -> None:
    """Print the VRFs found in *table*. Database default is "global".

    Queries the module-level ``cursor`` for distinct vrf values and lists
    them; prints a prompt to use the global VRF when none are found.
    """
    # NOTE(review): the table name is interpolated directly into the SQL
    # string -- this is only safe if `table` is a trusted identifier, never
    # user-supplied input (identifiers cannot be bound as parameters).
    rows = cursor.execute(f'SELECT vrf FROM {table}')
    # dict.fromkeys dedupes the result rows while preserving order
    # (each key is a full row tuple, as in the original).
    vrf = dict.fromkeys(rows)
    print("\nVRFs ---------\n")
    if not vrf:
        print("\nPress enter to use global")
    else:
        for row in vrf:
            print(f"+ {row[0]}")
    print("\n")
31,897
def test_atomic_integer_min_exclusive_3_nistxml_sv_iv_atomic_integer_min_exclusive_4_2(mode, save_output, output_format):
    """
    Type atomic/integer is restricted by facet minExclusive with value
    470740450062970382.
    """
    fixed = dict(
        schema="nistData/atomic/integer/Schema+Instance/NISTSchema-SV-IV-atomic-integer-minExclusive-4.xsd",
        instance="nistData/atomic/integer/Schema+Instance/NISTXML-SV-IV-atomic-integer-minExclusive-4-2.xml",
        class_name="NistschemaSvIvAtomicIntegerMinExclusive4",
        version="1.1",
        structure_style="filenames",
    )
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        **fixed,
    )
31,898
def _create_fake_data_fn(train_length=_DATA_LENGTH, valid_length=50000, num_batches=40):
    """ Creates fake dataset

    Data is returned in NCHW since this tends to be faster on GPUs.

    Parameters
    ----------
    train_length: reported length of the training input function.
    valid_length: reported length of the validation input function.
    num_batches: number of pre-generated batches cycled by the generator.

    Returns
    -------
    (train_input_fn, validation_input_fn): TF estimator input functions
    with .length and .classes attributes attached.
    """
    logger = _get_logger()
    logger.info("Creating fake data")
    # Pre-generate all batches up front; the generator just slices them.
    data_array = _create_data(_BATCHSIZE, num_batches, (_HEIGHT, _WIDTH), _CHANNELS)
    labels_array = _create_labels(_BATCHSIZE, num_batches, 1000)

    def fake_data_generator():
        # Yield one (data, labels) batch at a time from the cached arrays.
        for i in range(num_batches):
            yield data_array[i * _BATCHSIZE : (i + 1) * _BATCHSIZE], labels_array[
                i * _BATCHSIZE : (i + 1) * _BATCHSIZE
            ]

    # BUG FIX: from_generator is a static factory on tf.data.Dataset;
    # the original instantiated the abstract class (tf.data.Dataset())
    # before calling it, which is invalid on current TensorFlow.
    train_data = tf.data.Dataset.from_generator(
        fake_data_generator,
        output_types=(tf.float32, tf.int32),
        output_shapes=(
            tf.TensorShape([None, _CHANNELS, _HEIGHT, _WIDTH]),
            tf.TensorShape([None]),
        ),
    )
    train_data = train_data.shuffle(40 * _BATCHSIZE).repeat().prefetch(_BUFFER)
    validation_data = tf.data.Dataset.from_generator(
        fake_data_generator,
        output_types=(tf.float32, tf.int32),
        output_shapes=(
            tf.TensorShape([None, _CHANNELS, _HEIGHT, _WIDTH]),
            tf.TensorShape([None]),
        ),
    )
    validation_data = validation_data.prefetch(_BUFFER)

    def _train_input_fn():
        return train_data.make_one_shot_iterator().get_next()

    def _validation_input_fn():
        return validation_data.make_one_shot_iterator().get_next()

    # Attach metadata the training loop reads off the input functions.
    _train_input_fn.length = train_length
    _validation_input_fn.length = valid_length
    _train_input_fn.classes = 1000
    _validation_input_fn.classes = 1000
    return _train_input_fn, _validation_input_fn
31,899