Columns: content (string, length 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
import numpy as np

def reorder_by_driver(driver, driven):
    """Reorder the time series of the driver and driven variables by quicksorting the driver."""
    idx_sort = np.argsort(driver, kind='quicksort')
    driver = driver[idx_sort]
    driven = driven[idx_sort]
    return driver, driven
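# A minimal usage sketch (the sample arrays are illustrative):
t = np.array([3.0, 1.0, 2.0])      # driver
y = np.array([30.0, 10.0, 20.0])   # driven
t_sorted, y_sorted = reorder_by_driver(t, y)
# t_sorted -> [1. 2. 3.], y_sorted -> [10. 20. 30.]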
772737c8918363dd6ce59eeb3c79474292d9c9a2
3,634,500
import os

def linux_sys_new():
    """Read battery state from /sys/class/power_supply on Linux.

    Returns False if no usable battery is found, otherwise a tuple
    (1, ac, charging, percent, lifetime) where lifetime is in minutes
    (-1 if it cannot be estimated).
    """
    path = None
    # Some systems seem to have BAT1 but not BAT0, so use the first one we
    # encounter.
    for i in range(0, 4):
        p = '/sys/class/power_supply/BAT{}'.format(i)
        if os.path.exists(p):
            path = p
            break
    if path is None:
        return False
    if not os.path.exists('{}/energy_now'.format(path)):
        return False
    r = lambda f: open('{}/{}'.format(path, f), 'r').read().strip()
    ri = lambda f: int(r(f))
    status = r('status')
    if status == 'Charging':
        ac = True
        charging = True
    elif status == 'Discharging':
        ac = False
        charging = False
    elif status == 'Full':
        ac = True
        charging = False
    else:
        ac = False
        charging = False
    percent = ri('capacity')
    drain_rate = ri('power_now')
    full_capacity = ri('energy_full')
    remaining = ri('energy_now')
    if charging and drain_rate > 0:  # guard against a zero power_now reading
        lifetime = (full_capacity - remaining) / drain_rate * 60
    elif drain_rate > 0:
        lifetime = remaining / drain_rate * 60
    else:
        lifetime = -1
    return (1, ac, charging, percent, lifetime)
fabc9d22b0415dc84057e04da3223ac44e747118
3,634,501
import uuid

import boto3
from botocore.exceptions import ClientError

def get_bucket_policy(s3bucket):
    """
    Gets S3 Bucket policy
    :param s3bucket: S3 bucket to get the policy
    :return: Bucket Policy Object
    """
    s3_client = boto3.client('s3')
    try:
        bucket_policy = s3_client.get_bucket_policy(Bucket=s3bucket)
    except ClientError:
        # No policy attached: fall back to a default allowing s3:GetObject.
        # `accountid` is assumed to be defined at module level.
        bucket_policy = {
            u'Policy': u'{"Version":"2012-10-17","Id":"Default-Policy",'
                       u'"Statement":[{"Sid":"' + str(uuid.uuid4()) + u'","Effect":"Allow",'
                       u'"Principal":{"AWS":"arn:aws:iam::' + accountid + u':root"},"Action":"s3:GetObject",'
                       u'"Resource":"arn:aws:s3:::' + s3bucket + u'/*"}]}'
        }
    return bucket_policy
33a4f16df595b040b255ea73354d18719ccba8b5
3,634,502
import numpy as np

def is_gregorian(y, m, d):
    """
    The `is_gregorian` function enables array input.
    Documentation see the `_is_julian` function.
    """
    years = np.array(y, ndmin=1)
    months = np.array(m, ndmin=1)
    days = np.array(d, ndmin=1)
    years_count = np.size(years)
    dim_check = ((years_count == np.size(months)) and
                 (years_count == np.size(days)))
    if not dim_check:
        raise ValueError('dimension mismatch')
    ret = np.zeros(years_count, dtype=np.bool_)
    for ix in range(years_count):
        try:
            ret[ix] = _is_gregorian(years[ix], months[ix], days[ix])
        except ValueError:
            # NaN cannot be stored in a boolean array; treat invalid dates
            # as not Gregorian instead.
            ret[ix] = False
    return ret
2e91330242cde9ef4a3483700d0a029a488e2bc5
3,634,503
import json

def parse(filename):
    """
    Decode filename into an object
    """
    template = None
    template_lines = None
    try:
        (template, template_lines) = tf_plan_json.load(filename)
    except tf_plan_json.JSONDecodeError:
        pass
    except json.decoder.JSONDecodeError:
        # Most parsing errors will get caught by the exception above. But, if
        # the file is totally empty, and perhaps in other specific cases, the
        # json library will not even begin parsing with our custom logic that
        # throws the exception above, and will fail with this exception
        # instead.
        pass
    if (template is not None and isinstance(template, dict_node)
            and 'terraform_version' in template
            and 'planned_values' in template):
        return template, template_lines
    return None, None
d0dec4d63da1d8a92f6e025148ac69c6efde9234
3,634,504
def get_feature_dropping_corrections(bird_id='z007', session='day-2016-09-09',
                                     feat_type: str = 'pow', verbose=True):
    """ Import the results of make_parameter_sweep

    :param bird_id:
    :param session:
    :param feat_type: feature type, one of 'pow', 'phase' or 'both'
    :param verbose:
    :return:
        mean_correction : ndarray
            mean accuracy corrections for the selected feature type
        std_correction : ndarray
            standard deviations of the corrections for the selected feature type
    """
    assert feat_type in ['pow', 'phase', 'both'], "invalid feat_type"
    mean_correction_name = "accuracy_" + feat_type
    mean_correction = _load_numpy_data(data_name=mean_correction_name, bird_id=bird_id,
                                       session=session, source=channel_drop_path,
                                       verbose=verbose)
    std_correction_name = "std_all_" + feat_type
    std_correction = _load_numpy_data(data_name=std_correction_name, bird_id=bird_id,
                                      session=session, source=channel_drop_path,
                                      verbose=verbose)
    return mean_correction, std_correction
1e7ee58c892673475369d0f6649cc42b96606f3a
3,634,505
# `beam`, `tf`, `pretrain_utils` and the `_*` helpers are assumed to be
# imported at module level.
def build_pretraining_pipeline(
    input_file,
    output_dir,
    output_suffix,
    config,
    dupe_factor,
    min_num_rows,
    min_num_columns,
    num_random_table_bins=1_000,
    add_random_table=False,
    num_corpus_bins=1_000,
    add_numeric_values=True,
):
    """Pipeline that maps interactions to TF examples.

    Args:
      input_file: Input in Interaction proto format.
      output_dir: Output directory.
      output_suffix: Extension to use to save the train/test files.
      config: Pretrain config.
      dupe_factor: Number of duplicates per input.
      min_num_rows: Min num of rows required to create example.
      min_num_columns: Min num of cols required to create example.
      num_random_table_bins: This is for sampling negative examples; a smaller
        number will result in a higher number of negatives but might be slow.
      add_random_table: If true, pair 50% of examples with random tables.
      num_corpus_bins: Number of bins for random sharding. Determines test size.
      add_numeric_values: If true, parse numeric values (needed for ranks).

    Returns:
      The pipeline function.
    """

    def _pipeline(root):
        """Pipeline."""
        interactions = read_interactions(root, input_file, name='input')
        interactions = (
            interactions
            | 'CheckTableId' >> beam.FlatMap(pretrain_utils.check_table_id_fn)
            | 'CheckTableSize' >> beam.FlatMap(pretrain_utils.check_tale_size_fn,
                                               min_num_rows, min_num_columns))
        if add_numeric_values:
            interactions = (
                interactions
                | 'AddNumericValues' >> beam.Map(pretrain_utils.add_numeric_values_fn))
        else:
            interactions = (interactions | 'AddText' >> beam.Map(_add_text_fn))
        interactions = (
            interactions
            | 'Duplicate' >> beam.FlatMap(pretrain_utils.duplicate_fn, dupe_factor))
        if add_random_table:
            interactions = (
                interactions
                | 'Key With Random Key' >> beam.Map(
                    _key_with_random_key, config.random_seed,
                    num_random_table_bins, num_corpus_bins)
                | 'GroupByKey' >> beam.GroupByKey()
                | 'MergeRandomInteractions' >> beam.FlatMap(_merge_random_interactions))
        else:
            interactions = (
                interactions
                | 'PairWithNone' >> beam.FlatMap(pretrain_utils.pair_with_none_fn))
        examples = (
            interactions
            | f'ToTensorflowExample_{config.max_seq_length}' >> beam.ParDo(
                pretrain_utils.ToTensorflowExample(config)))
        pretrain_utils.split_by_table_id_and_write(
            examples,
            output_dir=output_dir,
            train_suffix=output_suffix,
            test_suffix=output_suffix,
            proto_message=tf.train.Example,
            num_splits=num_corpus_bins,
        )

    return _pipeline
f8401afb557a7fdcfa9f69aa86e0c60f1807c44f
3,634,506
import logging

def parse_arg_params(parser, upper_dirs=None):
    """ parse all params

    :param parser: object of parser
    :param list(str) upper_dirs: list of keys in parameters
        with item for which only the parent folder must exist
    :return dict: parameters
    """
    # SEE: https://docs.python.org/3/library/argparse.html
    args = vars(parser.parse_args())
    logging.info('ARGUMENTS: \n %r', args)
    # remove all None parameters
    args = {k: args[k] for k in args if args[k] is not None}
    # extend and test all paths in params
    args, missing = update_paths(args, upper_dirs=upper_dirs)
    assert not missing, 'missing paths: %r' % {k: args[k] for k in missing}
    return args
b770224cf54b7dc23896db88eb63d258c96b9da6
3,634,507
def rgb_to_hex_string(value):
    """Convert from an (R, G, B) tuple to a hex color.

    :param value: The RGB value to convert
    :type value: tuple

    R, G and B should be in the range 0.0 - 1.0
    """
    color = ''.join(['%02x' % x1 for x1 in [int(x * 255) for x in value]])
    return '#%s' % color
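# A quick hand-checkable sketch (int() truncates, so 0.5 maps to 0x7f):
assert rgb_to_hex_string((1.0, 0.5, 0.0)) == '#ff7f00'
assert rgb_to_hex_string((0.0, 0.0, 0.0)) == '#000000'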
6449d5ecf8f3134ca320c784293d8ece44a84148
3,634,508
def view():
    """
    Intercepts the /view URL GET request. Displays the page details for an
    application in a specific category, read from the query parameters passed
    in the URL.
    :return:
    """
    application_category = request.args.get("show").upper()
    result_data = application.get(session["email"], application_category)
    print(result_data)
    return render_template(
        "view_list.html", data=result_data, upcoming_events=upcoming_events
    )
d2e62b12f2cc6e351d5b264a09c1c95a29f9baba
3,634,509
def ancestors(G, x, G_reversed=None):
    """
    Set of all ancestors of node in a graph, not including itself.
    :param G: target graph
    :param x: target node
    :param G_reversed: you can supply graph with reversed edges for speedup
    :return: set of ancestors
    """
    if G_reversed is None:
        G_reversed = G.reverse()
    return descendants(G_reversed, x)
11a6930038807a677c3645d908a45b068c0f013b
3,634,510
def range_to_list():
    """
    Create arrays of values from a dataset whose lower and upper limits are
    given by a list of bounds. THIS IS CONFIGURED FOR MY COMPUTER, CHANGE THE
    DIRECTORY TO USE.
    """
    dat1, filename1 = pick_dat(['t', 'm'], "RDAT_Test", "Select dataset to draw from")
    dat2 = read_csv("C:\\Users\\Josh\\IdeaProjects\\PulsedNMR\\Ranges\\{}".format(filename1),
                    names=['Lower Bound', 'LowerIndex', 'Upper Bound', 'UpperIndex'])
    xrange = []
    yrange = []
    xranges = {}
    yranges = {}
    x_append = xrange.append
    y_append = yrange.append
    for o in range(0, len(dat2)):
        x_append((dat1['t'][dat2['LowerIndex'][o]:dat2['UpperIndex'][o] + 1]).values)
        y_append((dat1['m'][dat2['LowerIndex'][o]:dat2['UpperIndex'][o] + 1]).values)
    for o in range(0, len(xrange)):
        xranges[o] = xrange[o]
        yranges[o] = yrange[o]
    return xranges, yranges, xrange, yrange, filename1, dat1
f4cd269f13a4580d43a4279a539d8b6a87a06683
3,634,511
import numpy as np

def compute_a2b2(Q):
    """
    Given the second moment matrix, compute a^2 and b^2
    """
    Q11 = Q[0, 0]
    Q22 = Q[1, 1]
    Q12 = Q[0, 1]
    a2t = 0.5 * (Q11 + Q22 + np.sqrt((Q11 - Q22) ** 2 + 4 * Q12 ** 2))
    b2t = 0.5 * (Q11 + Q22 - np.sqrt((Q11 - Q22) ** 2 + 4 * Q12 ** 2))
    a2, b2 = max(a2t, b2t), min(a2t, b2t)
    assert a2 >= b2
    return a2, b2
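# The two values are exactly the eigenvalues of the symmetric 2x2 matrix Q,
# since (Q11+Q22)^2 - 4(Q11*Q22 - Q12^2) = (Q11-Q22)^2 + 4*Q12^2.
# A hand-checkable sketch: [[2, 1], [1, 2]] has eigenvalues 3 and 1.
assert compute_a2b2(np.array([[2.0, 1.0], [1.0, 2.0]])) == (3.0, 1.0)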
60d363066389d17a2d2bafa9b1c3733d884a7bcb
3,634,512
import sys

import yaml

def configure_app(args):
    """ configure the app, import swagger """
    global application
    # global beacon_api
    global g2p_api

    def function_resolver(operation_id):
        """Map the operation_id to the function in this class."""
        if '.' in operation_id:
            _, function_name = operation_id.rsplit('.', 1)
        else:
            function_name = operation_id
        function = getattr(sys.modules[__name__], function_name)
        return function

    app = connexion.App(
        __name__,
        swagger_ui=True,
        swagger_json=True)
    CORS(app.app)
    swagger_host = None
    if args.swagger_host:
        swagger_host = args.swagger_host
    else:
        host = 'localhost'  # socket.gethostname()
        if args.port != 80:
            host += ':{}'.format(args.port)
        swagger_host = '{}'.format(host)

    # with open('swagger-beacon.yaml', 'r') as stream:
    #     swagger_beacon = yaml.safe_load(stream)
    #
    # with open('swagger-g2p.yaml', 'r') as stream:
    #     swagger_combined = yaml.safe_load(stream)
    #
    # swagger_beacon['host'] = swagger_host
    # swagger_combined['host'] = swagger_host

    with open('swagger-combined.yaml', 'r') as stream:
        swagger_combined = yaml.safe_load(stream)
    swagger_combined['host'] = swagger_host

    log.info('advertise swagger host as {}'.format(swagger_host))

    # # remove schemes that do not apply
    # if args.key_file:
    #     # swagger_beacon['schemes'].remove('http')
    #     swagger_combined['schemes'].remove('http')
    # else:
    #     # swagger_beacon['schemes'].remove('https')
    #     swagger_combined['schemes'].remove('https')

    # beacon_api = app.add_api(swagger_beacon, base_path='/v1/beacon',
    #                          resolver=function_resolver)
    g2p_api = app.add_api(swagger_combined, base_path='/api/v1',
                          resolver=function_resolver)
    log.info('g2p_api.version {}'.format(
        g2p_api.specification['info']['version']
    ))
    # set global
    application = app.app
    return (app, g2p_api)
7894b86cfeff66b1fd3a48fd6468b593151c6a22
3,634,513
import numpy as np

def GausCV(traj, sample):
    """ returns matrix of gaussian CV's """
    # m=7 - good
    m = 10
    pen = 0.
    x = np.linspace(-5, 5, m)
    y = np.linspace(-5, 5, m)
    sigma_squared = 3.0
    xx, yy = np.meshgrid(x, y)
    d = m ** 2
    # print(xx)
    mu = np.concatenate((xx.reshape((-1, 1)), yy.reshape((-1, 1))), axis=1)
    # traj = np.repeat(traj[:, np.newaxis, :], 25, axis=1)
    # print(traj.shape)
    traj_adj = (np.repeat(traj[:, np.newaxis, :], d, axis=1) - mu[np.newaxis, :]) / sigma_squared
    # print(traj_adj.shape)
    psi_matr = np.zeros((traj.shape[0], d))
    for i in range(d):
        psi_matr[:, i] = np.exp(-np.sum((traj - mu[i].reshape((1, 2))) ** 2, axis=1) / (2 * sigma_squared))
    # print(psi_matr.shape)
    cov = np.dot(sample.T - sample.mean(), psi_matr - psi_matr.mean(axis=0)) / traj.shape[0]
    print(cov.shape)
    jac_matr = -traj_adj * (psi_matr.reshape((psi_matr.shape[0], psi_matr.shape[1], 1)))
    H = np.mean(np.matmul(jac_matr, jac_matr.transpose(0, 2, 1)), axis=0)
    param_CV = np.linalg.inv(H + pen * np.eye(H.shape[0])).dot(cov.T)
    print(np.sqrt(np.sum(param_CV ** 2)))
    jac_star = np.sum(jac_matr * param_CV[np.newaxis, :], axis=1)
    delta_star = (psi_matr * (np.sum(traj_adj ** 2, axis=2) - traj.shape[1] / sigma_squared)).dot(param_CV)
    return jac_star, delta_star
d366b5bc94fb1fb3b3201e0e54eaed3336067776
3,634,514
def _rgb_to_hex_string(rgb: tuple) -> str:
    """Convert RGB tuple to hex string."""
    def clamp(x):
        return max(0, min(x, 255))
    return "#{0:02x}{1:02x}{2:02x}".format(clamp(rgb[0]), clamp(rgb[1]), clamp(rgb[2]))
eafd166a67ac568cfad3da1fa16bdfcd054a914a
3,634,515
import math

def update_one_contribute_score(user_total_click_num):
    """
    ItemCF: the similarity contribution score of a single user click,
    damped by the user's total click count.
    """
    return 1 / math.log10(1 + user_total_click_num)
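# The damping is hand-checkable since log10(10) = 1 and log10(100) = 2:
assert update_one_contribute_score(9) == 1.0
assert update_one_contribute_score(99) == 0.5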
b6dadc87150e33e1ba2d806e18856f10fd43035a
3,634,516
import numpy as np
# `newton` (a 1-D Newton solver, e.g. scipy.optimize.newton) and the type
# strings `tint`/`tfloat` are assumed to be defined at module level.

def NewtonRaphson(F, J, X0, eps=1e-4, mxiter=100):
    """
    Solve nonlinear system F=0 by Newton's method. J is the Jacobian of F.
    Both F and J must be functions of x. At input, x holds the start value.
    The iteration continues until ||F|| < eps.

    Required Arguments:
    -------------------
    F: The Non-Linear System; a function handle/instance.
       The input function must accept only one (1) argument as an array
       or int/float representing the variables required.
    J: The Jacobian of F; a function handle/instance.
       The input Jacobian of F must accept only one (1) argument as an array
       or int/float representing the variables required.
    X0: The Initial Value (or initial guess); a representative array.

    Optional Arguments:
    -------------------
    eps: Epsilon - The error value, default=0.0001
    mxiter: Maximum Iterations - The highest number of iterations allowed,
            default=100

    Returns:
    --------
    X0: The computed result

    Optional Returns:
    -----------------
    iteration_counter: The number of iterations completed before returning,
                       either due to the solution being found or the maximum
                       iterations being surpassed.
    """
    # Test for one-variable inputs
    ftype = str(type(F(X0)))
    jtype = str(type(J(X0)))
    if ftype == tint or ftype == tfloat:
        # System is size-1
        if (jtype != tint) and (jtype != tfloat):
            # Jacobian isn't size-1
            raise ValueError("ERROR: The Jacobian isn't size-1.")
        return newton(F, X0, J)
    # Test for valid argument sizes
    f0sz = len(F(X0))
    j0sz = len(J(X0))
    if f0sz != j0sz:
        # Size mismatch
        raise ValueError("ERROR: The arguments return arrays or lists" +
                         " of different sizes: f0=" + str(f0sz) + "; j0=" + str(j0sz))
    F_value = F(X0)
    F_norm = np.linalg.norm(F_value, ord=2)  # L2 norm of vector
    iteration_counter = 0
    while abs(F_norm) > eps and iteration_counter < mxiter:
        delta = np.linalg.solve(J(X0), -F_value)
        X0 = X0 + delta
        F_value = F(X0)
        F_norm = np.linalg.norm(F_value, ord=2)
        iteration_counter += 1
    # Here, either a solution is found, or too many iterations
    if abs(F_norm) > eps:
        iteration_counter = -1
    return (X0, iteration_counter)
f93858c5e540d67a8d78315705a6b199e2cba365
3,634,517
def export2tf2onnx(model_onnx, opset=None, verbose=True, name=None,
                   rename=False, autopep_options=None):
    """
    Exports an ONNX model to the :epkg:`tensorflow-onnx` syntax.

    :param model_onnx: string or ONNX graph
    :param opset: opset to export to (None to select the one from the graph)
    :param verbose: inserts prints
    :param name: to overwrite onnx name
    :param rename: rename the names to get shorter names
    :param autopep_options: :epkg:`autopep8` options
    :return: python code

    .. runpython::
        :showcode:

        import numpy
        from sklearn.cluster import KMeans
        from skl2onnx import to_onnx
        from mlprodict.onnx_tools.onnx_export import export2tf2onnx

        X = numpy.arange(20).reshape(10, 2).astype(numpy.float32)
        tr = KMeans(n_clusters=2)
        tr.fit(X)
        onx = to_onnx(tr, X, target_opset=14)
        code = export2tf2onnx(onx)
        print(code)
    """
    if isinstance(model_onnx, str):
        model_onnx = onnx.load(model_onnx)
    code = export_template(model_onnx, templates=get_tf2onnx_template(),
                           opset=opset, verbose=verbose, name=name,
                           rename=rename, use_onnx_tensor=True,
                           autopep_options=autopep_options)
    code = code.replace("], ]", "]]")
    return code
3ef4234a3e7e86634db3d7178a006789f0cac9d9
3,634,518
def default_interest_payment_date():
    """Default value for the interest payment date object."""
    return {
        f'interestPaymentDate{index}': ''
        for index in range(1, 13)
    }
77d51cd5c7c76347a5c53e3d816985eeac1a568b
3,634,519
from datetime import datetime

def log_message_prefix_generator(log_level: str) -> str:
    """
    Parameters
    ----------
    log_level : str
        log level e.g. "INFO", "WARN", ...

    Returns
    ----------
    str
        logger prefix e.g. "[2020-06-17 20:21:12] [INFO]"
    """
    now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    return f'[{now}] [{log_level}]'
3b573632a9fce77a555531043eb3be0c92a862d5
3,634,520
# `Collection` and `BeatmapEntry` here are this app's Django models
# (not typing.Collection), assumed to be imported from the app's models module.

def delete_beatmap(request, collection_id, beatmap_entry_id):
    """View for deleting a beatmap entry."""
    collection = get_object_or_404(Collection, id=collection_id)
    beatmap_entry = get_object_or_404(BeatmapEntry, id=beatmap_entry_id, collection=collection)
    if request.user != collection.author:
        messages.error(request, "Hey! That's nonsense")
        return redirect('collection', collection_id=collection_id)
    beatmap_entry.delete()
    messages.success(request, 'Deleted beatmap from collection successfully!')
    return redirect('manage_beatmap', collection_id=collection_id)
ce894b94287efa0a4b779537bde4122b78bd3378
3,634,521
import requests

def interactors_form(path, name):
    """
    Parse file and retrieve a summary associated with a token

    :param path: Absolute path to file to be read with custom interactor
    :param name: Name which identifies the sample
    :return:
    """
    headers = {
        'accept': 'application/json',
        'content-type': 'text/plain',
    }
    params = (
        ('name', name),
    )
    url = 'https://reactome.org/ContentService/interactors/upload/tuple/form'
    data = open(path, 'rb').read()
    try:
        response = requests.post(url=url, headers=headers, params=params, data=data)
    except requests.exceptions.ConnectionError as e:
        # Return here, otherwise `response` would be unbound below.
        print(e)
        return None
    if response.status_code == 200:
        return response.json()
    print('Status code returned a value of %s' % response.status_code)
e193f0d8e1b9cf1fadcf4fff2668a09f755dab41
3,634,522
def _parent(child):
    """
    Given a toast tile, return the address of the parent,
    as well as the corner of the parent that this tile occupies

    Returns
    -------
    Pos, xcorner, ycorner
    """
    parent = Pos(n=child.n - 1, x=child.x // 2, y=child.y // 2)
    left = child.x % 2
    top = child.y % 2
    return (parent, left, top)
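# A minimal sketch, assuming Pos is a simple (n, x, y) record such as:
#   from collections import namedtuple
#   Pos = namedtuple('Pos', ['n', 'x', 'y'])
# Then tile (n=3, x=5, y=2) sits in the right-top quadrant of its parent:
#   _parent(Pos(n=3, x=5, y=2)) -> (Pos(n=2, x=2, y=1), 1, 0)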
918feb49611be02c3ae686cbb0f06bf089187e92
3,634,523
from typing import List
from typing import Dict
import json

def get_all_set_list(files_to_ignore: List[str]) -> List[Dict[str, str]]:
    """
    This will create the SetList.json file by getting the info from all the
    files in the set_outputs folder and combining them into the old v3
    structure.
    :param files_to_ignore: Files to ignore in set_outputs folder
    :return: List of all set dicts
    """
    all_sets_data: List[Dict[str, str]] = []
    for set_file in mtgjson4.COMPILED_OUTPUT_DIR.glob("*.json"):
        if set_file.stem in files_to_ignore:
            continue
        with set_file.open("r", encoding="utf-8") as f:
            file_content = json.load(f)
            set_data = {
                "name": file_content.get("name"),
                "code": file_content.get("code"),
                "releaseDate": file_content.get("releaseDate"),
                "type": file_content.get("type"),
                "meta": file_content.get("meta"),
            }
            if "parentCode" in file_content.keys():
                set_data["parentCode"] = file_content["parentCode"]
            all_sets_data.append(set_data)
    return sorted(all_sets_data, key=lambda set_info: set_info["name"])
2f13a73d9a07e9790d23f0e6d961b6f3d058949f
3,634,524
def extinction_afterglow_galactic_dust_to_gas_ratio(time, lognh, factor=2.21, **kwargs):
    """
    Extinction with afterglow models and a dust-to-gas ratio

    :param time: time in observer frame in days
    :param lognh: log10 hydrogen column density
    :param factor: factor to convert nh to av i.e., av = nh/factor
    :param kwargs: Must be all the parameters required by the base_model
        specified using kwargs['base_model']
    :return: flux_density or magnitude depending on kwargs['output_format']
    """
    factor = factor * 1e21
    nh = 10 ** lognh
    av = nh / factor
    output = extinction_with_afterglow_base_model(time=time, av=av, **kwargs)
    return output
1d1bd28482361e5efc666f0111270c2c15500a7c
3,634,525
def get_md_module(force_field):
    """
    Returns the specific interface module that is referenced by force_field.
    """
    if force_field.startswith('GROMACS'):
        return gromacs
    elif force_field.startswith('AMBER'):
        return amber
    elif force_field.startswith('NAMD'):
        return namd
    else:
        raise ValueError("unrecognized force-field: " + force_field)
c6cc1c082f98cce3150f5e22b5cf0a6c3d654dd1
3,634,526
def select_student(database):
    """
    Query student
    :param database: database name
    :return: student
    """
    conn = create_connection(database)
    with conn:
        cur = conn.cursor()
        cur.execute("SELECT * FROM student")
        student = cur.fetchone()
        conn.commit()
        return student
8ab7f01d769af28df95bd3de62634cb7148d9829
3,634,527
import torch
import copy

def clones(module, N):
    """Produce N identical layers."""
    return torch.nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
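# A minimal usage sketch (stacking independent copies of a layer, as in a
# Transformer encoder):
layers = clones(torch.nn.Linear(512, 512), 6)
assert len(layers) == 6 and layers[0] is not layers[1]  # independent weights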
2def7cf89def4d598253ca48cb04e670ecb54dfd
3,634,528
import json

def validate_search_results(search_results):
    """ Expects a list of mongo objects """
    if not search_results:
        return json.dumps({"Result Count": 0, "Results": []})
    final_objs = format_mongo_objs(search_results)
    response = {"Result Count": len(final_objs), "Results": final_objs}
    return json.dumps(response)
9209cf534a6553b0f1a3354a68e833dc832dc62b
3,634,529
def format_command_args(args):
    """Format a command by removing unwanted values

    Restrict what we keep from the values sent (with a SET, HGET, LPUSH, ...):
    - Skip binary content
    - Truncate
    """
    length = 0
    out = []
    for arg in args:
        try:
            if isinstance(arg, (binary_type, text_type)):
                cmd = ensure_text(arg, errors="backslashreplace")
            else:
                cmd = stringify(arg)
            if len(cmd) > VALUE_MAX_LEN:
                cmd = cmd[:VALUE_MAX_LEN] + VALUE_TOO_LONG_MARK
            if length + len(cmd) > CMD_MAX_LEN:
                prefix = cmd[: CMD_MAX_LEN - length]
                out.append("%s%s" % (prefix, VALUE_TOO_LONG_MARK))
                break
            out.append(cmd)
            length += len(cmd)
        except Exception:
            out.append(VALUE_PLACEHOLDER)
            break
    return " ".join(out)
2d79adce1f4ec466f2ffc56f93a8fade8421dca5
3,634,530
import unittest
from unittest import TestSuite

def suite() -> TestSuite:
    """You need to change the name of the test class here also."""
    testSuite: TestSuite = TestSuite()
    # noinspection PyUnresolvedReferences
    testSuite.addTest(unittest.makeSuite(TestCoordinates))
    return testSuite
ba8c21072dd6ee178ca4070b21607e95bcef3d93
3,634,531
import numpy as np

def flow_diffusion_ode(C, X, pars):
    """
    Scott's master, p. 60. X is the new Y and Z is the new X.
    """
    C_N = C[-1]
    C_ = C[0] - pars["alpha"] * (C[0] - pars["Cg"]) * pars["dZ"]
    C_up = np.append(C[1:], C_N)
    C_down = np.append(C_, C[:-1])
    d2CdZ2 = (C_up - 2 * C + C_down) * pars["1/dZ**2"]
    # I assume multiplication to be faster than division
    dCdX = d2CdZ2 * pars["1/beta"]
    return dCdX
8c0af7a42c3821cc6735a0971555e624c30b693f
3,634,532
import math

def bl2xy(lon: float, lat: float):
    """
    CGCS2000 (China Geodetic Coordinate System 2000): convert longitude and
    latitude to planar Gauss-Krueger coordinates, 3-degree zones.

    Param:
        lon (float): longitude
        lat (float): latitude

    Returns:
        (x, y): x corresponds to longitude (easting), y to latitude (northing)
    """
    # 3.1415926535898 / 180.0
    iPI = 0.0174532925199433
    # 3-degree zone width
    zoneWide = 3
    # semi-major axis
    a = 6378137
    # flattening
    f = 1 / 298.257222101
    projNo = int(lon / zoneWide)
    longitude0 = projNo * 3
    longitude0 = longitude0 * iPI
    longitude1 = lon * iPI
    latitude1 = lat * iPI
    e2 = 2 * f - f * f
    ee = e2 * (1.0 - e2)
    NN = a / math.sqrt(1.0 - e2 * math.sin(latitude1) * math.sin(latitude1))
    T = math.tan(latitude1) * math.tan(latitude1)
    C = ee * math.cos(latitude1) * math.cos(latitude1)
    A = (longitude1 - longitude0) * math.cos(latitude1)
    M = a * ((1 - e2 / 4 - 3 * e2 * e2 / 64 - 5 * e2 * e2 * e2 / 256) * latitude1 -
             (3 * e2 / 8 + 3 * e2 * e2 / 32 + 45 * e2 * e2 * e2 / 1024) * math.sin(2 * latitude1) +
             (15 * e2 * e2 / 256 + 45 * e2 * e2 * e2 / 1024) * math.sin(4 * latitude1) -
             (35 * e2 * e2 * e2 / 3072) * math.sin(6 * latitude1))
    xval = NN * (A + (1 - T + C) * A * A * A / 6 +
                 (5 - 18 * T + T * T + 72 * C - 58 * ee) * A * A * A * A * A / 120)
    yval = M + NN * math.tan(latitude1) * (A * A / 2 +
                 (5 - T + 9 * C + 4 * C * C) * A * A * A * A / 24 +
                 (61 - 58 * T + T * T + 600 * C - 330 * ee) * A * A * A * A * A * A / 720)
    X0 = 1000000 * projNo + 500000
    Y0 = 0
    xval = xval + X0
    yval = yval + Y0
    return (xval, yval)
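# A minimal usage sketch; the coordinates below are only illustrative:
x, y = bl2xy(116.3913, 39.9075)
# x carries the 3-degree zone number as a prefix plus a 500 km false easting;
# y is the northing from the equator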
4f2166d7878998da5373a4fa6aff5fcee6f32c61
3,634,533
import numpy

def process_image(obj, img, config, each_blob=None, care_about_ar=True):
    """
    :param obj: Object we're tracking
    :param img: Input image
    :param config: Controls
    :param each_blob: function, taking a SimpleCV.Blob as an argument,
        that is called for every candidate blob
    :return: Mask with candidates
    """
    hsv_image = img.toHSV()
    segmented = Image(cv2.inRange(hsv_image.flipHorizontal().rotate90().getNumpyCv2(),
                                  numpy.array([config.min_hue, config.min_sat, config.min_val]),
                                  numpy.array([config.max_hue, config.max_sat, config.max_val])))
    segmented = segmented.dilate(2)
    blobs = segmented.findBlobs()
    if blobs:
        for b in blobs:
            if b.radius() > 30:
                rect_width = b.minRectWidth()
                rect_height = b.minRectHeight()
                max_side = max(rect_width, rect_height)
                min_side = min(rect_width, rect_height)
                rect_width = max_side
                rect_height = min_side
                aspect_ratio = rect_width / rect_height
                square_error = abs(obj.aspect_ratio - aspect_ratio) / abs(aspect_ratio)
                if square_error < 0.1 or not care_about_ar:
                    if not each_blob:
                        # default to just outlining
                        # minRectX and minRectY actually give the center point,
                        # not the minX and minY, so we shift by 1/2
                        rect_ctr_x = b.minRectX()
                        mrX = rect_ctr_x - rect_width / 2
                        mrY = b.minRectY() - rect_height / 2
                        segmented.drawRectangle(mrX, mrY, rect_width, rect_height,
                                                color=Color.GREEN, width=6)
                        # px * (px/cm) = cm
                        offset = int(round((rect_ctr_x - segmented.width / 2) * (obj.width / rect_width)))
                        segmented.drawText('Offset %s cm' % offset, mrX, mrY, Color.RED, 64)
                        segmented.drawText('Width,Height (px): %s, %s' % (int(rect_width), int(rect_height)),
                                           0, mrY + 30, fontsize=64)
                        segmented.drawText('Distance (cm): %s' % (int(obj.width * FOCAL_LENGTH / rect_width)),
                                           0, mrY + 60, fontsize=64)
                    else:
                        each_blob(b)
    # Give the result mask
    return segmented.applyLayers()
e0de830cb843d6644634b08e80ace4ec911d16c5
3,634,534
import numpy as np
from scipy.stats import hmean  # assumed source of the harmonic mean helper

def szepes_ml(local_d):
    """maximum likelihood estimator from local FSA estimates (for k=1)

    :param numpy.ndarray of float local_d: local FSA estimates
    :return: global ML-FSA estimate
    """
    return hmean(local_d) / np.log(2)
00dd82e634f8606c7bbde24daf2fc1c64ac8492a
3,634,535
import os
import distutils.spawn

def find_aapt():
    """Find the aapt (Android Asset Packaging Tool).

    Returns:
      Path to aapt if successful, empty string otherwise.
    """
    # NOTE: This is far from perfect since this will pick up the first instance
    # of aapt installed and not necessarily the newest version.
    # Use the path to the "android" SDK tool to determine the SDK path.
    build_tools_dir = os.path.realpath(
        os.path.join(distutils.spawn.find_executable('android'),
                     os.path.pardir, os.path.pardir, 'build-tools'))
    for dirpath, unused_dirnames, filenames in os.walk(build_tools_dir):
        for filename in filenames:
            if os.path.splitext(filename)[0] == 'aapt':
                return os.path.join(dirpath, filename)
    return ''
a2640fdc5f4e64207cb0f8450c583d4130e077c0
3,634,536
import math
import tensorflow.compat.v1 as tf  # assumed: this model uses TF1-style APIs

def create_low_latency_conv_model(fingerprint_input, model_settings, is_training):
    """Builds a convolutional model with low compute requirements.

    This is roughly the network labeled as 'cnn-one-fstride4' in the
    'Convolutional Neural Networks for Small-footprint Keyword Spotting' paper:
    http://www.isca-speech.org/archive/interspeech_2015/papers/i15_1478.pdf

    Here's the layout of the graph:

        (fingerprint_input)
                v
            [Conv2D]<-(weights)
                v
            [BiasAdd]<-(bias)
                v
              [Relu]
                v
            [MatMul]<-(weights)
                v
            [BiasAdd]<-(bias)
                v
            [MatMul]<-(weights)
                v
            [BiasAdd]<-(bias)
                v
            [MatMul]<-(weights)
                v
            [BiasAdd]<-(bias)
                v

    This produces slightly lower quality results than the 'conv' model, but
    needs fewer weight parameters and computations.

    During training, dropout nodes are introduced after the relu, controlled
    by a placeholder.

    Args:
      fingerprint_input: TensorFlow node that will output audio feature vectors.
      model_settings: Dictionary of information about the model.
      is_training: Whether the model is going to be used for training.

    Returns:
      TensorFlow node outputting logits results, and optionally a dropout
      placeholder.
    """
    if is_training:
        dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
    input_frequency_size = model_settings['dct_coefficient_count']
    input_time_size = model_settings['spectrogram_length']
    fingerprint_4d = tf.reshape(fingerprint_input,
                                [-1, input_time_size, input_frequency_size, 1])
    first_filter_width = 8
    first_filter_height = input_time_size
    first_filter_count = 186
    first_filter_stride_x = 1
    first_filter_stride_y = 1
    first_weights = tf.Variable(
        tf.truncated_normal(
            [first_filter_height, first_filter_width, 1, first_filter_count],
            stddev=0.01))
    first_bias = tf.Variable(tf.zeros([first_filter_count]))
    first_conv = tf.nn.conv2d(fingerprint_4d, first_weights, [
        1, first_filter_stride_y, first_filter_stride_x, 1
    ], 'VALID') + first_bias
    first_relu = tf.nn.relu(first_conv)
    if is_training:
        first_dropout = tf.nn.dropout(first_relu, dropout_prob)
    else:
        first_dropout = first_relu
    first_conv_output_width = math.floor(
        (input_frequency_size - first_filter_width + first_filter_stride_x) /
        first_filter_stride_x)
    first_conv_output_height = math.floor(
        (input_time_size - first_filter_height + first_filter_stride_y) /
        first_filter_stride_y)
    first_conv_element_count = int(
        first_conv_output_width * first_conv_output_height * first_filter_count)
    flattened_first_conv = tf.reshape(first_dropout,
                                      [-1, first_conv_element_count])
    first_fc_output_channels = 128
    first_fc_weights = tf.Variable(
        tf.truncated_normal(
            [first_conv_element_count, first_fc_output_channels], stddev=0.01))
    first_fc_bias = tf.Variable(tf.zeros([first_fc_output_channels]))
    first_fc = tf.matmul(flattened_first_conv, first_fc_weights) + first_fc_bias
    if is_training:
        second_fc_input = tf.nn.dropout(first_fc, dropout_prob)
    else:
        second_fc_input = first_fc
    second_fc_output_channels = 128
    second_fc_weights = tf.Variable(
        tf.truncated_normal(
            [first_fc_output_channels, second_fc_output_channels], stddev=0.01))
    second_fc_bias = tf.Variable(tf.zeros([second_fc_output_channels]))
    second_fc = tf.matmul(second_fc_input, second_fc_weights) + second_fc_bias
    if is_training:
        final_fc_input = tf.nn.dropout(second_fc, dropout_prob)
    else:
        final_fc_input = second_fc
    label_count = model_settings['label_count']
    final_fc_weights = tf.Variable(
        tf.truncated_normal(
            [second_fc_output_channels, label_count], stddev=0.01))
    final_fc_bias = tf.Variable(tf.zeros([label_count]))
    final_fc = tf.matmul(final_fc_input, final_fc_weights) + final_fc_bias
    if is_training:
        return final_fc, dropout_prob
    else:
        return final_fc
964be361d32e3b79e8909be4628ef6897f8d16e6
3,634,537
import numpy as np
import pandas as pd

def sampling_from_enum_with_dirichlet_lm(enum_pool_dict_all, df_log2prob_by_syl,
                                         orig_seg_syl_df, lm_orig_dict, scale_num=1000):
    """
    get a sample lexicon with an orig language model

    params:
    @enum_pool_dict_all: enumerated words of all syllable lengths (filtered to
        make sure that all words are shorter than max_seglen; enumerated words
        for each syllable length are of that syllable length)
    @orig_seg_syl_df: original lexicon with words shorter than max_seglen,
        df with counts of seg-syl combination
    @lm_orig: original language model

    -> new_lexicon
    """
    # 1. get a new randomized language model for every syllable length
    #    -> lm_dict_update: sy_len: {context: (seg, prob)}
    lm_dict_update = {}
    for syl_len in enum_pool_dict_all:
        lm_dict_update[syl_len] = randomize_language_model_dirichlet(lm_orig_dict[syl_len], scale_num)
    # 2. assign new probs to enum_pool + pr(syl) for each syllable length
    for syl_len in lm_orig_dict:
        enum_pool_dict_all[syl_len]['logprob'] = enum_pool_dict_all[syl_len]['ipa'].apply(
            assign_prob_to_word_syl_known,
            args=(syl_len, df_log2prob_by_syl, lm_dict_update))
    # 3. get one sample for each syllen+seglen combination
    new_lexicon = pd.DataFrame()
    for i in np.arange(len(orig_seg_syl_df)):
        # for one row in seg-syl df: sample one seg-syl combination
        syl_len = orig_seg_syl_df.at[i, 'syllen']
        seg_len = orig_seg_syl_df.at[i, 'seglen']
        count = orig_seg_syl_df.at[i, 'count']
        wordlist_syl = enum_pool_dict_all[syl_len]
        # subset enum words to be sampled from
        source = wordlist_syl[wordlist_syl.seglen == seg_len][['ipa', 'logprob']]
        # normalize probabilities for words to be sampled
        p_source = source.logprob
        new_p = np.power(2, p_source)
        new_p /= new_p.sum()
        sampled_indices = np.random.choice(source.index, count, replace=False, p=new_p)
        words_output = source.loc[sampled_indices, ]
        words_output['seglen'] = seg_len
        words_output['syllen'] = syl_len
        new_lexicon = pd.DataFrame.append(new_lexicon, words_output)
    return new_lexicon
94016dad1dc453ee217e57ade39f6478b670a9b5
3,634,538
def action_store(raw_val):
    """Auto type convert the value, if possible."""
    if raw_val not in EMPTY_VALUES:
        return auto_type_convert(raw_val)
    else:
        return raw_val
3bed313c12f2a348cafd73111d3704c8c09198d9
3,634,539
import numpy as np
from shapely.geometry import Point

def haversine_distance(coordinate: Point) -> float:
    """
    Obtain the haversine distance between two coordinate points on the map

    :param coordinate: shapely.geometry.Point
    :return: haversine distance in km
    """
    # MKAD coordinate
    lat1, lon1 = 55.755826, 37.6173
    # address coordinate
    lat2, lon2 = coordinate.y, coordinate.x
    # distance between latitudes and longitudes
    distance_lat = np.radians(lat2 - lat1)
    distance_lon = np.radians(lon2 - lon1)
    # convert to radians
    lat1 = np.radians(lat1)
    lat2 = np.radians(lat2)
    # apply the haversine formula
    a = (pow(np.sin(distance_lat / 2), 2) +
         pow(np.sin(distance_lon / 2), 2) *
         np.cos(lat1) * np.cos(lat2))
    rad = 6371
    c = 2 * np.arcsin(np.sqrt(a))
    # Return the result multiplied by world radius
    return rad * c
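# A minimal usage sketch; note that Point takes (x, y) = (longitude, latitude):
pt = Point(37.90, 55.75)           # a point slightly east of the reference
print(haversine_distance(pt))      # distance in km from the hard-coded MKAD centre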
6a730ec1afb9e5e131fd05be11176155c14a19b9
3,634,540
def route_home():
    """ Renders the default page of the webserver, the leaderboard display"""
    return render_template("home.html", data=leaderboard_manager.get_sorted_data())
94c44fb65615e40b67d1729d706e3e968434be71
3,634,541
def get_org_details(organization_id):
    """
    Return the details for an organization

    CLI Example:

    .. code-block:: bash

        salt-run digicert.get_org_details 34

    Returns a dictionary with the org details, or with 'error' and 'status' keys.
    """
    qdata = salt.utils.http.query(
        "{}/organization/{}".format(_base_url(), organization_id),
        method="GET",
        decode=True,
        decode_type="json",
        header_dict={"X-DC-DEVKEY": _api_key(), "Content-Type": "application/json"},
    )
    return qdata
4c19248b4ce0f6984e667924f481f914d8ba4fa8
3,634,542
def updatelimit():
    """Update sensor limits."""
    script_root()
    print(request.form['id'])
    print(request.form['value'])
    # limit = SensorLimit.query.filter_by(id=request.form['id']).first()
    # print(request.form['id'])
    # print(limit)
    # limit.value = request.form['value']
    # db.session.commit()
    return "Hej"
46c67e995e642d84816e484017b22104b1867289
3,634,543
def trip_from_staging(conn, service, id_type='NUMERIC'):
    """Creates and fills the trip table for a service from its staging table,
    computing duration, distance and speed for each trip.

    Parameters
    ----------
    conn: psycopg2.extensions.connection
        The connection to the database
    service: str
        The bike station service whose trip table will be created
    id_type: str
        The column type of the startID/endID column

    Returns
    -------
    None: If executed properly a trip table will be created and filled
        for the service in the Trips schema
    """
    trip_from_staging_query = f"""
    CREATE TABLE trips.{service}_trip as (
        SELECT *,
               CASE WHEN duration > 0
                    THEN ROUND(distance/(duration / 60), 2)
               END AS speed
        FROM (
            SELECT starttime,
                   endtime,
                   ROUND((EXTRACT(epoch FROM (endtime - starttime))/60)::NUMERIC, 2) AS duration,
                   startid,
                   startname,
                   endid,
                   endname,
                   CASE WHEN s1.latitude > 0 AND s2.latitude > 0
                        THEN ROUND(CAST(ST_Distance(s1.geometry, s2.geometry)*0.000621371 AS NUMERIC), 2)
                   END AS distance
            FROM staging.{service}_trip AS {service}
            LEFT JOIN stations.{service}_station AS s1
                ON {service}.startid = s1.stationid::NUMERIC
            LEFT JOIN stations.{service}_station AS s2
                ON {service}.endid = s2.stationid::NUMERIC
        ) AS {service}_table
    );
    """
    if id_type == 'VARCHAR':
        trip_from_staging_query = f"""
        CREATE TABLE trips.{service}_trip AS (
            SELECT *,
                   CASE WHEN duration > 0
                        THEN ROUND(distance/(duration / 60), 2)
                   END AS speed
            FROM (
                SELECT {service}.starttime,
                       {service}.endtime,
                       ROUND((EXTRACT(epoch FROM (endtime - starttime))/60)::NUMERIC, 2) AS duration,
                       replace({service}.startid, '.0', '') as startid,
                       startname,
                       replace({service}.endid, '.0', '') as endid,
                       endname,
                       CASE WHEN s1.latitude > 0 AND s2.latitude > 0
                            THEN ROUND(CAST(ST_Distance(s1.geometry, s2.geometry)*0.000621371 AS NUMERIC), 2)
                       END AS distance
                FROM staging.{service}_trip AS {service}
                LEFT JOIN stations.{service}_station AS s1
                    ON replace({service}.startid, '.0', '') = s1.stationid
                LEFT JOIN stations.{service}_station AS s2
                    ON replace({service}.endid, '.0', '') = s2.stationid
            ) AS {service}_table
        );
        """
    execute_query(conn, trip_from_staging_query)
    return None
cec99e09848f6fe828758960c5e53401a3f6116f
3,634,544
def six_plot(ts, *plotargs, **plotkwds):
    """ Output a matplotlib figure with full spectra, absorbance, area and
    stripchart. Figure should be plotly convertable through py.iplot_mpl(fig)
    assuming one is signed in to plotly through py.sign_in(user, apikey).

    Parameters
    -----------
    title : str
        Title of the overall figure

    striplegend : bool (False)
        Add a legend to the strip chart

    colormap : string ('jet')
        Colormap applied to full and absorbance spectra.
        'Jet' is applied to strip chart regardless.

    tight_layout : bool (False)
        Calls mpl.fig.tight_layout()
    """
    title = plotkwds.pop('title', '')
    tight_layout = plotkwds.pop('tight_layout', False)
    figsize = plotkwds.pop('figsize', (10, 8))
    f, axes = put.splot(3, 2, fig=True, figsize=figsize)
    f.suptitle(title, fontsize=20)
    if tight_layout:
        f.tight_layout()
    cmap = plotkwds.pop('colormap', 'jet')
    strip_cmap = 'spectral'
    striplegend = plotkwds.pop('striplegend', False)
    ts.plot(*plotargs, ax=axes[0], title='Spectra', colormap=cmap,
            fig=f,  # for colorbar
            **plotkwds)
    range_timeplot(ts.wavelength_slices(8), ax=axes[1], legend=False,
                   colormap=strip_cmap, title='Slices (Full)', **plotkwds)
    ts.plot(*plotargs, ax=axes[2], colormap=cmap, norm='r',
            title='Normalized (r)', **plotkwds)
    range_timeplot(ts.as_norm('r').wavelength_slices(8), *plotargs, ax=axes[3],
                   legend=False, title='Slices (r)', fig=f, **plotkwds)
    ts.plot(*plotargs, ax=axes[4], norm='a', colormap=cmap,
            title='Normalized (a)', **plotkwds)
    areaplot(ts, *plotargs, ax=axes[5], title='Area', fig=f, **plotkwds)
    # Custom legend to strip charts
    # (http://matplotlib.org/users/legend_guide.html#multicolumn-legend)
    if striplegend:
        for ax in [axes[1]]:  # axes[5]
            ax.legend(loc='lower center', ncol=4, fontsize=5,
                      # mode='expand',
                      bbox_to_anchor=(0.5, -0.1))
    # Right axes to y-axis
    for a in (axes[1], axes[3], axes[5]):
        a.yaxis.tick_right()
        a.yaxis.set_label_position("right")
    # Remove x-axis of area/stripchart
    for ax in (axes[0:4]):
        put.hide_axis(ax, axis='x')
    return f
4977eb62f7cc72c50599f5c5e64383b79881ef3f
3,634,545
def woodbury_solve(vector, low_rank_mat, woodbury_factor, shift):
    """
    Solves the system of equations :math:`(\\sigma I + V V^{\\top})x = b`
    using the Woodbury formula.

    Input:
        - vector (size n) - right hand side vector b to solve with.
        - low_rank_mat (k x n) - the low-rank matrix V.
        - woodbury_factor (k x n) - the result of calling woodbury_factor
          on V and the shift :math:`\\sigma`.
        - shift (vector) - shift value :math:`\\sigma`.
    """
    if vector.ndimension() > 1:
        shift = shift.unsqueeze(-1)
    right = low_rank_mat.transpose(-1, -2).matmul(woodbury_factor.matmul(vector / shift))
    return (vector - right) / shift
92b25fe675671408c560008e4093c1e4b35d3c42
3,634,546
import os

def create_callbacks(model, data, ARGS):
    """At the end of each epoch, determine various callback statistics (e.g. ROC-AUC)

    :param model: Keras model
    :type model: :class:`tensorflow.keras.Model`
    :param data: Validation data - data sequences (codes, visits, numeric values) and classifier.
    :type data: tuple( list( :class:`ndarray`), :class:`ndarray`)
    :param ARGS: Arguments object containing user-specified parameters
    :type ARGS: :class:`argparse.Namespace`
    :return: various callback objects - naming convention for saved HDF5 files, custom logging class, \
        reduced learning rate
    :rtype: tuple(:class:`tensorflow.keras.callbacks.ModelCheckpoint`, :class:`LogEval`, \
        :class:`tensorflow.keras.callbacks.ReduceLROnPlateau`)
    """

    class LogEval(Callback):
        """Logging Callback"""

        def __init__(self, filepath, model, data, ARGS, interval=1):
            """Constructor for logging class

            :param str filepath: path for log file & Keras HDF5 files
            :param model: model from training used for end-of-epoch analytics
            :type model: :class:`keras.engine.training.Model`
            :param data: Validation data used for end-of-epoch analytics \
                (e.g. data sequences (codes, visits, numerics) and classifier)
            :type data: tuple(list[:class:`ndarray`],:class:`ndarray`)
            :param ARGS: Arguments object containing user-specified parameters
            :type ARGS: :class:`argparse.Namespace`
            :param interval: Interval for logging (e.g. every epoch), defaults to 1
            :type interval: int, optional
            """
            super(LogEval, self).__init__()
            self.filepath = filepath
            self.interval = interval
            self.data_test, self.y_test = data
            self.generator = SequenceBuilder(data=self.data_test,
                                             target=self.y_test,
                                             batch_size=ARGS.batch_size,
                                             ARGS=ARGS,
                                             target_out=False)
            self.model = model

        def on_epoch_end(self, epoch, logs={}):
            # Compute ROC-AUC and average precision on the validation data
            # every `interval` epochs
            if epoch % self.interval == 0:
                # Compute predictions of the model
                y_pred = [x[-1] for x in
                          self.model.predict_generator(self.generator,
                                                       verbose=0,
                                                       use_multiprocessing=True,
                                                       workers=5,
                                                       max_queue_size=5)]
                score_roc = roc_auc_score(self.y_test, y_pred)
                score_pr = average_precision_score(self.y_test, y_pred)
                # Create log file if it doesn't exist, otherwise append to it
                if os.path.exists(self.filepath):
                    append_write = 'a'
                else:
                    append_write = 'w'
                with open(self.filepath, append_write) as file_output:
                    file_output.write("\nEpoch: {:d}- ROC-AUC: {:.6f} ; PR-AUC: {:.6f}"
                                      .format(epoch, score_roc, score_pr))
                print("\nEpoch: {:d} - ROC-AUC: {:.6f} PR-AUC: {:.6f}"
                      .format(epoch, score_roc, score_pr))

    # Create callbacks
    if not os.path.exists(ARGS.directory):
        os.makedirs(ARGS.directory)
    checkpoint = ModelCheckpoint(filepath=ARGS.directory + '/weights.{epoch:02d}.hdf5')
    log = LogEval(ARGS.directory + '/log.txt', model, data, ARGS)
    return (checkpoint, log)
9a6075958dd2cb668de8228e139b42e3f0beaa16
3,634,547
from typing import Dict
from datetime import datetime
import decimal
import socket

def nxlog_callback(ch, method, properties, body):
    """
    Callback on consumed message

    :param ch: consuming channel
    :param method:
    :param properties:
    :param body: message from queue
    :return:
    """

    def nx_formatter(event: dict) -> Dict:
        """
        Formats the NXLog wrapper around an event.
        connector_id and dt are required fields in the event.

        :param event:
        :return:
        """

        def cast(message: dict):
            """Type coercion."""
            for k, v in message.items():
                if isinstance(v, datetime):
                    message[k] = message[k].isoformat()
                if isinstance(v, decimal.Decimal):
                    message[k] = int(message[k])
                try:
                    message[k] = int(message[k])
                except (ValueError, TypeError):
                    pass
                if k in ['username']:
                    message[k] = str(message[k])
            return message

        nx_attributes = get_nx_attributes(event['connector_id'])
        f_message = NxlogMessage(**nx_attributes)
        f_message.hostname = socket.gethostname()
        event_time = parse(event['dt'])
        f_message.event_time = event_time
        f_message.detection_time = event_time
        f_message.raw = event
        f_message.md5 = md5_from_raw(event)
        return cast(f_message.to_dict())

    rmq_message = orjson.loads(body)
    logger.debug("Received message from queue: %s", rmq_message)
    metric_notify_counter(app_module=rmq_message['connector_id'],
                          metric_name="stream-of-events")
    # if the event already exists in redis, there's no need to send it to nxlog
    rmq_message_id = f"{rmq_message['connector_id']}_{rmq_message['id']}_{md5_from_raw(rmq_message)}"
    if env.redis.exists(rmq_message_id):
        ch.basic_ack(delivery_tag=method.delivery_tag)
        logger.debug(f"{rmq_message['id']} already exists")
        return
    nx_message = nx_formatter(rmq_message)
    logger.debug("Try to send event to NXLog [%s] %s",
                 nx_message['raw']['connector_id'], nx_message['raw'])
    if not env.nxlog_client:
        env.nxlog_client = NXLogClient(**env.nxlog_config['nx_collector'])
    if env.nxlog_client.send_event(nx_message):
        ch.basic_ack(delivery_tag=method.delivery_tag)
        metric_notify_counter(app_module=rmq_message['connector_id'],
                              metric_name=f"sent_messages_{nx_message['DevType']}")
        # put into redis after successful sending
        env.redis.set(rmq_message_id, body, ex=1209600)  # data retention is 14 days
        metric_notify_counter(app_module=rmq_message['connector_id'],
                              metric_name="received-events")
    return
f0199302b3e8f63ec3e6f6be0a9dc680954cfcc9
3,634,548
def get_user_by_email(email, create_pending=False):
    """finds a user based on his email address.

    :param email: The email address of the user.
    :param create_pending: If True, this function searches for external
        users and creates a new pending User in case no existing user
        was found.
    :return: A :class:`.User` instance or ``None`` if not exactly one
        user was found.
    """
    email = email.lower().strip()
    if not email:
        return None
    if not create_pending:
        res = User.query.filter(~User.is_deleted, User.all_emails == email).all()
    else:
        res = search_users(exact=True, include_pending=True, include_blocked=True,
                           external=True, email=email)
    if len(res) != 1:
        return None
    user_or_identity = next(iter(res))
    if isinstance(user_or_identity, User):
        return user_or_identity
    elif not create_pending:
        return None
    # Create a new pending user
    data = user_or_identity.data
    user = User(first_name=data.get('first_name') or '',
                last_name=data.get('last_name') or '',
                email=email,
                address=data.get('address', ''),
                phone=data.get('phone', ''),
                affiliation=data.get('affiliation', ''),
                is_pending=True)
    db.session.add(user)
    db.session.flush()
    return user
668230ec815c42ac48dfaffd8c79d9d8d9032fca
3,634,549
def get_offset_from_var(var):
    """
    Helper for get_variable_sizes().
    Use this to calculate var offset.
    e.g. var_90, __saved_edi --> 144, -1
    """
    instance = False
    i = 0
    # Parse string
    i = var.rfind(' ') + 1
    tmp = var[i:-1]
    # Parse var
    if tmp[0] == 'v':
        tmp = tmp[4:]
        j = tmp.find('_')
        # Handles SSA var instances (var_14_1) and converts
        # c, 58, 88 --> 12, 88, 136
        if (j != -1):
            tmp = tmp[:j]
            instance = True
        else:
            instance = False
        try:
            tmp = int(tmp, 16)
        except:
            tmp = -1  # -1 for non vars
    else:
        tmp = -1
    return tmp, instance
6cf58d6dc2ffcb7a78d98ed83c2dbcf05933af76
3,634,550
def build_scoring_matrix(alphabet, diag_score, off_diag_score, dash_score):
    """
    Takes as input a set of characters alphabet and three scores diag_score,
    off_diag_score, and dash_score. The function returns a dictionary of
    dictionaries whose entries are indexed by pairs of characters in alphabet
    plus '-'. The score for any entry indexed by one or more dashes is
    dash_score. The score for the remaining diagonal entries is diag_score.
    Finally, the score for the remaining off-diagonal entries is
    off_diag_score.
    """
    alphabet.add('-')
    scoring_matrix = {}
    for first_ltr in alphabet:
        temp = {}
        for sec_ltr in alphabet:
            if first_ltr == sec_ltr and first_ltr != '-':
                temp[sec_ltr] = diag_score
            elif first_ltr == '-' or sec_ltr == '-':
                temp[sec_ltr] = dash_score
            else:
                temp[sec_ltr] = off_diag_score
        scoring_matrix[first_ltr] = temp
    return scoring_matrix
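# A small worked example of the resulting matrix:
m = build_scoring_matrix({'A', 'C'}, diag_score=10, off_diag_score=4, dash_score=-6)
assert m['A']['A'] == 10   # diagonal
assert m['A']['C'] == 4    # off-diagonal
assert m['A']['-'] == -6   # any pair involving a dash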
703c3ef7fb6899a46a26d55dae740705b6953adb
3,634,551
def _foldl_jax(fn, elems, initializer=None,
               parallel_iterations=10,  # pylint: disable=unused-argument
               back_prop=True, swap_memory=False,
               name=None):  # pylint: disable=unused-argument
    """tf.foldl, in JAX."""
    if initializer is None:
        initializer = nest.map_structure(lambda el: el[0], elems)
        elems = nest.map_structure(lambda el: el[1:], elems)
    if len(set(nest.flatten(nest.map_structure(len, elems)))) != 1:
        raise ValueError(
            'Mismatched element sizes: {}'.format(nest.map_structure(len, elems)))
    from jax import lax  # pylint: disable=g-import-not-at-top
    return lax.scan(
        lambda carry, el: (fn(carry, el), None),
        initializer, elems)[0]
1f876a90c25d52f52b9d0315670d68f1d58e9791
3,634,552
import numpy.ma as ma

def MAD(a, c=0.6745, axis=None):
    """
    Median Absolute Deviation along given axis of an array:
    median(abs(a - median(a))) / c
    c = 0.6745 is the constant to convert from MAD to std; it is used by default
    """
    a = ma.masked_where(a != a, a)
    if a.ndim == 1:
        d = ma.median(a)
        m = ma.median(ma.fabs(a - d) / c)
    else:
        d = ma.median(a, axis=axis)
        # I don't want the array to change so I have to copy it?
        if axis > 0:
            aswp = ma.swapaxes(a, 0, axis)
        else:
            aswp = a
        m = ma.median(ma.fabs(aswp - d) / c, axis=0)
    return m
39762026de548a077ccb4c599ad540a04d6c508e
3,634,553
import json

def save_browser_tree_state():
    """Save the browser tree state."""
    data = request.form if request.form else request.data.decode('utf-8')
    old_data = get_setting('browser_tree_state')
    if old_data and old_data != 'null':
        if data:
            data = json.loads(data)
            old_data = json.loads(old_data)
            old_data.update(data)
        new_data = json.dumps(old_data)
    else:
        new_data = data
    try:
        store_setting('browser_tree_state', new_data)
    except Exception as e:
        current_app.logger.exception(e)
        return internal_server_error(errormsg=str(e))
    return success_return()
6bdc8abc6c2189a6329f42f3df63f93c20aa9794
3,634,554
def prompt_yes_no(msg, default=False):
    """Prints the given message and continually prompts the user until they
    answer yes or no. Returns true if the answer was yes, false otherwise."""
    default_str = "no"
    if default:
        default_str = "yes"
    result = prompt_w_default(msg, default_str, "^(Yes|yes|No|no)$")
    if result == "yes" or result == "Yes":
        return True
    return False
e42eb8e41c9251d0c5b046d445a6519b694cde4c
3,634,555
import numpy as np

def persistence_distance(
    x: np.ndarray,
    y: np.ndarray,
    dimension: int = 0,
    persistence_feature: str = "persistence_landscape"
) -> float:
    """Distances are euclidean on persistence features.

    Args:
        x: First dataset.
        y: Second dataset.
        dimension: Dimension for persistence diagram to compute distance.
        persistence_feature: Name of persistence featurizer.

    Returns:
        z: euclidean distance between persistence features.
    """
    return np.linalg.norm(
        featurize_pointcloud(x, dimension, persistence_feature) -
        featurize_pointcloud(y, dimension, persistence_feature)
    )
17f9242ae56ed1ff12111d17da3359060aaec963
3,634,556
import os

def get_user_pysit_path():
    """ Returns the full path to the user's .pysit directory and creates it
    if it does not exist."""
    path = os.path.join(os.path.expanduser('~'), '.pysit')
    if not os.path.isdir(path):
        os.mkdir(path)
    return path
a37f762642ce986dbf3d488bcf5760512d5a0b6c
3,634,557
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
# `PIT` and `PITOutRate` are assumed to be importable from the package's
# metrics module.

def plot_pit_qq(pdf_ens, ztrue, qbins=101, title=None, code=None,
                show_pit=True, show_qq=True,
                pit_out_rate=None, outdir="", savefig=False) -> str:
    """Quantile-quantile plot

    Ancillary function to be used by class Metrics.

    Parameters
    ----------
    pdf_ens: ensemble of photo-z PDFs
    ztrue: true redshifts
    qbins: `int`, optional
        number of PIT bins/quantiles
    title: `str`, optional
        if None, use formatted sample's name (sample.name)
    code: `str`, optional
        if None, use formatted code's name (sample.code)
    show_pit: `bool`, optional
        include PIT histogram (default=True)
    show_qq: `bool`, optional
        include QQ plot (default=True)
    pit_out_rate: `ndarray`, optional
        print metric value on the plot panel (default=None)
    savefig: `bool`, optional
        save plot in .png file (default=False)
    """
    if qbins is None:
        qbins = 100
    if title is None:
        title = ""
    if code is None:
        code = ""
        label = ""
    else:
        label = code + "\n"
    if pit_out_rate is not None:
        try:
            label += "PIT$_{out}$: "
            label += f"{float(pit_out_rate):.4f}"
        except ValueError:
            print("Unsupported format for pit_out_rate.")
    plt.figure(figsize=[4, 5])
    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
    ax0 = plt.subplot(gs[0])
    pitobj = PIT(pdf_ens, ztrue)
    spl_ens, metamets = pitobj.evaluate()
    pit_vals = np.array(pitobj._pit_samps)
    pit_out_rate = PITOutRate(pit_vals, spl_ens).evaluate()
    q_theory = np.linspace(0., 1., qbins)
    q_data = np.quantile(pit_vals, q_theory)
    qq_vec = (q_theory, q_data)
    if show_qq:
        ax0.plot(qq_vec[0], qq_vec[1], c='r', linestyle='-', linewidth=3)  # , label=label)
        ax0.plot([0, 1], [0, 1], color='k', linestyle='--', linewidth=2)
        ax0.set_ylabel("Q$_{data}$", fontsize=18)
        plt.ylim(-0.001, 1.001)
    plt.xlim(-0.001, 1.001)
    plt.title(title)
    if show_pit:
        try:
            y_uni = float(len(pit_vals)) / float(qbins)
        except TypeError:
            y_uni = float(len(pit_vals)) / float(len(qbins))
        if not show_qq:
            ax0.hist(pit_vals, bins=qbins, alpha=0.7)  # , label=label)
            ax0.set_ylabel('Number')
            ax0.hlines(y_uni, xmin=0, xmax=1, color='k')
            plt.ylim(0, )  # -0.001, 1.001)
        else:
            ax1 = ax0.twinx()
            ax1.hist(pit_vals, bins=qbins, alpha=0.7)
            ax1.set_ylabel('Number')
            ax1.hlines(y_uni, xmin=0, xmax=1, color='k')
    leg = ax0.legend(handlelength=0, handletextpad=0, fancybox=True)
    for item in leg.legendHandles:
        item.set_visible(False)
    if show_qq:
        ax2 = plt.subplot(gs[1])
        ax2.plot(qq_vec[0], (qq_vec[1] - qq_vec[0]), c='r', linestyle='-', linewidth=3)
        plt.ylabel(r"$\Delta$Q", fontsize=18)
        ax2.plot([0, 1], [0, 0], color='k', linestyle='--', linewidth=2)
        plt.xlim(-0.001, 1.001)
        plt.ylim(np.min([-0.12, np.min(qq_vec[1] - qq_vec[0]) * 1.05]),
                 np.max([0.12, np.max(qq_vec[1] - qq_vec[0]) * 1.05]))
    if show_pit:
        if show_qq:
            plt.xlabel("Q$_{theory}$ / PIT Value", fontsize=18)
        else:
            plt.xlabel("PIT Value", fontsize=18)
    else:
        if show_qq:
            plt.xlabel("Q$_{theory}$", fontsize=18)
    if savefig:
        fig_filename = str("plot_pit_qq_" + f"{(code).replace(' ', '_')}.jpg")
        plt.savefig(f"{outdir}/{fig_filename}", format='jpg')
    else:
        fig_filename = None
    return fig_filename
565aa0f3e4920f2e4e7340081d46a58647386b79
3,634,558
def rel_mole_weight(ion, ion_num, oxy_num):
    """
    Calculating Relative Molecular Weight
    :param ion: Each cation
    :param ion_num: Number of cations per cation
    :param oxy_num: The number of oxygen atoms corresponding to each cation
    :return: Relative molecular weight
    """
    ion_dict = {'Si': 28.085, 'Ti': 47.867, 'Al': 26.981, 'Cr': 51.996,
                'Fe': 55.845, 'Mn': 54.938, 'Mg': 24.305, 'Ca': 40.078,
                'Na': 22.989, 'K': 39.098, 'P': 30.974, 'Ni': 58.693,
                'Zn': 65.390, 'Li': 6.941, 'Zr': 91.224, 'V': 50.941,
                'O': 15.999}
    length = len(ion)
    if length != len(ion_num) or length != len(oxy_num):
        raise ValueError('ion, ion_num and oxy_num must have the same length')
    relative_molecular_weight = []
    for i in range(length):
        a = ion_dict[ion[i]] * ion_num[i] + ion_dict['O'] * oxy_num[i]
        relative_molecular_weight.append(a)
    return relative_molecular_weight
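# A hand-checkable example with the atomic weights above:
# SiO2 = 28.085 + 2 * 15.999 = 60.083
assert abs(rel_mole_weight(['Si'], [1], [2])[0] - 60.083) < 1e-9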
c1d38209fb5468cac693bc90cfb333afff43100b
3,634,559
def get_admin_token(chat_id):
    """Get an administrator's token by chat_id."""
    session = Session()
    admin = session.query(Admin).\
        filter_by(chat_id=chat_id).first()
    session.close()
    if admin:
        return admin.token
    else:
        return None
dca8be42237f62fc6336cb3d22637d2406aedfc6
3,634,560
import os
import numpy as np

def _get_vocab(name: str):
    """Retrieve model configuration.

    Arguments:
    ----------
        name {str} -- Name of the model.

    Raises:
    -------
        FileNotFoundError: No vocab file provided with model.
        ValueError: Bad name or unavailable model.

    Returns:
    --------
        np.ndarray -- Vocab
    """
    # retrieve known model vocab
    if name in MODEL_LIST:
        model_dir = os.path.join(MODEL_PATH, name)
        return np.load(os.path.join(model_dir, "vocab.npy"))
    # retrieve custom user vocab
    elif os.path.isdir(name):
        try:
            return np.load(os.path.join(name, "vocab.npy"))
        except FileNotFoundError:
            raise FileNotFoundError(f"No 'vocab.npy' configuration "
                                    f"file available in {name}.")
    else:
        raise ValueError(f'Unknown model "{name}". '
                         f'Available ESN models: {MODEL_LIST}.')
606cb864323cd3ace8a7c5b241fe74a8573a9e91
3,634,561
import os

def filter_directory(directory: str, extension: str = '.py') -> str:
    """
    Delete all files within the given directory with filenames
    not ending in the given extension
    """
    for root, dirs, files in os.walk(directory):
        for fi in files:
            if not fi.endswith(extension):
                os.remove(os.path.join(root, fi))
    return directory
e0ad4853c6ca8c2337dbd3c7b9901c7e6e9ce6a4
3,634,562
from typing import List
import os

def get_batches_for_prefix(gcs_client: storage.Client,
                           prefix_path: str,
                           ignore_subprefix="_config/",
                           ignore_file=SUCCESS_FILENAME) -> List[List[str]]:
    """
    This function creates batches of GCS uris for a given prefix.
    This prefix could be a table prefix or a partition prefix
    inside a table prefix.
    Returns an array of batches (one batch has an array of multiple GCS uris).
    """
    batches = []
    blob: storage.Blob = storage.Blob.from_string(prefix_path)
    bucket_name = blob.bucket.name
    prefix_name = blob.name
    prefix_filter = f"{prefix_name}"
    bucket = cached_get_bucket(gcs_client, bucket_name)
    blobs = list(bucket.list_blobs(prefix=prefix_filter, delimiter="/"))
    cumulative_bytes = 0
    max_batch_size = int(os.getenv("MAX_BATCH_BYTES", DEFAULT_MAX_BATCH_BYTES))
    batch: List[str] = []
    for blob in blobs:
        # The API returns the root prefix also, which should be ignored.
        # Similarly, the _SUCCESS file and anything in the _config/ prefix
        # should be ignored.
        if (blob.name in {f"{prefix_name}/", f"{prefix_name}/{ignore_file}"}
                or blob.name.startswith(f"{prefix_name}/{ignore_subprefix}")):
            continue
        if blob.size == 0:  # ignore empty files
            print(f"ignoring empty file: gs://{bucket_name}/{blob.name}")
            continue
        cumulative_bytes += blob.size
        # keep adding until we reach the threshold
        if cumulative_bytes <= max_batch_size or len(batch) > MAX_SOURCE_URIS_PER_LOAD:
            batch.append(f"gs://{bucket_name}/{blob.name}")
        else:
            batches.append(batch.copy())
            batch.clear()
            batch.append(f"gs://{bucket_name}/{blob.name}")
            cumulative_bytes = blob.size
    # pick up remaining files in the final batch
    if len(batch) > 0:
        batches.append(batch.copy())
        batch.clear()
    if len(batches) > 1:
        print(f"split into {len(batches)} load jobs.")
    elif len(batches) == 1:
        print("using single load job.")
    else:
        raise RuntimeError("No files to load!")
    return batches
172838ea9cd92f008eba453dbf45b13a569499b9
3,634,563
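The batching rule above is a greedy bin-fill: accumulate URIs until either the byte budget or the URI-count cap would be exceeded, then start a fresh batch. A minimal standalone sketch of the same rule, with made-up limits and toy sizes instead of GCS blobs:

def greedy_batches(sizes, max_bytes=100, max_items=3):
    """Group item sizes into batches capped by total bytes and item count."""
    batches, batch, total = [], [], 0
    for size in sizes:
        if batch and (total + size > max_bytes or len(batch) >= max_items):
            batches.append(batch)
            batch, total = [], 0
        batch.append(size)
        total += size
    if batch:
        batches.append(batch)
    return batches

assert greedy_batches([40, 40, 40, 90, 10]) == [[40, 40], [40], [90, 10]]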
import collections
import string


def index_of_coincidence(text):
    """Index of coincidence of a string.
    This is low for random text, higher for natural language.
    """
    stext = sanitise(text)
    counts = collections.Counter(stext)
    denom = len(stext) * (len(stext) - 1) / 26
    return (
        sum(counts[l] * (counts[l] - 1) for l in string.ascii_lowercase)
        / denom
    )
a8a5e0f50dab24c3f3be525f30b6c5c5112a8a48
3,634,564
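`sanitise` comes from elsewhere in that module; assuming it lowercases and strips non-letters, this self-contained restatement shows the expected behaviour (with the /26 normalisation above, natural language scores well above 1.0 while perfectly flat letter counts fall below it):

import collections
import string

def ioc(text):  # self-contained restatement, with an assumed sanitise step
    stext = ''.join(c for c in text.lower() if c in string.ascii_lowercase)
    counts = collections.Counter(stext)
    n = len(stext)
    return sum(c * (c - 1) for c in counts.values()) / (n * (n - 1) / 26)

print(ioc("to be or not to be that is the question"))  # well above 1.0 (short sample, so noisy)
print(ioc("abcdefghijklmnopqrstuvwxyz" * 4))           # flat counts -> below 1.0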
def get_allocations(jm_id: str) -> dict:
    """
    Get Allocations

    Get project allocations for user currently connected to remote system.

    Parameters
    ----------
    jm_id : str
        ID of Job Manager instance.

    Returns
    -------
    allocations : dictionary
        Dictionary containing information on available project allocations.
    """
    try:
        allocations = api_call('GET', f"{jm_id}/allocations")
    except TACCJMError as e:
        e.message = "get_allocations error"
        logger.error(e.message)
        raise e

    return allocations
d3ad268b7f56bd48d51b1d150644e39ee6e8b7f3
3,634,565
import struct def set_real(bytearray_: bytearray, byte_index: int, real) -> bytearray: """Set Real value Notes: Datatype `real` is represented in 4 bytes in the PLC. The packed representation uses the `IEEE 754 binary32`. Args: bytearray_: buffer to write to. byte_index: byte index to start writing from. real: value to be written. Returns: Buffer with the value written. Examples: >>> data = bytearray(4) >>> snap7.util.set_real(data, 0, 123.321) bytearray(b'B\\xf6\\xa4Z') """ real = float(real) real = struct.pack('>f', real) _bytes = struct.unpack('4B', real) for i, b in enumerate(_bytes): bytearray_[byte_index + i] = b return bytearray_
bda32caab27adeae7c6710d4c26743b93533ccff
3,634,566
def LogNormalAddLoc(builder, loc): """This method is deprecated. Please switch to AddLoc.""" return AddLoc(builder, loc)
761522963da65b2ab05e4a687cad3ea44ebe3d1d
3,634,567
def newton_raphson(x, y): """ The implementation of the `Newton-Raphson <https://en.wikipedia.org/wiki/Newton%27s_method>`_ optimization procedure. It fits the knee curve :math:`f(x)` to the :math:`y` s of the corresponding :math:`x` s by tweaking the shape parameter :math:`c` from an initial guess. Args: x (``np.ndarray``): the ground truth :math:`x` coordinates y (``np.ndarray``): the ground truth :math:`y` coordinates Returns: ``float``: the optimal shape parameter :math:`c` which minimizes the squared error, up to a predefined tolerance level """ c = 0 new_c = 3 while abs(new_c - c) > TOLERANCE: c = new_c new_c = c - de_dc(y, x, c) / d2e_dc2(y, x, c) return new_c
6038effed89df29275123811f837f3d3e0f286a7
3,634,568
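The loop above is the second-order Newton update c <- c - E'(c)/E''(c) on the squared error, with the derivatives supplied by the module's de_dc/d2e_dc2 helpers. As a self-contained illustration of the same update rule on a toy objective (not the module's error function):

# Minimise f(x) = (x - 3)**2 with Newton's method: x <- x - f'(x) / f''(x)
x, new_x = 0.0, 10.0
while abs(new_x - x) > 1e-6:
    x = new_x
    new_x = x - (2 * (x - 3)) / 2.0   # f'(x) = 2(x - 3), f''(x) = 2
print(new_x)  # converges to 3.0 in one step, since f is quadratic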
def _vx_no_BRST_check_massive_pp_zero(nhel, nsvahl): """ Parameters ---------- nhel: tf.Tensor, boson helicity of shape=() nsvahl: tf.Tensor, helicity times particle|anti-particle absolute value of shape=() Returns ------- tf.Tensor, of shape=(None,4) and dtype DTYPECOMPLEX """ hel0 = 1.0 - tfmath.abs(nhel) v = [[complex_tf(1,0)]] * 4 v[1] = [complex_tf(-nhel * SQH, 0.0)] v[2] = [complex_tf(0.0, nsvahl * SQH)] v[3] = [complex_tf(hel0, 0.0)] return tf.stack(v, axis=1)
bbe3fe72786f7944263254092da82d062310e10c
3,634,569
import docutils.frontend
import docutils.nodes
import docutils.parsers.rst
import docutils.utils


def rst2node(data, env):
    """Converts a reStructuredText string into its docutils node"""
    if not data:
        return
    parser = docutils.parsers.rst.Parser()
    document = docutils.utils.new_document("<>")
    document.settings = docutils.frontend.OptionParser().get_default_values()
    document.settings.tab_width = 4
    document.settings.pep_references = False
    document.settings.rfc_references = False
    document.settings.character_level_inline_markup = False
    document.settings.env = env
    parser.parse(data, document)
    if len(document.children) == 1:
        return document.children[0].deepcopy()
    else:
        par = docutils.nodes.paragraph()
        for child in document.children:
            par += child.deepcopy()
        return par
7ab3f8f80860a73e35cfb8ae2f1421c4b8a09533
3,634,570
def current_branch(): """ Return the current branch """ return f'{REPO.active_branch}'
c995896fbde35b2d07a06139efb8fc9bc72dd667
3,634,571
from datetime import datetime
import hashlib


def _create_config_txn(pubkey, signing_key, setting_key_value):
    """Creates an individual sawtooth_config transaction for the given
    key and value.
    """
    setting_key = setting_key_value[0]
    setting_value = setting_key_value[1]
    nonce = str(datetime.utcnow().timestamp())
    proposal = ConfigProposal(
        setting=setting_key,
        value=setting_value,
        nonce=nonce)
    payload = ConfigPayload(data=proposal.SerializeToString(),
                            action=ConfigPayload.PROPOSE).SerializeToString()
    header = TransactionHeader(
        signer_pubkey=pubkey,
        family_name='sawtooth_config',
        family_version='1.0',
        inputs=_config_inputs(setting_key),
        outputs=_config_outputs(setting_key),
        dependencies=[],
        payload_encoding='application/protobuf',
        payload_sha512=hashlib.sha512(payload).hexdigest(),
        batcher_pubkey=pubkey
    ).SerializeToString()
    signature = signing.sign(header, signing_key)
    return Transaction(
        header=header,
        header_signature=signature,
        payload=payload)
5a4057657dc41c6403983d9ecded8b7194c5989e
3,634,572
def round_unit(x, unit):
    """ Round x to an integer multiple of a given unit

    round_int leans toward simplifying engineering code, while round_unit
    leans toward algorithms; the functionality differs, hence the separate
    grouping.

    Args:
        x: original value
        unit: unit quantity

    Returns:
        new value, an integer multiple of unit

    >>> round_unit(1.2, 0.5)
    1.0
    >>> round_unit(1.6, 0.5)
    1.5
    >>> round_unit(7, 5)
    5
    >>> round_unit(13, 5)
    15
    """
    return round_int(x / unit) * unit
b59f5f74fbf4622d1fa5b3fd7af6386b39eac784
3,634,573
import os
import io


def read(*paths):
    """Read a text file."""
    basedir = os.path.dirname(__file__)
    full_path = os.path.join(basedir, *paths)
    with io.open(full_path, encoding='utf-8') as f:
        return f.read().strip()
30b2310917a8b3f42fed6ab27dca2c087db02436
3,634,574
import numpy as np


def centroid_1D(image, xpeak, xhw, debug=False):
    """
    Fine location of the target by calculating the centroid for the region
    centered on the brightest checkbox.

    Performs the centroid calculation on the checkbox region calculated
    using the function checkbox_1D().

    Keyword arguments:
    image -- 2 dimensional psf image
    xpeak -- The location of the brightest checkbox in the flattened vector.
    xhw   -- The halfwidth of the checkbox region calculated in checkbox_1D.

    Output(s):
    x_cen -- Target centroid location.
    c_sum -- The calculated flux sum within the checkbox region.

    Example usage:

        >> cb_centroid, cb_sum = centroid_1D(psf, cb_center, cb_hw)

        Find the vector centroid given the checkbox center and halfwidth.
    """

    # Collapse input image onto x axis
    vector = np.sum(image, axis=0)

    c_sum = 0.0
    xcen = 0.0

    for ii in range(int(xpeak - xhw - 1), int(xpeak + xhw - 1)):
        c_sum = c_sum + vector[ii]
        xloc = ii + 1
        xcen += xloc * vector[ii]

    print('(centroid_1D): Sum = ', c_sum)

    if c_sum == 0:
        print('(centroid_1D): ERROR - divide by zero')
    else:
        xcen /= c_sum

    print('(centroid_1D): Centroid = ', xcen - 1)

    # -1 on both axes, as Python is 0 major
    return xcen - 1, c_sum
49d8c3054c62bc00e96e028a17d7c662ef0b7066
3,634,575
import typing import json async def async_get_preference(connection, key: PreferenceKey) -> typing.Union[None, typing.Any]: """ Gets a preference by key. :param key: The preference key, from the `PreferenceKey` enum. :returns: An object with the preferences value, or `None` if unset and no default exists. """ proto = await iterm2.rpc.async_get_preference(connection, key.value) j = proto.preferences_response.results[0].get_preference_result.json_value return json.loads(j)
4c3c5d4ee71d0e7dc85b7ef85075137dd639ee25
3,634,576
def index(): """ View function for the index page. """ user: User = current_user return \ "<div>" +\ f"<a href=\"{url_for('auth.logout')}\">Log out</a>" +\ f"<h1>Welcome {str(user)}</h1>" +\ "</div>"
161d6bf4968bd3bed65d81470e33fff45a101329
3,634,577
import socket


def wait_for_socket(hostname, port):
    # TODO: upstream this modified version into flocker (it was copied from
    # flocker.acceptance.test_api)
    """
    Wait until a remote TCP socket is available.

    :param str hostname: The host where the remote service is running.

    :return Deferred: Fires when the socket is available.
    """
    def api_available():
        s = socket.socket()
        try:
            s.connect((hostname, port))
            return True
        except socket.error:
            return False
        finally:
            s.close()
    return loop_until(api_available)
d62c0f3ecb253fa3bdd5794f85171fb3fc794ad9
3,634,578
import typing


def format_event_pull_request(data: typing.Dict[str, typing.Any]) -> typing.Optional[str]:
    """
    Format a GitHub pull_request event into a string.
    Returns None for actions that are not handled.
    """
    resp = f"{format_author(data['sender'])} "
    description = f"{format_issue_or_pr(data['pull_request'])} in {format_repo(data['repository'])}"
    if data['action'] in ("opened", "edited"):
        resp += f"{data['action']} pull request {format_issue_or_pr(data['pull_request'], False)} in "
        resp += f"{format_repo(data['repository'])}:\n\n# {data['pull_request']['title']}\n{markdownify(data['pull_request']['body'])}"
    elif data['action'] == "closed":
        resp += f"{'merged' if data['pull_request']['merged'] else 'closed without merging'} pull request {description}."
    elif data['action'] in ("locked", "unlocked", "reopened"):
        resp += f"{data['action']} pull request {description}."
    elif data['action'] == "ready_for_review":
        resp = f"Pull request {description} is ready for review."
    elif data['action'] == "review_requested":
        resp += f"is requesting reviews for {description} from {format_author(data['requested_reviewer'])}."
    elif data['action'] == "review_request_removed":
        resp += f"is no longer requesting reviews for {description} from {format_author(data['requested_reviewer'])}."
    elif data['action'] in ("assigned", "unassigned"):
        resp += f"{data['action']} {format_author(data['assignee'])} to pull request {description}."
    elif data['action'] in ("labeled", "unlabeled"):
        resp += f"{data['action']} pull request {description} as [**{data['label']['name']}**]"
        resp += f"({data['repository']['html_url']}/labels/{data['label']['name']})."
    else:
        return None
    return resp
1c5dad5c4ca0218b14c46da6abc0615dbe7f8b6b
3,634,579
import math


def project_gdf(gdf, to_crs=None, to_latlong=False):
    """
    lovingly copied from OSMNX
    <https://github.com/gboeing/osmnx/blob/master/osmnx/projection.py>

    Project a GeoDataFrame to the UTM zone appropriate for its geometries'
    centroid.
    The simple calculation in this function works well for most latitudes, but
    won't work for some far northern locations like Svalbard and parts of far
    northern Norway.

    Parameters
    ----------
    gdf : GeoDataFrame
        the gdf to be projected
    to_crs : dict or string or pyproj.CRS
        if not None, just project to this CRS instead of to UTM
    to_latlong : bool
        if True, projects to latlong instead of to UTM

    Returns
    -------
    GeoDataFrame
    """
    assert len(gdf) > 0, 'You cannot project an empty GeoDataFrame.'

    # if latlong or an explicit CRS was requested, project to that instead
    if to_latlong:
        return gdf.to_crs('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')
    if to_crs is not None:
        return gdf.to_crs(to_crs)

    # else, project the gdf to UTM
    # if GeoDataFrame is already in UTM, just return it
    if is_crs_utm(gdf.crs):
        return gdf

    # calculate the centroid of the union of all the geometries in the
    # GeoDataFrame
    avg_longitude = gdf['geometry'].unary_union.centroid.x

    # calculate the UTM zone from this avg longitude and define the UTM
    # CRS to project
    utm_zone = int(math.floor((avg_longitude + 180) / 6.) + 1)
    utm_crs = '+proj=utm +zone={} +ellps=WGS84 +datum=WGS84 +units=m +no_defs'.format(utm_zone)

    # project the GeoDataFrame to the UTM CRS
    projected_gdf = gdf.to_crs(utm_crs)
    return projected_gdf
984d3f4cdfdaec434ffc1049d189e0e7532afeae
3,634,580
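The zone arithmetic above is floor((lon + 180) / 6) + 1. A quick standalone check with hypothetical coordinates, no geopandas needed:

import math

def utm_zone(longitude):
    # Same zone arithmetic as project_gdf above.
    return int(math.floor((longitude + 180) / 6.) + 1)

assert utm_zone(-122.4) == 10   # San Francisco falls in zone 10
assert utm_zone(2.35) == 31     # Paris falls in zone 31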
import pathlib
import glob
import shutil
import sys


def _HadoopMovePartFile(local_path, file_extension: str):
    """ Internal function moving the single part file from a hadoop path to the local path

    Args:
        local_path: the local path where to export
        file_extension: the file extension which to search for
    """
    path = pathlib.Path(local_path)
    if not (path / '_SUCCESS').exists():
        # When the _SUCCESS file does not exist the file creation failed
        print(f'Error: _SUCCESS file does not exist in path {str(path.absolute())}', file=sys.stderr)
        return False
    files = glob.glob(str(path / f'part-*.{file_extension}'))
    if not files:
        print(f'Error: Could not find any file in folder matching *.{file_extension}', file=sys.stderr)
        return False
    if len(files) > 1:
        print(f'Found multiple files in folder matching *.{file_extension}. This function does not support merging several files.', file=sys.stderr)
        return False
    shutil.move(files[0], str(path.absolute()) + '.tmp')
    shutil.rmtree(str(path.absolute()))
    shutil.move(str(path.absolute()) + '.tmp', str(path.absolute()))
    return True
c236aa7c154296cf178255a913da57fdd4d65904
3,634,581
def get_shape(obj):
    """
    Get the shape of a :code:`numpy.ndarray` or of a nested list.

    Parameters:
        obj: The object of which to determine the shape.

    Returns:
        A tuple describing the shape of the :code:`ndarray` or the nested
        list, or :code:`()` if obj is not an instance of either of these
        types.
    """
    if hasattr(obj, "shape"):
        return obj.shape
    elif isinstance(obj, list):
        if obj == []:
            return (0,)
        else:
            return (len(obj),) + get_shape(obj[0])
    else:
        return ()
d02d755f4b9e4a4dbde6c87ddfe0b5729a8c158e
3,634,582
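A few hypothetical usage checks for get_shape on nested lists, an ndarray, and a scalar:

import numpy as np

assert get_shape([[1, 2, 3], [4, 5, 6]]) == (2, 3)  # shape inferred from nesting
assert get_shape(np.zeros((4, 2))) == (4, 2)        # delegates to .shape
assert get_shape(7) == ()                           # scalars have an empty shape
assert get_shape([]) == (0,)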
import builtins
from unittest import mock


def help_ui_check_answer(capsys, r_input):
    """Test helper for the 24 game: feeds r_input to g_c.ui_check_answer()
    via a mocked input() and normalises the captured solution string
    (x -> *, / symbols)."""
    final_result = ''
    with mock.patch.object(builtins, 'input', lambda _: r_input):
        g_c.ui_check_answer()
    out, err = capsys.readouterr()
    print(err)
    if out == '\nSeems no solutions\n\n':
        final_result = 'n'
    else:
        for i in out:
            if i == '\n':
                break
            if i == '×':
                final_result += '*'
            elif i == '÷':
                final_result += '/'
            else:
                final_result += i
    return final_result
1cae7d98ef1a87c4a53416f44d5c49312fced197
3,634,583
import collections def precision_recall(classifier, testfeats): """ computes precision and recall of a classifier """ refsets = collections.defaultdict(set) testsets = collections.defaultdict(set) for i, (feats, label) in enumerate(testfeats): refsets[label].add(i) observed = classifier.classify(feats) testsets[observed].add(i) precisions = {} recalls = {} for label in classifier.labels(): precisions[label] = precision(refsets[label], testsets[label]) recalls[label] = recall(refsets[label], testsets[label]) return precisions, recalls
97a76fe595b26a9a5a307e799659fb96f1642941
3,634,584
import logging def set_power_state_xavier(power_state: XavierPowerState) -> None: """Record the current power state and set power limit using nvpmodel.""" # Set power limit to the specified value if is_xavier_agx(): platform = "xavier_agx" elif is_xavier_nx(): platform = "xavier_nx" else: raise RuntimeError("Xavier platform must be AGX or NX") with open("build/nvpmodel.temp.conf", "w") as f: f.write(nvpmodel_template[platform].format(gpu_clock=power_state.gpu_freq, dla_clock=power_state.dla_freq, cpu_clock=power_state.cpu_freq, emc_clock=power_state.emc_freq)) cmd = "sudo /usr/sbin/nvpmodel -f build/nvpmodel.temp.conf -m 8 && sudo /usr/sbin/nvpmodel -d cool" logging.info(f"Setting current nvpmodel conf: {cmd}") run_command(cmd) return None
8b843acfff292b61dccf8ebc6433cb04446e5a7e
3,634,585
from typing import Any from typing import List def as_list(x: Any) -> List[Any]: """Wrap argument into a list if it is not iterable. :param x: a (potential) singleton to wrap in a list. :returns: [x] if x is not iterable and x if it is. """ # don't treat strings as iterables. if isinstance(x, str): return [x] try: _ = iter(x) return x except TypeError: return [x]
4b1b26857d209a9f5b142908e3a35b1ce7b05be4
3,634,586
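Quick behavioural checks for as_list (hypothetical usage):

assert as_list("abc") == ["abc"]     # strings are not unpacked
assert as_list([1, 2]) == [1, 2]     # iterables pass through unchanged
assert as_list(5) == [5]             # scalars get wrapped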
def _sort_destinations(destinations):
    """
    Takes a list of destination tuples and returns the same list, sorted
    in ascending order of the jumps (the second element of each tuple).
    """
    return sorted(destinations, key=lambda dest: dest[1])
302480ef09f4b5a402a5c568c5d35d717db8c851
3,634,587
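Hypothetical usage: tuples of (name, jumps) come back ordered by jumps.

dests = [("B", 5), ("A", 2), ("C", 9)]
assert _sort_destinations(dests) == [("A", 2), ("B", 5), ("C", 9)]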
def Q8(): """ Return the matroid `Q_8`, represented as circuit closures. The matroid `Q_8` is a 8-element matroid of rank-4. It is a smallest non-representable matroid. See [Oxl2011]_, p. 647. EXAMPLES:: sage: from sage.matroids.advanced import setprint sage: M = matroids.named_matroids.Q8(); M Q8: Matroid of rank 4 on 8 elements with circuit-closures {3: {{'a', 'b', 'c', 'h'}, {'a', 'b', 'd', 'e'}, {'a', 'b', 'f', 'g'}, {'a', 'c', 'd', 'f'}, {'a', 'd', 'g', 'h'}, {'a', 'e', 'f', 'h'}, {'b', 'c', 'd', 'g'}, {'b', 'c', 'e', 'f'}, {'c', 'd', 'e', 'h'}, {'c', 'f', 'g', 'h'}, {'d', 'e', 'f', 'g'}}, 4: {{'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'}}} sage: setprint(M.flats(3)) [{'a', 'b', 'c', 'h'}, {'a', 'b', 'd', 'e'}, {'a', 'b', 'f', 'g'}, {'a', 'c', 'd', 'f'}, {'a', 'c', 'e'}, {'a', 'c', 'g'}, {'a', 'd', 'g', 'h'}, {'a', 'e', 'f', 'h'}, {'a', 'e', 'g'}, {'b', 'c', 'd', 'g'}, {'b', 'c', 'e', 'f'}, {'b', 'd', 'f'}, {'b', 'd', 'h'}, {'b', 'e', 'g'}, {'b', 'e', 'h'}, {'b', 'f', 'h'}, {'b', 'g', 'h'}, {'c', 'd', 'e', 'h'}, {'c', 'e', 'g'}, {'c', 'f', 'g', 'h'}, {'d', 'e', 'f', 'g'}, {'d', 'f', 'h'}, {'e', 'g', 'h'}] sage: M.is_valid() # long time True """ E = 'abcdefgh' CC = { 3: ['abfg', 'bcdg', 'defg', 'cdeh', 'aefh', 'abch', 'abed', 'cfgh', 'bcef', 'adgh', 'acdf'], 4: [E] } M = CircuitClosuresMatroid(groundset=E, circuit_closures=CC) M.rename('Q8: ' + repr(M)) return M
469ca05d13655ee19618dd9ffa3001c0792982ac
3,634,588
import re


def remove_url(text):
    """ Remove URLs

    :param text: text to transform
    :return: transformed text
    """
    return re.sub(r'http\S+', '', text)
d0f3716808863d5e868da1efc4a7bb16ffa47ac1
3,634,589
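Hypothetical usage: everything from "http" to the next whitespace is dropped (note the double space left behind).

assert remove_url("see https://example.com here") == "see  here"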
import numpy as np


def T_sun(a: float, m2_over_m1: float) -> float:
    """
    T = 2 * pi * sqrt(a^3) / (K * sqrt(1 + m2/m1))

    :param a: semi-major axis
    :type a: float
    :param m2_over_m1: m2 / m1
    :type m2_over_m1: float
    :return: orbital period
    :rtype: float
    """
    return 2 * np.pi * np.sqrt(a * a * a / mu_sun(m2_over_m1))
6b88aba027a57a6a2b1d66bd7accdcde75d5f20d
3,634,590
def deleteDuplicates(head: ListNode) -> ListNode:
    """Remove duplicates from a linked list; the key point is that the list is already sorted."""
    if not head or not head.next:
        return head
    pre, ptr = head, head.next
    while pre and ptr:
        if pre.val == ptr.val:
            pre.next = ptr.next
            ptr = pre.next
        else:
            pre = ptr
            ptr = ptr.next
    return head
09ac88103f9fb4ef85550ab201e0b05e1dd96dca
3,634,591
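A minimal sketch assuming the usual singly-linked ListNode (val/next) definition, which is not part of the original record:

class ListNode:  # assumed node definition
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

# Build 1 -> 1 -> 2 and deduplicate to 1 -> 2.
head = ListNode(1, ListNode(1, ListNode(2)))
node = deleteDuplicates(head)
values = []
while node:
    values.append(node.val)
    node = node.next
assert values == [1, 2]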
def predict_spectrum(svrs, X_, mask=None, scaler=None):
    """ predict a single spectrum given a list of svrs & mask

    Parameters
    ----------
    svrs : list
        a list of svr objects
    X_ : ndarray
        the labels of predicted spectra
    mask : None | bool array
        predict the pixels where mask==True
    scaler : scaler object
        if not None, scale X_ before predictions using this scaler

    Returns
    -------
    ys : ndarray
        predicted spectra

    """
    if X_.ndim == 1:
        X_ = X_.reshape(1, -1)

    # scale X_ if necessary
    if scaler is not None:
        X_ = scaler.transform(X_)

    # default is to use all pixels
    if mask is None:
        mask = np.ones((len(svrs),), dtype=bool)

    # make predictions
    ys = [predict_pixel(svr, X_, mask_) for svr, mask_ in zip(svrs, mask)]
    ys = np.array(ys, dtype=float).T

    return ys
49bc83c6f9a5bce2bdadc6a8b72313c95ab89c85
3,634,592
def _type_operand(): """ Parser for argument of a binary type operation. """ return binder_type() | agda_vars()
308b95cdd5378d7347ec4dd583ce387776e178cd
3,634,593
from os.path import basename, splitext


def kernel(process):
    """ Get the kernel id from a process """
    for arg in process.cmdline():
        if arg.endswith('.json') and '/kernel-' in arg:
            return splitext(basename(arg).replace('kernel-', ''))[0]
27e1235cf47bbb6c4db6f1853d51b2710a913212
3,634,594
import json


def debug_status(status, error) -> str:
    """Return a debug string for the autoscaler."""
    if not status:
        status = "No cluster status."
    else:
        status = status.decode("utf-8")
        as_dict = json.loads(status)
        lm_summary = LoadMetricsSummary(**as_dict["load_metrics_report"])
        if "autoscaler_report" in as_dict:
            autoscaler_summary = AutoscalerSummary(
                **as_dict["autoscaler_report"])
            status = format_info_string(lm_summary, autoscaler_summary)
        else:
            status = format_info_string_no_node_types(lm_summary)
    if error:
        status += "\n"
        status += error.decode("utf-8")
    return status
579fa8aba2a62f7dcd44a09343060e033aa552b9
3,634,595
def solve_ciknock( Sigma, tol=1e-5, num_iter=10, ): """ Computes S-matrix used to generate conditional independence knockoffs. Parameters ---------- Sigma : np.ndarray ``(p, p)``-shaped covariance matrix of X tol : float Minimum permissible eigenvalue of 2Sigma - S and S. num_iter : int The number of iterations in the binary search to ensure S is feasible. Returns ------- S : np.ndarray ``(p, p)``-shaped (block) diagonal matrix used to generate knockoffs Notes ----- When the S-matrix corresponding to conditional independence knockoffs is not feasible, this computes that S matrix and then does a binary search to find the maximum gamma such that gamma * S is feasible. """ # Compute vanilla S_CI S = 1 / (np.diag(np.linalg.inv(Sigma))) S = np.diag(S) # Ensure validity of solution S = utilities.shift_until_PSD(S, tol=tol) S, _ = utilities.scale_until_PSD(Sigma, S, tol=tol, num_iter=num_iter) return S
bf4183a3c7a0660d0f99bab27d83213e67892bc6
3,634,596
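The core identity in solve_ciknock is S_CI = diag(1 / diag(inv(Sigma))). A toy numpy check of just that step, omitting the PSD shifting/scaling done by the module's utilities:

import numpy as np

Sigma = np.array([[1.0, 0.5],
                  [0.5, 1.0]])
S_ci = np.diag(1 / np.diag(np.linalg.inv(Sigma)))
# inv(Sigma) has diagonal 1 / (1 - 0.5**2) = 4/3, so S_ci = 0.75 * I
assert np.allclose(S_ci, 0.75 * np.eye(2))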
def main(request): """ Main view. Just shows the status of repos, with open prs, as well as a short list of recent jobs. Input: request: django.http.HttpRequest Return: django.http.HttpResponse based object """ limit = 30 repos, evs_info, default = get_user_repos_info(request, limit=limit) return render(request, 'ci/main.html', {'repos': repos, 'recent_events': evs_info, 'last_request': TimeUtils.get_local_timestamp(), 'event_limit': limit, 'update_interval': settings.HOME_PAGE_UPDATE_INTERVAL, 'default_view': default, })
4c9cdffeca6eaf0494c82bc834cf872ef67086aa
3,634,597
def is_match(text, full_hashed_value, **options): """ gets a value indicating that given text's hash is identical to given full hashed value. :param str text: text to be hashed. :param str full_hashed_value: full hashed value to compare with. :rtype: bool """ return get_component(HashingPackage.COMPONENT_NAME).is_match(text, full_hashed_value, **options)
fbafabe38626c5e189827c984d0c7af970623460
3,634,598
def remove_metrics(metrics_in, metric_collection, reduced_set=False, portraitplot=False): """ Removes some metrics from given list Inputs: ------ :param metrics_in: list of string List of metrics. :param metric_collection: string Name of a metric collection. **Optional arguments:** :param reduced_set: boolean, optional True to remove extra metrics that are not in the final set chosen by CLIVAR PRP. If set to False it removes metrics that are in more than one metric collection. Default value is False :param portraitplot: boolean, optional True to remove extra metrics that are not in the final set chosen by CLIVAR PRP but keep metrics that are in more than one metric collection. If set to False it removes metrics that are in more than one metric collection. Default value is False Output: ------ :return metrics_out: list of string Input list of metrics minus some metrics depending on given metric collection. """ metrics_out = deepcopy(metrics_in) if reduced_set is True: if portraitplot is True: if metric_collection == "ENSO_perf": to_remove = [ 'BiasSshLatRmse', 'BiasSshLonRmse', 'BiasSstLatRmse', 'BiasTauxLatRmse', 'EnsoPrTsRmse', 'EnsoSstDiversity_1', 'EnsoTauxTsRmse', 'NinaSstDur', 'NinaSstDur_1', 'NinaSstDur_2', 'NinaSstLonRmse', 'NinaSstLonRmse_1', 'NinaSstLonRmse_2', 'NinaSstTsRmse', 'NinaSstTsRmse_1', 'NinaSstTsRmse_2', 'NinoSstDiversity', 'NinoSstDiversity_1', 'NinoSstDiversity_2', 'NinoSstDur', 'NinoSstDur_1', 'NinoSstDur_2', 'NinoSstLonRmse', 'NinoSstLonRmse_1', 'NinoSstLonRmse_2', 'NinoSstTsRmse', 'NinoSstTsRmse_1', 'NinoSstTsRmse_2', "SeasonalSshLatRmse", "SeasonalSshLonRmse", "SeasonalSstLatRmse", "SeasonalTauxLatRmse"] elif metric_collection == "ENSO_proc": to_remove = [ 'BiasSshLonRmse', 'EnsodSstOce_1', 'EnsoFbSstLhf', 'EnsoFbSstLwr', 'EnsoFbSstShf', 'EnsoFbSstSwr'] else: to_remove = [ 'EnsoPrMapCorr', 'EnsoPrMapRmse', 'EnsoPrMapStd', 'EnsoPrMapDjfCorr', 'EnsoPrMapDjfStd', 'EnsoPrMapJjaCorr', 'EnsoPrMapJjaStd', 'EnsoSlpMapCorr', 'EnsoSlpMapRmse', 'EnsoSlpMapStd', 'EnsoSlpMapDjfCorr', 'EnsoSlpMapDjfRmse', 'EnsoSlpMapDjfStd', 'EnsoSlpMapJjaCorr', 'EnsoSlpMapJjaRmse', 'EnsoSlpMapJjaStd', 'EnsoSstMapCorr', 'EnsoSstMapRmse', 'EnsoSstMapStd', 'EnsoSstMapDjfCorr', 'EnsoSstMapDjfStd', 'EnsoSstMapJjaCorr', 'EnsoSstMapJjaStd', 'NinaPrMapCorr', 'NinaPrMap_1Corr', 'NinaPrMap_2Corr', 'NinaPrMapRmse', 'NinaPrMap_1Rmse', 'NinaPrMap_2Rmse', 'NinaPrMapStd', 'NinaPrMap_1Std', 'NinaPrMap_2Std', 'NinaSlpMapCorr', 'NinaSlpMap_1Corr', 'NinaSlpMap_2Corr', 'NinaSlpMapRmse', 'NinaSlpMap_1Rmse', 'NinaSlpMap_2Rmse', 'NinaSlpMapStd', 'NinaSlpMap_1Std', 'NinaSlpMap_2Std', 'NinaSstLonRmse', 'NinaSstLonRmse_1', 'NinaSstLonRmse_2', 'NinaSstMapCorr', 'NinaSstMap_1Corr', 'NinaSstMap_2Corr', 'NinaSstMapRmse', 'NinaSstMap_1Rmse', 'NinaSstMap_2Rmse', 'NinaSstMapStd', 'NinaSstMap_1Std', 'NinaSstMap_2Std', 'NinoPrMapCorr', 'NinoPrMap_1Corr', 'NinoPrMap_2Corr', 'NinoPrMapRmse', 'NinoPrMap_1Rmse', 'NinoPrMap_2Rmse', 'NinoPrMapStd', 'NinoPrMap_1Std', 'NinoPrMap_2Std', 'NinoSlpMapCorr', 'NinoSlpMap_1Corr', 'NinoSlpMap_2Corr', 'NinoSlpMap_1Rmse', 'NinoSlpMap_2Rmse', 'NinoSlpMapStd', 'NinoSlpMap_1Std', 'NinoSlpMap_2Std', 'NinoSstLonRmse', 'NinoSstLonRmse_1', 'NinoSstLonRmse_2', 'NinoSstMapCorr', 'NinoSstMap_1Corr', 'NinoSstMap_2Corr', 'NinoSstMapRmse', 'NinoSstMap_1Rmse', 'NinoSstMap_2Rmse', 'NinoSstMapStd', 'NinoSstMap_1Std', 'NinoSstMap_2Std'] else: if metric_collection == "ENSO_perf": to_remove = [ 'BiasSshLatRmse', 'BiasSshLonRmse', 'BiasSstLatRmse', 'BiasTauxLatRmse', 'EnsoPrTsRmse', 'EnsoSstDiversity_1', 
'EnsoTauxTsRmse', 'NinaSstDur', 'NinaSstDur_1', 'NinaSstDur_2', 'NinaSstLonRmse', 'NinaSstLonRmse_1', 'NinaSstLonRmse_2', 'NinaSstTsRmse', 'NinaSstTsRmse_1', 'NinaSstTsRmse_2', 'NinoSstDiversity', 'NinoSstDiversity_1', 'NinoSstDiversity_2', 'NinoSstDur', 'NinoSstDur_1', 'NinoSstDur_2', 'NinoSstLonRmse', 'NinoSstLonRmse_1', 'NinoSstLonRmse_2', 'NinoSstTsRmse', 'NinoSstTsRmse_1', 'NinoSstTsRmse_2', "SeasonalSshLatRmse", "SeasonalSshLonRmse", "SeasonalSstLatRmse", "SeasonalTauxLatRmse"] elif metric_collection == "ENSO_proc": to_remove = [ 'BiasSshLonRmse', 'BiasSstLonRmse', 'BiasTauxLonRmse', 'EnsoAmpl', 'EnsoSeasonality', 'EnsoSstLonRmse', 'EnsoSstSkew', 'EnsodSstOce_1', 'EnsoFbSstLhf', 'EnsoFbSstLwr', 'EnsoFbSstShf', 'EnsoFbSstSwr'] else: to_remove = [ 'EnsoAmpl', 'EnsoSeasonality', 'EnsoSstLonRmse', 'EnsoPrMapCorr', 'EnsoPrMapRmse', 'EnsoPrMapStd', 'EnsoPrMapDjfCorr', 'EnsoPrMapDjfStd', 'EnsoPrMapJjaCorr', 'EnsoPrMapJjaStd', 'EnsoSlpMapCorr', 'EnsoSlpMapRmse', 'EnsoSlpMapStd', 'EnsoSlpMapDjfCorr', 'EnsoSlpMapDjfRmse', 'EnsoSlpMapDjfStd', 'EnsoSlpMapJjaCorr', 'EnsoSlpMapJjaRmse', 'EnsoSlpMapJjaStd', 'EnsoSstMapCorr', 'EnsoSstMapRmse', 'EnsoSstMapStd', 'EnsoSstMapDjfCorr', 'EnsoSstMapDjfStd', 'EnsoSstMapJjaCorr', 'EnsoSstMapJjaStd', 'NinaPrMapCorr', 'NinaPrMap_1Corr', 'NinaPrMap_2Corr', 'NinaPrMapRmse', 'NinaPrMap_1Rmse', 'NinaPrMap_2Rmse', 'NinaPrMapStd', 'NinaPrMap_1Std', 'NinaPrMap_2Std', 'NinaSlpMapCorr', 'NinaSlpMap_1Corr', 'NinaSlpMap_2Corr', 'NinaSlpMapRmse', 'NinaSlpMap_1Rmse', 'NinaSlpMap_2Rmse', 'NinaSlpMapStd', 'NinaSlpMap_1Std', 'NinaSlpMap_2Std', 'NinaSstLonRmse', 'NinaSstLonRmse_1', 'NinaSstLonRmse_2', 'NinaSstMapCorr', 'NinaSstMap_1Corr', 'NinaSstMap_2Corr', 'NinaSstMapRmse', 'NinaSstMap_1Rmse', 'NinaSstMap_2Rmse', 'NinaSstMapStd', 'NinaSstMap_1Std', 'NinaSstMap_2Std', 'NinoPrMapCorr', 'NinoPrMap_1Corr', 'NinoPrMap_2Corr', 'NinoPrMapRmse', 'NinoPrMap_1Rmse', 'NinoPrMap_2Rmse', 'NinoPrMapStd', 'NinoPrMap_1Std', 'NinoPrMap_2Std', 'NinoSlpMapCorr', 'NinoSlpMap_1Corr', 'NinoSlpMap_2Corr', 'NinoSlpMap_1Rmse', 'NinoSlpMap_2Rmse', 'NinoSlpMapStd', 'NinoSlpMap_1Std', 'NinoSlpMap_2Std', 'NinoSstLonRmse', 'NinoSstLonRmse_1', 'NinoSstLonRmse_2', 'NinoSstMapCorr', 'NinoSstMap_1Corr', 'NinoSstMap_2Corr', 'NinoSstMapRmse', 'NinoSstMap_1Rmse', 'NinoSstMap_2Rmse', 'NinoSstMapStd', 'NinoSstMap_1Std', 'NinoSstMap_2Std'] else: if portraitplot is True: to_remove = [] else: if metric_collection == "ENSO_perf": to_remove = ['BiasSshLatRmse', 'BiasSshLonRmse', "SeasonalSshLatRmse", "SeasonalSshLonRmse"] elif metric_collection == "ENSO_proc": to_remove = ['BiasSshLonRmse', 'BiasSstLonRmse', 'BiasTauxLonRmse', 'EnsoAmpl', 'EnsoSeasonality', 'EnsoSstLonRmse', 'EnsoSstSkew'] else: to_remove = ['EnsoAmpl', 'EnsoSeasonality', 'EnsoSstLonRmse', 'NinaSstLonRmse', 'NinaSstLonRmse_1', 'NinaSstLonRmse_2', 'NinoSstLonRmse', 'NinoSstLonRmse_1', 'NinoSstLonRmse_2'] for met in to_remove: while met in metrics_out: metrics_out.remove(met) return metrics_out
2c7c26a1dfcb9d58ac095caf76e618d52b761eca
3,634,599