content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
import warnings def _url_from_string(url): """ Generate actual tile url from tile provider definition or template url. """ if "tileX" in url and "tileY" in url: warnings.warn( "The url format using 'tileX', 'tileY', 'tileZ' as placeholders " "is deprecated. Please use '{x}', '{y}', '{z}' instead.", FutureWarning, ) url = ( url.replace("tileX", "{x}").replace("tileY", "{y}").replace("tileZ", "{z}") ) return {"url": url}
f3d4393163e48a7949f3229c55ea8951411dcd63
3,639,400
import socket def get_reverse_dns(ip_address: str) -> str: """Does a reverse DNS lookup and returns the first IP""" try: rev = socket.gethostbyaddr(ip_address) if rev: return rev[0] return "" # noqa except (socket.herror, socket.gaierror, TypeError, IndexError): return ""
58a27e25f7a9b11ab7dcddebeea743b7864f80f1
3,639,401
def abs_path(file_path): """ Returns the absolute path from the file that calls this function to file_path. Needed to access other files within aide_gui when initialized by aide. Parameters ---------- file_path: String The relative file path from the file that calls this function. """ return join(dirname(abspath(__file__)), file_path)
63e4a4b0c8fafb5920c78310fda90b119fd18104
3,639,402
def function(x: np.ndarray) -> float: """The ellipse function is x0^2 + 2 * x1^2 + 3 * x2^2 + ...""" return np.linalg.norm(np.sqrt(np.arange(1, 1 + len(x))) * x) ** 2
efe468177ff232d45d18385fa2744a9cf63739eb
3,639,403
def replace_with_encoded_bits(one_hot_matrix, enum_val, add_value, last_col_index): """ Generate encoded bits for a categorical data value using one hot encoding. :param one_hot_matrix: matrix representing the encoding of categorical data value to 1-hot encoding :param enum_val: categorical data value, could be np.nan :param add_value: set to 1 if a reference value is needed in addition to 1-hot encoding :param last_col_index: index into encoding for np.nan if exists :return: vector representing the encoded values for a enum value """ if np.isnan(enum_val): # if data value is np.nan return one_hot_matrix[last_col_index] else: return one_hot_matrix[int(enum_val-add_value)]
d5ee111d74071fdbaa3890b35a193aa9e24df745
3,639,404
def cosine_similarity(n_co_elements, n_first_element, n_second_element): """ Description A function which returns the cosine similarity between two elements. Arguments :param n_co_elements: Number of co-elements. :type n_co_elements: int :param n_first_element: Size of the first element. :type n_first_element: int :param n_second_element: Size of the second element :type n_second_element: int """ try: return n_co_elements / (sqrt(n_first_element) * sqrt(n_second_element)) except ZeroDivisionError: return 0
ea35e47ecf3e77a95d535b0421afbe5f3a679817
3,639,405
def AddForwardEulerDynamicsConstraint(mp, A, B, x, u, xnext, dt): """ Add a dynamics constraint to the given Drake mathematical program mp, represinting the euler dynamics: xnext = x + (A*x + B*u)*dt, where x, u, and xnext are symbolic variables. """ n = A.shape[0] Aeq = np.hstack([ (np.eye(n)+A*dt), B*dt, -np.eye(n) ]) beq = np.zeros((n,1)) xeq = np.hstack([ x, u, xnext])[np.newaxis].T return mp.AddLinearEqualityConstraint(Aeq,beq,xeq)
e0070aa28b61833330706e3934cbfaa8eb1c1d1b
3,639,406
import json async def light_pure_rgb_msg_fixture(hass): """Return a mock MQTT msg with a pure rgb light actuator message.""" light_json = json.loads( await hass.async_add_executor_job(load_fixture, "ozw/light_pure_rgb.json") ) message = MQTTMessage(topic=light_json["topic"], payload=light_json["payload"]) message.encode() return message
93156674ece713d6c9371f64840852a3d5d292b5
3,639,407
import csv def make_header_names_thesaurus(header_names_thesaurus_file=HEADER_NAMES_THESAURUS_FILE): """ Get a dict mapping ideal domain-specific phrases to list of alternates. Parameters ---------- header_names_thesaurus_file : str Filepath. Returns ------- Dict of {'ideal phrase': ['alt_phrase0', 'alt_phrase1', ...]}. """ with open(header_names_thesaurus_file, 'rbU') as f: f.readline() # skip headers csvreader = csv.reader(f) header_names_thesaurus = {} for row in csvreader: header_primary_name = row[0] header_names_thesaurus[header_primary_name] = [x.lower().rstrip() for x in filter(None,row)] return header_names_thesaurus
20f89be5dfbdf0feac5facddcaeeddb346d394a8
3,639,408
def split_train_valid_test(adata_here, training_proportion=0.6, validation_proportion=0.2, test_proportion=0.2, rng=None,copy_adata=False): """Split cells into training, validation and test """ assert training_proportion<=1.0 assert validation_proportion<=1.0 assert test_proportion<=1.0 assert (training_proportion+validation_proportion+test_proportion)<=1.0 num_examples=adata_here.n_obs if rng==None: idx_shuff=np.random.RandomState(seed=77).permutation(range(num_examples)) else: idx_shuff=rng.permutation(range(num_examples)) training_threshold=int(num_examples*training_proportion) validation_threshold=int(num_examples*(training_proportion+validation_proportion)) training=range(training_threshold) validation=range(training_threshold,min(validation_threshold,num_examples)) test=range(validation_threshold,num_examples) #make obs with train, validation, test train_test_df=pd.DataFrame({'cell':adata_here.obs_names, 'train_valid_test':'train'},index=adata_here.obs_names) train_test_df=train_test_df.iloc[idx_shuff,:] train_test_df.iloc[training,1]='train' train_test_df.iloc[validation,1]='valid' train_test_df.iloc[test,1]='test' print('splitting',train_test_df.loc[adata_here.obs_names,'train_valid_test'].value_counts()) return(train_test_df.loc[adata_here.obs_names,'train_valid_test'])
ccff7c2b1372b74429bb6acb04df1dd66ad5c113
3,639,409
def get_stock_ledger_entries(previous_sle, operator=None, order="desc", limit=None, for_update=False, debug=False, check_serial_no=True): """get stock ledger entries filtered by specific posting datetime conditions""" conditions = " and timestamp(posting_date, posting_time) {0} timestamp(%(posting_date)s, %(posting_time)s)".format(operator) if previous_sle.get("warehouse"): conditions += " and warehouse = %(warehouse)s" elif previous_sle.get("warehouse_condition"): conditions += " and " + previous_sle.get("warehouse_condition") if check_serial_no and previous_sle.get("serial_no"): # conditions += " and serial_no like {}".format(frappe.db.escape('%{0}%'.format(previous_sle.get("serial_no")))) serial_no = previous_sle.get("serial_no") conditions += (""" and ( serial_no = {0} or serial_no like {1} or serial_no like {2} or serial_no like {3} ) """).format(frappe.db.escape(serial_no), frappe.db.escape('{}\n%'.format(serial_no)), frappe.db.escape('%\n{}'.format(serial_no)), frappe.db.escape('%\n{}\n%'.format(serial_no))) if not previous_sle.get("posting_date"): previous_sle["posting_date"] = "1900-01-01" if not previous_sle.get("posting_time"): previous_sle["posting_time"] = "00:00" if operator in (">", "<=") and previous_sle.get("name"): conditions += " and name!=%(name)s" return frappe.db.sql(""" select *, timestamp(posting_date, posting_time) as "timestamp" from `tabStock Ledger Entry` where item_code = %%(item_code)s and is_cancelled = 0 %(conditions)s order by timestamp(posting_date, posting_time) %(order)s, creation %(order)s %(limit)s %(for_update)s""" % { "conditions": conditions, "limit": limit or "", "for_update": for_update and "for update" or "", "order": order }, previous_sle, as_dict=1, debug=debug)
a25b1df6b692975c94f335e40d342617c3a95086
3,639,410
def index(request): """ Main index. Editor view. """ # Render editor body = render_to_string('editor.html', {}) data = { 'body': body } # Render page layout return render(request, 'index.html', data)
bab60def7716ae11d328a95274d2ee7b6305dbaf
3,639,411
def isUsdExt(ext): """ Check if the given extension is an expected USD file extension. :Parameters: ext : `str` :Returns: If the file extension is a valid USD extension :Rtype: `bool` """ return ext.lstrip('.') in USD_EXTS
5c2f7a48869c9ab4a94b4d8a84e892b76938e91a
3,639,412
def _get_dflt_lexicon(a_pos, a_neg): """Generate default lexicon by putting in it terms from seed set. @param a_pos - set of positive terms @param a_neg - set of negative terms @return list(3-tuple) - list of seed set terms with uniform scores and polarities """ return [(w, POSITIVE, 1.) for w in a_pos] \ + [(w, NEGATIVE, -1.) for w in a_neg]
b06a1f81629368447227a846ac3216220beaa77b
3,639,413
import os def process_dst_overwrite_args(src, dst=None, overwrite=True, src_to_dst_func=None): """ Check when overwrite is not allowed, whether the destination exists. """ src = os.path.abspath(src) if dst is None: dst = src_to_dst_func(src) if not overwrite: if os.path.exists(dst): raise EnvironmentError( "output path '%s' already exists.." % dst) return src, dst
201726c93ce918ff7bcf4d66cd0181ed62d1b061
3,639,414
def rct(target_t : Tensor, source_t : Tensor, target_mask_t : Tensor = None, source_mask_t : Tensor = None, mask_cutoff = 0.5) -> Tensor: """ Transfer color using rct method. arguments target_t Tensor( [N]CHW ) C==3 (BGR) float16|32 source_t Tensor( [N]CHW ) C==3 (BGR) float16|32 target_mask_t(None) Tensor( [N]CHW ) C==1|3 float16|32 target_source_t(None) Tensor( [N]CHW ) C==1|3 float16|32 reference: Color Transfer between Images https://www.cs.tau.ac.il/~turkel/imagepapers/ColorTransfer.pdf """ if target_t.ndim != source_t.ndim: raise ValueError('target_t.ndim != source_t.ndim') if target_t.ndim == 3: ch_axis = 0 spatial_axes = (1,2) else: ch_axis = 1 spatial_axes = (2,3) target_t = cvt_color(target_t, 'BGR', 'LAB', ch_axis=ch_axis) source_t = cvt_color(source_t, 'BGR', 'LAB', ch_axis=ch_axis) target_stat_t = target_t if target_mask_t is not None: target_stat_t = any_wise('O = I0*(I1 >= I2)', target_stat_t, target_mask_t, np.float32(mask_cutoff) ) source_stat_t = source_t if source_mask_t is not None: source_stat_t = any_wise('O = I0*(I1 >= I2)', source_stat_t, source_mask_t, np.float32(mask_cutoff) ) target_stat_mean_t, target_stat_var_t = moments(target_stat_t, axes=spatial_axes) source_stat_mean_t, source_stat_var_t = moments(source_stat_t, axes=spatial_axes) target_t = any_wise(f""" O_0 = clamp( (I0_0 - I1_0) * sqrt(I2_0) / sqrt(I3_0) + I4_0, 0.0, 100.0); O_1 = clamp( (I0_1 - I1_1) * sqrt(I2_1) / sqrt(I3_1) + I4_1, -127.0, 127.0); O_2 = clamp( (I0_2 - I1_2) * sqrt(I2_2) / sqrt(I3_2) + I4_2, -127.0, 127.0); """, target_t, target_stat_mean_t, source_stat_var_t, target_stat_var_t, source_stat_mean_t, dim_wise_axis=ch_axis) return cvt_color(target_t, 'LAB', 'BGR', ch_axis=ch_axis)
87f350c3e8cef10ef2e3bc883457acf861ab064c
3,639,415
def random_policy(num_actions): """ Returns a policy where all actions have equal probabilities, i.e., an uniform distribution. """ return np.zeros((num_actions,)) + 1 / num_actions
9a95865cf3bc7634bc4bf033f343b5811ba40c9f
3,639,416
def find_object(func, name, *args, **kwargs): """Locate an object by name or identifier This function will use the `name` argumetn to attempt to locate an object. It will first attempt to find the object by identifier and if that fails, it will attempt to find the object by name. Since object names are non-unique values in the Pureport API, this function will return the first value it finds in the case of multiple objects. If the requested object can not be found, this function will raise an exception. :param name: The name or identifier of the object to locate :type name: str :returns: An instance of the object found :rtype: `pureport.models.Model` :raises: `pureport.exceptions.PureportError` """ objects = func(*args, **kwargs) match = None name_matches = list() for item in objects: if name == item.id: match = item break elif name == item.name: name_matches.append(item) else: if not name_matches: raise PureportError("could not locate object `{}`".format(name)) if match is None: match = first(name_matches) return match
6ee8085d42883798c1f3ab5d0a7711af26b2b614
3,639,417
import os def mkdir(path): """ Make a directory, if the parent directory exists. """ path = abspath(path, fse.get_working().get_full_path()) parent_path, d = os.path.split(path) parent = fse.find_dir(parent_path) if parent: entry = fse.create(name=d, parent=parent, depth=parent.depth+1, is_directory=True) return f'{path} created.' return f'{parent_path} does not exist.'
6306e9fe3645b7db8cb478361186626fc294e4d5
3,639,418
import os def _get_cognitive_services_client() -> ImageSearchClient: """Get the cognitive service client to run the searches against. Ensure there is a COGNITIVE_KEY and COGNITIVE_ENDPOINT configured in your app setting for the function, or your local.settings.json file when running locally. Returns ------- client: ImageSearchClient Cognitive service client """ subscription_key = os.environ.get('COGNITIVE_KEY') subscription_endpoint = os.environ.get('COGNITIVE_ENDPOINT') client = ImageSearchClient(endpoint=subscription_endpoint, credentials=CognitiveServicesCredentials(subscription_key)) return client
f52a8a85a60401a5da2b7388fbd295954a54602a
3,639,419
import os def getZeroPadding(path): """Get original zero padding, so can be re-added.""" files = listVisibleFiles(path) zero_padding = len(getNumSubString(os.path.splitext(files[0])[0])) return zero_padding
19b044027d4309c6b3977333958801802f287650
3,639,420
import logging def weld_segments(gdf_line_net, gdf_line_gen, gdf_line_houses, debug_plotting=False): """Weld continuous line segments together and cut loose ends. This is a public function that recursively calls the internal function weld_line_segments_(), until the problem cannot be simplified further. Find all lines that only connect to one other line and connect those to a single MultiLine object. Points that connect to Generators and Houses are not simplified. Loose ends are shortened where possible. Parameters ---------- gdf_line_net : GeoDataFrame Potential pipe network. gdf_line_gen : GeoDataFrame Generators that need to be connected. gdf_line_houses : GeoDataFrame Houses that need to be connected. debug_plotting : bool, optional Plot the selection process. Returns ------- gdf_line_net_new : GeoDataFrame Simplified potential pipe network. """ gdf_line_net_last = gdf_line_net gdf_line_net_new = _weld_segments(gdf_line_net, gdf_line_gen, gdf_line_houses, debug_plotting) # Now do all of this recursively while len(gdf_line_net_new) < len(gdf_line_net_last): logging.info('Welding lines... reduced from {} to {} lines'.format( len(gdf_line_net_last), len(gdf_line_net_new))) gdf_line_net_last = gdf_line_net_new gdf_line_net_new = _weld_segments(gdf_line_net_new, gdf_line_gen, gdf_line_houses, debug_plotting) return gdf_line_net_new
913f5432fddaaf592ce8a4fa4a4a813fcd26c868
3,639,421
def CreateHSpline(points, multiple=False): """ Construct an H-spline from a sequence of interpolation points Args: points (IEnumerable<Point3d>): Points to interpolate """ url = "rhino/geometry/nurbscurve/createhspline-point3darray" if multiple: url += "?multiple=true" args = [points] if multiple: args = [[item] for item in points] response = Util.ComputeFetch(url, args) response = Util.DecodeToCommonObject(response) return response
b5f7b2000dcce04a60087ab32956fa4701d1dadc
3,639,422
def get_instance_embedding_loss(embedding, instance_loss_type, instance_labels, crop_area, crop_min_height, num_samples=10, similarity_strategy='dotproduct', loss_strategy='softmax'): """Returns the instance embedding loss based on instance_loss_type. Args: embedding: A tf.float32 tensor of size [height, width, dims] or [batch_size, height, width, dims]. instance_loss_type: A string containing the type of the embedding loss. instance_labels: A tf.int32 tensor of size [height, width] or [batch_size, heigh, width] containing instance ids. Assumed values in target start from 0 and cover 0 to N-1. crop_area: Area of the crop window. Only used in some cases of embedding loss. crop_min_height: Minimum height of the crop window. Only used in some cases of embedding loss. num_samples: Number of samples. Only used in some cases of embedding loss. similarity_strategy: Defines the method for computing similarity between embedding vectors. Possible values are 'dotproduct' and 'distance'. loss_strategy: Defines the type of loss including 'softmax' or 'sigmoid'. Returns: Instance embedding loss. Raises: ValueError: If instance loss type is not known. """ # Handling the case where there is a batch size. 
embedding_shape = embedding.get_shape().as_list() if len(embedding_shape) == 4: num_batches = embedding_shape[0] losses = [] embedding_list = tf.unstack(embedding) instance_label_list = tf.unstack(instance_labels) for i in range(num_batches): embedding_i = embedding_list[i] instance_labels_i = instance_label_list[i] loss = get_instance_embedding_loss(embedding_i, instance_loss_type, instance_labels_i, crop_area, crop_min_height, num_samples, similarity_strategy, loss_strategy) losses.append(loss) return tf.reduce_mean(tf.stack(losses)) if instance_loss_type == 'npair': return instance_embedding_npair_loss( embedding=embedding, instance_labels=instance_labels, crop_min_height=crop_min_height, crop_area=crop_area, similarity_strategy=similarity_strategy, loss_strategy=loss_strategy) elif instance_loss_type == 'npair_r_c': return instance_embedding_npair_random_center_loss( embedding=embedding, instance_labels=instance_labels, similarity_strategy=similarity_strategy, loss_strategy=loss_strategy) elif instance_loss_type == 'npair_r_c_r_s': return instance_embedding_npair_random_center_random_sample_loss( embedding=embedding, instance_labels=instance_labels, num_samples=num_samples, similarity_strategy=similarity_strategy, loss_strategy=loss_strategy) elif instance_loss_type == 'npair_r_s': return instance_embedding_npair_random_sample_loss( embedding=embedding, instance_labels=instance_labels, num_samples=num_samples, similarity_strategy=similarity_strategy, loss_strategy=loss_strategy) elif instance_loss_type == 'iou': return instance_embedding_iou_loss( embedding=embedding, instance_labels=instance_labels, num_samples=num_samples, similarity_strategy=similarity_strategy) else: raise ValueError('Instance loss type is not known')
ff1e08ea60f4c937fd44bec967eda37d6916ef00
3,639,423
def str_to_array(value): """ Check if value can be parsed to a tuple or and array. Because Spark can handle tuples we will try to transform tuples to arrays :param value: :return: """ try: if isinstance(literal_eval((value.encode('ascii', 'ignore')).decode("utf-8")), (list, tuple)): return True except (ValueError, SyntaxError,): pass
d565021781a3c2c19c882073ddc6cbd24334b74a
3,639,424
import inspect def get_current_func_name(): """for python version greater than equal to 2.7""" return inspect.stack()[1][3]
002d318bcab98639cab6c38317322f247a1ad0e0
3,639,425
def getParmNames(parmsDef): """Return a list of parm names in a model parm definition parmsDef: list of tuples, each tuple is a list of parms and a time constraint. Call with modelDict[modelname]['Parms]. Returns: List of string parameter names Here's an example of how to remove unused parms from Fcst, this can run in localConfig: parmsToRemove=[] for p in getParmNames(modelDict['Fcst']): pl=p.lower() for t in ['period','swell','wave','surf', 'surge']: if t in pl: parmsToRemove.append(p) break removeParms(modelDict,'Fcst',parmsToRemove) """ result=[] for pList,tc in parmsDef: # p is the parmDef tuple where first item is the parm name newParms=[p[0] for p in pList] result+=newParms return sorted(result)
785661200c388f23c5f38ae67e773a43fd8f57b3
3,639,426
def dict_merge(lft, rgt): """ Recursive dict merge. Recursively merges dict's. not just simple lft['key'] = rgt['key'], if both lft and rgt have a key who's value is a dict then dict_merge is called on both values and the result stored in the returned dictionary. """ if not isinstance(rgt, dict): return rgt result = deepcopy(lft) for key, val in rgt.iteritems(): if key in result and isinstance(result[key], dict): result[key] = dict_merge(result[key], val) else: result[key] = deepcopy(val) return result
c939fed14ff10452663bc5a32247b21f6170897a
3,639,427
def modified_zscore(x: np.ndarray) -> np.ndarray: """ Modified z-score transformation. The modified z score might be more robust than the standard z-score because it relies on the median for calculating the z-score. It is less influenced by outliers when compared to the standard z-score. Parameters ---------- x: (N,) np.ndarray numbers Returns ------- z: (N,) np.ndarray z-scored numbers computed using modified z-score """ med = np.median(x) med_abs_dev = np.median(np.abs(x - med)) return (x - med) / (1.486 * med_abs_dev)
8f0933bf30ec55ba6305c9bd926437bb0715a938
3,639,428
def update_profile(email, username, name, bio, interest, picture=None): """更新 profile""" db = get_db() cursor = db.cursor() # query user user = get_user_by_email(email) email = user['email'] profile_id = user['profile_id'] if profile_id is None: # add profile cursor.execute( "INSERT INTO profiles (username, name, bio, interest, picture) VALUES (?, ?, ?, ?, ?)", (username, name, bio, interest, picture) ) db.commit() profile_id = cursor.lastrowid cursor.execute( "UPDATE users SET profile_id = ? WHERE email=?", (profile_id, email) ) db.commit() else: # Update profile if picture: sql = "UPDATE profiles SET username=?,name=?,bio=?,interest=?,picture=? WHERE id=?" values = (username, name, bio, interest, picture, profile_id) else: sql = "UPDATE profiles SET username=?,name=?,bio=?,interest=? WHERE id=?" values = (username, name, bio, interest, profile_id) cursor.execute( sql, values, ) db.commit() return True
0b13d81f9d36198d4660179eae7616d8f25ee37e
3,639,429
import zlib import marshal def serialize(object): """ Serialize the data into bytes using marshal and zlib Args: object: a value Returns: Returns a bytes object containing compressed with zlib data. """ return zlib.compress(marshal.dumps(object, 2))
650cbc8937df5eae79960f744b69b8b12b623195
3,639,430
def logo_if(interp, expr, block, elseBlock=None): """ IF tf instructionlist (IF tf instructionlist1 instructionlist2) command. If the first input has the value TRUE, then IF runs the second input. If the first input has the value FALSE, then IF does nothing. (If given a third input, IF acts like IFELSE, as described below.) It is an error if the first input is not either TRUE or FALSE. """ if expr: return logo_eval(interp, block) elif elseBlock is not None: return logo_eval(interp, elseBlock)
94f143f59fa02f059469f8f17a3ff11093110c84
3,639,431
import itertools def select_model_general( df, grid_search, target_col_name, frequency, partition_columns=None, parallel_over_columns=None, executor=None, include_rules=None, exclude_rules=None, country_code_column=None, output_path="", persist_cv_results=False, persist_cv_data=False, persist_model_reprs=False, persist_best_model=False, persist_partition=False, persist_model_selector_results=False, ): """Run cross validation on data and select best model Best models are selected for each timeseries and if wanted persisted. Parameters ---------- df : pandas.DataFrame Container holding historical data for training grid_search : sklearn.model_selection.GridSearchCV Preconfigured grid search definition which determines which models and parameters will be tried target_col_name : str Name of target column frequency : str Temporal frequency of data. Data with different frequency will be resampled to this frequency. partition_columns : list, tuple Column names based on which the data should be split up / partitioned parallel_over_columns : list, tuple Subset of partition_columns, that are used to parallel split. executor : prefect.engine.executors Provide prefect's executor. Only valid when `parallel_over_columns` is set. For more information see https://docs.prefect.io/api/latest/engine/executors.html include_rules : dict Dictionary with keys being column names and values being list of values to include in the output. exclude_rules : dict Dictionary with keys being column names and values being list of values to exclude from the output. country_code_column : str Name of the column with country code, which can be used for supplying holiday (i.e. having gridsearch with HolidayTransformer with argument `country_code_column` set to this one). 
output_path : str Path to directory for storing the output, default behavior is current working directory persist_cv_results : bool If True cv_results of sklearn.model_selection.GridSearchCV as pandas df will be saved as pickle for each partition persist_cv_data : bool If True the pandas df detail cv data will be saved as pickle for each partition persist_model_reprs : bool If True model reprs will be saved as json for each partition persist_best_model : bool If True best model will be saved as pickle for each partition persist_partition : bool If True dictionary of partition label will be saved as json for each partition persist_model_selector_results : bool If True ModelSelectoResults with all important information will be saved as pickle for each partition Returns ------- list List of ModelSelectorResult """ if parallel_over_columns is not None: # run prefect flow with paralellism flow_result = run_model_selection(**locals()) # access result of select_model and flatten it result = flow_result[1].result[flow_result[0].get_tasks("select_model")[0]].result flat_list = list(itertools.chain.from_iterable(result)) return flat_list else: partition_columns = partition_columns if partition_columns is not None else [] # run without prefect df_prep = df.pipe(filter_data, include_rules=include_rules, exclude_rules=exclude_rules).pipe( prepare_data_for_training, frequency=frequency, partition_columns=partition_columns, country_code_column=country_code_column, ) result = select_model( df=df_prep, target_col_name=target_col_name, partition_columns=partition_columns, grid_search=grid_search, parallel_over_dict=None, frequency=frequency, country_code_column=country_code_column, ) if any( [ persist_cv_results, persist_cv_data, persist_model_reprs, persist_partition, persist_best_model, persist_model_selector_results, ] ): persist_experts_in_physical_partition( results=result, folder_path=output_path, persist_cv_results=persist_cv_results, persist_cv_data=persist_cv_data, 
persist_model_reprs=persist_model_reprs, persist_partition=persist_partition, persist_best_model=persist_best_model, persist_model_selector_results=persist_model_selector_results, ) return result
1c286b8cf922a50c1c1071aa0d0506b0cf102a6b
3,639,432
def create_arma_sample(ar_order=1, ma_order=1, size=100): """Get a random ARMA sample. Parameters ---------- ar_order, ma_order, size : int Values for the desired AR order, MA order and sample size. Returns ------- An ARMA sample as a pandas Series. """ ar_coeff = np.linspace(1, -0.9, ar_order + 1) # arbitrary ar coefficients ma_coeff = np.linspace(1, 0.9, ma_order + 1) # arbitrary ma coefficients sample = tsa.ArmaProcess(ar_coeff, ma_coeff).generate_sample(size) index = pd.date_range(start=date.today(), periods=size, freq="D") return pd.Series(sample, index=index, name="sample")
e859413cee0a20e51fc80aeffbb75b3ada83f010
3,639,433
def get_img(file_path, gray=False): """ 获取输入图片 :param file_path: 图片文件位置 :param gray: 是否转换为灰度图 :return: img """ try: img = Image.open(file_path) if gray: img = img.convert('L') return img except Exception: print("不支持的图片格式") return None
ac3ad78a1ce877905f550ebc43b7e9a6335fd762
3,639,434
from datetime import datetime def working_days(days: int): """Return a list of N workingdays Keyword arguments: days -- days past """ dates = [] today = datetime.utcnow() for i in range(days): day = today - timedelta(days=i) day = day.date() dates.append(day) for idx, date in enumerate(dates): if date.weekday() == 6: for i in range(idx, len(dates)): dates[i] = dates[i] - timedelta(days=2) if date.weekday() == 5: for i in range(idx, len(dates)): dates[i] = dates[i] - timedelta(days=1) return dates
222002b53bcf536f7b31993a22424446fcce24cc
3,639,435
def GetFile(message=None, title=None, directory=None, fileName=None, allowsMultipleSelection=False, fileTypes=None): """ An get file dialog. Optionally a `message`, `title`, `directory`, `fileName` and `allowsMultipleSelection` can be provided. :: from fontParts.ui import GetFile print(GetFile()) """ return dispatcher["GetFile"](message=message, title=title, directory=directory, fileName=fileName, allowsMultipleSelection=allowsMultipleSelection, fileTypes=fileTypes)
b81ba1e11764231c8c04164316e4ee55b0305044
3,639,436
def str_to_dtype(s): """Convert dtype string to numpy dtype.""" return eval('np.' + s)
e0ff793404af5a8022d260fde5878329abbac483
3,639,437
from typing import Callable from typing import Tuple def integrate_const( f: Callable, t_span: Tuple, dt: float, y0: np.ndarray, method: str = 'runge_kutta4' ) -> Tuple[np.ndarray, np.ndarray]: """ A Python wrapper for Boost::odeint runge_kutta4 (the only one supported right now) stepper and ODE integration. :param f: The ODE system RHS. :param t_span: The time range in which integration is performed. It is provided as (t_initial, t_final) tuple. :param dt: The time-step to increment time from t_span[0] to t_span[1]. :param y0: Initial conditions for the system state. :param method: The stepper method. Only 'runge_kutta4' is supported at the moment. :return: A tuple with two arrays: (time, solution). The first contains the time points from integration and the last is a matrix with the solution for each state provided by columns. In other words, solution[:, 0] contains the solution for state 0, solution[:, 1] for state 1 and so forth. """ time, solution = _integrate_const(f, t_span, dt, y0, method) solution = np.array(solution) time = np.array(time) return time, solution
e43479c829fd46e0f4cdd8c7918294577e91beed
3,639,438
def cleanup(serialized): """ Remove all missing values. Sometimes its useful for object methods to return missing value in order to not include that value in the json format. Examples:: >>> User(Serializable): ... def attributes(): ... return ['id', 'name', 'birthday', 'somefunc'] ... def age(): ... if birthday: ... return empty ... else: ... return calc_age(self.birthday) Now if some user has birthday the age function is going to return the age. However if user doesn't have birthday the age function is returning a special empty value which tells jsonifier not to include that key in json format. >>> User(id=1, name='someone').as_json() {'id': 1, 'name': 'Someone'} """ return dict(filter(lambda a: a[1] is not empty, serialized.items()))
5e4bfd13408ec8272c4fc4e9a499349e13dd2798
3,639,439
from typing import Optional from typing import List async def discover_devices( wave_devices: Optional[List[WaveDevice]] = None, ) -> List[WaveDevice]: """Discovers all valid, accessible Airthings Wave devices.""" wave_devices = wave_devices if isinstance(wave_devices, list) else [] device: BLEDevice # Typing annotation for device in await discover(): serial = WaveDevice.parse_manufacturer_data( device.metadata.get("manufacturer_data") ) if serial: wave_devices.append(WaveDevice(device, serial)) else: _logger.debug(f"Device: ({device.address}) is not a valid Wave device.") continue return wave_devices
31b5a43b1be765ca4d67080f5a04d16477615fa6
3,639,440
import os
import pickle


def convert_examples_to_feats_lstm(examples, max_seq_length, glove_vocab, feat_file, language):
    """Loads a data file into a list of `InputBatch`s in glove+lstm manner.

    :param examples: iterable of example objects exposing ``text_a``, ``label``
        and ``guid`` attributes.
    :param max_seq_length: fixed sequence length; shorter sequences are
        zero-padded, longer ones truncated via the module-level ``truncate``.
    :param glove_vocab: vocabulary object mapping tokens to GloVe indices.
    :param feat_file: pickle cache path; if it exists, features are loaded
        from it and everything else is skipped.
    :param language: 'en' selects the English spaCy pipeline, anything else
        the Chinese one.
    :return: list of lists of ``InputFeatures``, grouped in chunks of the
        module-level ``n_class``.
    """
    print("#examples", len(examples))
    # Fast path: reuse previously computed features from the pickle cache.
    if os.path.exists(feat_file):
        with open(feat_file, 'rb') as f:
            features = pickle.load(f)
        return features
    else:
        # Features are accumulated in groups of n_class; start with one
        # empty group.
        features = [[]]
        if language == 'en':
            nlp = spacy.load("en_core_web_sm")
        else:
            nlp = spacy.load("zh_core_web_md")
        for (ex_index, example) in enumerate(examples):
            # NOTE(review): `abandon` is never set to True anywhere below, so
            # the `if not abandon` guard always passes -- confirm whether a
            # filtering step was removed.
            abandon = False
            dialog = nlp(example.text_a)
            dialog_tokens = [token.text for token in dialog]
            dialog_pos = [token.tag_ for token in dialog]
            #label to id
            label_ids = map_label_to_ids(example.label)
            # print(dialog_tokens)
            # print(label_ids)
            # print(len(dialog_tokens))
            # print(len(label_ids))
            # exit(0)
            # `truncate` presumably shortens the lists in place to
            # max_seq_length -- confirm against its definition.
            truncate(dialog_tokens, max_seq_length)
            truncate(dialog_pos, max_seq_length)
            truncate(label_ids, max_seq_length)
            # convert tokens to index
            input_ids = glove_vocab.map(dialog_tokens)
            # convert pos to index
            pos_ids = map_to_ids(dialog_pos, constant.POS_TO_ID)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1] * len(input_ids)
            segment_ids = [0] * len(input_ids)  # actually not used
            # Zero-pad up to the sequence length.
            while len(input_ids) < max_seq_length:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)
                pos_ids.append(0)
                label_ids.append(0)
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(pos_ids) == max_seq_length
            assert len(label_ids) == max_seq_length
            # Log the first two examples for manual inspection.
            if ex_index < 2:
                logger.info("*** Example ***")
                logger.info("guid: %s" % (example.guid))
                logger.info("tokens: %s" % " ".join([str(token) for token in dialog_tokens]))
                logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
            if not abandon:
                features[-1].append(
                    InputFeatures(
                        input_ids=input_ids,
                        input_mask=input_mask,
                        segment_ids=segment_ids,
                        label_id=label_ids,
                        pos_ids=pos_ids))
                # Start a new group once the current one holds n_class items
                # (n_class is a module-level global).
                if len(features[-1]) == n_class:
                    features.append([])
        # Drop the trailing empty group, if any.
        if len(features[-1]) == 0:
            features = features[:-1]
        print('#features', len(features))
        # Cache the computed features for subsequent runs.
        with open(feat_file, 'wb') as f:
            pickle.dump(features, f)
        return features
734fbac9f0e2f1b203424f5b495ceb3ba056c3ab
3,639,441
import json


def PyValueToMessage(message_type, value):
    """Convert the given python value to a message of type message_type."""
    # Round-trip through JSON text; JsonToMessage does the actual decoding.
    serialized = json.dumps(value)
    return JsonToMessage(message_type, serialized)
576237ebbacb85ac4c51be8b5523f4f95cfcc019
3,639,442
from typing import Any
from typing import get_origin


def istype(obj: Any, annotation: type) -> bool:
    """Check if object is consistent with the annotation.

    Only plain (non-generic) annotations are handled; subscripted
    generics such as ``list[int]`` raise ``NotImplementedError``.
    ``None`` as the annotation matches only the ``None`` object.
    """
    # Subscripted generics (get_origin != None) are not supported yet.
    if get_origin(annotation) is not None:
        raise NotImplementedError("Currently only the basic types are supported")
    if annotation is None:
        return obj is None
    return isinstance(obj, annotation)
c1903ea2ec6c0b6b9006a38f7c0720c88987b706
3,639,443
import logging
import platform


def test_cand_gen(caplog):
    """Test extracting candidates from mentions from documents.

    End-to-end pipeline test: parse HTML/PDF docs into a local PostgreSQL
    database, extract Part/Temp/Volt/Fig mentions, then extract PartTemp and
    PartVolt candidates with and without throttlers, asserting exact counts.
    Requires a running postgres instance and the test corpus on disk.
    """
    caplog.set_level(logging.INFO)
    # NOTE(review): `platform` is compared directly to "darwin"; if this is
    # the `platform` *module* the comparison is always False and PARALLEL is
    # always 2 -- confirm whether `from sys import platform` was intended.
    if platform == "darwin":
        logger.info("Using single core.")
        PARALLEL = 1
    else:
        logger.info("Using two cores.")
        PARALLEL = 2  # Travis only gives 2 cores

    def do_nothing_matcher(fig):
        # Accept every figure; used to exercise LambdaFunctionFigureMatcher.
        return True

    max_docs = 10
    session = Meta.init("postgresql://localhost:5432/" + DB).Session()

    docs_path = "tests/data/html/"
    pdf_path = "tests/data/pdf/"

    # Parsing
    logger.info("Parsing...")
    doc_preprocessor = HTMLDocPreprocessor(docs_path, max_docs=max_docs)
    corpus_parser = Parser(
        session, structural=True, lingual=True, visual=True, pdf_path=pdf_path
    )
    corpus_parser.apply(doc_preprocessor, parallelism=PARALLEL)
    assert session.query(Document).count() == max_docs
    assert session.query(Sentence).count() == 5548
    docs = session.query(Document).order_by(Document.name).all()

    # Mention Extraction
    part_ngrams = MentionNgramsPart(parts_by_doc=None, n_max=3)
    temp_ngrams = MentionNgramsTemp(n_max=2)
    volt_ngrams = MentionNgramsVolt(n_max=1)
    figs = MentionFigures(types="png")

    Part = mention_subclass("Part")
    Temp = mention_subclass("Temp")
    Volt = mention_subclass("Volt")
    Fig = mention_subclass("Fig")

    fig_matcher = LambdaFunctionFigureMatcher(func=do_nothing_matcher)

    # Mismatched list lengths between classes/spaces/matchers must raise.
    with pytest.raises(ValueError):
        mention_extractor = MentionExtractor(
            session,
            [Part, Temp, Volt],
            [part_ngrams, volt_ngrams],  # Fail, mismatched arity
            [part_matcher, temp_matcher, volt_matcher],
        )

    with pytest.raises(ValueError):
        mention_extractor = MentionExtractor(
            session,
            [Part, Temp, Volt],
            [part_ngrams, temp_matcher, volt_ngrams],
            [part_matcher, temp_matcher],  # Fail, mismatched arity
        )

    mention_extractor = MentionExtractor(
        session,
        [Part, Temp, Volt, Fig],
        [part_ngrams, temp_ngrams, volt_ngrams, figs],
        [part_matcher, temp_matcher, volt_matcher, fig_matcher],
    )
    mention_extractor.apply(docs, parallelism=PARALLEL)

    # Exact mention counts are pinned to this corpus.
    assert session.query(Part).count() == 234
    assert session.query(Volt).count() == 107
    assert session.query(Temp).count() == 136
    assert session.query(Fig).count() == 223
    part = session.query(Part).order_by(Part.id).all()[0]
    volt = session.query(Volt).order_by(Volt.id).all()[0]
    temp = session.query(Temp).order_by(Temp.id).all()[0]
    logger.info("Part: {}".format(part.context))
    logger.info("Volt: {}".format(volt.context))
    logger.info("Temp: {}".format(temp.context))

    # Candidate Extraction
    PartTemp = candidate_subclass("PartTemp", [Part, Temp])
    PartVolt = candidate_subclass("PartVolt", [Part, Volt])

    # Throttler list length must match the candidate-class list length.
    with pytest.raises(ValueError):
        candidate_extractor = CandidateExtractor(
            session,
            [PartTemp, PartVolt],
            throttlers=[
                temp_throttler,
                volt_throttler,
                volt_throttler,
            ],  # Fail, mismatched arity
        )

    with pytest.raises(ValueError):
        candidate_extractor = CandidateExtractor(
            session,
            [PartTemp],  # Fail, mismatched arity
            throttlers=[temp_throttler, volt_throttler],
        )

    # Test that no throttler in candidate extractor
    candidate_extractor = CandidateExtractor(
        session, [PartTemp, PartVolt]
    )  # Pass, no throttler

    candidate_extractor.apply(docs, split=0, parallelism=PARALLEL)

    assert session.query(PartTemp).count() == 4141
    assert session.query(PartVolt).count() == 3610
    assert session.query(Candidate).count() == 7751
    candidate_extractor.clear_all(split=0)
    assert session.query(Candidate).count() == 0

    # Test with None in throttlers in candidate extractor
    candidate_extractor = CandidateExtractor(
        session, [PartTemp, PartVolt], throttlers=[temp_throttler, None]
    )

    candidate_extractor.apply(docs, split=0, parallelism=PARALLEL)
    assert session.query(PartTemp).count() == 3879
    assert session.query(PartVolt).count() == 3610
    assert session.query(Candidate).count() == 7489
    candidate_extractor.clear_all(split=0)
    assert session.query(Candidate).count() == 0

    # Both throttlers active: counts drop for both relations.
    candidate_extractor = CandidateExtractor(
        session, [PartTemp, PartVolt], throttlers=[temp_throttler, volt_throttler]
    )

    candidate_extractor.apply(docs, split=0, parallelism=PARALLEL)

    assert session.query(PartTemp).count() == 3879
    assert session.query(PartVolt).count() == 3266
    assert session.query(Candidate).count() == 7145

    assert docs[0].name == "112823"
    assert len(docs[0].parts) == 70
    assert len(docs[0].volts) == 33
    assert len(docs[0].temps) == 24

    # Test that deletion of a Candidate does not delete the Mention
    session.query(PartTemp).delete()
    assert session.query(PartTemp).count() == 0
    assert session.query(Temp).count() == 136
    assert session.query(Part).count() == 234

    # Test deletion of Candidate if Mention is deleted
    assert session.query(PartVolt).count() == 3266
    assert session.query(Volt).count() == 107
    session.query(Volt).delete()
    assert session.query(Volt).count() == 0
    assert session.query(PartVolt).count() == 0
44cf505a7eedef55e6322eafebfb92ad3b882697
3,639,444
def spending_from_savings(take_home_pay: float, savings: float) -> Decimal:
    """
    Calculate your spending based on your take home pay and how much you save.

    This is useful if you use what Paula Pant calls the anti-budget,
    instead of tracking your spending in detail. This number can be
    used as input for the savings_rate function.

    Args:
        take_home_pay: monthly take-home pay
        savings: amount of money saved towards FI

    Returns:
        The amount of money spent, as an exact ``Decimal``.
    """
    # Convert via str: Decimal(0.1) would expand the binary float
    # (0.1000000000000000055511...), while Decimal(str(0.1)) keeps the
    # intended decimal value -- this is the conversion the decimal docs
    # recommend for float inputs.
    return Decimal(str(take_home_pay)) - Decimal(str(savings))
da26cae052bd27efb11893440353d53e8b6aed89
3,639,445
import sys


def stdlib_public_names(module: str, *, version: str = None) -> set[str]:
    """
    Return a set of public names of a stdlib module, in specific Python version.

    If no version is given, default to the current version.

    The `version` parameter takes argument of the form `3.9`, `4.7`, etc.
    """
    if module not in IMPORTABLE_STDLIB_MODULES:
        raise ValueError(f"{module} is not importable stdlib module")

    # Fall back to the running interpreter's major.minor version.
    if not version:
        version = ".".join(str(component) for component in sys.version_info[:2])

    return set(load_stdlib_public_names(version)[module])
4a16226a58c58ef66ef4f439160cab72b4902ad0
3,639,446
def large_asymmetric_bulge(data):
    """
    Find the widest "large asymmetric bulge" in a pre-miRNA image sequence.

    :param data: image data as array
    :return: tuple ``(largest_bulge, largest_bulge_location)`` -- the width of
        the largest asymmetric bulge (or ``np.nan`` if none) and its location
        as ``(half, index)`` where ``half`` is ``'upper'``/``'lower'`` and
        ``index`` is the middle column of the bulge
        (``(np.nan, np.nan)`` if none).
    """
    # retrieve the lengths of the bars in the sequences (the counts) from the palindrome function
    score, upper_half_counts, lower_half_counts, len_premiRNA = palindrome(data)

    # zip the count lists and check whether a large asymmetric bulge is included (pixel bar reaching image border)
    # and in which of the two image halves the bulge is located
    bulge_array = []
    bulge_locations = []

    # go over the bar lengths in the counts arrays and check whether they match the large asymmetric bulge requirements
    for pixel_upper, pixel_lower in zip(upper_half_counts[0:len_premiRNA], lower_half_counts[0:len_premiRNA]):
        if pixel_upper == pixel_lower:
            bulge_array.append(0)
            bulge_locations.append(0)
        else:
            # check for large asymmetric bulge in lower half of image
            # (the 2/12 pixel pair marks a bar reaching the image border)
            if pixel_upper == 2 and pixel_lower == 12:
                bulge_array.append(1)
                bulge_locations.append('lower')
            # check for large asymmetric bulge in upper half of image
            elif pixel_upper == 12 and pixel_lower == 2:
                bulge_array.append(1)
                bulge_locations.append('upper')
            else:
                # if above conditions do not hold, the sequence does not contain a large asymmetric bulge
                bulge_array.append(0)
                bulge_locations.append(0)

    # find the exact location and width of the large asymmetric bulge in the sequence by going over the bulge_array
    widths = []
    bulge_width = 0
    bulge_exact_locations = []
    bulge_exact_location = []
    # NOTE(review): the range stops one short of the end, so a bulge that runs
    # through the final column is never closed/recorded -- confirm intended.
    for i in range(len(bulge_array) - 1):
        # if the integer in the bulge_array is 1, we are at a large asymmetric bulge and we should increment the width
        if bulge_array[i] == 1:
            bulge_width += 1
            bulge_exact_location.append((bulge_locations[i], i))
            # if the next integer in bulge_array is 0, we have reached the end of the bulge and we should store the
            # width and all location info
            if bulge_array[i + 1] == 0:
                widths.append(bulge_width)
                bulge_width = 0
                bulge_exact_locations.append(bulge_exact_location)
                bulge_exact_location = []
        # (removed a dead `else: i += 1` branch: reassigning the loop variable
        # of a `for` loop has no effect and only obscured the control flow)

    # create empty values for the attributes of interest if there is no large asymmetric bulge found in the sequence
    if not widths:
        largest_bulge = np.nan
        largest_bulge_location = (np.nan, np.nan)
    # if there is at least one large asymmetric bulge, find the widest one among all and store this as the largest
    # asymmetric bulge of the sequence
    else:
        largest_bulge = np.max(widths)
        largest_bulge_index = np.argmax(widths)
        largest_bulge_location = bulge_exact_locations[largest_bulge_index]
        middle_bulge_location = int(len(largest_bulge_location) / 2)
        largest_bulge_location = (largest_bulge_location[0][0], largest_bulge_location[middle_bulge_location][1])

    return largest_bulge, largest_bulge_location
be7aef1cc6a2443de3ecff5099d6e28554544f7a
3,639,447
import requests


def request(host, path, bearer_token, url_params):
    """Given a bearer token, send a GET request to the API.

    Args:
        host (str): The domain host of the API.
        path (str): The path of the API after the domain.
        bearer_token (str): OAuth bearer token, obtained using client_id and client_secret.
        url_params (dict): An optional set of query parameters in the request.

    Returns:
        dict: The JSON response from the request.

    Raises:
        HTTPError: An error occurs from the HTTP request.
    """
    params = url_params or {}
    # URL-encode the path (UTF-8) before appending it to the host.
    url = '{0}{1}'.format(host, quote(path.encode('utf8')))
    headers = {
        'Authorization': 'Bearer %s' % bearer_token,
    }

    response = requests.request('GET', url, headers=headers, params=params)

    return response.json()
8f322307bfc1cf48ff5e1a7e52df18e5c9dc7ddf
3,639,448
def find_unique_distances(distance_ij: pd.Series) -> np.ndarray:
    """Finds the unique distances that define the neighbor groups.

    Distances that are numerically indistinguishable (``np.isclose``)
    from their predecessor in sorted order are collapsed into one.

    :param distance_ij: A pandas ``Series`` of pairwise neighbor distances.
    :return: An array of unique neighbor distances, sorted ascending.
    """
    ordered = np.sort(distance_ij.unique())
    # Keep an entry only when it is NOT close to the previous one.
    keep_mask = ~np.isclose(ordered[1:], ordered[:-1])
    return np.concatenate((ordered[:1], ordered[1:][keep_mask]))
ca4d8252c4b79bd536a10a058ca5f75b9f39416e
3,639,449
from typing import Dict
from typing import Any


def session(monkeypatch: pytest.MonkeyPatch) -> nox.Session:
    """Fixture for a Nox session.

    Builds a real ``nox.Session`` backed by a single registered session
    runner, using an isolated (monkeypatched) registry so the test does not
    pollute nox's global session registry.
    """
    # Replace nox's global registry with a private dict for this test.
    registry: Dict[str, Any] = {}
    monkeypatch.setattr("nox.registry._REGISTRY", registry)

    # Registering this function populates the patched registry.
    @nox.session(venv_backend="none")
    def test(session: nox.Session) -> None:
        """Example session."""

    config = nox._options.options.namespace(posargs=[])
    # Exactly one session was registered, so the manifest unpacks to one runner.
    [runner] = nox.manifest.Manifest(registry, config)
    # venv_backend="none" means no actual virtualenv is created here.
    runner._create_venv()

    return nox.Session(runner)
646403d4383c6e426d736bf55278e001db2a40e1
3,639,450
import yaml


def _load_yaml_with_clear_tag(stream):
    """Like yaml.safe_load(), but everything with a !clear tag before it will be
    wrapped in ClearedValue()."""
    loader = yaml.SafeLoader(stream)
    # NOTE(review): in PyYAML, add_constructor is a classmethod, so calling it
    # on the instance registers '!clear' on SafeLoader globally (for every
    # future safe load), not just on this loader -- confirm that is intended.
    loader.add_constructor('!clear', _cleared_value_constructor)
    try:
        # get_single_data parses the stream and requires exactly one document.
        return loader.get_single_data()
    finally:
        # Release parser resources; older PyYAML lacks dispose().
        if hasattr(loader, 'dispose'):  # it doesn't in PyYAML 3.09
            loader.dispose()
dec04cec96fae797250d1fb37491755ceaea399c
3,639,451
def highlights(state_importance_df, exec_traces, budget, context_length, minimum_gap=0,
               overlay_limit=0):
    """generate highlights summary

    Picks up to ``budget`` states in decreasing importance order, skipping
    states whose context window overlaps an already-chosen one in the same
    trace, then returns each chosen state's surrounding trajectory slice.

    :param state_importance_df: DataFrame with 'state' ((trace_idx, state_idx)
        tuples) and 'importance' columns.
    :param exec_traces: list of trace objects exposing a ``states`` list.
    :param budget: maximum number of summary states to select.
    :param context_length: size of the context window around each state.
    :param minimum_gap: minimum spacing passed to get_relevant_range.
    :param overlay_limit: overlap allowance passed to get_relevant_range.
    :return: dict mapping (trace_idx, state_idx) -> list of trajectory states.
    """
    sorted_df = state_importance_df.sort_values(['importance'], ascending=False)
    # summary_traces and state_trajectories are populated only by the
    # commented-out legacy path below; they are currently unused.
    summary_states, summary_traces, state_trajectories = [], [], {}
    seen_indexes, seen_importance = {x: [] for x in range(len(exec_traces))}, []
    """for each state by importance"""
    for index, row in sorted_df.iterrows():
        state = row['state']
        """unique score for frogger"""
        # Skip states whose importance score was already used (deduplication
        # by score value, apparently specific to the frogger domain).
        if row["importance"] in seen_importance:
            continue
        else:
            seen_importance.append(row["importance"])
        trace_len = len(exec_traces[state[0]].states)
        lower, upper = get_relevant_range(state[1], trace_len, context_length,
                                          minimum_gap, overlay_limit)
        # Accept the state only if neither end of its context window was
        # already claimed within the same trace.
        if lower not in seen_indexes[state[0]] and upper not in seen_indexes[state[0]]:
            seen_indexes[state[0]] += list(range(lower, upper + 1))
            summary_states.append(state)
            if len(summary_states) == budget:
                break
    #
    # trajectories = {}
    # for trace_idx, trace in enumerate(exec_traces):
    #     if state in trace.states:
    #         state_index = trace.states.index(state)
    #         trace_len = len(trace.states)
    #         lower, upper = get_relevant_range(state_index, trace_len, context_length,
    #                                           minimum_gap, overlay_limit)
    #         """check if these states are not neighbours of previously seen states"""
    #         for seen_state in summary_states:
    #             # if [1 for x in trace.states[lower:upper] if x == seen_state]:
    #             if seen_state[0] != trace_idx:
    #                 break
    #             else:
    #                 if seen_state[1] in trace.states[lower:upper]:
    #                     break
    #         else:
    #             trajectories[trace_idx] = state_index
    #         if not summary_states:
    #             trajectories[trace_idx] = state_index
    #
    #     """if no siutable trajectories found - try next state"""
    #     if not trajectories:
    #         continue
    #     else:
    #         state_trajectories[state] = trajectories
    #
    #     """once a trace is obtained, get the state index in it"""
    #     summary_states.append(state)
    #     summary_traces.append(list(trajectories.keys()))
    #     if len(summary_states) == budget:
    #         break

    summary_state_trajectories = {}
    for t_i, s_i in summary_states:
        t = exec_traces[t_i].states
        # NOTE(review): this second call omits minimum_gap/overlay_limit, so
        # the exported window may differ from the one used for overlap checks
        # above -- confirm get_relevant_range's defaults make this consistent.
        lower, upper = get_relevant_range(s_i, len(t), context_length)
        summary_state_trajectories[(t_i, s_i)] = t[lower:upper]
    return summary_state_trajectories
50c1dddaad88fa697f850380b215c2fb9e5f1a13
3,639,452
def draw_figure(canvas, figure, loc=(0, 0)):
    """Draw a matplotlib figure onto a Tk canvas.

    loc: location of the top-left corner of the figure on the canvas, in
    pixels.

    Returns the ``tk.PhotoImage`` holding the rendered pixels; the caller
    MUST keep a reference to it alive, otherwise Tk garbage-collects the
    image and the picture disappears.

    Inspired by matplotlib source: lib/matplotlib/backends/backend_tkagg.py
    """
    # Render the figure with the Agg backend into an off-screen buffer.
    figure_canvas_agg = FigureCanvasAgg(figure)
    figure_canvas_agg.draw()
    figure_x, figure_y, figure_w, figure_h = figure.bbox.bounds
    figure_w, figure_h = int(figure_w+1), int(figure_h+1)
    photo = tk.PhotoImage(master=canvas, width=figure_w, height=figure_h)

    # Position: convert from top-left anchor to center anchor
    canvas.create_image(loc[0] + figure_w/2, loc[1] + figure_h/2, image=photo)

    # Unfortunately, there's no accessor for the pointer to the native renderer,
    # so we reach into the private _renderer to blit the Agg pixels into Tk.
    blit(photo, figure_canvas_agg.get_renderer()._renderer, colormode=2)

    # Return a handle which contains a reference to the photo object
    # which must be kept live or else the picture disappears
    return photo
02e4bc4a6cd475c63239170c0dae0648199c46b5
3,639,453
def find_struct(lines):
    """Finds structures in output data.

    Parses an alignment report: collects the two sequence names from the
    '; ALIGNING' header, accumulates the aligned sequences and the Vienna
    structure string, and returns ``[[pairs, seq1, seq2]]`` where ``pairs``
    is the sorted pair list from ``ViennaStructure(...).toPairs()``.
    """
    struct = ''
    name1 = ''
    name2 = ''
    seq1 = ''
    seq2 = ''
    result = []
    for line in lines:
        # End of the alignment block.
        if line.startswith('; ========'):
            break
        if line.startswith('; ALIGNING'):
            line = line.split()
            name1 = line[2]
            name2 = line[4]
            continue
        # NOTE(review): before an '; ALIGNING' header is seen, name1/name2 are
        # '' and these prefixes degenerate to '; ALIGN ', which also matches
        # the '; ALIGN Structure' lines -- confirm input always starts with
        # the ALIGNING header.
        if line.startswith('; ALIGN %s' % name1):
            # Drop the 3 leading fields, keep the concatenated residues.
            line = line.split()[3:]
            line = ''.join(line)
            seq1 = ''.join([seq1,line])
            continue
        if line.startswith('; ALIGN %s' % name2):
            line = line.split()[3:]
            line = ''.join(line)
            seq2 = ''.join([seq2,line])
            continue
        if line.startswith('; ALIGN Structure'):
            line = line.split()[3:]
            line = ''.join(line)
            struct = ''.join([struct,line])
            continue
    # Convert the accumulated dot-bracket string into a sorted pair list.
    struct = ViennaStructure(struct).toPairs()
    struct.sort()
    # Only one record is produced per call, after the whole block is read.
    result.append([struct,seq1,seq2])
    return result
b7f7e5c70fe0b1111f33e43a40bb9fdde4182b68
3,639,454
from typing import Callable


def chain(*fs: Callable) -> Callable:
    """
    Compose given functions in reversed order.

    Given functions f, g, the result of chain is chain(f, g) = g o f.

    >>> def f(x: int) -> int:
    ...     return x + 1
    >>> def g(x: int) -> str:
    ...     return str(x)
    >>> chain(f, g)(41)
    '42'

    Chaining single function is the function itself.

    >>> chain(f) is f
    True

    Empty function chain is identity.

    >>> chain()(42)
    42
    """
    # compose applies right-to-left, so hand it the functions reversed.
    return compose(*fs[::-1])
4956a955a760d5243988f8fc6fdb0303e3351704
3,639,455
def astra_fp_2d_fan(volume, angles, source_object, object_det):
    """Forward-project a 2-D volume with a fan-beam geometry.

    :param volume: 2-D array; its second dimension sets the detector size.
    :param angles: projection angles in degrees.
    :param source_object: source-to-object distance.
    :param object_det: object-to-detector distance.
    :return: the projection (sinogram) computed by astra_fp_2d.
    """
    num_detector_pixels = volume.shape[1]
    geometry = build_proj_geometry_fan_2d(
        num_detector_pixels, angles, source_object, object_det)
    return astra_fp_2d(volume, geometry)
5114730387bd43585bb56a16e5e930491aa87fd2
3,639,456
from typing import Mapping


def get_remappings_prefix() -> Mapping[str, str]:
    """Get the remappings for xrefs based on the prefix.

    .. note:: The trailing colon ``:`` is not taken into account.
    """
    registry = _get_curated_registry()
    return registry['remappings']['prefix']
02cb1bb1cfa4ffb177327442c6fb63c4fc3fa320
3,639,457
import json


def generate_schema():
    """Generate a JSON table schema from today's dataset CSV.

    Reads ``complete_dataset_<dd_mm_YYYY>.csv`` for the current date,
    builds a pandas table schema from it, and writes the schema to
    ``json_schema_for_big_query.json``. Returns None.
    """
    stamp = date.today().strftime("%d_%m_%Y")
    dataset = pd.read_csv(f"complete_dataset_{stamp}.csv")

    schema = pd.io.json.build_table_schema(dataset)
    with open("json_schema_for_big_query.json", "w", encoding="utf-8") as handle:
        json.dump(schema, handle, ensure_ascii=False, indent=4)
    return None
67dca17ddfae8f3530e8ced2a730c28657fa77ca
3,639,458
def binary_truncated_sprt_with_llrs(llrs, labels, alpha, beta, order_sprt):
    """ Used in run_truncated_sprt_with_llrs .
    Args:
        llrs: A Tensor with shape (batch, duration). LLRs (or scores) of all frames.
        labels: A Tensor with shape (batch,).
        alpha : A float.
        beta: A float.
        order_sprt: An int.
    Returns:
        confmx: A Tensor with shape (2, 2).
        mean_hittime: A scalar Tensor.
        var_hittime: A scalar Tensor.
        truncate_rate: A scalar Tensor.

    Note:
        order_sprt is currently unused in this body.
    """
    llrs_shape = llrs.shape
    duration = int(llrs_shape[1])
    batch_size = llrs_shape[0]
    assert batch_size != 0

    # Calc thresholds (Wald's SPRT bounds from the target error rates)
    thresh = [np.log(beta/(1-alpha)), np.log((1-beta)/alpha)]
    if not ( (thresh[1] >= thresh[0]) and (thresh[1] * thresh[0] < 0) ):
        raise ValueError("thresh must be thresh[1] >= thresh[0] and thresh[1] * thresh[0] < 0. Now thresh = {}".format(thresh))

    # Calc all predictions and waits
    signs1 = (tf.sign(llrs - thresh[1]) + 1)/2 # 1:hit, 0:wait
    signs0 = (-1 - tf.sign(thresh[0] - llrs))/2 # -1:hit, 0:wait
    preds_all_frames = signs1 + signs0 # (batch, duration), value= +1, 0, -1

    # Calc truncate rate: fraction of samples that never hit either
    # threshold over the full duration (product over frames of wait flags).
    hit_or_wait_all_frames = -(tf.abs(preds_all_frames) - 1) # wait=1, hit=0
    truncate_rate = tf.reduce_mean(tf.reduce_prod(hit_or_wait_all_frames, 1), 0)

    # Truncate survivors (forced decision): sign of the final-frame LLR.
    preds_last_frame = tf.sign(llrs[:,-1]) # (batch,) value= +1, -1
    preds_last_frame = tf.expand_dims(preds_last_frame, -1) # (batch, 1)
    preds_all_frames_trunc = tf.concat([preds_all_frames[:,:-1], preds_last_frame], -1) # (batch, duration-1)+(batch,1)=(batch, duration)

    if duration == 1:
        # Calc mean hitting time and confusion matrix
        # NOTE(review): var_hittime is never assigned on this branch, so the
        # final return raises NameError when duration == 1 -- confirm.
        mean_hittime = tf.constant(1., tf.float32)
        preds = preds_all_frames_trunc[:,0] # (batch,)
        preds = tf.cast((preds + 1) / 2, tf.int32)
        confmx = tf.math.confusion_matrix(labels, preds, num_classes=2, dtype=tf.int32)

    else:
        # Calc mean hitting time.
        # mask weights earlier frames higher ([duration, ..., 2, 1]) so the
        # max/min below pick out the FIRST frame that hit each threshold.
        mask = tf.constant([i+1 for i in range(duration)][::-1], tf.float32)
        mask = tf.tile(mask, [batch_size,])
        mask = tf.reshape(mask, [batch_size, duration])
        masked = preds_all_frames_trunc * mask # (batch, duration)
        signed_hittimes1 = tf.reduce_max(masked, 1, keepdims=True)
        signed_hittimes0 = tf.reduce_min(masked, 1, keepdims=True)
        signed_hittimes0_abs = tf.abs(signed_hittimes0)
        signed_hittimes_twin = tf.concat([signed_hittimes1, signed_hittimes0], 1)
        hittimes_twin = tf.abs(signed_hittimes_twin)
        # Whichever threshold was hit first (larger weighted value) wins.
        answers1 = tf.greater(signed_hittimes1, signed_hittimes0_abs)
        answers0 = tf.less(signed_hittimes1, signed_hittimes0_abs)
        answers = tf.concat([answers1, answers0], 1)
        hittimes = hittimes_twin[answers]
        # Convert the reversed weight back to a 1-based frame index.
        hittimes = duration - hittimes + 1
        mean_hittime, var_hittime = tf.nn.moments(hittimes, axes=[0])

        # Calc confusion matrix (predicted class from the sign of the winner).
        signs_twin = tf.sign(signed_hittimes_twin)
        preds = signs_twin[answers]
        preds = tf.cast((preds + 1) / 2, tf.int32)
        confmx = tf.math.confusion_matrix(labels, preds, num_classes=2, dtype=tf.int32)

    return confmx, mean_hittime, var_hittime, truncate_rate
4d4f67d1ad9407df1cf8bfdc0e4c5cf775fcc57b
3,639,459
import time


def backoff(action, condition, max_attempts=40):
    """
    Calls result = action() up to max_attempts times until condition(result)
    becomes true, with 30 s backoff between attempts.

    Returns a bool flag indicating whether condition(result) was met.

    :param action: zero-argument callable producing a result to test.
    :param condition: predicate applied to the result of action().
    :param max_attempts: maximum number of times action() is invoked.
    """
    timeout = 30
    for attempt in range(max_attempts):
        result = action()
        if condition(result):
            return True
        # Only sleep if a retry is still coming; the original also slept
        # after the final failed attempt for no benefit.
        if attempt < max_attempts - 1:
            # Fixed: the original called `printf`, which is not defined in
            # Python and raised NameError on the retry path.
            print("Condition not met, retrying in {0} seconds...".format(timeout))
            time.sleep(timeout)
    return False
93fe5ff9ee672073eb9eb4792572e41d4b4c3faa
3,639,460
def get_file_info(repo, path):
    """we need change_count, last_change, nbr_committers.

    Walks the commit history touching *path* and returns the tuple
    (number of changes, committed_date of the last commit iterated,
    number of distinct committers).
    """
    # NOTE(review): iter_commits yields newest-first, so `last_change`
    # ends up holding the date of the final commit iterated -- confirm
    # whether the oldest or newest date is actually intended.
    unique_committers = set()
    last_change = None
    nbr_changes = 0
    for commit in repo.iter_commits(paths=path):
        #print(dir(commit))
        unique_committers.add(commit.committer)
        last_change = commit.committed_date
        nbr_changes += 1

    return nbr_changes, last_change, len(unique_committers)
6ff99df399d35b79d0e2a5635b1e76e1f65fe0bd
3,639,461
import requests
import urllib3


def retryable_session(session: requests.Session, retries: int = 8) -> requests.Session:
    """
    Session with requests to allow for re-attempts at downloading missing data

    :param session: Session to download with
    :param retries: How many retries to attempt
    :return: Session that does downloading
    """
    retry_policy = urllib3.util.retry.Retry(
        total=retries,
        read=retries,
        connect=retries,
        backoff_factor=0.3,
        status_forcelist=(500, 502, 504),
    )
    http_adapter = requests.adapters.HTTPAdapter(max_retries=retry_policy)
    # Mount the same adapter for both schemes.
    for scheme in ("http://", "https://"):
        session.mount(scheme, http_adapter)
    return session
a57d2021077997ab14576df35b4e5ad9d281575e
3,639,462
def apply_affine(x, y, z, affine):
    """
    Apply the affine matrix to the given coordinates.

    Parameters
    ----------
    x: number or ndarray
        The x coordinates
    y: number or ndarray
        The y coordinates
    z: number or ndarray
        The z coordinates
    affine: 4x4 ndarray
        The affine matrix of the transformation

    Returns
    -------
    Transformed (x, y, z) arrays with the same shape as the inputs.
    """
    shape = x.shape
    assert y.shape == shape, 'Coordinate shapes are not equal'
    assert z.shape == shape, 'Coordinate shapes are not equal'
    # Flatten without copying where possible.
    flat_x = np.reshape(x, (-1,))
    flat_y = np.reshape(y, (-1,))
    flat_z = np.reshape(z, (-1,))
    # Homogeneous coordinates: one (x, y, z, 1) column per point.
    homogeneous = np.c_[flat_x, flat_y, flat_z, np.ones(flat_x.shape)].T
    out_x, out_y, out_z, _ = np.dot(affine, homogeneous)
    return (np.reshape(out_x, shape),
            np.reshape(out_y, shape),
            np.reshape(out_z, shape))
b940c98da65a61cd46d2ad85ec33c791619341a0
3,639,463
from scipy.ndimage import binary_dilation


def binary_dilation_circle(input, radius):
    """Dilate a binary array with a disk of the given radius.

    Parameters
    ----------
    input : array_like
        Input array
    radius : float
        Dilation radius (pix)

    Returns
    -------
    The binary-dilated array.
    """
    disk = binary_disk(radius)
    return binary_dilation(input, disk)
c769b3aa652dc960cdd62dd19524d12f20e8b2bc
3,639,464
def square_valid(board: Board, n: int, pawn_value: int, x: int, y: int) -> bool:
    """Check if the square at x and y is available to put a pawn on it."""
    # Guard first: a square outside the board can never be played.
    if not coordinates_within_board(n, x, y):
        return False
    return square_playable(board, pawn_value, x, y)
725f65e64a8570e7483f103f0bf669cef3d7f1ef
3,639,465
def epb2jd(epb):
    """ Besselian epoch to Julian date.

    :param epb: Besselian epoch.
    :type epb: float

    :returns: a tuple of two items:

        * MJD zero-point, always 2400000.5 (float)
        * modified Julian date (float).

    .. seealso:: |MANUAL| page 76
    """
    # ctypes out-parameters: the SOFA C routine writes its results here.
    djm0 = _ct.c_double()
    djm = _ct.c_double()
    # iauEpb2jd fills djm0/djm via pointers; no status value is checked here
    # (presumably the routine cannot fail -- confirm against the SOFA manual).
    _sofa.iauEpb2jd(epb, _ct.byref(djm0), _ct.byref(djm))
    return djm0.value, djm.value
c5a9bcb422ab34ba0875d152cf8c39dda898e68b
3,639,466
def one_hot_decision_function(y):
    """Turn row-wise scores into a one-hot matrix marking each row's argmax.

    Examples
    --------
    >>> y = [[0.1, 0.4, 0.5],
    ...      [0.8, 0.1, 0.1],
    ...      [0.2, 0.2, 0.6],
    ...      [0.3, 0.4, 0.3]]
    >>> one_hot_decision_function(y)
    array([[ 0.,  0.,  1.],
           [ 1.,  0.,  0.],
           [ 0.,  0.,  1.],
           [ 0.,  1.,  0.]])
    """
    winners = np.argmax(y, axis=1)
    result = np.zeros_like(y)
    # Set a single 1 per row, at that row's argmax column.
    rows = np.arange(len(result))
    result[rows, winners] = 1
    return result
a6eecff684ab926a46d746ca9c18e6b098308286
3,639,467
def combine_incomes(toshl_income, excel_income): """ Combines two data sources of incomes: toshl incomes and incomes from cashflow excel. :param toshl_income: Preprocessed dataframe of toshl incomes (after cleaning and splitting) :param excel_income: Raw excel income data :return: Total income data """ df_in = toshl_income.reset_index().copy() df_in["Tags"] = df_in["Tags"].apply(lambda x: "Salary" if x in ["Privat", "NHK", "OL"] else x) df_in2 = excel_income.copy() df_in2 = df_in2[["Datum", "Art", "Betrag"]].rename(columns={"Datum": "Date", "Art": "Tags", "Betrag": "Amount"}).dropna() df_in2["Date"] = pd.to_datetime(df_in2["Date"], format="%d.%m.%Y") df_in2["Tags"] = df_in2["Tags"].apply(lambda x: "Salary" if x in ["Gehalt", "Sodexo"] else x) df_income = pd.concat([df_in, df_in2], ignore_index=True) assert df_income.count()[0] == df_in.count()[0] + df_in2.count()[0], "Some income rows were lost!" df_income = df_income.groupby([pd.Grouper(key='Date', freq='1M'), 'Tags']).sum() return(df_income)
31efb2d7b7420f3c71fcb12876cdc09d7ff748ec
3,639,468
def generate_k(data_set, k):
    """
    Given `data_set`, which is an array of arrays, find the minimum and
    maximum for each coordinate (a range) and generate `k` random points
    whose coordinates fall inside those per-dimension ranges.

    Return an array of the random points within the ranges.
    """
    dimensions = len(data_set[0])
    min_max = defaultdict(int)

    # First pass: record per-dimension minimum and maximum.
    for point in data_set:
        for dim in range(dimensions):
            coord = point[dim]
            lo_key = 'min_%d' % dim
            hi_key = 'max_%d' % dim
            if lo_key not in min_max or coord < min_max[lo_key]:
                min_max[lo_key] = coord
            if hi_key not in min_max or coord > min_max[hi_key]:
                min_max[hi_key] = coord

    # Second pass: draw k uniform points inside the bounding box.
    centers = []
    for _ in range(k):
        centers.append([
            uniform(min_max['min_%d' % dim], min_max['max_%d' % dim])
            for dim in range(dimensions)
        ])
    return centers
1fd4eb6a825a0ca2b8e6b8200081ecfded351c7d
3,639,469
import requests


def __ping_url(url: str) -> bool:
    """Check a link for rotting.

    Issues a HEAD request and reports True only for a small set of
    healthy status codes; any exception (DNS failure, timeout, ...)
    counts as a dead link.
    """
    try:
        head_response = requests.head(url)
        return head_response.status_code in (
            requests.codes.ok,
            requests.codes.created,
            requests.codes.no_content,
            requests.codes.not_modified,
        )
    except Exception:
        # Network-level failure: treat the link as rotten.
        return False
e680cec006127bbe889dcab0291be3149f30d10e
3,639,470
def get_all_list_data():
    """
    Handles the GET request to '/get-all-list-data'.
    :return: Json with all list data (types, tags, research groups, employees)
    """
    conn = get_db()
    payload = {
        "types": [t.to_dict() for t in TypeDataAccess(conn).get_types(False)],
        "tags": TagDataAccess(conn).get_tags(),
        "research groups": [g.to_dict() for g in ResearchGroupDataAccess(conn).get_research_groups(False)],
        "employees": [e.to_dict() for e in EmployeeDataAccess(conn).get_employees(False)]
    }
    return jsonify(payload)
4a4a942e054d301f936ae7993b04aff6c554f91c
3,639,471
def truncate_range(data, percMin=0.25, percMax=99.75, discard_zeros=True):
    """Clip values below/above the given percentiles, in place.

    Parameters
    ----------
    data : np.ndarray
        Image to be truncated (modified in place and also returned).
    percMin : float
        Percentile minimum.
    percMax : float
        Percentile maximum.
    discard_zeros : bool
        Exclude (near-)zero voxels when computing the percentiles; those
        voxels are reset to 0 afterwards.

    Returns
    -------
    data : np.ndarray
        Truncated data.
    pMin : float
        Minimum truncation threshold which is used.
    pMax : float
        Maximum truncation threshold which is used.
    """
    if discard_zeros:
        nonzero_mask = ~np.isclose(data, 0.)
        pMin, pMax = np.nanpercentile(data[nonzero_mask], [percMin, percMax])
    else:
        pMin, pMax = np.nanpercentile(data, [percMin, percMax])
    # Clip only the non-NaN entries, leaving NaNs untouched.
    valid = data[~np.isnan(data)]
    valid[valid < pMin] = pMin
    valid[valid > pMax] = pMax
    data[~np.isnan(data)] = valid
    if discard_zeros:
        # Restore the voxels that were masked out of the percentile calc.
        data[~nonzero_mask] = 0
    return data, pMin, pMax
c9f56e593255ae6261b6f709b725cc952accc884
3,639,472
def obtain_dcdb_to_drugbank(biana_cnx, unification_protocol, output_pickle_file):
    """ Obtain a dictionary {dcdb : drugbank}.

    Joins the DCDB and DrugBank external-entity tables through the BIANA
    unification-protocol user entities, pickles the resulting mapping to
    ``output_pickle_file`` and returns it.

    :param biana_cnx: open MySQL connection to the BIANA database.
    :param unification_protocol: protocol name used to resolve the
        user-entity join table.
    :param output_pickle_file: path the mapping is dumped to (cPickle).
    :return: dict mapping DCDB drug id -> set of DrugBank ids.
    """
    up_table = return_unification_protocol_table(biana_cnx, unification_protocol)

    # Join DCDB ids to DrugBank ids through two hops of the unification table
    # (external entity -> user entity -> external entity).
    query = ('''SELECT DC.value, DB.value FROM externalEntityDCDB_drugID DC, {} U1, {} U2, externalEntityDrugBankID DB
                 WHERE DC.externalEntityID = U1.externalEntityID AND U1.userEntityID = U2.userEntityID AND U2.externalEntityID = DB.externalEntityID
              '''.format(up_table, up_table))

    cursor = biana_cnx.cursor()
    cursor.execute(query)

    dcdb_to_drugbank = {}
    for items in cursor:
        dcdb = items[0]
        drugbank = items[1]
        # One DCDB id may map to several DrugBank ids; collect them in a set.
        dcdb_to_drugbank.setdefault(dcdb, set())
        dcdb_to_drugbank[dcdb].add(drugbank)

    cursor.close()

    print(dcdb_to_drugbank)

    # NOTE(review): Python-2 era code (cPickle, text-mode 'w' for pickling);
    # the file handle opened inline is never explicitly closed -- confirm.
    cPickle.dump(dcdb_to_drugbank, open(output_pickle_file, 'w'))

    return dcdb_to_drugbank
02b9d5b6ddb29974d551123e7bb12a7a6aca3ca4
3,639,473
def duo_username(user):
    """
    Return the Duo username for user.

    The Duo username is simply the user's ``username`` attribute.
    """
    duo_name = user.username
    return duo_name
92b2bfd5f6f3027787db493880139a8564597946
3,639,474
import random


def random_number_list(data=None, length=None):
    """
    Append random integers between 0 and 9 (both inclusive) to a list.

    :param data: list to append to; a fresh list is created when omitted.
        (The original used a mutable default ``data=[]``, which is shared
        across calls -- fixed here.)
    :param length: how many numbers to append. Defaults to the module-level
        ``list_length`` global, preserving the original behaviour.
    :return: the list with the random integers appended.
    """
    if data is None:
        data = []
    if length is None:
        # Preserve the original reliance on the module-level global.
        length = list_length
    for _ in range(length):
        # randint endpoints are inclusive; the original's randint(0, 10)
        # contradicted its own "between 0 and 9 (both inclusive)" docs.
        data.append(random.randint(0, 9))
    return data
5a04409a40e1e65216579056f95024269da1fc5a
3,639,475
def _matrix_method_reshape(df: pd.DataFrame) -> pd.DataFrame: """ Reshape df for matrix method and deal with missing values. We first drop columns which contain all missing values, transpose the dataframe and then fill the remaining missing values with zero, to deal with missing items in some periods. Parameters ---------- df : pd.DataFrame The dataframe to reshape. Returns ------- pd.DataFrame The reshaped dataframe. """ return df.dropna(how='all', axis=1).T.fillna(0)
64989a6c61d1d891a3190cc1f6a36c98cf562775
3,639,476
import glob
import os
import csv


def orbitrap(file_path):
    """Import Orbitrap data from XCalibur export.

    Reads every ``*.mzML.binary.*.txt`` scan file under ``file_path`` and
    collects one record per peak whose intensity meets the configured
    cutoff.

    Designed for scan by scan Orbitrap data. Original export of example
    data performed by Cech lab @ UNCG. Example data in MS_data external
    in Cech directory.

    Parameters
    ----------
    file_path : str
        Directory containing the exported scan files.

    Returns
    -------
    pd.DataFrame
        Columns: scan, rt, mz, drift, intensity. ``drift`` is always None
        here (no ion-mobility dimension in these exports).
    """
    headers = ["scan", "rt", "mz", "drift", "intensity"]
    input_data = []
    # Minimum peak intensity to keep, taken from the module-level config.
    intensity_cutoff = config.intensity_cutoff
    for path_name in glob.glob(os.path.join(file_path, "*.mzML.binary.*.txt")):
        # Scan number is encoded in the filename: <name>.mzML.binary.<scan>.txt
        # NOTE(review): splitting on "/" assumes POSIX separators;
        # os.path.basename would be portable — confirm target platforms.
        file_name = path_name.split("/")[-1]
        scan_number = int(file_name.split(".")[-2])
        # First pass: pull the retention time from the comment header.
        with open(path_name) as f:
            for row in f:
                if row.startswith("# retentionTime:"):
                    retention_time = float(row.split(" ")[-1])
                    break
        # Second pass: read the tab-separated (m/z, intensity) peak list.
        with open(path_name) as f:
            csv_f = csv.reader(f, delimiter="\t")
            for row in csv_f:
                if not row[0].startswith("#"):  # skip comment lines
                    intensity = round(float(row[1]), 0)
                    mass = round(float(row[0]), 4)
                    if intensity >= intensity_cutoff:
                        # drift is None: Orbitrap exports carry no drift time.
                        input_data.append([scan_number, retention_time, mass, None, intensity])
    orbitrap_dataframe = pd.DataFrame.from_records(input_data, columns=headers, index=str)
    return orbitrap_dataframe
3d627faac3988451e5dd8287dfe8487c2efc3897
3,639,477
import warnings


def sim_bursty_oscillator(T, Fs, freq, prob_enter_burst=.1,
                          prob_leave_burst=.1, cycle_features=None,
                          return_cycle_df=False):
    """Simulate a bursty oscillator as a two-state (burst / no-burst)
    Markov chain of cycles, each cycle rendered as a cosine with its own
    amplitude, period and rise-decay symmetry.

    Parameters
    ----------
    T : float
        signal duration (seconds)
    Fs : float
        signal sampling rate
    freq : float
        oscillator frequency
    prob_enter_burst : float
        probability of a cycle being oscillating given the last cycle is
        not oscillating
    prob_leave_burst : float
        probability of a cycle not being oscillating given the last cycle
        is oscillating
    cycle_features : dict
        mean and standard deviations (within and across bursts) of each
        cycle's amplitude, period, and rise-decay symmetry. May be a
        complete or partial set (defaults fill the rest) of the keys:
        amp_mean, amp_std, amp_burst_std, period_mean (computed from
        `freq`), period_std, period_burst_std (both in samples),
        rdsym_mean, rdsym_std, rdsym_burst_std.
    return_cycle_df : bool
        if True, also return the dataframe of per-cycle simulation
        parameters. Useful e.g. for computing power only over the burst
        periods rather than the whole signal.

    Returns
    -------
    signal : np.array
        bursty oscillator
    df : pd.DataFrame
        cycle-by-cycle properties of the simulated oscillator
        (only if `return_cycle_df` is True)
    """
    # Define default parameters for cycle features
    mean_period_samples = int(Fs / freq)
    cycle_features_use = {'amp_mean': 1, 'amp_burst_std': .1, 'amp_std': .2,
                          'period_mean': mean_period_samples,
                          'period_burst_std': .1 * mean_period_samples,
                          'period_std': .1 * mean_period_samples,
                          'rdsym_mean': .5, 'rdsym_burst_std': .05,
                          'rdsym_std': .05}

    # Overwrite default cycle features with those specified
    if cycle_features is not None:
        for k in cycle_features:
            cycle_features_use[k] = cycle_features[k]

    # Determine number of cycles to generate; overestimate by 2x so that
    # short random periods cannot leave the signal under-filled.
    N_samples = T * Fs
    N_cycles_overestimate = int(np.ceil(N_samples / mean_period_samples * 2))

    # Simulate if a series of cycles are oscillating or not oscillating
    # (first-order Markov chain over cycles, starting outside a burst).
    is_oscillating = [False]
    N_cycles_current = 1
    while N_cycles_current < N_cycles_overestimate:
        rand_num = np.random.rand()
        if is_oscillating[-1]:
            is_oscillating.append(rand_num > prob_leave_burst)
        else:
            is_oscillating.append(rand_num < prob_enter_burst)
        N_cycles_current += 1

    # Determine period, amp, and rdsym for each cycle. Burst-level means
    # are redrawn at the start of each burst and reset to NaN between bursts.
    periods = []
    amps = []
    rdsyms = []
    for is_osc in is_oscillating:
        if is_osc is False:
            period = cycle_features_use['period_mean'] + \
                np.random.randn() * cycle_features_use['period_std']
            periods.append(int(period))
            amps.append(np.nan)
            rdsyms.append(np.nan)

            current_burst_period_mean = np.nan
            current_burst_amp_mean = np.nan
            current_burst_rdsym_mean = np.nan
        else:
            # NaN marks "no active burst": draw fresh burst-level means.
            if np.isnan(current_burst_period_mean):
                current_burst_period_mean = cycle_features_use['period_mean'] + \
                    np.random.randn() * cycle_features_use['period_burst_std']
                current_burst_amp_mean = cycle_features_use['amp_mean'] + \
                    np.random.randn() * cycle_features_use['amp_burst_std']
                current_burst_rdsym_mean = cycle_features_use['rdsym_mean'] + \
                    np.random.randn() * cycle_features_use['rdsym_burst_std']
            # Redraw until all three features are positive; warn after the
            # first failed attempt since that signals questionable settings.
            N_iter = 0
            period, amp, rdsym = 0, 0, 0
            while np.min([period, amp, rdsym]) <= 0:
                if N_iter > 0:
                    if period < 0:
                        feat0 = 'period'
                    elif rdsym < 0:
                        feat0 = 'rise-decay symmetry'
                    else:
                        feat0 = 'amp'
                    warnings.warn('Simulation settings are such that the {:s} is occasionally computed to be negative. You may want to reset your simulation settings'.format(feat0))
                period = current_burst_period_mean + \
                    np.random.randn() * cycle_features_use['period_std']
                amp = current_burst_amp_mean + \
                    np.random.randn() * cycle_features_use['amp_std']
                rdsym = current_burst_rdsym_mean + \
                    np.random.randn() * cycle_features_use['rdsym_std']
                N_iter += 1
            periods.append(int(period))
            amps.append(amp)
            rdsyms.append(rdsym)

    df = pd.DataFrame({'is_cycle': is_oscillating, 'period': periods,
                       'amp': amps, 'rdsym': rdsyms})
    df['start_sample'] = np.insert(df['period'].cumsum().values[:-1], 0, 0)
    df = df[df['start_sample'] < N_samples]  # Shorten df to only cycles that are included in the data

    # Simulate time series for each cycle
    x = np.array([])
    last_cycle_oscillating = False
    for i, row in df.iterrows():
        # NOTE(review): `is False` is an identity check; if pandas stores
        # this column as numpy bools the comparison may never match —
        # confirm the dtype of 'is_cycle' here.
        if row['is_cycle'] is False:
            # If last cycle was oscillating, add a decay to 0 then 0s
            if last_cycle_oscillating:
                # Quarter-period cosine ramp from the last sample down to 0.
                decay_pha = np.linspace(0, np.pi / 2, int(row['period'] / 4))
                decay_t = np.cos(decay_pha) * x[-1]
                x = np.append(x, decay_t)

                cycle_t = np.zeros(row['period'] - int(row['period'] / 4))
                x = np.append(x, cycle_t)
            else:
                # Add a blank cycle
                cycle_t = np.zeros(row['period'])
                x = np.append(x, cycle_t)
            last_cycle_oscillating = False
        else:
            # If the last cycle was not oscillating, overwrite the tail of
            # the flat segment with a quarter-period rise up to the peak.
            if not last_cycle_oscillating:
                rise_pha = np.linspace(-np.pi / 2, 0,
                                       int(row['period'] / 4))[1:]
                rise_t = np.cos(rise_pha) * row['amp']
                x[-len(rise_t):] = rise_t

            # Add a cycle with rdsym: rise and decay halves get different
            # lengths according to the rise-decay symmetry fraction.
            rise_samples = int(np.round(row['period'] * row['rdsym']))
            decay_samples = row['period'] - rise_samples
            pha_t = np.hstack([np.linspace(0, np.pi, decay_samples + 1)[1:],
                               np.linspace(-np.pi, 0, rise_samples + 1)[1:]])
            cycle_t = np.cos(pha_t)

            # Adjust decay if the last cycle was oscillating, so the decay
            # starts from the previous cycle's final amplitude (continuity).
            if last_cycle_oscillating:
                scaling = (row['amp'] + x[-1]) / 2
                offset = (x[-1] - row['amp']) / 2
                cycle_t[:decay_samples] = cycle_t[:decay_samples] * \
                    scaling + offset
                cycle_t[decay_samples:] = cycle_t[decay_samples:] * row['amp']
            else:
                cycle_t = cycle_t * row['amp']
            x = np.append(x, cycle_t)
            last_cycle_oscillating = True
    # Trim the 2x overestimate down to the requested duration.
    x = x[:N_samples]

    if return_cycle_df:
        return x, df
    else:
        return x
ea408f91f6160114f0077bd441ea049f848d2da1
3,639,478
def visualize_percent_diff(df):
    """Creates a visualization of difference in percentage of tweets of a
    topic across the entire US and returns the mean sentiment felt about
    the topic across the entire US

    Parameters:
    -----------
    df: pd.DataFrame
        dataframe containing all tweets. Must contain the columns
        - state
        - sentiment

    Returns:
    --------
    map: Choropleth map of the US, where the color refers to the total
        number of tweets, normalized by each state's expected share
    avg_sentiment: The average sentiment of a topic
    """
    avg_sentiment = df.sentiment.mean()
    # Baseline per-state tweet rate used to normalize the topic's share.
    tweet_processor = process_tweets.TweetProcessor('models/stemmed_lr.pk')
    default_rate = tweet_processor.get_default_rate()
    # Count tweets per state and convert to a percentage of the total.
    df_grouped = df[['sentiment', 'state']].groupby(['state']).count()
    df_grouped['sentiment'] = 100.*df_grouped['sentiment']\
                              /df_grouped['sentiment'].sum()
    # Right-join so every state with a baseline rate appears, even with 0 tweets.
    df_grouped = pd.merge(df_grouped, default_rate, how='right',
                          left_index=True, right_index=True)
    # NOTE(review): fillna is not in-place and its result is discarded here;
    # NaNs are only actually filled via merged_df.fillna(0) below — confirm
    # whether this line was meant to assign back.
    df_grouped.fillna(0.)
    # Express each state's share as a percentage of its expected share.
    df_grouped['sentiment'] = 100*df_grouped['sentiment']/df_grouped['rate']
    # US state boundaries (Census cartographic boundary shapefile, 2016).
    gdf = gpd.read_file('data/cb_2016_us_state_20m.dbf')
    merged_df = gdf.merge(df_grouped, how='left', left_on='NAME',
                          right_index=True)
    merged_df = merged_df.fillna(0)
    data_df = merged_df[['NAME', 'sentiment']].fillna(0)
    geo_str = merged_df[['NAME', 'geometry']].to_json()
    # Six evenly spaced color bins, anchored at 0 when all values are positive.
    threshold_scale = np.linspace(min(0, data_df['sentiment'].min()),
                                  data_df['sentiment'].max(), 6)
    threshold_scale = list(threshold_scale)
    # Center on the continental US.
    map1 = folium.Map(location=[+37, -100], tiles='Cartodb Positron',
                      zoom_start=4)
    map1.choropleth(geo_data=geo_str, data=data_df,
                    columns=['NAME', 'sentiment'],
                    fill_color='YlGn',
                    legend_name='percentage of expected',
                    name='topic: sentiment = {:.2f}'.format(avg_sentiment),
                    threshold_scale=threshold_scale,
                    key_on='feature.properties.NAME')
    return map1, avg_sentiment
d3f3404e5695a0191580f3df20eaf4c824d3e436
3,639,479
import six
import inspect


def basic_compare(first, second, strict=False):
    """
    Comparison used for custom match functions; supports regex pattern
    matching, callable evaluation, or plain equality depending on the
    type of *second*.

    Returns a ``(result, error)`` pair where *error* is a formatted
    traceback if the comparison itself raised.
    """
    try:
        if is_regex(second):
            # Non-strict mode coerces non-string values so the regex can match.
            if not strict and not isinstance(first, six.string_types):
                first = str(first)
            outcome = bool(second.match(first))
        elif callable(second):
            outcome = bool(second(first))
        else:
            outcome = first == second
        return outcome, None
    except Exception as exc:
        return None, format_trace(inspect.trace(), exc)
ee16806fd78f46c2bcf01a5263f6d0210c22f32a
3,639,480
def parse_line(line,):
    """Return a list of 2-tuples of the possible
    atomic valences for a given line from the APS defining sheet."""
    # Columns 4 onward hold one entry per valence; '*' marks "not possible".
    return [(valence, int(entry))
            for valence, entry in enumerate(line[4:])
            if entry != "*"]
d27ed66cb35084c9927cae8658d7ea8a421c69a4
3,639,481
def first_order_moments(X, min_words=3):
    """First-Order Moments

    Generate first order moment of a document-word frequency matrix:
    the expectation of per-document normalized word frequencies.

    Parameters
    ----------
    X : array-like or sparse matrix, shape=(n_samples, n_features)
        Matrix of Document-word frequency. `n_samples` is the number of
        documents and `n_features` is the number of unique words in the
        corpus.

    min_words : int, default=3
        Minimum number of words in each document. In LDA the number is 3
        since 3rd order moments are needed; shorter documents are skipped.

    Returns
    -------
    e1 : array, shape=(n_features,)
        Expectation of each word's normalized frequency.

    ignored_docs : int
        Number of ignored documents.
    """
    n_samples, n_features = X.shape
    is_sparse_x = sp.issparse(X)

    doc_word_cnts = np.asarray(X.sum(axis=1))
    if len(doc_word_cnts.shape) > 1:
        doc_word_cnts = np.squeeze(doc_word_cnts)

    if is_sparse_x:
        X_data = X.data
        X_indices = X.indices
        X_indptr = X.indptr

    ignored_docs = 0
    e1 = np.zeros(n_features)
    # BUGFIX: the original used `xrange`, which only exists on Python 2
    # and raises NameError on Python 3.
    for idx_d in range(n_samples):
        # total word count in this document
        words_cnt = doc_word_cnts[idx_d]
        if words_cnt < min_words:
            ignored_docs += 1
            continue

        if is_sparse_x:
            ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
            cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
        else:
            ids = np.nonzero(X[idx_d, :])[0]
            cnts = X[idx_d, ids]

        # Accumulate the normalized word frequencies in one vectorized step
        # (replaces the per-word Python loop).
        e1[ids] += cnts / float(words_cnt)

    # NOTE(review): if every document is ignored this divides by zero,
    # same as the original — confirm callers guarantee non-empty input.
    e1 /= (n_samples - ignored_docs)
    return (e1, ignored_docs)
b3105e50a3cf69b3dda62dfc07764ae8539ebc8f
3,639,482
from datetime import datetime, timedelta


def dashboard():
    """Return site-wide statistics (admin only).

    Responds with the total number of users and the number of users
    active within the last 7 days. Non-admin callers get an access error.
    """
    user = get_user_from_request()
    if not user.is_admin:
        return errors.no_access()

    users = User.select().count()

    # BUGFIX: with `from datetime import datetime`, the original
    # `datetime.datetime.now() - datetime.timedelta(days=7)` raised
    # AttributeError (`datetime` is the class, not the module).
    d = datetime.now() - timedelta(days=7)
    active_users = User.select().where(User.last_active_date > d).count()

    return jsonify({"success": 1, "users": users, "active_users_7_days": active_users})
05363fd27ee6980258b7ea015a81e644799c5baa
3,639,483
def dct(f, axis=-1):
    """
    Compute the Discrete Cosine Transform (orthonormal DCT-II) over the
    specified axis.

    :param f: The input array.
    :param axis: Axis along which the DCT is computed. The default is over
        the last axis.
    :return c: The computed DCT, same shape as the input.
    """
    # Number of samples along the transformed axis.
    n = f.shape[axis]

    # Sample positions x_i = (2i + 1) / (2n) and frequency indices k.
    grid = np.arange(n)
    positions = (2 * grid + 1) / (2 * n)

    # (n, n) matrix of cosine arguments: positions x (k * pi).
    angles = np.outer(positions, grid * np.pi)

    # Orthonormal scaling: 1/sqrt(n) for k = 0, sqrt(2/n) otherwise.
    norm = np.where(grid == 0, 1 / np.sqrt(n), np.sqrt(2 / n))

    # The orthonormal DCT basis, one column per frequency.
    basis = norm * np.cos(angles)

    # Project the input onto the basis along the requested axis.
    coeffs = np.tensordot(f, basis, axes=(axis, 0))

    # tensordot leaves the transformed axis last; restore its position.
    return np.moveaxis(coeffs, -1, axis)
3e6cd65a3088d948fb81f61c25b2f590facb8351
3,639,484
from pathlib import Path


def get_create_data_dir():
    """Get the data directory.

    When the directory does not exist it is created.
    """
    # Resolve the base data dir (expanding ~) and append the dataset name.
    base = Path(get_data_dir()).expanduser()
    target = base / _dataset_settings['name']

    # Create the full path if any component is missing.
    target.mkdir(parents=True, exist_ok=True)

    return target
8fd7631504ab7b926f1f6b533d0fdabaa8cad592
3,639,485
def interpolate_bezier(points, steps=100, **kwargs):
    """Generates an array of waypoints which lie on a 2D Bezier curve
    described by n (x, y) points.

    The trajectory is guaranteed to include the start and end points,
    though only on the (x, y, z) axes. The curve generated is of the nth
    degree, where n = len(points) - 1.
        1st point is the start point.
        2nd point indicates the orientation at the start point.
        (n-1)th point indicates the orientation at the end point.
        nth point is the end point.

    For information about Bezier curves see:
    - http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/Bezier/bezier-der.html

    :param points: (n, 2+) array of waypoints (rows of at least 6 values;
        columns 0-1 are x, y and column 2 is z)
    :param steps: number of interpolated samples to generate
    :return: (steps, 6) trajectory with interpolated points
    """
    # BUGFIX: `scipy.misc.comb` was removed from SciPy (moved to
    # scipy.special), and `xrange` is Python-2-only.
    from scipy.special import comb

    n = len(points) - 1
    t = np.linspace(0, 1, steps).reshape((steps, 1))
    B = np.zeros((steps, 6))

    # Accumulate the Bernstein-weighted control points on the (x, y) axes.
    for i in range(n + 1):
        e1 = ((1 - t)**(n - i) * t**i).reshape((steps, 1))
        e2 = points[i, 0:2].reshape((1, 2))
        B[:, 0:2] += comb(n, i) * np.dot(e1, e2)

    # z is interpolated linearly between the end points.
    B[:, 2] = np.linspace(points[0, 2], points[-1, 2], steps)
    B[:, 3:5] = 0

    # Heading (yaw) from the xy slope between consecutive samples.
    der_x = np.diff(B[:, 0])
    der_y = np.diff(B[:, 1])
    B[1:, 5] = np.arctan2(der_y, der_x)

    # Guarantee the exact initial point on all axes.
    B[0, :] = points[0]
    return B
403d8f6242947bc240920ea15ae6c0d72ec2d547
3,639,486
def _EAMS(track, Xmin=0.55, i0=12):
    """
    Early-Age Main Sequence.

    Without this, the low-mass tracks do not reach an EEP past the ZAMS
    before 15 Gyr.
    """
    # Delegate the index search directly to the shared AMS helper.
    return _IorT_AMS(track, Xmin, i0)
4cde6c1e598366bbaf25ab98d2ec14b9f5a34d86
3,639,487
import uuid


def neighboring_pairs(dataset, text_key='text', reuse_sentences=True):
  """Create a dataset consisting of neighboring sentence pairs.

  The input examples should have a key text_key associated with a
  tf.string value.

  The output examples have keys 'first' and 'second'.

  We only take sentence pairs from within the same line since lines seem to
  represent paragraph-like structures in our text datasets. Empty lines and
  1-sentence lines will thus be ignored.

  The argument reuse_sentences determines whether a sentence can be used as
  both the first and last element in the pair. For example, the input with
  sentences A,B,C,D will return (A,B),(B,C),(C,D) if reuse_sentences is
  True and (A,B),(C,D) if reuse_sentences is False.

  Args:
    dataset: a tf.data.Dataset
    text_key: a string, the key for the text feature to preprocess in the
        dataset examples.
    reuse_sentences: a boolean

  Returns:
    a tf.data.Dataset
  """

  def split_by_lines(dataset):
    """Splits text in dataset by line, removing empty lines."""

    def my_fn(text):
      lines = tf.strings.split([text], sep='\n').values
      return tf.strings.strip(lines)

    dataset = dataset.map(
        my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    dataset = dataset.unbatch()
    # Drop empty lines so they cannot produce degenerate pairs.
    return dataset.filter(lambda x: tf.strings.length(x) > 0)

  def split_into_pairs(line):
    """Split a given text example into pairs of neighboring sentences."""
    # TODO(mmatena): Use better sentence segmentation.
    # A UUID is used as the split marker so it cannot collide with text.
    sep = str(uuid.uuid4())
    sentences = tf.strings.regex_replace(line, r'((?:\.|\!|\?)+)',
                                         r'\1' + sep)
    sentences = tf.strings.strip(tf.strings.split([sentences], sep).values)
    if reuse_sentences:
      # Overlapping pairs: (s0,s1), (s1,s2), ...
      firsts = sentences[:-1]
      seconds = sentences[1:]
    else:
      # Disjoint pairs: (s0,s1), (s2,s3), ...
      firsts = sentences[:-1:2]
      seconds = sentences[1::2]
    return {
        'first': firsts,
        'second': seconds,
    }

  def example_len(x):
    # Length of the shorter member; zero means one side is empty.
    return tf.math.minimum(
        tf.strings.length(x['first']), tf.strings.length(x['second']))

  # Split by lines.
  dataset = dataset.map(
      lambda x: x[text_key], num_parallel_calls=tf.data.experimental.AUTOTUNE)
  dataset = split_by_lines(dataset)

  # Get pairs of neighboring sentences.
  dataset = dataset.map(
      split_into_pairs, num_parallel_calls=tf.data.experimental.AUTOTUNE)
  dataset = dataset.unbatch()

  # Remove examples with empty strings.
  dataset = dataset.filter(lambda x: example_len(x) > 0)
  return dataset
815b04be745344e3a527c1eb07d22fd31bfffd94
3,639,488
def decode(tokenizer, token):
    """Decode token ids into the answer string using the given tokenizer,
    dropping any special tokens."""
    pieces = tokenizer.convert_ids_to_tokens(token, skip_special_tokens=True)
    answer = tokenizer.convert_tokens_to_string(pieces)
    return answer
4bbb58a6a0ed0d33411f9beee35ad0f2fb43698f
3,639,489
def davis_jaccard_measure(fg_mask, gt_mask):
    """ Compute region similarity as the Jaccard Index.
    :param fg_mask: (ndarray): binary segmentation map.
    :param gt_mask: (ndarray): binary annotation map.
    :return: jaccard (float): region similarity
    """
    # BUGFIX: `np.bool` was a deprecated alias removed in NumPy 1.24;
    # the builtin `bool` is the documented replacement and is identical here.
    gt_mask = gt_mask.astype(bool)
    fg_mask = fg_mask.astype(bool)

    # Both masks empty: define similarity as perfect agreement (avoids 0/0).
    if np.isclose(np.sum(gt_mask), 0) and np.isclose(np.sum(fg_mask), 0):
        return 1
    else:
        # |intersection| / |union|
        return np.sum((gt_mask & fg_mask)) / \
            np.sum((gt_mask | fg_mask), dtype=np.float32)
96e6c47cd3b8d71206f9cf903b3827840803cf10
3,639,490
def extract_logits(logits = None, seq_pos = None):
    """Gather one logit vector per batch row at the given sequence positions.

    Args
        logits: Tensor(batch_size, seq_length, vocab_size) e.g. (8, 1024, 50257)
        seq_pos: list(batch_size) -- sequence position to extract per row
    Return:
        output_logits: Tensor(batch_size, 1, vocab_size)
        extract the specified logit according to the seq_pos list.
    """
    batch_size = logits.shape[0]
    for i in range(batch_size):
        # Slice with explicit step-1 ranges so both the batch and sequence
        # dimensions are preserved, giving shape (1, 1, vocab_size).
        logit = logits[i:i+1:1, seq_pos[i]:seq_pos[i]+1:1, ::]
        if i == 0 :
            output_logits = logit
        else:
            # NOTE(review): `P` is presumably mindspore.ops — confirm.
            # Concat joins along axis 0 (the batch dimension) by default.
            output_logits = P.Concat()((output_logits, logit))
    return output_logits
008931ca8677461de947d7a365521e1e72c53866
3,639,491
import subprocess


def CollectSONAME(args):
    """Replaces: readelf -d $sofile | grep SONAME"""
    proc = subprocess.Popen(
        wrapper_utils.CommandToRun([args.readelf, '-d', args.sofile]),
        stdout=subprocess.PIPE, bufsize=-1, universal_newlines=True)
    # Keep only the dynamic-section lines mentioning the SONAME entry.
    matching = [line for line in proc.stdout if 'SONAME' in line]
    return proc.wait(), ''.join(matching)
dda042430ec1acfb82f6622e0d3786f6be44f2f4
3,639,492
def padRect(rect, padTop, padBottom, padLeft, padRight, bounds, clipExcess=True):
    """
    Pads a rectangle by the specified values on each individual side,
    ensuring the padded rectangle falls within the specified bounds.

    The input rectangle, bounds, and return value are all tuples of
    (x, y, w, h). `bounds` is indexed as (max_y, max_x). When `clipExcess`
    is True, excess is simply clipped; otherwise the rectangle is shifted
    to compensate, with a final clip for anything still out of range.
    """
    x, y, w, h = rect

    # Grow the rectangle by the requested padding.
    x -= padLeft
    y -= padTop
    w += padLeft + padRight
    h += padTop + padBottom

    if clipExcess:
        # Clamp any underflow at the origin.
        x = max(0, x)
        y = max(0, y)

        # Trim any overflow past the bounds.
        h -= max(0, (y + h) - bounds[0])
        w -= max(0, (x + w) - bounds[1])
    else:
        # Shift right/down to remove underflow at the origin.
        x += max(0, -x)
        y += max(0, -y)

        # Shift left/up to remove overflow, growing the rect accordingly.
        excessY = max(0, (y + h) - bounds[0])
        excessX = max(0, (x + w) - bounds[1])
        x -= excessX
        w += excessX
        y -= excessY
        h += excessY

        # Anything still out of range after shifting must be clipped.
        x, y, w, h = padRect((x, y, w, h), 0, 0, 0, 0, bounds, True)

    return (x, y, w, h)
032cafd373b59b725b8e2e28ba91e263ccae6e12
3,639,493
def gcs_csv_to_table(full_table_id: str, remote_csv_path: str) -> Table:
    """
    Insert CSV from Google Storage to BigQuery Table.

    :param full_table_id: Full ID of a Google BigQuery table.
    :type full_table_id: str
    :param remote_csv_path: Path to uploaded CSV.
    :type remote_csv_path: str
    :returns: The loaded BigQuery Table, or None when the load fails
        (errors are logged rather than raised).
    """
    try:
        # gs:// URI of the CSV previously uploaded to the project bucket.
        gcs_csv_uri = f"gs://{GCP_BUCKET_NAME}/{remote_csv_path}"
        # Let BigQuery infer the schema and skip the CSV header row.
        job_config = LoadJobConfig(
            autodetect=True,
            skip_leading_rows=1,
            source_format=SourceFormat.CSV,
        )
        load_job = gbq.load_table_from_uri(
            gcs_csv_uri, full_table_id, job_config=job_config
        )
        LOGGER.info(f"Starting job {load_job.job_id}.")
        LOGGER.info(load_job.result())  # Waits for table load to complete.
        return gbq.get_table(full_table_id)
    except BadRequest as e:
        LOGGER.error(f"Invalid GCP request when creating table `{full_table_id}`: {e}")
    except Exception as e:
        LOGGER.error(f"Unexpected error when creating table `{full_table_id}`: {e}")
bb0713848249e2eb4e6b89db652152c6485af0ee
3,639,494
import argparse


def _get_client(args: argparse.Namespace) -> NodeClient:
    """Returns a pycspr client instance.

    """
    # Build the connection descriptor from the parsed CLI arguments.
    connection = NodeConnectionInfo(
        host=args.node_host,
        port_sse=args.node_port_sse
    )
    return NodeClient(connection)
0eff36345d99c50a6d8298021b9f875dd17a2afe
3,639,495
import math


def turn_xyz_into_llh(x, y, z, system):
    """Convert 3D Cartesian x,y,z into Lat, Long and Height

    See http://www.ordnancesurvey.co.uk/gps/docs/convertingcoordinates3D.pdf"""
    # Ellipsoid constants for the requested datum.
    a = abe_values[system][0]
    b = abe_values[system][1]
    e2 = abe_values[system][2]

    # Distance from the polar axis.
    p = math.sqrt(x * x + y * y)
    lon = math.atan(y / x)

    # One Bowring-style iteration: initial latitude guess, prime-vertical
    # radius, then the refined latitude.
    lat_guess = math.atan(z / (p * (1.0 - e2)))
    v = a / math.sqrt(1.0 - e2 * (math.sin(lat_guess) * math.sin(lat_guess)))
    lat = math.atan((z + e2 * v * math.sin(lat_guess)) / p)

    height = (p / math.cos(lat)) - v  # Ignore if a bit out

    # Turn from radians back into degrees
    lon = lon / 2 / math.pi * 360
    lat = lat / 2 / math.pi * 360

    return [lat, lon, height]
304facd429083032e611f2f9aca09b298a40a48b
3,639,496
def _TileGrad(op, grad):
  """Sum reduces grad along the tiled dimensions.

  The gradient of Tile is computed by reshaping `grad` so every tiled
  dimension is split into (multiple, original_size) pairs, then summing
  over the multiples. Returns [input_grad, None] since the `multiples`
  input of Tile has no gradient.
  """
  input_shape = array_ops.shape(op.inputs[0])
  # We interleave multiples and input_shape to get split_shape,
  # reshape grad to split_shape, and reduce along all even
  # dimensions (the tiled dimensions) to get the result
  # with shape input_shape. For example
  #   input_shape = [20, 30, 40]
  #   multiples = [2, 3, 4]
  #   split_shape = [2, 20, 3, 30, 4, 40]
  #   axes = [0, 2, 4]
  split_shape = array_ops.reshape(
      array_ops.transpose(array_ops.stack([op.inputs[1], input_shape])), [-1])
  axes = math_ops.range(0, array_ops.size(split_shape), 2)
  # Sum reduces grad along the first dimension for IndexedSlices:
  # indices are folded back into the untiled range with mod, and the
  # leading split dimension collapses to 1.
  if isinstance(grad, ops.IndexedSlices):
    grad = math_ops.unsorted_segment_sum(
        grad.values, math_ops.mod(grad.indices, input_shape[0]),
        input_shape[0])
    split_shape = array_ops.concat([[1], split_shape[1:]], axis=0)
  input_grad = math_ops.reduce_sum(array_ops.reshape(grad, split_shape), axes)
  # Fix shape inference (reshape/reduce above lose the static shape).
  if not context.executing_eagerly():
    input_grad.set_shape(op.inputs[0].get_shape())
  return [input_grad, None]
21294667ac3a31082cc2a3d09120330ce3ec1564
3,639,497
def obj_spatial_error_sum_and_naturalness_jac(s, data): """ jacobian of error function. It is a combination of analytic solution for motion primitive model and numerical solution for kinematic error """ # Extract relevant parameters from data tuple. # Note other parameters are used for calling obj_error_sum gmm = data[0].get_gaussian_mixture_model() error_scale = data[-1] quality_scale = data[-2] logLikelihoods = _estimate_log_gaussian_prob(s, gmm.means_, gmm.precisions_cholesky_, 'full') logLikelihoods = np.ravel(logLikelihoods) numerator = 0 n_models = len(gmm.weights_) for i in range(n_models): numerator += np.exp(logLikelihoods[i]) * gmm.weights_[i] * np.dot(np.linalg.inv(gmm.covars_[i]), (s - gmm.means_[i])) denominator = np.exp(gmm.score([s])[0]) logLikelihood_jac = numerator / denominator kinematic_jac = approx_fprime(s, obj_spatial_error_sum, 1e-7, data[-2:])# ignore the kinematic factor and quality factor jac = logLikelihood_jac * quality_scale + kinematic_jac * error_scale return jac
e0f57a88e3b490abc8eb9dbc636701c4a06ffc05
3,639,498
from datetime import date, datetime


def today():
    """Return today's date formatted as YYYYMMDD.

    (Original docstring, Italian: "Ritorna il giorno di oggi in formato
    YYYYMMDD".)
    """
    # BUGFIX: with `from datetime import datetime`, the original
    # `datetime.date.today()` raised AttributeError because `datetime`
    # is the class, not the module; use `date.today()` instead.
    return date.today().strftime("%Y%m%d")
fdf9c83153667fb3324f31893bf3721566dea4d3
3,639,499