Dataset schema: content (string, lengths 35 to 762k) · sha1 (string, length 40) · id (int64, 0 to 3.66M)
def get_transceiver_sensor_sub_id(ifindex, sensor):
    """
    Returns sub OID for transceiver sensor. Sub OID is calculated as follows:
    sub OID = transceiver_oid + XCVR_SENSOR_PART_ID_MAP[sensor]
    :param ifindex: interface index
    :param sensor: sensor key
    :return: sub OID = {{index}} * 1000 + {{lane}} * 10 + sensor id
    """
    transceiver_oid, = get_transceiver_sub_id(ifindex)
    return (transceiver_oid + XCVR_SENSOR_PART_ID_MAP[sensor],)
4c718feb45384ab6bef11e1f3c42ab4cd8d0ae2c
21,500
def patch_twitter_get_following_users(value):
    """Return a function decorator which patches the
    TwitterClient.get_following_user_ids method."""
    return patch_twitter_client_method("get_following_user_ids", value)
296d20a3dbce3684cb2af2568c64fac25ab345c9
21,501
from torch import nn  # provides nn.Conv1d


def conv1x1_1d(inplanes: int, outplanes: int, stride: int = 1) -> nn.Conv1d:
    """1x1 1D convolution, used to downsample in the shortcut connection."""
    return nn.Conv1d(
        inplanes, outplanes, kernel_size=(1,), stride=(stride,), padding=0,
        bias=False
    )
481dc7b71b31ae6199bcafd1112bf1541d7a5d25
21,502
import hashlib
import os

import numpy as np  # needed for array allocation and dtypes


def load_cifar10(channels_last=True, x_shape=None, x_dtype=np.float32,
                 y_dtype=np.int32, normalize_x=False):
    """
    Load the CIFAR-10 dataset as NumPy arrays.

    Args:
        channels_last (bool): Whether or not to place the channels axis last.
        x_shape: Reshape each digit into this shape.  Default to ``(32, 32, 3)``
            if `channels_last` is :obj:`True`, otherwise default to
            ``(3, 32, 32)``.
        x_dtype: Cast each digit into this data type.  Default `np.float32`.
        y_dtype: Cast each label into this data type.  Default `np.int32`.
        normalize_x (bool): Whether or not to normalize x into ``[0, 1]``,
            by dividing each pixel value with 255. (default :obj:`False`)

    Returns:
        (np.ndarray, np.ndarray), (np.ndarray, np.ndarray): The
            (train_x, train_y), (test_x, test_y)
    """
    # check the arguments
    x_shape = _validate_x_shape(x_shape, channels_last)

    # fetch data
    path = CacheDir('cifar').download_and_extract(
        CIFAR_10_URI, hasher=hashlib.md5(), expected_hash=CIFAR_10_MD5)
    data_dir = os.path.join(path, CIFAR_10_CONTENT_DIR)

    # load the data
    train_num = 50000
    train_x = np.zeros((train_num,) + x_shape, dtype=x_dtype)
    train_y = np.zeros((train_num,), dtype=y_dtype)

    for i in range(1, 6):
        path = os.path.join(data_dir, 'data_batch_{}'.format(i))
        x, y = _load_batch(
            path, channels_last=channels_last, x_shape=x_shape,
            x_dtype=x_dtype, y_dtype=y_dtype, normalize_x=normalize_x,
            expected_batch_label='training batch {} of 5'.format(i)
        )
        (train_x[(i - 1) * 10000: i * 10000, ...],
         train_y[(i - 1) * 10000: i * 10000]) = x, y

    path = os.path.join(data_dir, 'test_batch')
    test_x, test_y = _load_batch(
        path, channels_last=channels_last, x_shape=x_shape,
        x_dtype=x_dtype, y_dtype=y_dtype, normalize_x=normalize_x,
        expected_batch_label='testing batch 1 of 1'
    )
    assert len(test_x) == len(test_y) == 10000

    return (train_x, train_y), (test_x, test_y)
d730abf0de556a8990289e92359c5d31df6c0e9c
21,503
import struct


def unpack_mmap_block(mm, n):
    """Decode the nth 4-byte long byte string from mapped memory."""
    return struct.unpack("<L", mm[n * DATA_BLOCK_SIZE:(n + 1) * DATA_BLOCK_SIZE])[0]
a75ac48e188e03e1ec7d3c289ffdd8e12173bc6f
21,504
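A minimal usage sketch for the `unpack_mmap_block` snippet above. `DATA_BLOCK_SIZE` is external to the snippet, so the sketch assumes it is 4 (one little-endian unsigned long per block); the file name is made up:

import mmap
import struct

DATA_BLOCK_SIZE = 4  # assumption: one 4-byte little-endian unsigned long per block

with open("data.bin", "wb") as f:      # hypothetical file with two blocks
    f.write(struct.pack("<LL", 7, 42))

with open("data.bin", "rb") as f:
    mm = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
    assert unpack_mmap_block(mm, 0) == 7
    assert unpack_mmap_block(mm, 1) == 42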
def tobs():
    """Return a list of temperatures for prior year"""
    # * Query for the dates and temperature observations from the last year.
    # * Convert the query results to a Dictionary using `date` as the key
    #   and `tobs` as the value.
    # * Return the json representation of your dictionary.
    last_date = session.query(Measurements.date).order_by(
        Measurements.date.desc()).first()
    last_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)
    temperature = session.query(Measurements.date, Measurements.tobs).\
        filter(Measurements.date > last_year).\
        order_by(Measurements.date).all()

    # Create a list of dicts with `date` and `tobs` as the keys and values
    temperature_totals = []
    for result in temperature:
        row = {}
        # fix: index the current row, not the whole result list
        row["date"] = result[0]
        row["tobs"] = result[1]
        temperature_totals.append(row)

    return jsonify(temperature_totals)
c7411130bb5c8d956d10a2ba3ce535f41ca04474
21,505
def reactToAMQPMessage(message, send_back):
    """
    React to given (AMQP) message. `message` is expected to be
    :py:func:`collections.namedtuple` structure from :mod:`.structures`
    filled with all necessary data.

    Args:
        message (object): One of the request objects defined in
                          :mod:`.structures`.
        send_back (fn reference): Reference to function for responding. This
                  is useful for progress monitoring for example. Function
                  takes one parameter, which may be response
                  structure/namedtuple, or string or whatever would be
                  normally returned.

    Returns:
        object: Response class from :mod:`structures`.

    Raises:
        ValueError: if bad type of `message` structure is given.
    """
    if _instanceof(message, ConversionRequest):
        return ConversionResponse(
            marcxml2mods(
                marc_xml=message.marc_xml,
                uuid=message.uuid,
                url=message.url
            )
        )

    raise ValueError("'%s' is unknown type of request!" % str(type(message)))
fd34510d58e6b164f37f93c0b17f2b2a1c8d32d2
21,506
def recE(siEnergy, layer):
    """Reconstructed energy from sim energy"""
    return ((siEnergy / mipSiEnergy) * layerWeights[layer - 1] + siEnergy) * \
        secondOrderEnergyCorrection
9efde4432f3a81ff06505c6fbb529be7404027d4
21,507
def get_account_id():
    """Retrieve the AWS account ID"""
    client = boto3.client("sts")
    account_id = client.get_caller_identity()["Account"]
    return account_id
579bdc686a0ceb5d71e180bf8ce7a17243cff849
21,508
def process_tag(item, profile, level=0):
    """
    Processes element with <code>tag</code> type
    @type item: ZenNode
    @type profile: dict
    @type level: int
    """
    if not item.name:
        # looks like it's root element
        return item

    attrs = make_attributes_string(item, profile)
    cursor = profile['place_cursor'] and zen_coding.get_caret_placeholder() or ''
    self_closing = ''
    is_unary = item.is_unary() and not item.children

    if profile['self_closing_tag'] and is_unary:
        self_closing = '/'

    # define tag name
    tag_name = '%' + (profile['tag_case'] == 'upper' and item.name.upper()
                      or item.name.lower())
    if tag_name.lower() == '%div' and '{' not in attrs:
        # omit div tag
        tag_name = ''

    item.end = ''
    item.start = _replace(item.start, tag_name + attrs + self_closing)

    if not item.children and not is_unary:
        item.start += cursor

    return item
7d06160cadb0d828799713d888327a41e7ab0b80
21,509
from typing import Optional

import google
from google.cloud import secretmanager  # provides SecretManagerServiceClient


def read_secret(project_id: str, secret_name: str) -> Optional[str]:
    """Reads the latest version of a GCP Secret Manager secret.

    Returns None if the secret doesn't exist."""
    secret_manager = secretmanager.SecretManagerServiceClient()
    secret_path = secret_manager.secret_path(project_id, secret_name)
    try:
        response = secret_manager.access_secret_version(
            request={'name': f'{secret_path}/versions/latest'}
        )
        return response.payload.data.decode('UTF-8')
    except google.api_core.exceptions.ClientError:
        # Fail gracefully if there's no secret version yet.
        return None
    except AttributeError:
        # Sometimes the google API fails when no version is present, with:
        #   File "{site-packages}/google/api_core/exceptions.py",
        #     line 532, in from_grpc_error
        #   if isinstance(rpc_exc, grpc.Call) or _is_informative_grpc_error(rpc_exc):
        #   AttributeError: 'NoneType' object has no attribute 'Call'
        return None
388fa51983452f0852646d1ed1ab183da706c0ab
21,510
import xml.etree.ElementTree  # `import xml` alone does not load the submodule


def find_in_xml(data, search_params):
    """Try to find an element in an xml

    Take an xml from string or as xml.etree.ElementTree and an iterable of
    strings (and/or tuples in case of findall) to search.
    The tuple should contain the string to search for and a true value.
    """
    if isinstance(data, str):
        data = xml.etree.ElementTree.fromstring(data)
    param = search_params[0]
    if isinstance(data, list):
        result = iterate_search_data(data, param)
    else:
        result = xml_search_helper(data, param)
    if len(search_params) == 1:
        return result
    return find_in_xml(result, search_params[1:])
1cb4685a042349231cd946116a4894ca5b9d68d5
21,511
def conditional_expect(
    X,
    func,
    reg,
    method=None,
    quantile_method=None,
    n_integration_samples=10,
    quad_dict=None,
    random_state=None,
    include_x=False,
    include_idx=False,
    vector_func=False,
):
    """Calculates the conditional expectation, i.e. E[func(Y)|X=x_eval],
    where Y | X ~ reg.predict_target_distribution, for x_eval in `X_eval`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The samples where the expectation should be evaluated.
    func : callable
        The function that transforms the random variable.
    reg : ProbabilisticRegressor
        Predicts the target distribution over which the expectation is
        calculated.
    method : string, optional (default=None)
        The method by which the expectation is computed.
        - 'assume_linear' assumes E[func(Y)|X=x_eval] ~= func(E[Y|X=x_eval])
          and thereby only takes the function value at the expected y value.
        - 'monte_carlo' Basic monte carlo integration, taking the average of
          randomly drawn samples. `n_integration_samples` specifies the
          number of monte carlo samples.
        - 'quantile' Uses the quantile function to transform the integration
          space into the interval from 0 to 1 and then uses the method from
          'quantile_method' to calculate the integral. The number of
          integration points is specified by `n_integration_samples`.
        - 'gauss_hermite' Uses Gauss-Hermite quadrature. This assumes Y | X
          to be gaussian distributed. The number of evaluation points is
          given by `n_integration_samples`.
        - 'dynamic_quad' uses `scipy's` function `expect` on the
          `rv_continuous` random variable of `reg`, which in turn uses a
          dynamic gaussian quadrature routine for calculating the integral.
          Performance is worse using a vector function.
        If `method is None`, 'gauss_hermite' is used.
    quantile_method : string, optional (default=None)
        Specifies the integration method used after the quantile
        transformation.
        - 'trapezoid' Trapezoidal method for integration using evenly
          spaced samples.
        - 'simpson' Simpson method for integration using evenly spaced
          samples.
        - 'average' Taking the average value for integration using evenly
          spaced samples.
        - 'romberg' Romberg method for integration. If
          `n_integration_samples` is not equal to `2**k + 1` for a natural
          number k, the number of samples used for integration is put to
          the smallest such number greater than `n_integration_samples`.
        - 'quadrature' Gaussian quadrature method for integration.
        If `quantile_method is None`, 'quadrature' is used.
    n_integration_samples : int, optional (default=10)
        The number of integration samples used in 'quantile', 'monte_carlo'
        and 'gauss_hermite'.
    quad_dict : dict, optional (default=None)
        Further arguments for using `scipy's` `expect`.
    random_state : numeric | np.random.RandomState, optional (default=None)
        Random state for fixing the number generation.
    include_x : bool, optional (default=False)
        If `include_x` is `True`, `func` also takes the x value.
    include_idx : bool, optional (default=False)
        If `include_idx` is `True`, `func` also takes the index of the x
        value.
    vector_func : bool or str, optional (default=False)
        If `vector_func` is `True`, the integration values are passed as a
        whole to the function `func`. If `vector_func` is 'both', the
        integration values might or might not be passed as a whole. The
        integration values, if passed as a whole, are of the form
        (n_samples, n_integration), where n_integration denotes the number
        of integration values.

    Returns
    -------
    expectation : numpy.ndarray of shape (n_samples)
        The conditional expectation for each value applied.
    """
    X = check_array(X, allow_nd=True)
    check_type(reg, "reg", ProbabilisticRegressor)
    check_type(
        method,
        "method",
        target_vals=[
            "monte_carlo",
            "assume_linear",
            "dynamic_quad",
            "gauss_hermite",
            "quantile",
            None,
        ],
    )
    check_type(
        quantile_method,
        "quantile_method",
        target_vals=[
            "trapezoid",
            "simpson",
            "average",
            "romberg",
            "quadrature",
            None,
        ],
    )
    check_scalar(n_integration_samples, "n_monte_carlo", int, min_val=1)
    check_type(quad_dict, "scipy_args", dict, target_vals=[None])
    check_type(include_idx, "include_idx", bool)
    check_type(include_x, "include_x", bool)
    check_type(vector_func, "vector_func", bool, target_vals=["both"])
    check_callable(func, "func", n_free_parameters=1 + include_idx + include_x)

    if method is None:
        method = "gauss_hermite"
    if quantile_method is None:
        quantile_method = "quadrature"
    if quad_dict is None:
        quad_dict = {}
    if method == "quantile" and quantile_method == "romberg":
        # n_integration_samples needs to be of the form 2**k + 1
        n_integration_samples = 2 ** int(np.log2(n_integration_samples) + 1) + 1

    is_optional = vector_func == "both"
    if is_optional:
        vector_func = True

    random_state = check_random_state(random_state)

    def arg_filter(idx_y, x_y, y):
        ret = tuple()
        if include_idx:
            ret += (idx_y,)
        if include_x:
            ret += (x_y,)
        ret += (y,)
        return ret

    def evaluate_func(inner_potential_y):
        if vector_func:
            inner_output = func(
                *arg_filter(np.arange(len(X)), X, inner_potential_y)
            )
        else:
            inner_output = np.zeros_like(inner_potential_y)
            for idx_x, inner_x in enumerate(X):
                for idx_y, y_val in enumerate(inner_potential_y[idx_x]):
                    inner_output[idx_x, idx_y] = func(
                        *arg_filter(idx_x, inner_x, y_val)
                    )
        return inner_output

    expectation = np.zeros(len(X))

    if method in ["assume_linear", "monte_carlo"]:
        if method == "assume_linear":
            potential_y = reg.predict(X).reshape(-1, 1)
        else:  # method equals "monte_carlo"
            potential_y = reg.sample_y(
                X=X,
                n_samples=n_integration_samples,
                random_state=random_state,
            )
        expectation = np.average(evaluate_func(potential_y), axis=1)
    elif method == "quantile":
        if quantile_method in ["trapezoid", "simpson", "average", "romberg"]:
            eval_points = np.arange(1, n_integration_samples + 1) / (
                n_integration_samples + 1
            )
            cond_dist = _reshape_scipy_dist(
                reg.predict_target_distribution(X), shape=(-1, 1)
            )
            potential_y = cond_dist.ppf(eval_points.reshape(1, -1))
            output = evaluate_func(potential_y)

            if quantile_method == "trapezoid":
                expectation = integrate.trapezoid(
                    output, dx=1 / n_integration_samples, axis=1
                )
            elif quantile_method == "simpson":
                expectation = integrate.simpson(
                    output, dx=1 / n_integration_samples, axis=1
                )
            elif quantile_method == "average":
                expectation = np.average(output, axis=-1)
            else:  # quantile_method equals "romberg"
                expectation = integrate.romb(
                    output, dx=1 / n_integration_samples, axis=1
                )
        else:  # quantile_method equals "quadrature"

            def fixed_quad_function_wrapper(inner_eval_points):
                inner_cond_dist = _reshape_scipy_dist(
                    reg.predict_target_distribution(X), shape=(-1, 1)
                )
                inner_potential_y = inner_cond_dist.ppf(
                    inner_eval_points.reshape(1, -1)
                )
                return evaluate_func(inner_potential_y)

            expectation, _ = integrate.fixed_quad(
                fixed_quad_function_wrapper, 0, 1, n=n_integration_samples
            )
    elif method == "gauss_hermite":
        unscaled_potential_y, weights = roots_hermitenorm(n_integration_samples)
        cond_mean, cond_std = reg.predict(X, return_std=True)
        potential_y = (
            cond_std[:, np.newaxis] * unscaled_potential_y[np.newaxis, :]
            + cond_mean[:, np.newaxis]
        )
        output = evaluate_func(potential_y)
        expectation = (
            1
            / (2 * np.pi) ** (1 / 2)
            * np.sum(weights[np.newaxis, :] * output, axis=1)
        )
    else:  # method equals "dynamic_quad"
        for idx, x in enumerate(X):
            cond_dist = reg.predict_target_distribution([x])

            def quad_function_wrapper(y):
                if is_optional or not vector_func:
                    return func(*arg_filter(idx, x, y))
                else:
                    return func(
                        *arg_filter(
                            np.arange(len(X)), X, np.full((len(X), 1), y)
                        )
                    )[idx]

            expectation[idx] = cond_dist.expect(
                quad_function_wrapper,
                **quad_dict,
            )
    return expectation
fb89adedeada5e100f2483d927d945ffe2d99034
21,512
def getSumOfSquaresPixel16_Image16(Image):
    """getSumOfSquaresPixel16_Image16(Image) -> unsigned __int16"""
    return _ImageFunctions.getSumOfSquaresPixel16_Image16(Image)
9ba21171e26fc32d938a3f377684830df5f03b8f
21,513
def parse_statement(parsed, output):
    """Parses a tokenized sql_parse token and returns an encoded table."""
    # Get the name of the table being created
    table_name = next(token.value for token in parsed.tokens
                      if isinstance(token, Identifier))

    # Add the table metadata to the cached tables to access later.
    if len(table_name.split('.')) == 2 \
            and not found_table(table_name.split('.')[0],
                                table_name.split('.')[1]):
        this_table = Table(
            table_name.split('.')[0],
            table_name.split('.')[1],
            cursor
        )
        print(f'Appending this table ({this_table.alias}):')
        print(this_table)
        this_table.query_data()
        tables.append(this_table)
    elif len(table_name.split('.')) == 3 \
            and not found_table(table_name.split('.')[1],
                                table_name.split('.')[2]):
        this_table = Table(
            table_name.split('.')[1],
            table_name.split('.')[2],
            cursor
        )
        print('Appending this table')
        print(this_table)
        this_table.query_data()
        tables.append(this_table)
        # print(this_table)

    # Get all the FROM statements' metadata
    froms = {k: v for d in extract_from_part(parsed, cursor)
             for k, v in d.items()}
    print('Tables:')
    print([table for table in tables])

    # Get all the JOIN statements' metadata
    joins = list(extract_join_part(parsed, cursor))

    # Get all of the comparisons to compare the number of comparisons to the
    # number of JOIN statements
    comparisons = list(extract_comparisons(parsed))

    # Get all the columns selected by this query. The table aliases are used
    # to identify where the columns originate from.
    selects = list(
        extract_selects(parsed,
                        {**froms, **{k: v for d in joins
                                     for k, v in d.items()}})
    )

    # When the number of comparisons does not match the number of joins, the
    # parsing was incorrect; raise an exception.
    if len(comparisons) != len(joins):
        raise Exception('Parsing messed up!')

    return encode_table(joins, froms, table_name, selects, comparisons, output)
a81791deac59e496145993bba0547294d5d5a7fa
21,514
def create_hue_success_response(entity_number, attr, value):
    """Create a success response for an attribute set on a light."""
    success_key = f"/lights/{entity_number}/state/{attr}"
    return {"success": {success_key: value}}
c8570ca95ada89bd26d93f659261c91032c915c7
21,515
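A quick sketch of the response shape produced by `create_hue_success_response` above; the entity number and attribute are made up:

# Hypothetical call: entity number 1, attribute "bri" set to 254.
response = create_hue_success_response(1, "bri", 254)
assert response == {"success": {"/lights/1/state/bri": 254}}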
import warnings


def get_deaths():
    """***DEPRECATED - Use get_data_jhu instead.*** Get most recent fatality
    counts from JHU."""
    # Deprecated warning
    url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/"
    warnings.warn(
        "This function is deprecated. Use get_data_jhu instead; see tutorials "
        "at <https://github.com/PayneLab/covid19pandas/tree/master/docs/>.",
        DeprecationWarning,  # the built-in class; `DeprecatedWarning` is undefined here
        stacklevel=2)
    print("These data were obtained from Johns Hopkins University "
          "(https://github.com/CSSEGISandData/COVID-19).")
    return _get_table(url, "time_series_covid19_deaths_global.csv",
                      source="jhu", update=True)
c747d27b10d0845520f4dbbfbb5efcf23c655b7e
21,516
import numpy as np


def transform_sentence(text, model):
    """Mean embedding vector"""

    def preprocess_text(raw_text, model=model):
        """Exclude unknown words and get the corresponding tokens"""
        raw_text = raw_text.split()
        return list(filter(lambda x: x in model.vocab, raw_text))

    tokens = preprocess_text(text)

    if not tokens:
        return np.zeros(model.vector_size)

    text_vector = np.mean(model[tokens], axis=0)
    return np.array(text_vector)
57f9002e4f7fccefa824f1691324b4593b11fbe0
21,517
from typing import List, Sequence, Tuple

from sqlalchemy import Column
from sqlalchemy import inspect  # SQLAlchemy's inspect, not the stdlib module
from sqlalchemy.ext.declarative import DeclarativeMeta


def model_primary_key_columns_and_names(
        Model: DeclarativeMeta) -> Tuple[Sequence[Column], List[str]]:
    """Get the list of primary key columns and their names as two separate
    tuples

    Example:
        pk_columns, pk_names = model_primary_key_columns_and_names(models.User)
        pk_columns  # -> (models.User.id, )
        pk_names  # -> ('id', )
    """
    pk_columns: Sequence[Column] = inspect(Model).primary_key
    pk_names: List[str] = [col.key for col in pk_columns]
    return pk_columns, pk_names
9466524452a77459042081e7becf968302b3dd3b
21,518
def biweekly_test_data():
    """Provides test data for the full system test when using "biweekly"
    time_scale."""
    time_scale = "biweekly"
    time_per_task = {
        "Free": 480 * 9 * 2,
        "Work": 480 * 5 * 2,
        "Sleep": 480 * 7 * 2
    }
    min_task_time = 60
    preferences = {
        "Free": {
            "avoid": [
                "Monday1,09:00AM-Monday1,05:00PM", "Tuesday1,09:00AM-Tuesday1,05:00PM",
                "Wednesday1,09:00AM-Wednesday1,05:00PM", "Thursday1,09:00AM-Thursday1,05:00PM",
                "Friday1,09:00AM-Friday1,05:00PM",
                "Monday2,09:00AM-Monday2,05:00PM", "Tuesday2,09:00AM-Tuesday2,05:00PM",
                "Wednesday2,09:00AM-Wednesday2,05:00PM", "Thursday2,09:00AM-Thursday2,05:00PM",
                "Friday2,09:00AM-Friday2,05:00PM"],
            "inconvenient": [],
            "neutral": [],
            "convenient": [
                "Monday1,06:00PM-Monday1,08:00PM", "Tuesday1,06:00PM-Tuesday1,08:00PM",
                "Wednesday1,06:00PM-Wednesday1,08:00PM", "Thursday1,06:00PM-Thursday1,08:00PM",
                "Friday1,06:00PM-Friday1,08:00PM",
                "Monday2,06:00PM-Monday2,08:00PM", "Tuesday2,06:00PM-Tuesday2,08:00PM",
                "Wednesday2,06:00PM-Wednesday2,08:00PM", "Thursday2,06:00PM-Thursday2,08:00PM",
                "Friday2,06:00PM-Friday2,08:00PM"],
            "preferred": [],
            "required": []
        },
        "Work": {
            "avoid": [],
            "inconvenient": [],
            "neutral": [],
            "convenient": [],
            "preferred": [],
            "required": [
                "Monday1,09:00AM-Monday1,05:00PM", "Tuesday1,09:00AM-Tuesday1,05:00PM",
                "Wednesday1,09:00AM-Wednesday1,05:00PM", "Thursday1,09:00AM-Thursday1,05:00PM",
                "Friday1,09:00AM-Friday1,05:00PM",
                "Monday2,09:00AM-Monday2,05:00PM", "Tuesday2,09:00AM-Tuesday2,05:00PM",
                "Wednesday2,09:00AM-Wednesday2,05:00PM", "Thursday2,09:00AM-Thursday2,05:00PM",
                "Friday2,09:00AM-Friday2,05:00PM"],
        },
        "Sleep": {
            "avoid": [
                "Monday1,09:00AM-Monday1,05:00PM", "Tuesday1,09:00AM-Tuesday1,05:00PM",
                "Wednesday1,09:00AM-Wednesday1,05:00PM", "Thursday1,09:00AM-Thursday1,05:00PM",
                "Friday1,09:00AM-Friday1,05:00PM",
                "Monday2,09:00AM-Monday2,05:00PM", "Tuesday2,09:00AM-Tuesday2,05:00PM",
                "Wednesday2,09:00AM-Wednesday2,05:00PM", "Thursday2,09:00AM-Thursday2,05:00PM",
                "Friday2,09:00AM-Friday2,05:00PM"],
            "inconvenient": [],
            "neutral": [],
            "convenient": [],
            "preferred": [
                "Monday1,10:00PM-Tuesday1,06:00AM", "Tuesday1,10:00PM-Wednesday1,06:00AM",
                "Wednesday1,10:00PM-Thursday1,06:00AM", "Thursday1,10:00PM-Friday1,06:00AM",
                "Friday1,10:00PM-Saturday1,06:00AM", "Saturday1,10:00PM-Sunday1,06:00AM",
                "Sunday1,10:00PM-Monday2,06:00AM",
                "Monday2,10:00PM-Tuesday2,06:00AM", "Tuesday2,10:00PM-Wednesday2,06:00AM",
                "Wednesday2,10:00PM-Thursday2,06:00AM", "Thursday2,10:00PM-Friday2,06:00AM",
                "Friday2,10:00PM-Saturday2,06:00AM", "Saturday2,10:00PM-Sunday2,06:00AM",
                "Sunday2,10:00PM-Monday1,06:00AM"],
            "required": []
        }
    }
    return time_scale, time_per_task, min_task_time, preferences
b5f354a17819133c3c29e7652f6b1132599e89b6
21,519
def plot_bar_whiskers_jitter_significance(data, comparison_columns,
                                          significant_comparison_columns,
                                          heights, ylabel, xlabels=None,
                                          ax_handle=None, median_notch=False,
                                          boxplot_color='black',
                                          boxplot_linewidth=2, markersize=12,
                                          xtick_rotation=90, marker=None,
                                          color=None, alpha=0.2,
                                          whis=[2.5, 97.5]):
    """
    Make a jittered boxplot with significance annotations

    Parameters
    -------------------
    data : A pandas dataframe, where each column corresponds to data to be
        plotted with jitter + boxplot
    comparison_columns : A list of lists, where each element corresponds to a
        pair of columns to compare
    significant_comparison_columns : A list of lists, where each element
        corresponds to a pair of significant column comparisons
    heights : A list of floats, the height of each comparison annotation
    ylabel : A string, the y-axis label
    xlabels : A list of strings, the x-labels
    ax_handle : A matplotlib axis handle, for adding onto an existing plot
    median_notch : A bool, to plot the lower and upper quartiles of the median
    boxplot_color : A string, the boxplot color
    boxplot_linewidth : A float, the boxplot linewidth
    markersize : An int, the marker size
    xtick_rotation : A float, rotation of the x-tick labels in degrees
    marker : A string or a list of strings, the marker of the points
    color : A string or a list of strings, the color of the points
    alpha : A float, transparency
    whis : A list of floats, the quantiles for whiskers

    Returns
    -------------
    fig : A matplotlib figure handle (if ax_handle is None)
    ax : A matplotlib axis handle (if ax_handle is None)
    """
    if ax_handle is None:
        fig, ax = plt.subplots(1, 1, figsize=(5, 5))
    else:
        ax = ax_handle

    make_jitter_plots(data, names=data.columns, ylabel=ylabel, ax_handle=ax,
                      alpha=alpha, markersize=markersize, xlabels=xlabels,
                      marker=marker, color=color)

    bp = data.boxplot(ax=ax, notch=median_notch, grid=False, whis=whis,
                      showfliers=False, return_type='dict')
    for _, line_list in bp.items():
        for line in line_list:
            line.set_color(boxplot_color)
            line.set_linewidth(boxplot_linewidth)

    previous_ymaxes = []
    for i, comparison in enumerate(comparison_columns):
        comp1, comp2 = comparison
        x1 = np.nonzero(data.columns == comp1)[0][0] + 1
        x2 = np.nonzero(data.columns == comp2)[0][0] + 1
        y_max = data.loc[:, [comp1, comp2]].max().values.max()
        previous_ymaxes.append(y_max)
        y, h, col = max(previous_ymaxes) + heights[i], 2, 'k'
        ax.plot([x1, x1, x2, x2], [y, y + h, y + h, y], lw=1.5, c=col)
        if comparison in significant_comparison_columns:
            ax.text((x1 + x2) * .5, y + h, "*", ha='center', va='bottom',
                    color=col, fontsize=20)
        else:
            ax.text((x1 + x2) * .5, y + h, "ns", ha='center', va='bottom',
                    color=col, fontsize=20)

    if xlabels is not None:
        ax.set_xticklabels(xlabels, rotation=xtick_rotation)

    if ax_handle is None:
        return fig, ax
dfdaf95034d3d53fac7c79eb4c7b387f9ac18f5b
21,520
import numpy as np


def _is_trans_valid(seed, mutate_sample):
    """
    Check whether a mutated sample is valid. If the number of changed pixels
    in a seed is less than pixels_change_rate*size(seed), the mutation is
    valid. Otherwise check the infinite norm of the seed changes; if that
    value is less than pixel_value_change_rate*255, the mutation is also
    valid. In all other cases it is invalid.
    """
    is_valid = False
    pixels_change_rate = 0.02
    pixel_value_change_rate = 0.2
    diff = np.array(seed - mutate_sample).flatten()
    size = np.shape(diff)[0]
    l0_norm = np.linalg.norm(diff, ord=0)
    linf = np.linalg.norm(diff, ord=np.inf)
    if l0_norm > pixels_change_rate * size:
        if linf < 256:
            is_valid = True
    else:
        if linf < pixel_value_change_rate * 255:
            is_valid = True
    return is_valid
118dc0e566fc4f5c481d21f8c8aec7fe4f1ece29
21,521
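A small sketch exercising `_is_trans_valid` above on a toy 8x8 "image"; the thresholds (2% of pixels, 0.2 * 255 = 51) come from the function itself, and the arrays are made up:

import numpy as np

seed = np.zeros((8, 8))          # 64-"pixel" toy image

small_change = seed.copy()
small_change[0, 0] = 40          # 1 pixel changed, |delta| = 40 < 51
assert _is_trans_valid(seed, small_change)

large_change = seed.copy()
large_change[0, 0] = 200         # 1 pixel changed, but |delta| = 200 > 51
assert not _is_trans_valid(seed, large_change)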
def split_axis(x, indices_or_sections, axis):
    """Splits given variables along an axis.

    Args:
        x (tuple of Variables): Variables to be split.
        indices_or_sections (int or 1-D array): If this argument is an
            integer, N, the array will be divided into N equal arrays along
            axis. If it is a 1-D array of sorted integers, it indicates the
            positions where the array is split.
        axis (int): Axis that the input array is split along.

    Returns:
        ``tuple`` or ``Variable``: Tuple of :class:`~chainer.Variable`
        objects if the number of outputs is more than 1 or
        :class:`~chainer.Variable` otherwise.

    .. note::
        This function raises ``ValueError`` if at least one of the outputs
        is split to zero-size (i.e. `axis`-th value of its shape is zero).
    """
    return SplitAxis(indices_or_sections, axis)(x)
245841aaef14ea130b20254775152a9199d63c41
21,522
from subprocess import check_output  # needed for the git call below


def status(**kwargs):
    """Execute \"git status\" on the repository."""
    status = check_output(["git", "status"]).decode("utf-8")
    repo_clean = True
    for keyword in ["ahead", "modified", "untracked"]:
        if keyword in status:
            repo_clean = False
    return {"clean": repo_clean, "status": status}
8e81264579628407e8560a6d89be884179636ea9
21,523
import multiprocessing as mp
import os

import pandas as pd
from tqdm import tqdm  # the callable, not the module


def preprocessing_raw_csv(PATH=".//tcdata//hy_round2_train_20200225//",
                          local_file_name="train.pkl"):
    """Loading and processing all train csv data."""
    if PATH is None:
        raise ValueError("Invalid PATH !")
    file_names = sorted(os.listdir(PATH), key=lambda s: int(s.split(".")[0]))

    # Loading all trajectory data.
    traj_data = []
    for name in file_names:
        traj_data.append(pd.read_csv(PATH + name, encoding="utf-8"))

    # Processing each trajectory data.
    print("\n@Multi-processing RAW CSV started:")
    print("-----------------------------")
    with mp.Pool(processes=mp.cpu_count()) as p:
        tmp = list(tqdm(p.imap(preprocessing_traj, traj_data),
                        total=len(traj_data)))
    print("-----------------------------")
    print("@Multi-processing RAW CSV ended, to the local file: {}.\n".format(
        local_file_name))

    traj_data = [item[0] for item in tmp]
    change_record = [item[1] for item in tmp]
    change_record = pd.DataFrame(change_record,
                                 columns=["speed_change", "coord_change"])

    # Saving processed data to the local path with *.pkl format
    file_processor = LoadSave(PATH)
    file_processor.save_data(path=".//tcdata_tmp//{}".format(local_file_name),
                             data=traj_data)
    return change_record
774e3865bac16458439fe597d4a5a5a745957a9c
21,524
from collections import Counter  # the concrete class, rather than typing.Counter

import pandas as pd


def find_listener_frequent_words(df, num):
    """
    Given a conversation dataframe from a certain subreddit, find the top
    frequent words spoken by listeners.

    Args:
        df: A specified dataframe from a subreddit.
        num: A ranking number used for finding the top frequent words.

    Return:
        result: A dataframe showing the top frequent words.
    """
    # extract listeners' turns
    df_listener = df[df['dialog turn'] != 1]
    # compute tokens
    df_listener_filtered = compute_tokens(df_listener)
    # find top (num) frequent words
    result = pd.DataFrame(Counter(df_listener_filtered.sum()).most_common(num),
                          columns=["word", "count"])
    return result
80bc4b95e5713429751ff1725afce4ac02f0bddd
21,525
def is_rescue_entry(boot_entry):
    """
    Determines whether the given boot entry is rescue.

    :param BootEntry boot_entry: Boot entry to assess
    :return: True if the entry is rescue
    :rtype: bool
    """
    return 'rescue' in boot_entry.kernel_image.lower()
ba456c2724c3ad4e35bef110ed8c4cc08147b42c
21,526
import base64

import numpy as np
import tensorflow as tf


def estimate_cost(features, ssd):
    """Generate a TensorFlow subgraph to estimate the cost of an architecture.

    Args:
        features: A 1D float tensor containing features for a single network
            architecture.
        ssd: The name of the search space definition to use for the cost
            model.

    Returns:
        A scalar float tensor containing the estimated cost for the
        specified network architecture
    """
    kernel_data = cost_model_data.KERNEL_DATA[ssd]
    kernel_data = base64.decodebytes(kernel_data)
    kernel = np.frombuffer(kernel_data, cost_model_data.SERIALIZATION_DTYPE)
    kernel = kernel.reshape([-1, 1]).astype(np.float32)

    bias_data = cost_model_data.BIAS_DATA[ssd]
    bias_data = base64.decodebytes(bias_data)
    bias = np.frombuffer(bias_data, cost_model_data.SERIALIZATION_DTYPE)
    bias = bias.reshape([1]).astype(np.float32)

    with tf.name_scope('estimate_cost'):
        batch_features = tf.expand_dims(features, axis=0)
        batch_prediction = tf.linalg.matmul(batch_features, kernel)
        batch_prediction = tf.nn.bias_add(batch_prediction, bias)
        return tf.squeeze(batch_prediction, axis=[0, 1])
2c85cc5d320cd214dae260793a1c779d8019177c
21,527
def short_bubble(l, debug=True):
    """
    If the whole list is already in ascending order, a plain bubble sort
    would still run through every outer loop. In fact, if no swap happens in
    a given iteration (the inner loop), the sort can stop early.
    :param l:
    :param debug:
    :return:
    """
    swapped = True
    i = 0
    while i < len(l) and swapped:
        swapped = False
        for j in range(0, len(l) - i - 1):
            if l[j] > l[j + 1]:
                l[j], l[j + 1] = l[j + 1], l[j]
                swapped = True
        if debug:
            print('iteration {}'.format(i), l)
        i += 1
    return l
d79ec7311e8a08267ddcbe82d08a85b35b9d4a6f
21,528
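A quick usage sketch for `short_bubble` above; on an already-sorted input the early exit kicks in after a single pass:

data = [5, 1, 4, 2, 8]
print(short_bubble(data, debug=False))         # -> [1, 2, 4, 5, 8]

# No swaps happen on a sorted list, so the loop stops after one pass.
print(short_bubble([1, 2, 3], debug=False))    # -> [1, 2, 3]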
import yaml


def IsResourceLike(item):
    """Return True if item is a dict like object or list of dict like
    objects."""
    return yaml.dict_like(item) or (yaml.list_like(item) and
                                    all(yaml.dict_like(x) for x in item))
bc6ae6c4d84949511c679116454343731e8d8bd2
21,529
import math


def rad_to_gon(angle: float) -> float:
    """Converts from radiant to gon (grad).

    Args:
        angle: Angle in rad.

    Returns:
        Converted angle in gon.
    """
    return angle * 200 / math.pi
cbf7070a9c3a9796dfe4bffe39fdf2421f7279ed
21,530
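A worked check for `rad_to_gon` above, using the fact that pi rad = 180 deg = 200 gon:

import math

assert math.isclose(rad_to_gon(math.pi), 200.0)      # a straight angle is 200 gon
assert math.isclose(rad_to_gon(math.pi / 2), 100.0)  # a right angle is 100 gon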
def check_interface_status(conn_obj, interface, state, device="dut"):
    """
    API to check the interface state
    Author: Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)
    :param conn_obj:
    :param interface:
    :param state:
    :param device:
    :return:
    """
    interface_state = get_interface_status(conn_obj, interface, device=device)
    if interface_state != state:
        return False
    return True
484c1a8dcf96a3160791a63979d47096e5e51fbc
21,531
def hungarian_match(self, y_true, y_pred):
    """Matches predicted labels to original using hungarian algorithm."""
    y_true = self.adjust_range(y_true)
    y_pred = self.adjust_range(y_pred)

    D = max(y_pred.max(), y_true.max()) + 1
    w = np.zeros((D, D), dtype=np.int64)
    # Confusion matrix.
    for i in range(y_pred.size):
        w[y_pred[i], y_true[i]] += 1
    ind = linear_assignment(-w)
    d = {i: j for i, j in ind}
    y_pred = np.array([d[v] for v in y_pred])
    return y_true, y_pred
29a9976edcfa4a935d451f6471641b3836343d83
21,532
import torch


def orthantree(scaled, capacity=8):
    """Constructs a :ref:`tree <presolve>` for the given
    :func:`~pybbfmm.scale`'d problem.

    This is a bit of a mess of a function, but long story short it starts
    with all the sources allocated to the root and repeatedly subdivides
    overfull boxes, constructing the various tree tensors as it goes.

    :param scaled: :func:`~pybbfmm.scale`'d problem.
    :param capacity: the max number of sources or targets per box.
    :return: A :ref:`tree <presolve>`.
    """
    D = scaled.sources.shape[1]

    points = torch.cat([scaled.sources, scaled.targets])
    indices = points.new_zeros((len(points),), dtype=torch.long)

    tree = arrdict.arrdict(
        parents=indices.new_full((1,), -1),
        depths=indices.new_zeros((1,)),
        centers=points.new_zeros((1, D)),
        terminal=indices.new_ones((1,), dtype=torch.bool),
        children=indices.new_full((1,) + (2,)*D, -1),
        descent=indices.new_zeros((1, D)))

    bases = 2**torch.flip(torch.arange(D, device=indices.device), (0,))
    subscript_offsets = sets.cartesian_product(
        torch.tensor([0, 1], device=indices.device), D)
    center_offsets = sets.cartesian_product(
        torch.tensor([-1, +1], device=indices.device), D)

    depthcounts = [torch.as_tensor([1], device=indices.device)]

    depth = 0
    while True:
        used, used_inv = torch.unique(indices, return_inverse=True)
        source_idxs = indices[:len(scaled.sources)]
        target_idxs = indices[-len(scaled.targets):]
        tree.terminal[used] = underoccupied(
            source_idxs, target_idxs, tree.terminal, capacity)[used]

        used_is_active = ~tree.terminal[used]
        point_is_active = used_is_active[used_inv]
        if not point_is_active.any():
            break

        depth += 1

        active = used[used_is_active]
        active_inv = (used_is_active.cumsum(0) -
                      used_is_active.long())[used_inv[point_is_active]]
        first_child = len(tree.parents) + \
            2**D*torch.arange(len(active), device=active.device)
        point_offset = ((points[point_is_active] >=
                         tree.centers[active][active_inv])*bases).sum(-1)
        child_box = first_child[active_inv] + point_offset
        indices[point_is_active] = child_box

        trailing_ones = (slice(None),) + (None,)*D
        tree.children[active] = first_child[trailing_ones] + \
            (subscript_offsets*bases).sum(-1)

        centers = tree.centers[active][trailing_ones] + \
            center_offsets.float()/2**depth
        descent = center_offsets[None].expand_as(centers)

        n_children = len(active)*2**D
        children = arrdict.arrdict(
            parents=active.repeat_interleave(2**D),
            depths=tree.depths.new_full((n_children,), depth),
            centers=centers.reshape(-1, D),
            descent=descent.reshape(-1, D),
            terminal=tree.terminal.new_ones((n_children,)),
            children=tree.children.new_full((n_children,) + (2,)*D, -1))
        tree = arrdict.cat([tree, children])

        depthcounts.append(n_children)

    tree['id'] = torch.arange(len(tree.parents), device=points.device)

    indices = arrdict.arrdict(
        sources=indices[:len(scaled.sources)],
        targets=indices[-len(scaled.targets):])

    depths = ragged.Ragged(
        torch.arange(len(tree.id), device=points.device),
        torch.as_tensor(depthcounts, device=points.device))

    return tree, indices, depths
9813697be3b19a2d7e3b71f28b1212c91a590fd3
21,533
def make_sparse(
        docs_to_fit,
        min_df=50,
        stop_words=None,
        docs_to_transform=None,
        ngram_range=None,
):
    """
    Take a pre-tokenized document and turn into a sparse matrix.

    :param docs_to_fit: A list of lists of tokenized words to build the
        vocabulary from.
    :param min_df: Number of records that a word should appear in to be
        stored as a feature.
    :param stop_words: List of words to exclude, if any.
    :param docs_to_transform: A list of lists of tokenized words to
        transform. If none, we transform the first argument.
    :return:
    """
    cv = CountVectorizer(
        tokenizer=no_tokenization,
        preprocessor=None,
        ngram_range=ngram_range,
        stop_words=stop_words,
        lowercase=False,
        min_df=min_df
    )
    if docs_to_transform is None:
        return cv, cv.fit_transform(docs_to_fit)
    elif docs_to_transform is not None:
        cv.fit(docs_to_fit)
        return cv, cv.transform(docs_to_transform)
467d04f465ed4c19b4e20aa69c05508f6faafdc6
21,534
import random


def weightedPriorityReliabilityScore(service_instances, last_records):
    """Algorithm to find highest priority of the service based on
    reliability score achieved in past discovery results"""
    priority_list = []
    for i in range(0, len(service_instances)):
        single_instance = {}
        single_instance['ip'] = service_instances[i][1]
        single_instance['port'] = service_instances[i][2]
        score = 0.0
        discovery_instances = sharkradarDbutils.getLatestRecordsDiscoveryLogs(
            service_instances[i][0],
            service_instances[i][1],
            service_instances[i][2],
            last_records)
        len_discovery = len(discovery_instances)
        # use a separate loop variable so the outer index is not shadowed
        for j in range(0, len_discovery):
            if discovery_instances[j][0] == "FAIL":
                score = score + ((-1.0) * (len_discovery - j))
            if discovery_instances[j][0] == "SUCCESS":
                score = score + ((1.0) * (len_discovery - j))
        single_instance['score'] = score
        priority_list.append(single_instance)
    priority_list.sort(key=lambda x: x['score'], reverse=True)
    res = priority_list[0]
    res_list = list(
        filter(
            lambda x: x['score'] == res['score'],
            priority_list))
    res = random.choice(res_list)
    return str(res['ip']), str(res['port'])
e215ae3e4009de7e8e6e8a8a0b66a66238e30f16
21,535
def calculate_output(param_dict, select_device, input_example):
    """Calculate the output of the imported graph given the input.

    Load the graph def from graph file on selected device, then get the
    tensors based on the input and output name from the graph, then feed the
    input_example to the graph and retrieve the output vector.

    Args:
        param_dict: The dictionary contains all the user-input data in the
            json file.
        select_device: "NGRAPH" or "CPU".
        input_example: A map with key is the name of the input tensor, and
            value is the random generated example

    Returns:
        The output vector obtained from running the input_example through
        the graph.
    """
    graph_filename = param_dict["graph_location"]
    output_tensor_name = param_dict["output_tensor_name"]

    if not tf.gfile.Exists(graph_filename):
        raise Exception("Input graph file '" + graph_filename +
                        "' does not exist!")

    graph_def = tf.GraphDef()
    if graph_filename.endswith("pbtxt"):
        with open(graph_filename, "r") as f:
            text_format.Merge(f.read(), graph_def)
    else:
        with open(graph_filename, "rb") as f:
            graph_def.ParseFromString(f.read())

    set_os_env(select_device)

    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def)
        if len(output_tensor_name) == 0:
            # if no outputs are specified, then compare for all tensors
            output_tensor_name = sum(
                [[j.name for j in i.outputs] for i in graph.get_operations()],
                [])
        # Create the tensor to its corresponding example map
        tensor_to_example_map = {}
        for item in input_example:
            t = graph.get_tensor_by_name(item)
            tensor_to_example_map[t] = input_example[item]
        # input_placeholder = graph.get_tensor_by_name(input_tensor_name)
        output_tensor = [graph.get_tensor_by_name(i)
                         for i in output_tensor_name]

        config = tf.ConfigProto(
            allow_soft_placement=True,
            # log_device_placement=True,
            inter_op_parallelism_threads=1)

        with tf.Session(graph=graph, config=config) as sess:
            output_tensor = sess.run(output_tensor,
                                     feed_dict=tensor_to_example_map)
        return output_tensor, output_tensor_name
e98bf63743d7f940170ca7ab4dcd97b751be178f
21,536
def is_instance_failed_alarm(alarms, instance, guest_hb=False):
    """Check if an instance failed alarm has been raised"""
    expected_alarm = {'alarm_id': fm_constants.FM_ALARM_ID_VM_FAILED,
                      'severity': fm_constants.FM_ALARM_SEVERITY_CRITICAL}
    return _instance_alarm_raised(alarms, expected_alarm, instance)
5c886bea0b72d52392ed38217af20b7ebc87bd91
21,537
import os

from modules.plumedwrapper import io as plumedio


def load_metad_fes(fes_dir_path):
    """Load all the PLUMED free energy profiles saved by sumhills."""
    # Read the FES in time.
    fes_file_prefix_path = os.path.join(fes_dir_path, 'fes_')
    fes_time = plumedio.read_table(fes_file_prefix_path + 'time.dat')
    all_metad_fes = []
    for i in range(len(fes_time['time'])):
        all_metad_fes.append(
            plumedio.read_table(fes_file_prefix_path + str(i) + '.dat'))
    return all_metad_fes, fes_time
29e341193670e1ff82e33d1ab818f7df8a4d9042
21,538
def detected(numbers, mode):
    """
    Returns a Boolean result indicating whether the last member in a numeric
    array is the max or min, depending on the setting.

    Arguments
    - numbers: an array of numbers
    - mode: 'max' or 'min'
    """
    call_dict = {'min': min, 'max': max}
    if mode not in call_dict.keys():
        print('Must specify either max or min')
        return
    return numbers[-1] == call_dict[mode](numbers)
b0a5b19e7d97db99769f28c4b8ce998dbe318c5b
21,539
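A short usage sketch for `detected` above; the inputs are made up:

assert detected([3, 1, 7], 'max') is True    # 7 is the maximum
assert detected([3, 1, 7], 'min') is False   # 7 is not the minimum
detected([3, 1, 7], 'median')                # prints a warning, returns None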
import math


def calculate_compass_bearing(point_a, point_b):
    """
    Calculates the bearing between two points.

    The formula used is the following:
        θ = atan2(sin(Δlong).cos(lat2),
                  cos(lat1).sin(lat2) − sin(lat1).cos(lat2).cos(Δlong))

    :Parameters:
      - `point_a: The tuple representing the latitude/longitude for the
        first point. Latitude and longitude must be in decimal degrees
      - `point_b: The tuple representing the latitude/longitude for the
        second point. Latitude and longitude must be in decimal degrees

    :Returns:
      The bearing in degrees

    :Returns Type:
      float
    """
    # LICENSE: public domain from https://gist.github.com/jeromer/2005586
    if (type(point_a) != tuple) or (type(point_b) != tuple):
        raise TypeError("Only tuples are supported as arguments")

    lat1 = math.radians(point_a[0])
    lat2 = math.radians(point_b[0])

    diff_long = math.radians(point_b[1] - point_a[1])

    x = math.sin(diff_long) * math.cos(lat2)
    y = math.cos(lat1) * math.sin(lat2) - (math.sin(lat1) * math.cos(lat2)
                                           * math.cos(diff_long))

    initial_bearing = math.atan2(x, y)

    # Now we have the initial bearing but math.atan2 returns values
    # from -180° to +180°, which is not what we want for a compass bearing.
    # The solution is to normalize the initial bearing as shown below.
    initial_bearing = math.degrees(initial_bearing)
    compass_bearing = (initial_bearing + 360) % 360

    return compass_bearing
535fc0cfc086974b1e329df297bdbea4aab1f127
21,540
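A usage sketch for `calculate_compass_bearing` above; the coordinates are rough Paris and Berlin values, so the result is approximate:

# Paris (48.8566N, 2.3522E) to Berlin (52.52N, 13.405E)
bearing = calculate_compass_bearing((48.8566, 2.3522), (52.52, 13.405))
print(round(bearing, 1))   # roughly 58, i.e. east-north-east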
import re


def parse_instructions(instruction_list):
    """Parses the instruction strings into a list of dictionaries"""
    instruction_dict = []
    for instruction in instruction_list:
        regex_match = re.match(r"(?P<direction>\w)(?P<value>\d*)", instruction)
        if regex_match:
            instruction_dict.append(regex_match.groupdict())
    return instruction_dict
67b773bae0cb2cc0509503f2ea27f3312ce9d41c
21,541
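A quick sketch of what `parse_instructions` above produces; the instruction strings are made up, and note that the captured values stay strings:

moves = parse_instructions(["N3", "E12", "S1"])
# -> [{'direction': 'N', 'value': '3'},
#     {'direction': 'E', 'value': '12'},
#     {'direction': 'S', 'value': '1'}]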
import glob
import os

import pandas as pd  # needed for read_csv/concat below


def calc_elapsed_sleep(in_num, hyp_file, fpath, savedir, export=True):
    """Calculate minutes of elapsed sleep from a hypnogram file & concatenate
    stage 2 sleep files

    Parameters
    ----------
    in_num: str
        patient identifier
    hyp_file: str (format: *.txt)
        file with hypnogram at 30-second intervals
    fpath: str
        path to EEG files cut by sleep stage
    savedir: str
        path to save EEG files cut by hrs elapsed sleep
    export: bool (default: True)
        whether to export blocked dataframes

    Returns
    -------
    .csv files with EEG data blocked in two-hour chunks (according to
    Purcell et al. 2017) OR pd.dataframes blocked in two-hour chunks
    (according to Purcell et al. 2017)
    """
    # calculate elapsed sleep for each 30-second time interval
    print('Loading hypnogram...')
    sleep_scores = [1, 2, 3, 4, 5]  # exclude 0 and 6 for awake and record break
    hyp = pd.read_csv(hyp_file, header=None, index_col=[0], sep='\t',
                      names=['time', 'score'], parse_dates=True)
    mins_elapsed = hyp.score.isin(sleep_scores).cumsum() / 2

    # get a list of all matching files
    glob_match = f'{fpath}/{in_num}*_s2_*'
    files = glob.glob(glob_match)

    # make list of dfs for concat
    print('Reading data...')
    data = [pd.read_csv(file, header=[0, 1], index_col=0, parse_dates=True)
            for file in files]
    # add NaN to the end of each df
    data_blocked = [df.append(pd.Series(
        name=df.iloc[-1].name + pd.Timedelta(milliseconds=1)))
        for df in data]

    # concatenate the dfs
    print('Concatenating data...')
    s2_df = pd.concat(data_blocked).sort_index()

    # assign indices to hours elapsed sleep
    print('Assigning minutes elapsed...')
    idx0_2 = mins_elapsed[mins_elapsed.between(0, 120)].index
    idx2_4 = mins_elapsed[mins_elapsed.between(120.5, 240)].index
    idx4_6 = mins_elapsed[mins_elapsed.between(240.5, 360)].index
    idx6_8 = mins_elapsed[mins_elapsed.between(360.5, 480)].index

    # cut dataframe into blocks by elapsed sleep (0-2, 2-4, 4-6, 6-8)
    df_two = s2_df[(s2_df.index > idx0_2[0]) & (s2_df.index < idx0_2[-1])]
    df_four = s2_df[(s2_df.index > idx2_4[0]) & (s2_df.index < idx2_4[-1])]
    df_six = s2_df[(s2_df.index > idx4_6[0]) & (s2_df.index < idx4_6[-1])]
    df_eight = s2_df[(s2_df.index > idx6_8[0]) & (s2_df.index < idx6_8[-1])]

    if export:
        # export blocked data
        if not os.path.exists(savedir):
            print(savedir + ' does not exist. Creating directory...')
            os.makedirs(savedir)
        print('Saving files...')
        for df, hrs in zip([df_two, df_four, df_six, df_eight],
                           ['0-2hrs', '2-4hrs', '4-6hrs', '6-8hrs']):
            date = df.index[0].strftime('%Y-%m-%d')
            savename = in_num + '_' + date + '_s2_' + hrs + '.csv'
            df.to_csv(os.path.join(savedir, savename))
        print(f'Files saved to {savedir}')
    else:
        # fix: the original `return df_two df_four df_six df_eight` was a
        # syntax error; return the four blocks as a tuple instead
        return df_two, df_four, df_six, df_eight
    print('Done')
61390b205c7dcbd65884e8f073f1b1395f1d1ca2
21,542
def valid_pairs(pairs, chain):
    """Determine if the chain contains any invalid pairs (e.g. ETH_XMR)"""
    for primary, secondary in zip(chain[:-1], chain[1:]):
        if not (primary, secondary) in pairs and \
                not (secondary, primary) in pairs:
            return False
    return True
c9e36d0490893e1b1a6cd8c3fb0b14b382d69515
21,543
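A toy sketch for `valid_pairs` above; the pair set and chains are made up:

pairs = {("BTC", "ETH"), ("ETH", "LTC")}
assert valid_pairs(pairs, ["BTC", "ETH", "LTC"])   # every hop is tradable
assert not valid_pairs(pairs, ["BTC", "LTC"])      # BTC_LTC is not a pair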
import json
import os


def setup(sub_args, ifiles, repo_path, output_path):
    """Setup the pipeline for execution and creates config file from templates

    @param sub_args <parser.parse_args() object>:
        Parsed arguments for run sub-command
    @param repo_path <str>:
        Path to installation or source code and its templates
    @param output_path <str>:
        Pipeline output path, created if it does not exist
    @return config <dict>:
        Config dictionary containing metadata to run the pipeline
    """
    # Check for mixed inputs,
    # inputs which are a mixture
    # of FastQ and BAM files
    mixed_inputs(ifiles)

    # Resolves PATH to reference file
    # template or a user generated
    # reference genome built via build
    # subcommand
    genome_config = os.path.join(repo_path, 'config', 'genome.json')
    # if sub_args.genome.endswith('.json'):
    #     # Provided a custom reference genome generated by build pipline
    #     genome_config = os.path.abspath(sub_args.genome)

    required = {
        # Base configuration file
        "base": os.path.join(repo_path, 'config', 'config.json'),
        # Template for project-level information
        "project": os.path.join(repo_path, 'config', 'containers.json'),
        # Template for genomic reference files
        # User provided argument --genome is used to select the template
        "genome": genome_config,
        # Template for tool information
        "tools": os.path.join(repo_path, 'config', 'modules.json'),
    }

    # Create the global or master config
    # file for pipeline, config.json
    config = join_jsons(required.values())  # uses templates in config/*.json
    config['project'] = {}
    config = add_user_information(config)
    config = add_rawdata_information(sub_args, config, ifiles)

    # Resolves if an image needs to be pulled
    # from an OCI registry or a local SIF exists
    config = image_cache(sub_args, config, repo_path)

    # Add other runtime info for debugging
    config['project']['version'] = __version__
    config['project']['workpath'] = os.path.abspath(sub_args.output)
    git_hash = git_commit_hash(repo_path)
    config['project']['git_commit_hash'] = git_hash  # Add latest git commit hash
    config['project']['pipeline_path'] = repo_path   # Add path to installation

    # Add all cli options for data provenance
    for opt, v in vars(sub_args).items():
        if opt == 'func':
            # Pass over sub command's handler
            continue
        elif not isinstance(v, (list, dict)):
            # CLI value can be converted to a string
            v = str(v)
        config['options'][opt] = v

    # Save config to output directory
    with open(os.path.join(output_path, 'config.json'), 'w') as fh:
        json.dump(config, fh, indent=4, sort_keys=True)

    return config
30a45285d09ba2d7e919c918fa2e67f67c3b57e6
21,544
from typing import Any


def fqname_for(obj: Any) -> str:
    """
    Returns the fully qualified name of ``obj``.

    Parameters
    ----------
    obj
        The class we are interested in.

    Returns
    -------
    str
        The fully qualified name of ``obj``.
    """
    if "<locals>" in obj.__qualname__:
        raise RuntimeError(
            "Can't get fully qualified name of locally defined object. "
            f"{obj.__qualname__}"
        )
    return f"{obj.__module__}.{obj.__qualname__}"
6d4e5db255715c999d1bb40533f3dbe03b948b07
21,545
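A self-contained check of `fqname_for` above against a stdlib class:

from collections import OrderedDict

assert fqname_for(OrderedDict) == "collections.OrderedDict"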
import io
import os
import time
import zipfile


def analyzer_zipfile(platform, monitor):
    """Creates the Zip file that is sent to the Guest."""
    t = time.time()

    zip_data = io.BytesIO()
    zip_file = zipfile.ZipFile(zip_data, "w", zipfile.ZIP_STORED)

    # Select the proper analyzer's folder according to the operating
    # system associated with the current machine.
    root = cwd("analyzer", platform)
    root_len = len(os.path.abspath(root))

    if not os.path.exists(root):
        log.error("No valid analyzer found at path: %s", root)
        raise CuckooGuestError(
            "No valid analyzer found for %s platform!" % platform
        )

    # Walk through everything inside the analyzer's folder and write
    # them to the zip archive.
    for root, dirs, files in os.walk(root):
        archive_root = os.path.abspath(root)[root_len:]
        for name in files:
            path = os.path.join(root, name)
            archive_name = os.path.join(archive_root, name)
            zip_file.write(path, archive_name)

    # Include the chosen monitoring component and any additional files.
    if platform == "windows":
        dirpath = cwd("monitor", monitor)

        # Generally speaking we should no longer be getting symbolic links
        # for "latest" anymore, so in the case of a file; follow it.
        if os.path.isfile(dirpath):
            monitor = os.path.basename(open(dirpath, "rb").read().strip())
            dirpath = cwd("monitor", monitor)

        for name in os.listdir(dirpath):
            zip_file.write(
                os.path.join(dirpath, name), os.path.join("bin", name)
            )

        # Dump compiled "dumpmem" Yara rules for zer0m0n usage.
        zip_file.write(cwd("stuff", "dumpmem.yarac"), "bin/rules.yarac")

    zip_file.close()
    data = zip_data.getvalue()

    if time.time() - t > 10:
        log.warning(
            "It took more than 10 seconds to build the Analyzer Zip for the "
            "Guest. This might be a serious performance penalty. Is your "
            "analyzer/windows/ directory bloated with unnecessary files?"
        )

    return data
4c302ce81f1a3a18bfda9681e1761e246a6de42a
21,546
def symbol_size(values):
    """Rescale given values to reasonable symbol sizes in the plot."""
    max_size = 50.0
    min_size = 5.0
    # Rescale max.
    slope = (max_size - min_size) / (values.max() - values.min())
    return slope * (values - values.max()) + max_size
a33f77ee8eeff8d0e63035c5c408a0788b661886
21,547
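A worked example for `symbol_size` above: with values [1, 2, 5], the slope is 45/4 = 11.25, so the minimum maps to 5.0 and the maximum to 50.0:

import numpy as np

values = np.array([1.0, 2.0, 5.0])
print(symbol_size(values))   # -> [ 5.    16.25  50.  ]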
from datetime import datetime


def delete(id):
    """Soft delete a patient."""
    check_patient_permission(id)
    patient = Patient.query.get(id)
    # fix: with `from datetime import datetime`, the call is datetime.now(),
    # not datetime.datetime.now()
    patient.deleted = datetime.now()
    patient.deleted_by = current_user
    db.session.commit()
    return redirect(url_for('screener.index'))
0e9c984bb8bf8429c662f1af14945089789b8bc8
21,548
import os
import tempfile


def system_temp_dir():
    """Return the global temp directory for the current user."""
    temp_dir = os.getenv('SCANCODE_TMP')
    if not temp_dir:
        sc = text.python_safe_name('scancode_' + system.username)
        temp_dir = os.path.join(tempfile.gettempdir(), sc)
    create_dir(temp_dir)
    return temp_dir
14c82a59d049068042d10845f63c61f53879259b
21,549
import os
from typing import Callable, List, Union

import click


def _pipeline_network_multiple_database(database: List[str],
                                        kernel_method: Callable,
                                        filter_network_omic: Union[List, str]
                                        ) -> Union[Matrix, str]:
    """Process network for a multiple database."""
    network = None
    db_norm = frozenset([db.lower().replace(' ', '_') for db in database])

    if db_norm in list(PATHME_MAPPING.keys()):
        db_norm = PATHME_MAPPING[db_norm]
        kernels_db_path = os.path.join(DEFAULT_DIFFUPATH_DIR, 'kernels', 'pathme')
        kernels_files_list = get_or_create_dir(kernels_db_path)

        for kernel in kernels_files_list:
            if db_norm in kernel or db_norm == kernel:
                network = os.path.join(DEFAULT_DIFFUPATH_DIR, 'kernels',
                                       'by_db', f'{db_norm}.pickle')
                break

        if not network:
            network = os.path.join(DEFAULT_DIFFUPATH_DIR, 'kernels', 'by_db',
                                   f'{db_norm}.pickle')
            GoogleDriveDownloader.download_file_from_google_drive(
                file_id=DATABASE_LINKS[db_norm],
                dest_path=network,
                unzip=True)
    else:
        intersecc_db = db_norm.intersection(PATHME_DB)
        intersecc_db_str = ''
        for db_name in intersecc_db:
            intersecc_db_str += f'_{db_name}'

        if intersecc_db:
            kernels_db_path = os.path.join(DEFAULT_DIFFUPATH_DIR, 'kernels', 'by_db')
            kernels_files_list = get_or_create_dir(kernels_db_path)

            for kernel_file in kernels_files_list:
                if intersecc_db_str == kernel_file:
                    network = os.path.join(DEFAULT_DIFFUPATH_DIR, 'kernels',
                                           'by_db',
                                           f'{intersecc_db_str}.pickle')
                    break

            if not network:
                graph_db_path = os.path.join(DEFAULT_DIFFUPATH_DIR, 'graphs', 'by_db')
                graphs_files_list = get_or_create_dir(graph_db_path)

                if graphs_files_list:
                    for graph_file in graphs_files_list:
                        if f'{intersecc_db_str}.pickle' == graph_file:
                            network = os.path.join(DEFAULT_DIFFUPATH_DIR,
                                                   'graphs', 'by_db',
                                                   f'{intersecc_db_str}.pickle')
                            break

                if not network:
                    graph = process_graph_from_file(GRAPH_PATH)
                    network = get_subgraph_by_annotation_value(graph,
                                                               'database',
                                                               intersecc_db)
                    to_pickle(network,
                              os.path.join(DEFAULT_DIFFUPATH_DIR, 'graphs',
                                           'by_db',
                                           f'{intersecc_db_str}.pickle'))

                if not filter_network_omic:
                    click.secho(f'{EMOJI}Generating kernel from {GRAPH_PATH} {EMOJI}')
                    network = get_kernel_from_graph(network, kernel_method)
                    click.secho(f'{EMOJI}Kernel generated {EMOJI}')
                    to_pickle(network,
                              os.path.join(DEFAULT_DIFFUPATH_DIR, 'kernels',
                                           'by_db', f'{db_norm}.pickle'))
        else:
            raise ValueError(
                'Subgraph filtering by database only supported for PathMe '
                'network (KEGG, Reactome and Wikipathways).')

    return network
60f454aee74f5d2779d3a54fdcfb119637c73919
21,550
import tensorflow as tf
import torch


def _to_tensor(args, data):
    """Change data to tensor."""
    if vega.is_torch_backend():
        data = torch.tensor(data)
        if args.device == "GPU":
            return data.cuda()
        else:
            return data
    elif vega.is_tf_backend():
        data = tf.convert_to_tensor(data)
        return data
77d87982c81232bac4581eb5269629c268a7fe16
21,551
def materialize_jupyter_deployment(
        config: ClusterConfig,
        uuid: str,
        definition: DeploymentDefinition) -> JupyterDeploymentImpl:  # noqa
    """Materializes the Jupyter deployment definition.

    :param config: Cluster to materialize the Jupyter deployment with.
    :param uuid: Unique deployment id.
    :param definition: Deployment definition to materialize.
    """
    jupyter_deployment = deserialize_jupyter_deployment_impl(
        config=config,
        uuid=uuid,
        serialized=definition.value)
    return jupyter_deployment
d4a12efd7d4f55d5261734cf3eb0dd3b230c363d
21,552
def _CreateLSTMPruneVariables(lstm_obj, input_depth, h_depth):
    """Function to create additional variables for pruning."""
    mask = lstm_obj.add_variable(
        name="mask",
        shape=[input_depth + h_depth, 4 * h_depth],
        initializer=tf.ones_initializer(),
        trainable=False,
        dtype=lstm_obj.dtype)
    threshold = lstm_obj.add_variable(
        name="threshold",
        shape=[],
        initializer=tf.zeros_initializer(),
        trainable=False,
        dtype=lstm_obj.dtype)
    # Add old_weights, old_old_weights, gradient for gradient
    # based pruning.
    old_weight = lstm_obj.add_variable(
        name="old_weight",
        shape=[input_depth + h_depth, 4 * h_depth],
        initializer=tf.zeros_initializer(),
        trainable=False,
        dtype=lstm_obj.dtype)
    old_old_weight = lstm_obj.add_variable(
        name="old_old_weight",
        shape=[input_depth + h_depth, 4 * h_depth],
        initializer=tf.zeros_initializer(),
        trainable=False,
        dtype=lstm_obj.dtype)
    gradient = lstm_obj.add_variable(
        name="gradient",
        shape=[input_depth + h_depth, 4 * h_depth],
        initializer=tf.zeros_initializer(),
        trainable=False,
        dtype=lstm_obj.dtype)

    return mask, threshold, old_weight, old_old_weight, gradient
398dd89a9b8251f11aef3ba19523e26861ff5874
21,553
import numpy as np


def get_index_fredkin_gate(N, padding=0):
    """Get parameters for log2(N) Fredkin gates

    Args:
        - N (int): dimension of states
        - padding (int, optional): Defaults to 0.

    Returns:
        - list of int: params for the second and third Fredkin gates
    """
    indices = []
    for i in range(0, int(np.log2(N))):
        indices.append(2**i + padding)
    return indices
d7ab1f4bc414ad741533d5fabfb0f7c8b4fe0959
21,554
def import_by_name(name):
    """Dynamically import an object by its dotted path."""
    tmp = name.split(".")
    module_name = ".".join(tmp[0:-1])
    obj_name = tmp[-1]
    module = __import__(module_name, globals(), locals(), [obj_name])
    return getattr(module, obj_name)
714ca90704d99a8eafc8db08a5f3df8e17bc6da4
21,555
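A usage sketch for `import_by_name` above, resolving a stdlib function by its dotted path:

urljoin = import_by_name("urllib.parse.urljoin")
assert urljoin("https://example.com/a/", "b") == "https://example.com/a/b"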
def f1_score(y_true, y_pred):
    """F-measure."""
    p = precision(y_true, y_pred)
    r = true_positive_rate(y_true, y_pred)
    return 2 * (p * r) / (p + r)
e5f79def2db902bb0aa1efd9ea1ccef52b62072a
21,556
def hexColorToInt(rgb):
    """Convert rgb color string to STK integer color code."""
    r = int(rgb[0:2], 16)
    g = int(rgb[2:4], 16)
    b = int(rgb[4:6], 16)
    color = format(b, '02X') + format(g, '02X') + format(r, '02X')
    return int(color, 16)
59b8815d647b9ca3e90092bb6ee7a0ca19dd46c2
21,557
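A worked check for `hexColorToInt` above; the function packs the bytes in BGR order, so pure red becomes 0x0000FF = 255:

assert hexColorToInt("ff0000") == 0x0000FF   # pure red -> 255
assert hexColorToInt("0000ff") == 0xFF0000   # pure blue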
import torch


def test(model, X, model_type, test_type, counter=False):
    """Test functions."""
    if model_type == 'notear-mlp':
        X = np.vstack(X)
        y = model(torch.from_numpy(X))
        y = y.cpu().detach().numpy()
        mse = mean_squared_loss(y.shape[0], y[:, 0], X[:, 0])
    elif model_type == 'notear-castle':
        X = np.vstack(X)
        y = model(torch.from_numpy(X))
        y = y.cpu().detach().numpy()
        mse = mean_squared_loss(y.shape[0], y[:, 0], X[:, 0])
    elif model_type == 'ISL':
        y = model.test(X)
        mse = mean_squared_loss(y.shape[0] * y.shape[1], y,
                                X[:, :, 0][:, :, np.newaxis])

    if not counter:
        if test_type == 'ID':
            metrics[f'{model_type}_testID_MSE'] = mse
        elif test_type == 'OOD':
            metrics[f'{model_type}_testOOD_MSE'] = mse
    else:
        if test_type == 'ID':
            metrics[f'{model_type}_counter_testID_MSE'] = mse
        elif test_type == 'OOD':
            metrics[f'{model_type}_counter_testOOD_MSE'] = mse
    return mse
cb98e2096052270e786bbb81fafc328076b1aa40
21,558
def scale(): """ Returns class instance of `Scale`. For more details, please have a look at the implementations inside `Scale`. Returns ------- Scale : Class instance implementing all 'scale' processes. """ return Scale()
f5fb9daf9baaf86674be110aae78b1bf91f09371
21,559
import cv2


def imread_rgb(filename):
    """Read image file from filename and return rgb numpy array"""
    bgr = cv2.imread(filename)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    return rgb
6fcef3f9f5d8b02c28c596f706f3e1fcf685dd24
21,560
def insert_at_index(rootllist, newllist, index):
    """ Insert newllist in the llist following rootllist such that newllist is at the provided index in the resulting llist"""
    # At start
    if index == 0:
        newllist.child = rootllist
        return newllist
    # Walk through the list
    curllist = rootllist
    for i in range(index - 1):
        curllist = curllist.child
    # Insert
    newllist.last().child = curllist.child
    curllist.child = newllist
    return rootllist
767cde29fbc711373c37dd3674655fb1bdf3fedf
21,561
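The function assumes a singly linked list whose nodes expose a `child` attribute and a `last()` method; here is a minimal, hypothetical node class (not part of the original code) that exercises it:

class LNode:
    """Minimal singly linked node compatible with insert_at_index."""
    def __init__(self, value):
        self.value = value
        self.child = None

    def last(self):
        node = self
        while node.child is not None:
            node = node.child
        return node

# Build a -> c, then splice b in at index 1 to get a -> b -> c.
a, b, c = LNode("a"), LNode("b"), LNode("c")
a.child = c
root = insert_at_index(a, b, 1)
assert (root.value, root.child.value, root.child.child.value) == ("a", "b", "c")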
def kpi_value(request, body):
    """KPI value endpoint. Dispatches to a different handler
    depending on the `indicator` passed in the request body."""
    params = {
        "indicator": body.indicator
    }
    handler = KpiFactory().create_handler(params["indicator"])
    result = handler(params=params)
    return DashboardResult(content=result)
5f242028d7f95b1ffa81690c3ed1b1d7006cf97c
21,562
def safeReplaceOrder( references ):
    """
    When inlining a variable, if multiple instances occur on the line,
    then the last reference must be replaced first.  Otherwise the
    remaining intra-line references will be incorrect.
    """
    result = list(references)
    result.sort(key=lambda ref: ref.colno, reverse=True)
    return result
dae29bf1c8da84c77c64210c4d897ac4a9d0c098
21,563
from typing import Union
from typing import Iterable
import time
from typing import List
from typing import Any
from collections import namedtuple
import requests
import pandas as pd
def broken_link_finder(urls: Union[str, list, tuple, set],
                       print_to_console: bool = False,
                       file_out = None,
                       viewer = DEFAULT_CSV_VIEWER,
                       open_results_when_done = True,
                       exclude_prefixes: Iterable = EXCLUDE_LINKS_STARTING_WITH):
    """
    Checks for broken links on a specific web page(s) as specified by the urls argument.

    :param urls: the url or urls to check.
    :param print_to_console: True / False -- print each link to console while checking.
    :param file_out: if not None, name of file to which to write broken link checker output
    :param viewer: program to use to open and view the results (csv file)
    :param open_results_when_done: True/False
    :param exclude_prefixes: list-like
    :return: list of sets of broken_urls, local_urls, foreign_urls, processed_urls
    """
    start_time = time.time()
    working_urls: List[Any] = []
    broken_urls: List[Any] = []
    if type(exclude_prefixes) == str:
        exclude_prefixes = [exclude_prefixes]
    if 'mailto' not in exclude_prefixes:
        exclude_prefixes = list(exclude_prefixes)
        exclude_prefixes.append('mailto')
    if type(urls) == str:
        urls = [urls]
    links = []
    for url in urls:
        lst = get_links_from_webpage(url, full_links = True, exclude_prefixes = exclude_prefixes)
        links += lst['urls']
    # remove duplicates
    links = set(links)
    tot = len(links)
    cnt = 0
    for link in links:
        if print_to_console:
            cnt += 1
            print(f'Checking link {cnt} of {tot}: {link}')
        try:
            # TODO: should probably leverage link_check instead of repeating code.
            # compare the code in this function and link_check to see what's up.
            head = requests.head(link)
            success = head.ok
            status = f"Retrieved header from: {link}"
            try:
                response = requests.get(link)
                success = response.ok
                status = f"Received response from: {link}"
            except Exception as e:
                success = False
                status = f"{e}. Retrieved header but failed to open page: {link}"
        except Exception as e:
            success = False
            status = f"{e}. Failed to retrieve header from: {link}"
        if link.startswith('ftp:'):
            # get stats from ftp server
            stats = ez_ftp.stats(link)
            filename = stats.basename
            file_size = stats.size
            # filename, file_size = ftp_file_size(link)
            if not filename:
                success = False
                status = f"FTP file not found: {link}"
            if type(filename) == str:
                if file_size > 0:
                    success = True
                    status = f"FTP file found: {link}"
                else:
                    success = False
                    status = f"FTP file is empty: {link}"
            else:
                success = False
                status = f"{file_size}. FTP file: {link}"
        if success:
            working_urls.append((link, success, status))
        else:
            # found a broken link
            broken_urls.append((link, success, status))
    processed_urls = working_urls + broken_urls
    if file_out:
        df = pd.DataFrame(data = processed_urls, index = None, columns = ['link', 'success', 'header'])
        df.to_csv(path_or_buf = file_out, sep = ',', header = True)
        if open_results_when_done:
            # open result file in the viewer (text editor).
            view_file(filename = file_out, viewer = viewer)
    # Done checking links. Report results.
    stop_time = time.time()
    run_time = stop_time - start_time
    if print_to_console:
        print(f'\n\nChecked: {len(processed_urls)} links in {run_time} seconds')
        print(f'\nFound {len(broken_urls)} BROKEN LINKS: \n', broken_urls)
    # Return results.
    ReturnTuple = namedtuple('ReturnTuple', 'processed_urls broken_urls run_time')
    return ReturnTuple(processed_urls, broken_urls, run_time)
2cbbe3c40d2d78e68144c03dcb93f2f2ac6feb83
21,564
import typing as t


def clean_value(value: str) -> t.Union[int, float, str]:
    """Return the given value as an int or float if possible, otherwise as the original string."""
    try:
        return int(value)
    except ValueError:
        pass
    try:
        return float(value)
    except ValueError:
        pass
    return value
52c09e2aaf77cb22e62f47e11226350112390eb2
21,565
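A quick demonstration of the fall-through behavior:

assert clean_value("42") == 42          # int wins first
assert clean_value("3.14") == 3.14      # then float
assert clean_value("n/a") == "n/a"      # otherwise the original string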
def sumstat(*L):
    """
    Sums a list or a tuple L
    Modified from pg 80 of Web Programming in Python
    """
    if len(L) == 1 and isinstance(L[0], (list, tuple)):
        L = L[0]
    s = 0.0
    for k in L:
        s = s + k
    return s
c37aa0aa0b7dbf6adbe77d82b27b25e469891795
21,566
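Both calling conventions work, as a quick check shows:

assert sumstat(1, 2, 3) == 6.0        # varargs
assert sumstat([1, 2, 3]) == 6.0      # or a single list
assert sumstat((4, 5)) == 9.0         # or a single tuple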
from urllib.parse import urlparse, urlunparse


def halref_to_data_url(halref: str) -> str:
    """
    Given a HAL or HAL-data document URIRef, returns the corresponding HAL-data URL

    halref: str
        HAL (or HAL-data) document URL

    Examples:
    https://hal.archives-ouvertes.fr/hal-02371715v2 -> https://data.archives-ouvertes.fr/document/hal-02371715v2
    https://data.archives-ouvertes.fr/document/hal-02371715v2.rdf -> https://data.archives-ouvertes.fr/document/hal-02371715v2.rdf
    https://data.archives-ouvertes.fr/document/hal-02371715 -> https://data.archives-ouvertes.fr/document/hal-02371715
    """
    parsed_ref = urlparse(halref)
    assert "archives-ouvertes.fr" in parsed_ref.netloc, "Expected HAL (or HAL-data) document URL"
    if "hal.archives-ouvertes.fr" in parsed_ref.netloc:
        parsed_ref = parsed_ref._replace(netloc="data.archives-ouvertes.fr", path=f"/document{parsed_ref.path}")
    return urlunparse(parsed_ref)
48ef6629fc3198af2c8004a3dbcbbde6e700cb12
21,567
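The docstring examples double as tests:

assert halref_to_data_url(
    "https://hal.archives-ouvertes.fr/hal-02371715v2"
) == "https://data.archives-ouvertes.fr/document/hal-02371715v2"

# Already a HAL-data URL: passed through unchanged.
assert halref_to_data_url(
    "https://data.archives-ouvertes.fr/document/hal-02371715"
) == "https://data.archives-ouvertes.fr/document/hal-02371715"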
def find_best_rate():
    """
    Input: annual salary (prompted from the user)
    Assumes: a home cost of $1,000,000, a semi-annual raise of 7%, a time
    frame of three years (36 months), a down payment of 25% of the total
    cost, current savings starting from 0, and an annual return of 4%
    Returns the best savings rate that gets within (plus/minus) $100 of the
    down payment, found by bisection search; otherwise reports that the
    goal is not possible
    """
    annual_salary = float(input("Enter your annual salary: "))
    total_cost = float(1000000)
    semi_annual_raise = float(0.07)
    monthly_salary = annual_salary/12
    r = 0.04
    down_payment = 0.25 * total_cost
    current_savings = 0
    time = 36
    epsilon = 100
    low = 0
    high = 10000
    savings_rate = (low + high)//2
    num = 0
    while abs(current_savings - down_payment) >= epsilon:
        mod_annual_salary = annual_salary #The annual salary we will use to modify/ make changes
        current_savings = 0
        portion_saved = savings_rate/10000 #Converting our floor/ int division to decimal (as a portion to save)
        for month in range(1, time+1):
            if month % 6 == 0:
                mod_annual_salary += (annual_salary * semi_annual_raise)
            monthly_salary = mod_annual_salary/12
            monthly_savings = monthly_salary * portion_saved
            additional = monthly_savings + (current_savings * r/12) #Additional return considering monthly and current savings
            current_savings += additional
        #Bisection search
        if current_savings < down_payment:
            low = savings_rate
        else:
            high = savings_rate
        savings_rate = (low + high)//2
        num += 1
        if num > 15: #Log_2(10000) is 13.28... it will not make sense to keep searching after this point
            break
    if num < 15:
        print("Best Savings Rate: {} or {}%".format(portion_saved, portion_saved*100))
        print("Steps in bisection search: {}".format(num))
        return portion_saved
    else:
        return "It is not possible to pay the down payment in three years"
451fc72c006182b63233376a701b4cbb855ad39a
21,568
def q_inv(a): """Return the inverse of a quaternion.""" return [a[0], -a[1], -a[2], -a[3]]
e8d06e7db6d5b23efab10c07f4b9c6088190fa07
21,569
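Strictly speaking this returns the quaternion conjugate, which equals the inverse only for unit quaternions; a hedged check using a hypothetical Hamilton-product helper (not part of the original code):

def q_mult(a, b):
    """Hamilton product of two quaternions [w, x, y, z] (helper for the check)."""
    w1, x1, y1, z1 = a
    w2, x2, y2, z2 = b
    return [w1*w2 - x1*x2 - y1*y2 - z1*z2,
            w1*x2 + x1*w2 + y1*z2 - z1*y2,
            w1*y2 - x1*z2 + y1*w2 + z1*x2,
            w1*z2 + x1*y2 - y1*x2 + z1*w2]

q = [0.5, 0.5, 0.5, 0.5]  # a unit quaternion
assert q_mult(q, q_inv(q)) == [1.0, 0.0, 0.0, 0.0]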
import numpy as np
from math import pi


def divide_hex_grid_flower(points, hex_radius=None):
    """Partitions a hexagonal grid into a flower pattern (this is what I used
    for the final product). Returns a list of partition indices, one per point."""
    if hex_radius is None:
        # copied from build_mirror_array()
        mini_hex_radius = (10 * 2.5 / 2) + 1
        hex_radius = mini_hex_radius * 1.1
    points = np.array(points)
    # Divide into a central hexagon plus six angular petals
    partition_indices = np.ones(len(points)) * -1
    for i, point in enumerate(points):
        x, y, z = point
        if np.sqrt(x**2 + y**2) <= 3 * (2*hex_radius + 1) * np.sqrt(3)/2:
            partition_indices[i] = 0
        else:
            θ = np.arctan2(x,y) + pi - 1e-10
            partition_indices[i] = 1 + np.floor(6 * θ / (2 * pi))
    return partition_indices
9f77e85d4bbfd00ea5eff6905209aad84e3a9191
21,570
def fis_gauss2mf(x: float, s1: float, c1: float, s2: float, c2: float):
    """Two-sided (split) Gaussian membership function: the left Gaussian
    (s1, c1) applies below c1, the right Gaussian (s2, c2) above c2, and
    the value is 1 on the plateau in between."""
    t1 = 1.0
    t2 = 1.0
    if x < c1:
        t1 = fis_gaussmf(x, s1, c1)
    if x > c2:
        t2 = fis_gaussmf(x, s2, c2)
    return (t1 * t2)
443e02dff7ab3827ac0006443964f45a6f9f4ce2
21,571
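`fis_gaussmf` is defined elsewhere in the codebase; assuming the standard Gaussian membership exp(-(x-c)^2 / (2*s^2)) — an assumption, sketched below — the two-sided function is flat at 1 on the plateau [c1, c2]:

import math

def fis_gaussmf(x, s, c):
    """Standard Gaussian membership function (assumed definition)."""
    return math.exp(-((x - c) ** 2) / (2.0 * s ** 2))

# On the plateau between the two centers the value is exactly 1.0.
assert fis_gauss2mf(5.0, s1=1.0, c1=4.0, s2=2.0, c2=6.0) == 1.0
# Left of c1 only the left Gaussian applies.
assert fis_gauss2mf(3.0, 1.0, 4.0, 2.0, 6.0) == fis_gaussmf(3.0, 1.0, 4.0)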
from math import sqrt


def _is_trigonal_prism(vectors, dev_cutoff=15):
    """
    Triangular prisms are defined by 3 vertices in a triangular pattern on two
    aligned planes. Unfortunately, the angles are dependent on the length and
    width of the prism. Need more examples to come up with a better way of
    detecting this shape. For now, this code is experimental.

    Parameters
    ----------
    vectors : list of scitbx.matrix.col
    dev_cutoff : float, optional

    Returns
    -------
    tuple of (float, int) or None
        The angular RMS deviation and the number of missing vertices, or
        None if the geometry does not match a trigonal prism.
    """
    if len(vectors) != 6:
        return
    angles = _bond_angles(vectors)
    a_85s, a_135s = [], []
    for angle in angles:
        if abs(angle[-1] - 85) < abs(angle[-1] - 135):
            a_85s.append(angle[-1] - 85)
        else:
            a_135s.append(angle[-1] - 135)
    if len(a_85s) != 9 and len(a_135s) != 6:
        return
    deviation = sqrt(sum(i ** 2 for i in a_85s + a_135s) / len(angles))
    if deviation < dev_cutoff:
        return deviation, 6 - len(vectors)
2b3ffb318b3201923828eea8f4769a6ce854dd58
21,572
import os

import numpy as np
from netCDF4 import Dataset
# get_altitude is assumed to come from pysolar, whose
# get_altitude(lat, lon, when) signature matches the call below.
from pysolar.solar import get_altitude


def get_sza(times, rad, mask=None):
    """
    Fetch sza at every range cell in the radar FoV

    times: datetimes at which to evaluate the solar position
    rad: radar code
    mask: optional mask matrix (currently unused)
    """
    fname = "data/sim/{rad}.geolocate.data.nc.gz".format(rad=rad)
    os.system("gzip -d " + fname)
    fname = fname.replace(".gz", "")
    data = Dataset(fname)
    lat, lon = data["geo_lat"], data["geo_lon"]
    sza = []
    for d in times:
        sza.append(get_altitude(lat, lon, d))
    sza = np.array(sza)
    os.system("gzip " + fname)
    return sza
2b3ecd5c07ba00cbaae3274ade46925a5ad377dc
21,573
from collections import defaultdict


def load_results(path):
    """
    Return a dictionary of columns.
    Can't use genfromtxt because of the weird format for arrays that I used.
    :param path:
    :return:
    """
    data = defaultdict(list)

    column_casts = {
        "epoch": float,
        "env_name": str,
        "game_counter": int,
        "game_length": int,
        "score_red": float,
        "score_green": float,
        "score_blue": float,
        "wall_time": float,
        "date_time": float
    }

    # load in data
    step_counter = 0
    player_count = None
    with open(path, "r") as f:
        header = f.readline()
        column_names = [name.strip() for name in header.split(",")]
        infer_epoch = "epoch" not in column_names
        for line in f:
            row = line.split(",")
            for name, value in zip(column_names, row):
                if name in column_casts:
                    value = column_casts[name](value)
                else:
                    value = str(value)
                data[name] += [value]

            # fix a bug with a specific version of rescue game
            if "stats_voted_offplayer_count" in data:
                data["player_count"] = data["stats_voted_offplayer_count"]

            if player_count is None:
                player_count = sum([int(x) for x in data["player_count"][0].split(" ")])
            step_counter += data["game_length"][-1] * player_count

            # convert the team stats to single columns
            for i, hit in enumerate(int(x) for x in str(data["stats_player_hit"][-1]).split(" ")):
                if vs_order[i] not in data:
                    data[vs_order[i]] = []
                data[vs_order[i]] += [hit]

            # convert the team stats to single columns
            if "stats_player_hit_with_witness" in data:
                for i, hit in enumerate(int(x) for x in str(data["stats_player_hit_with_witness"][-1]).split(" ")):
                    key = vs_order[i]+"_ww"
                    if key not in data:
                        data[key] = []
                    data[key] += [hit]

            # convert the team stats to single columns
            for stat in ["deaths", "kills", "general_shot", "general_moved", "general_hidden", "tree_harvested"]:
                stats_name = f"stats_{stat}"
                if stats_name not in data:
                    continue
                for team, value in zip("RGB", (int(x) for x in str(data[stats_name][-1]).split(" "))):
                    field_name = f"{team}_{stat}"
                    data[field_name] += [value]

            # convert the team stats to single columns
            for stat in ["votes"]:
                stats_name = f"stats_{stat}"
                if stats_name not in data:
                    continue
                for team, value in zip("RGBT", (int(x) for x in str(data[stats_name][-1]).split(" "))):
                    field_name = f"{team}_{stat}"
                    data[field_name] += [value]

            if infer_epoch:
                data["epoch"].append(float(step_counter)/1e6)

            # round epoch so results group better
            data["epoch"][-1] = round(data["epoch"][-1], 1)

    return data
a4015d37d41353d41c5044d84249ad2e0700d7dc
21,574
def priority(n=0): """ Sets the priority of the plugin. Higher values indicate a higher priority. This should be used as a decorator. Returns a decorator function. :param n: priority (higher values = higher priority) :type n: int :rtype: function """ def wrapper(cls): cls._plugin_priority = n return cls return wrapper
58ab19fd88e9e293676943857a0fa04bf16f0e93
21,575
import math
def vecangle(u,v):
    """
    Calculate as accurately as possible the angle between two 3-component
    vectors u and v. This formula comes from W. Kahan's advice in his paper
    "How Futile are Mindless Assessments of Roundoff in Floating-Point
    Computation?" (https://www.cs.berkeley.edu/~wkahan/Mindless.pdf),
    section 12 "Mangled Angles":

        θ = 2·atan2(‖ ‖v‖u − ‖u‖v ‖, ‖ ‖v‖u + ‖u‖v ‖)
    """
    modu = modvec(u)
    modv = modvec(v)
    vmodu = [modu*v[0], modu*v[1], modu*v[2]]
    umodv = [modv*u[0], modv*u[1], modv*u[2]]
    term1 = [umodv[0]-vmodu[0], umodv[1]-vmodu[1], umodv[2]-vmodu[2]]
    modterm1 = modvec(term1)
    term2 = [umodv[0]+vmodu[0], umodv[1]+vmodu[1], umodv[2]+vmodu[2]]
    modterm2 = modvec(term2)
    return (2.0*math.atan2(modterm1,modterm2))
dde6ebed830130f122b0582d4c19963a061a3d31
21,576
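`modvec` is assumed to be the Euclidean norm (a sketch of that assumption follows), which lets us check Kahan's formula against known angles:

import math

def modvec(v):
    """Euclidean norm of a 3-vector (assumed definition of the helper)."""
    return math.sqrt(v[0]**2 + v[1]**2 + v[2]**2)

# Orthogonal unit vectors: the angle should be pi/2.
assert abs(vecangle([1, 0, 0], [0, 1, 0]) - math.pi / 2) < 1e-12
# Parallel vectors give exactly 0; anti-parallel give pi.
assert vecangle([1, 0, 0], [2, 0, 0]) == 0.0
assert abs(vecangle([1, 0, 0], [-1, 0, 0]) - math.pi) < 1e-12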
def sanitize_option(option): """ Format the given string by stripping the trailing parentheses eg. Auckland City (123) -> Auckland City :param option: String to be formatted :return: Substring without the trailing parentheses """ return ' '.join(option.split(' ')[:-1]).strip()
ece0a78599e428ae8826b82d7d00ffc39495d27f
21,577
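The docstring example, verified:

assert sanitize_option("Auckland City (123)") == "Auckland City"
assert sanitize_option("Wellington (45)") == "Wellington"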
def node_values_for_tests(): """Creates a list of possible node values for parameters Returns: List[Any]: possible node values """ return [1, 3, 5, 7, "hello"]
b919efc5e59a5827b3b27e4f0a4cd070ceb9a5a4
21,578
import torch def computeGramMatrix(A, B): """ Constructs a linear kernel matrix between A and B. We assume that each row in A and B represents a d-dimensional feature vector. Parameters: A: a (n_batch, n, d) Tensor. B: a (n_batch, m, d) Tensor. Returns: a (n_batch, n, m) Tensor. """ assert(A.dim() == 3) assert(B.dim() == 3) assert(A.size(0) == B.size(0) and A.size(2) == B.size(2)) return torch.bmm(A, B.transpose(1,2))
c9b221b3d6a8c7a16337178a1f148873b27ec04a
21,579
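A small shape-and-value check (a minimal usage sketch):

import torch

A = torch.randn(2, 5, 3)  # batch of 2, five 3-d feature vectors each
B = torch.randn(2, 4, 3)  # batch of 2, four 3-d feature vectors each
G = computeGramMatrix(A, B)
assert G.shape == (2, 5, 4)
# Each entry is a dot product between a row of A and a row of B.
assert torch.allclose(G[0, 1, 2], A[0, 1] @ B[0, 2])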
import sys

import configparser as ConfigParser  # this module predates Python 3's rename


def parse_config(config):
    """Backwards compatible parsing.

    :param config: ConfigParser object initialized with nvp.ini.
    :returns: A tuple consisting of a control cluster object and a
        plugin_config variable.
    raises: In general, system exceptions are not caught but are propagated
        up to the user. Config parsing is still very lightweight. At some
        point, error handling needs to be significantly enhanced to provide
        user friendly error messages, clean program exits, rather than
        exceptions propagated to the user.
    """
    # Extract plugin config parameters.
    try:
        failover_time = config.get('NVP', 'failover_time')
    except ConfigParser.NoOptionError:
        failover_time = str(DEFAULT_FAILOVER_TIME)

    try:
        concurrent_connections = config.get('NVP', 'concurrent_connections')
    except ConfigParser.NoOptionError:
        concurrent_connections = str(DEFAULT_CONCURRENT_CONNECTIONS)

    plugin_config = {
        'failover_time': failover_time,
        'concurrent_connections': concurrent_connections,
    }
    LOG.info('parse_config(): plugin_config == "%s"' % plugin_config)

    cluster = NVPCluster('cluster1')

    # Extract connection information.
    try:
        defined_connections = config.get('NVP', 'NVP_CONTROLLER_CONNECTIONS')

        for conn_key in defined_connections.split():
            args = [config.get('NVP', 'DEFAULT_TZ_UUID')]
            args.extend(config.get('NVP', conn_key).split(':'))
            try:
                cluster.add_controller(*args)
            except Exception as e:
                LOG.fatal('Invalid connection parameters: %s' % str(e))
                sys.exit(1)

        return cluster, plugin_config
    except Exception as e:
        LOG.info('No new style connections defined: %s' % e)

    # Old style controller specification.
    args = [config.get('NVP', k) for k in CONFIG_KEYS]
    try:
        cluster.add_controller(*args)
    except Exception:
        LOG.fatal('Invalid connection parameters.')
        sys.exit(1)

    return cluster, plugin_config
74689d11c1d610a9211dc5895ff42a8b8e2389ae
21,580
def deletable_proxy_user(request, onefs_client): """Get the name of an existing proxy user that it is ok to delete.""" return _deletable_proxy_user(request, onefs_client)
c7440099fe4435cf9b5b557253f7fb9563dc600c
21,581
import six def get_from_module(identifier, module_params, module_name, instantiate=False, kwargs=None): """The function is stolen from keras.utils.generic_utils. """ if isinstance(identifier, six.string_types): res = module_params.get(identifier) if not res: raise Exception('Invalid ' + str(module_name) + ': ' + str(identifier)) if instantiate and not kwargs: return res() elif instantiate and kwargs: return res(**kwargs) else: return res elif type(identifier) is dict: name = identifier.pop('name') res = module_params.get(name) if res: return res(**identifier) else: raise Exception('Invalid ' + str(module_name) + ': ' + str(identifier)) return identifier
406a1da5843feb8556bbd1802426b57e7a33b20d
21,582
def color_lerp(c1, c2, a):
    """Return the linear interpolation between two colors.

    ``a`` is the interpolation value, with 0 returning ``c1``, 1 returning
    ``c2``, and 0.5 returning a color halfway between both.

    Args:
        c1 (Union[Tuple[int, int, int], Sequence[int]]):
            The first color.  At a=0.
        c2 (Union[Tuple[int, int, int], Sequence[int]]):
            The second color.  At a=1.
        a (float): The interpolation value.

    Returns:
        Color: The interpolated Color.
    """
    return Color._new_from_cdata(lib.TCOD_color_lerp(c1, c2, a))
96c950c447994a729c9eb4c18bdcc60976dbb675
21,583
def get_equations(points):
    """ Calculate affine equations of inputted points

    Input :
        points : list of list
            Each element contains four coordinate pairs: the start and end
            points of the first separation line followed by the start and
            end points of the second separation line, e.g.
            [[[x1, y1], [x2, y2], [x3, y3], [x4, y4]], ...]

    Output :
        columns_a : list of list
            The a coefficients of the affine equation (y = ax + b) of both
            separation lines of each element, in the same order as the input
        columns_b : list of list
            The b coefficients of the affine equation (y = ax + b) of both
            separation lines of each element, in the same order as the input"""
    columns_a, columns_b = [], []
    # iterate through the elements
    for k in points:
        # a coefficients of the two separation lines of this element
        a1 = (k[0][1] - k[1][1])/(k[0][0] - k[1][0])
        a2 = (k[2][1] - k[3][1])/(k[2][0] - k[3][0])
        columns_a.append([a1, a2])
        # b coefficients, using the a coefficients calculated above
        b1 = k[0][1] - a1*k[0][0]
        b2 = k[2][1] - a2*k[2][0]
        columns_b.append([b1, b2])
    return (columns_a, columns_b)
4eea43aee8b5f9c63793daae0b28e3c8b4ce0929
21,584
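With one element carrying its four points, the coefficients come out as expected (assuming Python 3 true division):

# One element: line 1 through (0, 0)-(2, 2) gives y = x; line 2 through
# (0, 1)-(2, 5) gives y = 2x + 1.
points = [[[0, 0], [2, 2], [0, 1], [2, 5]]]
columns_a, columns_b = get_equations(points)
assert columns_a == [[1.0, 2.0]]
assert columns_b == [[0.0, 1.0]]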
def Temple_Loc(player, num):
    """Temple location: spend num coins for num points and num donation."""
    player.coins -= num
    player.score += num
    player.donation += num
    # player = temple_bonus_check(player) for achievements
    return (player)
dced7b9f23f63c0c51787291ab12701bd7021152
21,585
from os.path import splitext

from termcolor import colored


def indexGenomeFile(input, output):
    """Build the STAR genome index from the probe fasta file
    `input`: Input probes fasta file
    `output`: SAindex file used to check the completion of the STAR genome index
    """
    base = splitext(input)[0]
    gtfFile = base + ".gtf"
    outputDir = proDir + "/result/Genome"
    print(colored("Stage 4: Creating genome index file from the probe fasta file ....", "green"))
    print(input)
    result = tasks.index_db_file(input, outputDir, cpuNum, gtfFile)
    return result
2ebd981ebad97f68adb1043e9c06fd01dc270c10
21,586
import math

from sklearn.metrics import roc_auc_score


def performance(origin_labels, predict_labels, deci_value, bi_or_multi=False, res=False):
    """Evaluate the performance of the model.
    :param deci_value: decision values used for ROC and AUC.
    :param bi_or_multi: binary or multiple classification
    :param origin_labels: true values of the data set.
    :param predict_labels: predicted values of the data set.
    :param res: residue or not.
    """
    if len(origin_labels) != len(predict_labels):
        raise ValueError("The number of the original labels must equal to that of the predicted labels.")
    if bi_or_multi is False:
        tp = 0.0
        tn = 0.0
        fp = 0.0
        fn = 0.0
        for i in range(len(origin_labels)):
            if res is True:
                if origin_labels[i] == 1 and predict_labels[i] == 1:
                    tp += 1.0
                elif origin_labels[i] == 1 and predict_labels[i] == 0:
                    fn += 1.0
                elif origin_labels[i] == 0 and predict_labels[i] == 1:
                    fp += 1.0
                elif origin_labels[i] == 0 and predict_labels[i] == 0:
                    tn += 1.0
            else:
                if origin_labels[i] == 1 and predict_labels[i] == 1:
                    tp += 1.0
                elif origin_labels[i] == 1 and predict_labels[i] == -1:
                    fn += 1.0
                elif origin_labels[i] == -1 and predict_labels[i] == 1:
                    fp += 1.0
                elif origin_labels[i] == -1 and predict_labels[i] == -1:
                    tn += 1.0
        try:
            sn = tp / (tp + fn)
            r = sn
        except ZeroDivisionError:
            sn, r = 0.0, 0.0
        try:
            sp = tn / (fp + tn)
        except ZeroDivisionError:
            sp = 0.0
        try:
            acc = (tp + tn) / (tp + tn + fp + fn)
        except ZeroDivisionError:
            acc = 0.0
        try:
            mcc = (tp * tn - fp * fn) / math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
        except ZeroDivisionError:
            mcc = 0.0
        try:
            auc = roc_auc_score(origin_labels, deci_value)
        except ValueError:
            # modified 2020/9/13: single-class inputs make AUC undefined
            auc = 0.0
        try:
            p = tp / (tp + fp)
        except ZeroDivisionError:
            p = 0.0
        try:
            f1 = 2 * p * r / (p + r)
        except ZeroDivisionError:
            f1 = 0.0
        balance_acc = (sn + sp) / 2
        return acc, mcc, auc, balance_acc, sn, sp, p, r, f1
    else:
        correct_labels = 0.0
        for elem in zip(origin_labels, predict_labels):
            if elem[0] == elem[1]:
                correct_labels += 1.0
        acc = correct_labels / len(origin_labels)
        return acc
aac87e0bdc02b61ccb5136e04e1ac8b09e01ce65
21,587
def rotkehlchen_instance( uninitialized_rotkehlchen, database, blockchain, accountant, start_with_logged_in_user, start_with_valid_premium, function_scope_messages_aggregator, db_password, rotki_premium_credentials, accounting_data_dir, username, etherscan, ): """A partially mocked rotkehlchen instance""" initialize_mock_rotkehlchen_instance( rotki=uninitialized_rotkehlchen, start_with_logged_in_user=start_with_logged_in_user, start_with_valid_premium=start_with_valid_premium, msg_aggregator=function_scope_messages_aggregator, accountant=accountant, blockchain=blockchain, db_password=db_password, rotki_premium_credentials=rotki_premium_credentials, data_dir=accounting_data_dir, database=database, username=username, etherscan=etherscan, ) return uninitialized_rotkehlchen
144585d62c04f97aa7bcb7a355bd90f8ff001022
21,588
def store_inspection_outputs_df(backend, annotation_iterators, code_reference, return_value, operator_context): """ Stores the inspection annotations for the rows in the dataframe and the inspection annotations for the DAG operators in a map """ dag_node_identifier = DagNodeIdentifier(operator_context.operator, code_reference, backend.code_reference_to_description.get(code_reference)) annotations_df = build_annotation_df_from_iters(backend.inspections, annotation_iterators) annotations_df['mlinspect_index'] = range(1, len(annotations_df) + 1) inspection_outputs = {} for inspection in backend.inspections: inspection_outputs[inspection] = inspection.get_operator_annotation_after_visit() backend.dag_node_identifier_to_inspection_output[dag_node_identifier] = inspection_outputs return_value = MlinspectDataFrame(return_value) return_value.annotations = annotations_df return_value.backend = backend if "mlinspect_index" in return_value.columns: return_value = return_value.drop("mlinspect_index", axis=1) elif "mlinspect_index_x" in return_value.columns: return_value = return_value.drop(["mlinspect_index_x", "mlinspect_index_y"], axis=1) assert "mlinspect_index" not in return_value.columns assert isinstance(return_value, MlinspectDataFrame) return return_value
228a24a4d59162382b5a3ae7d8204e396b8c76dd
21,589
def switched (decorator):
    """Decorator transform for switched decorations: adds start_<fun> and
    stop_<fun> methods to the instance so the decorated function can be
    switched on and off via a threading event."""
    @simple_decorator
    def new_decorator (fun):
        event = new_event()
        def inner_fun (self, *args):
            if args:
                event.wait()
                if threads_alive():
                    return fun(self, *args)
        def new_fun (self, *args):
            setattr(self, 'start_%s' % fun.__name__, event.set)
            setattr(self, 'stop_%s' % fun.__name__, event.clear)
            decorator(inner_fun)(self, *args)
        return new_fun
    return new_decorator
1996274fcaba2095b43f7d0da134abb59b2f7a56
21,590
import numpy as np


def logistic_embedding0(k=1, dataset='epinions'):
    """Train the logistic classifier on random embeddings
    (a baseline: the embeddings carry no learned structure).

    Keyword Arguments:
        k {int} -- [fold number] (default: {1})
        dataset {str} -- [dataset] (default: {'epinions'})

    Returns:
        [tuple] -- [pos_ratio, accuracy, f1_score0, f1_score1, f1_score2, auc_score]
    """
    print('random embeddings')
    embeddings = np.random.rand(DATASET_NUM_DIC[dataset], EMBEDDING_SIZE)
    pos_ratio, accuracy, f1_score0, f1_score1, f1_score2, auc_score = common_logistic(dataset, k, embeddings, 'random')
    return pos_ratio, accuracy, f1_score0, f1_score1, f1_score2, auc_score
69b198c6a5f8a44681ccfee67b532b3d38d2ee44
21,591
def process_plus_glosses(word): """ Find all glosses with a plus inside. They correspond to one-phoneme affix sequences that are expressed by the same letter due to orthographic requirements. Replace the glosses and the morphemes. """ return rxPartsGloss.sub(process_plus_glosses_ana, word)
0678efc61d1af0ec75b8d0566866b305b6312448
21,592
from datetime import datetime, timezone


def check_in_the_past(value: datetime) -> datetime:
    """
    Validate that a timestamp is in the past.
    """
    assert value.tzinfo == timezone.utc, "date must be an explicit UTC timestamp"
    assert value < datetime.now(timezone.utc), "date must be in the past"
    return value
a439295190bfa2b6d2d6de79c7dc074df562e9ed
21,593
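A valid timestamp passes through unchanged:

from datetime import datetime, timedelta, timezone

one_hour_ago = datetime.now(timezone.utc) - timedelta(hours=1)
assert check_in_the_past(one_hour_ago) == one_hour_ago  # UTC and in the past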
import os def fixture(filename): """ Get the handle / path to the test data folder. """ return os.path.join(fixtures_dir, filename)
7a14e173993b3f226ca71ea335ee153512e3df7d
21,594
from typing import Dict
def character_count_helper(results: Dict) -> int:
    """
    Helper function that computes the character count for OCR results on a
    single image

    Parameters
    ----------
    results: Dict (OCR results from a clapperboard instance)

    Returns
    -------
    Int
        Number of characters (whitespace excluded) computed from OCR results
    """
    count = 0
    for element in results:
        words_list = element["text"].split(" ")
        for word in words_list:
            count += len(word)
    return count
b5bcba9d39b7b09a1a123fec034ab1f27b31d1eb
21,595
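Spaces are excluded from the tally, as a small example shows:

results = [{"text": "SCENE 12 TAKE 3"}]
# "SCENE" + "12" + "TAKE" + "3" -> 5 + 2 + 4 + 1 = 12 characters
assert character_count_helper(results) == 12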
import pickle def from_pickle(input_path): """Read from pickle file.""" with open(input_path, 'rb') as f: unpickler = pickle.Unpickler(f) return unpickler.load()
4e537fcde38e612e22004007122130c545246afb
21,596
def PromptForRegion(available_regions=constants.SUPPORTED_REGION):
    """Prompt for region from list of available regions.

    This method is referenced by the declarative iam commands as a
    fallthrough for getting the region.

    Args:
        available_regions: list of the available regions to choose from

    Returns:
        The region specified by the user, str
    """
    if console_io.CanPrompt():
        all_regions = list(available_regions)
        idx = console_io.PromptChoice(
            all_regions, message='Please specify a region:\n', cancel_option=True)
        region = all_regions[idx]
        log.status.Print('To make this the default region, run '
                         '`gcloud config set ai/region {}`.\n'.format(region))
        return region
2298fde743219f59b5a36844d85e14929d1e2a1e
21,597
def update(x, new_x): """Update the value of `x` to `new_x`. # Arguments x: A `Variable`. new_x: A tensor of same shape as `x`. # Returns The variable `x` updated. """ return tf.assign(x, new_x)
363cd3232a57d4c2c946813874a5a3c613f9a8c9
21,598
def r1r2_to_bp(r1, r2, pl=0.01, pu=0.25):
    """
    Convert uniform samples of r1 and r2 to impact parameter b and radius
    ratio p following Espinoza 2018,
    https://iopscience.iop.org/article/10.3847/2515-5172/aaef38/meta

    Parameters:
    -----------
    r1, r2: float;
        uniform deviates drawn from U(0,1)
    pl, pu: float;
        lower and upper limits of the radius ratio

    Returns:
    -------
    b, p: tuple;
        impact parameter and radius ratio
    """
    assert np.all(0<r1) and np.all(r1<=1) and np.all(0<r2) and np.all(r2<=1), f"r1 and r2 need to be in U(0,1) but r1={r1}, r2={r2}"
    Ar = (pu-pl)/(2+pu+pl)
    if np.all(r1 > Ar):
        b = (1+pl) * (1 + (r1-1)/(1-Ar) )
        p = (1-r2)*pl + r2*pu
    elif np.all(r1 <= Ar):
        q1 = r1/Ar
        b = (1+pl) + q1**0.5 * r2*(pu-pl)
        p = pu + (pl-pu)* q1**0.5*(1-r2)
    return b, p
0c7f69f3f7960792e8d0ecd75fde028eda9feefa
21,599
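A quick sanity check with the default radius-ratio limits (a minimal sketch; values verified by hand):

# r2 = 0.5 puts p at the midpoint of [pl, pu] = [0.01, 0.25].
b, p = r1r2_to_bp(0.5, 0.5)
assert abs(p - 0.13) < 1e-12
# r1 = 0.5 is above Ar ~ 0.106, so b lands in the physical range 0 <= b < 1 + p.
assert 0 <= b < 1 + p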