content
stringlengths
22
815k
id
int64
0
4.91M
def friable_sand(Ks, Gs, phi, phic, P_eff, n=-1, f=1.0):
    """Friable (unconsolidated) sand rock physics model.

    Interpolates the dry-rock moduli between the Hertz-Mindlin end member at
    critical porosity and the mineral point at zero porosity along the
    modified lower Hashin-Shtrikman bound.

    Reference: Avseth et al., Quantitative Seismic Interpretation, p.54

    Inputs:
        Ks    = bulk modulus of mineral matrix
        Gs    = shear modulus of mineral matrix
        phi   = porosity
        phic  = critical porosity
        P_eff = effective pressure
        n     = coordination number
        f     = shear reduction factor
    Outputs:
        K_dry = dry rock bulk modulus of friable rock
        G_dry = dry rock shear modulus of friable rock
    """
    k_hm, g_hm = hertz_mindlin(Ks, Gs, phic, P_eff, n, f)
    frac = phi / phic  # position between mineral point (0) and critical porosity (1)
    zeta = g_hm / 6 * (9 * k_hm + 8 * g_hm) / (k_hm + 2 * g_hm)
    # NOTE(review): `4/3` floors to 1 under Python 2; harmless on Python 3 -- confirm target version.
    soft_bulk = frac / (k_hm + 4 / 3 * g_hm)
    stiff_bulk = (1 - frac) / (Ks + 4.0 / 3.0 * g_hm)
    K_dry = (soft_bulk + stiff_bulk) ** -1 - 4.0 / 3.0 * g_hm
    soft_shear = frac / (g_hm + zeta)
    stiff_shear = (1.0 - frac) / (Gs + zeta)
    G_dry = (soft_shear + stiff_shear) ** -1 - zeta
    return K_dry, G_dry
5,333,900
def get_available_currencies():
    """
    Retrieve the listing of all available currencies that have indexed
    currency crosses on Investing.com.

    The returned currencies can be used to search currency crosses and, from
    them, retrieve historical data to determine the value of one base
    currency expressed in a second currency.

    Returns:
        :obj:`list` - available_currencies:
            All available currencies appearing as either the base or the
            second value of a cross, e.g.::

                available_currencies = ['AED', 'AFN', 'ALL', 'AMD', 'ANG', ...]

    Raises:
        FileNotFoundError: raised if the currency crosses file was not found.
        IOError: raised if currency crosses retrieval failed (missing or
            empty file).
    """
    # Thin facade; the list is assembled by the resource-loading helper.
    return available_currencies_as_list()
5,333,901
def create_announcements():
    """Fill the MongoDB Announcements collection.

    NOTE(review): currently a stub -- no documents are inserted yet.
    """
    pass
5,333,902
def test_evaluate_with_labels_k2_r5_no_verbose(capsys):
    """Silently evaluate observation sequences with labels (k=2, r=5)"""
    # verbose=False must suppress the 'Classifying examples' progress output
    # that would otherwise be written to stderr.
    acc, cm = clfs[1].evaluate(X, y, labels=labels, verbose=False)
    assert 'Classifying examples' not in capsys.readouterr().err
    assert isinstance(acc, float)
    assert isinstance(cm, np.ndarray)
    # 5 label classes -> 5x5 confusion matrix
    assert cm.shape == (5, 5)
5,333,903
def mktemp(suffix="", prefix=template, dir=None): """User-callable function to return a unique temporary file name. The file is not created. Arguments are as for mkstemp, except that the 'text' argument is not accepted. This function is unsafe and should not be used. The file name refers to a file that did not exist at some point, but by the time you get around to creating it, someone else may have beaten you to the punch. """ ## from warnings import warn as _warn ## _warn("mktemp is a potential security risk to your program", ## RuntimeWarning, stacklevel=2) if dir is None: dir = gettempdir() names = _get_candidate_names() for seq in xrange(TMP_MAX): name = names.next() file = _os.path.join(dir, prefix + name + suffix) if not _exists(file): return file raise IOError, (_errno.EEXIST, "No usable temporary filename found")
5,333,904
def getTaskIdentifier(task_id):
    """Return the (type identifier, instance identifier) pair for a task."""
    instance = Instance.objects.get(id=task_id)
    return instance.type.identifier, instance.identifier
5,333,905
def hessian_vector_product(loss, weights, v):
    """Return H.v without materializing H, the Hessian of `loss` w.r.t. `weights`.

    `v` is a rank-1 tensor of the same size as the flattened loss gradient;
    its element ordering matches flatten_tensor_list() applied to the
    gradient.  Uses the double-backprop identity
    d(grad . v)/dweights = H.v, valid because v is held constant.
    """
    flat_grad = flatten_tensor_list(tf.gradients(loss, weights))
    # stop_gradient ensures the dv/dweights terms vanish in the second pass
    grad_dot_v = tf.reduce_sum(flat_grad * tf.stop_gradient(v))
    return flatten_tensor_list(tf.gradients(grad_dot_v, weights))
5,333,906
def clean_cells(nb_node):
    """Strip outputs and execution counts from every code cell, in place.

    Non-code cells are left untouched.  Returns the same notebook node for
    convenient chaining.
    """
    for cell in nb_node['cells']:
        if cell['cell_type'] != 'code':
            continue
        if 'outputs' in cell:
            cell['outputs'] = []
        if 'execution_count' in cell:
            cell['execution_count'] = None
    return nb_node
5,333,907
def _spanned(scond: _SpanConductor) -> Callable[..., Any]:
    """Handle decorating a function with either a new span or a reused span.

    ``scond`` decides which span the decorated callable runs under; the
    returned decorator selects a sync, generator, or async wrapper to match
    the kind of callable it is applied to.
    """

    def inner_function(func: Callable[..., Any]) -> Callable[..., Any]:
        def setup(args: Args, kwargs: Kwargs) -> Span:
            # Resolve the span for this particular call via the conductor.
            if not isinstance(scond, (_NewSpanConductor, _ReuseSpanConductor)):
                raise Exception(f"Undefined SpanConductor type: {scond}.")
            else:
                return scond.get_span(FunctionInspector(func, args, kwargs))

        @wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            """Wrapper for regular (non-generator, non-async) functions."""
            LOGGER.debug("Spanned Function")
            span = setup(args, kwargs)
            # A StopIteration raised by an iterator's __next__ must survive
            # the span context manager, so it is intercepted inside and
            # re-raised after the manager exits.
            is_iterator_class_next_method = span.name.endswith(".__next__")
            reraise_stopiteration_outside_contextmanager = False

            # CASE 1 ----------------------------------------------------------
            if scond.behavior == SpanBehavior.ONLY_END_ON_EXCEPTION:
                try:
                    with use_span(span, end_on_exit=False):
                        try:
                            return func(*args, **kwargs)
                        except StopIteration:
                            # intercept and temporarily suppress StopIteration
                            if not is_iterator_class_next_method:
                                raise
                            reraise_stopiteration_outside_contextmanager = True
                except:  # noqa: E722 # pylint: disable=bare-except
                    span.end()
                    raise
                if reraise_stopiteration_outside_contextmanager:
                    raise StopIteration
                # Unreachable unless the suppression logic above is broken.
                raise RuntimeError("Malformed SpanBehavior Handling")
            # CASES 2 & 3 -----------------------------------------------------
            elif scond.behavior in (SpanBehavior.END_ON_EXIT, SpanBehavior.DONT_END):
                end_on_exit = bool(scond.behavior == SpanBehavior.END_ON_EXIT)
                with use_span(span, end_on_exit=end_on_exit):
                    try:
                        return func(*args, **kwargs)
                    except StopIteration:
                        # intercept and temporarily suppress StopIteration
                        if not is_iterator_class_next_method:
                            raise
                        reraise_stopiteration_outside_contextmanager = True
                if reraise_stopiteration_outside_contextmanager:
                    raise StopIteration
                # Unreachable unless the suppression logic above is broken.
                raise RuntimeError("Malformed SpanBehavior Handling")
            # ELSE ------------------------------------------------------------
            else:
                raise InvalidSpanBehavior(scond.behavior)

        @wraps(func)
        def gen_wrapper(*args: Any, **kwargs: Any) -> Any:
            """Wrapper for generator functions; the span covers the whole iteration."""
            LOGGER.debug("Spanned Generator Function")
            span = setup(args, kwargs)

            # CASE 1 ----------------------------------------------------------
            if scond.behavior == SpanBehavior.ONLY_END_ON_EXCEPTION:
                try:
                    with use_span(span, end_on_exit=False):
                        for val in func(*args, **kwargs):
                            yield val
                except:  # noqa: E722 # pylint: disable=bare-except
                    span.end()
                    raise
            # CASES 2 & 3 -----------------------------------------------------
            elif scond.behavior in (SpanBehavior.END_ON_EXIT, SpanBehavior.DONT_END):
                end_on_exit = bool(scond.behavior == SpanBehavior.END_ON_EXIT)
                with use_span(span, end_on_exit=end_on_exit):
                    for val in func(*args, **kwargs):
                        yield val
            # ELSE ------------------------------------------------------------
            else:
                raise InvalidSpanBehavior(scond.behavior)

        @wraps(func)
        async def async_wrapper(*args: Any, **kwargs: Any) -> Any:
            """Wrapper for async functions; mirrors ``wrapper`` with StopAsyncIteration."""
            LOGGER.debug("Spanned Async Function")
            span = setup(args, kwargs)
            is_iterator_class_anext_method = span.name.endswith(".__anext__")
            reraise_stopasynciteration_outside_contextmanager = False

            # CASE 1 ----------------------------------------------------------
            if scond.behavior == SpanBehavior.ONLY_END_ON_EXCEPTION:
                try:
                    with use_span(span, end_on_exit=False):
                        try:
                            return await func(*args, **kwargs)
                        except StopAsyncIteration:
                            # intercept and temporarily suppress StopAsyncIteration
                            if not is_iterator_class_anext_method:
                                raise
                            reraise_stopasynciteration_outside_contextmanager = True
                except:  # noqa: E722 # pylint: disable=bare-except
                    span.end()
                    raise
                if reraise_stopasynciteration_outside_contextmanager:
                    raise StopAsyncIteration
                # Unreachable unless the suppression logic above is broken.
                raise RuntimeError("Malformed SpanBehavior Handling")
            # CASES 2 & 3 -----------------------------------------------------
            elif scond.behavior in (SpanBehavior.END_ON_EXIT, SpanBehavior.DONT_END):
                end_on_exit = bool(scond.behavior == SpanBehavior.END_ON_EXIT)
                with use_span(span, end_on_exit=end_on_exit):
                    try:
                        return await func(*args, **kwargs)
                    except StopAsyncIteration:
                        # intercept and temporarily suppress StopAsyncIteration
                        if not is_iterator_class_anext_method:
                            raise
                        reraise_stopasynciteration_outside_contextmanager = True
                if reraise_stopasynciteration_outside_contextmanager:
                    raise StopAsyncIteration
                # Unreachable unless the suppression logic above is broken.
                raise RuntimeError("Malformed SpanBehavior Handling")
            # ELSE ------------------------------------------------------------
            else:
                raise InvalidSpanBehavior(scond.behavior)

        # Dispatch on the decorated callable's kind.
        if asyncio.iscoroutinefunction(func):
            return async_wrapper
        else:
            if inspect.isgeneratorfunction(func):
                return gen_wrapper
            else:
                return wrapper

    return inner_function
5,333,908
def filter_by_distance(junctions, min_distance, max_distance):
    """Yield the junctions whose span distance lies in [min_distance, max_distance].

    The distance of a junction is ``abs(descriptor[2] - descriptor[5])``.
    Both bounds are inclusive.  (The original docstring mentioned only the
    upper bound; the lower bound has always been enforced too.)
    """
    for junction in junctions:
        distance = abs(junction.descriptor[2] - junction.descriptor[5])
        # idiomatic chained comparison instead of `a <= d and d <= b`
        if min_distance <= distance <= max_distance:
            yield junction
5,333,909
def getProjectProperties():
    """Return the project properties exposed by the metadata loader.

    :return: list of ProjectProperty
    @rtype: list of ProjectProperty
    """
    # Delegates to the shared metadata loader accessor.
    return getMetDataLoader().projectProperties
5,333,910
def svn_client_cleanup(*args):
    """svn_client_cleanup(char dir, svn_client_ctx_t ctx, apr_pool_t scratch_pool) -> svn_error_t"""
    # SWIG-generated thin binding over the native libsvn_client function;
    # argument marshalling happens entirely in the C extension module.
    return _client.svn_client_cleanup(*args)
5,333,911
def model_chromatic(psrs, psd='powerlaw', noisedict=None, components=30,
                    gamma_common=None, upper_limit=False, bayesephem=False,
                    wideband=False, idx=4, chromatic_psd='powerlaw',
                    c_psrs=None):
    """
    Reads in a list of enterprise Pulsar instances and returns a PTA
    instantiated with model 2A from the analysis paper + additional
    chromatic noise for given pulsars.

    per pulsar:
        1. fixed EFAC per backend/receiver system
        2. fixed EQUAD per backend/receiver system
        3. fixed ECORR per backend/receiver system
        4. Red noise modeled as a power-law with 30 sampling frequencies
        5. Linear timing model.
        6. Chromatic noise for given pulsar list

    global:
        1. Common red noise modeled with user defined PSD with 30 sampling
           frequencies. Available PSDs are ['powerlaw', 'turnover', 'spectrum'].
        2. Optional physical ephemeris modeling.

    :param psd: PSD to use for the common red noise signal
        ('powerlaw' (default), 'turnover', 'spectrum').
    :param noisedict: Dictionary of pulsar noise properties.
    :param gamma_common: Fixed common red process spectral index value.
        By default the spectral index is varied over [0, 7].
    :param upper_limit: Perform upper limit on common red noise amplitude.
        False by default; when True it is recommended to also fix the
        spectral index.
    :param bayesephem: Include BayesEphem model. False by default.
    :param wideband: Use wideband par and tim files; ignore ECORR.
        False by default.
    :param idx: Index of chromatic process (DM is 2, scattering would be 4).
        If set to `vary` it varies from 0 - 6 (VERY slow!).
    :param chromatic_psd: PSD to use for chromatic noise
        ('powerlaw' (default), 'turnover', 'spectrum').
    :param c_psrs: List of pulsars to use chromatic noise; 'all' uses all
        pulsars. Defaults to ['J1713+0747'].
    """
    # BUGFIX: the default used to be a mutable list literal in the signature.
    if c_psrs is None:
        c_psrs = ['J1713+0747']

    amp_prior = 'uniform' if upper_limit else 'log-uniform'

    # find the maximum time span to set GW frequency sampling
    Tspan = model_utils.get_tspan(psrs)

    # white noise
    s = white_noise_block(vary=False, wideband=wideband)

    # red noise
    s += red_noise_block(prior=amp_prior, Tspan=Tspan, components=components)

    # common red noise block
    s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
                                components=components, gamma_val=gamma_common,
                                name='gw')

    # ephemeris model
    if bayesephem:
        s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True)

    # timing model
    s += gp_signals.TimingModel()

    # chromatic noise
    sc = chromatic_noise_block(psd=chromatic_psd, idx=idx)
    if c_psrs == 'all':
        s += sc
        models = [s(psr) for psr in psrs]
    else:
        # BUGFIX: an empty c_psrs list previously left `models` undefined
        # (NameError); building the list unconditionally handles that case
        # (no pulsar gets the chromatic block, which matches the loop logic).
        models = []
        for psr in psrs:
            if psr.name in c_psrs:
                print('Adding chromatic model to PSR {}'.format(psr.name))
                snew = s + sc
                models.append(snew(psr))
            else:
                models.append(s(psr))

    # set up PTA
    pta = signal_base.PTA(models)

    # set white noise parameters
    if noisedict is None:
        print('No noise dictionary provided!...')
    else:
        # (removed dead `noisedict = noisedict` self-assignment)
        pta.set_default_params(noisedict)

    return pta
5,333,912
def restore(backup_path: str, storage_name: str, target: str or None = None, token: str or None = None) -> str:
    """Download a backup archive from remote storage and unpack it locally.

    :returns: path to the restored directory
    """
    if not token:
        token = _restore_token(storage_name)

    print(f'[{__name__}] Getting storage...')
    storage_class = get_storage_by_name(storage_name)
    storage: Storage = storage_class(token=token)

    # Files saved on a normal basis encode the original name in the
    # second-to-last path segment of the backup path.
    resource_id = backup_path.split('/')[-2]
    _, original_name = _decode_resource_id(resource_id)

    # Restoring to a caller-supplied target (files saved under /custom)
    # is not supported yet: guard clause first, then the default path.
    if target is not None:
        raise NotImplementedError()

    print(f'[{__name__}] Calculating local file path...')
    dl_target = f"{BASE_BACKUPS_DIRECTORY}/" + original_name + ".zip"
    target = f"{BASE_BACKUPS_DIRECTORY}/" + original_name
    if os.path.exists(target):
        raise ValueError(f"Path {target} is not empty. Please deal with it, then try to restore file again")

    print(f'[{__name__}] Downloading file...')
    storage.download_resource(backup_path, dl_target)
    try:
        print(f'[{__name__}] Unpacking file...')
        shutil.unpack_archive(dl_target, target, 'zip')
        return target
    finally:
        # Remove the downloaded archive whether or not unpacking succeeded.
        os.unlink(dl_target)
5,333,913
def test_f32(heavydb): """If UDF name ends with an underscore, expect strange behaviour. For instance, defining @heavydb('f32(f32)', 'f32(f64)') def f32_(x): return x+4.5 the query `select f32_(0.0E0))` fails but not when defining @heavydb('f32(f64)', 'f32(f32)') def f32_(x): return x+4.5 (notice the order of signatures in heavydb decorator argument). """ @heavydb('f32(f32)', 'f32(f64)') # noqa: F811 def f_32(x): return x+4.5 descr, result = heavydb.sql_execute( 'select f_32(0.0E0) from {heavydb.table_name} limit 1' .format(**locals())) assert list(result)[0] == (4.5,)
5,333,914
def get_message_bytes(
        file_path: Union[str, Path],
        count: int,
) -> bytes:
    """Read the ``count``-th field from a GRIB2 file, clip it to the
    northeast region, and return the re-encoded message bytes.

    (Docstring translated from Chinese.)

    Parameters
    ----------
    file_path
        Path to the GRIB2 file.
    count
        1-based field index (ecCodes GRIB key ``count``).

    Returns
    -------
    bytes
        Bytes of the re-encoded GRIB2 message.
    """
    message = load_message_from_file(file_path, count=count)
    # Clip to lon 0..180, lat 89.875..0.125 -- presumably the "northeast
    # region" of the source grid; TODO confirm extract_region's
    # (left_lon, right_lon, top_lat, bottom_lat) argument convention.
    message = extract_region(
        message,
        0, 180, 89.875, 0.125
    )
    message_bytes = eccodes.codes_get_message(message)
    # Release the ecCodes handle to avoid leaking native memory.
    eccodes.codes_release(message)
    return message_bytes
5,333,915
def print_progress_bar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█'):
    """Render a one-line terminal progress bar; call once per loop iteration.

    @params:
        iteration - Required : current iteration (Int)
        total     - Required : total iterations (Int)
        prefix    - Optional : prefix string (Str)
        suffix    - Optional : suffix string (Str)
        decimals  - Optional : positive number of decimals in percent complete (Int)
        length    - Optional : character length of bar (Int)
        fill      - Optional : bar fill character (Str)
    """
    percent = f"{100 * (iteration / float(total)):.{decimals}f}"
    done = int(length * iteration // total)
    bar = fill * done + '-' * (length - done)
    # Carriage return (no newline) keeps the bar redrawing on one line.
    print(f'\r{prefix} |{bar}| {percent}% {suffix}', end='\r')
    if iteration == total:
        # Terminate the carriage-returned line once complete.
        print()
5,333,916
def lint(ctx, error=False):
    """Lint Robot Framework test data and Python code.

    With ``error=True`` the formatters run in check-only mode and fail the
    task instead of rewriting files.
    """
    print("Lint python")
    black_command = "black --config ./pyproject.toml assertionengine/ tasks.py atest/"
    isort_command = "isort assertionengine/"
    if error:
        black_command += " --check"
        isort_command += " --check-only"
    for command in (
        "mypy --config-file ./mypy.ini assertionengine/ utest/",
        black_command,
        isort_command,
        "flake8 --config ./.flake8 assertionengine/ utest/",
    ):
        ctx.run(command)
5,333,917
def define_macos_utilities():
    """
    Set some environment variables for Darwin systems differently.

    The variables are: READLINK, SED, DATE_UTIL and LN_UTIL.  On Darwin the
    GNU-prefixed tools are preferred when installed; elsewhere the plain
    POSIX names are used.
    """
    if os.uname()[0] == 'Darwin':
        # Use the Homebrew/GNU variants only when they are present.
        for var, gnu_tool in (('READLINK', 'greadlink'),
                              ('SED', 'gsed'),
                              ('DATE_UTIL', 'gdate'),
                              ('LN_UTIL', 'gln')):
            if check_darwin(gnu_tool):
                set_env_var(var, gnu_tool)
    else:
        for var, tool in (('READLINK', 'readlink'),
                          ('SED', 'sed'),
                          ('DATE_UTIL', 'date'),
                          ('LN_UTIL', 'ln')):
            set_env_var(var, tool)
5,333,918
def discover_climate_observations(
    time_resolution: Union[
        None, str, TimeResolution, List[Union[str, TimeResolution]]
    ] = None,
    parameter: Union[None, str, Parameter, List[Union[str, Parameter]]] = None,
    period_type: Union[None, str, PeriodType, List[Union[str, PeriodType]]] = None,
) -> str:
    """
    Print/discover the available time_resolution/parameter/period_type
    combinations.

    :param time_resolution: Frequency/granularity of measurement interval
    :param parameter: Observation measure
    :param period_type: Recent or historical files
    :return: Available combinations rendered as a JSON string.
    """
    # Unset selectors default to "everything".
    if not time_resolution:
        time_resolution = [*TimeResolution]
    if not parameter:
        parameter = [*Parameter]
    if not period_type:
        period_type = [*PeriodType]

    time_resolution = parse_enumeration(TimeResolution, time_resolution)
    parameter = parse_enumeration(Parameter, parameter)
    period_type = parse_enumeration(PeriodType, period_type)

    # First pass: restrict the master mapping to the requested enums.
    filtered = {}
    for resolution, params_to_periods in TIME_RESOLUTION_PARAMETER_MAPPING.items():
        if resolution not in time_resolution:
            continue
        filtered[resolution] = {
            par: [p for p in periods if p in period_type]
            for par, periods in params_to_periods.items()
            if par in parameter
        }

    # Second pass: stringify for JSON, dropping empty parameter/period sets.
    rendered = {}
    for resolution, params_to_periods in filtered.items():
        if not params_to_periods:
            continue
        rendered[str(resolution)] = {
            str(par): [str(period) for period in periods]
            for par, periods in params_to_periods.items()
            if periods
        }

    return json.dumps(rendered, indent=4)
5,333,919
def set_template(template_name, file_name, p_name):
    """Build the e-mail message with the rendered HTML template attached."""
    body = template(template_name, file_name, p_name)
    message = MIMEMultipart()
    message['from'] = p_name
    message['subject'] = f'{file_name}'
    message.attach(MIMEText(body, 'html'))
    return message
5,333,920
def glVertex2dv(v):
    """v - seq( GLdouble, 2)

    Validates the sequence length before handing off to the native binding.
    """
    if len(v) != 2:
        raise TypeError(len(v), "2-array expected")
    _gllib.glVertex2dv(v)
5,333,921
def lazy_gettext(string):
    """A lazy version of `gettext`.

    Strings that are already wrapped in a translation proxy are returned
    unchanged so double-wrapping cannot occur.
    """
    already_lazy = isinstance(string, _TranslationProxy)
    return string if already_lazy else _TranslationProxy(gettext, string)
5,333,922
# NOTE(review): this is an API-usage stub (signature + usage count), not an
# implementation -- presumably consumed by a typing/usage-analysis tool.
def read_table(filepath_or_buffer: _io.BytesIO):
    """
    usage.dask: 4
    """
    ...
5,333,923
def toggleautowithdrawalstatus(status, fid, alternate_token=False):
    """
    Sets auto-withdrawal status of the account associated with the
    current OAuth token under the specified funding ID.

    :param status: Boolean for toggle (True enables, False disables).
    :param fid: String with funding ID for target account.
    :param alternate_token: Optional OAuth token to use instead of the
        module-level access token.
    :return: String (Either "Enabled" or "Disabled")
    :raises Exception: if status or fid is missing.
    """
    # BUGFIX: `if not status` also rejected status=False, making it
    # impossible to ever disable auto-withdrawal. Only a missing (None)
    # status is an error. Error messages also previously misspelled the
    # function name.
    if status is None:
        raise Exception('toggleautowithdrawalstatus() requires status parameter')
    if not fid:
        raise Exception('toggleautowithdrawalstatus() requires fid parameter')

    # NOTE: 'auto_withdrawl' is the spelling used by the remote API endpoint;
    # do not "fix" it.
    return r._post('/accounts/features/auto_withdrawl',
                   {
                       'oauth_token': alternate_token if alternate_token else c.access_token,
                       'enabled': status,
                       'fundingId': fid
                   })
5,333,924
def load_avenger_models():
    """
    Load each instance of data from the repository into its associated model
    at this point in the schema lifecycle.
    """
    avengers = []
    for record in fetch_avenger_data():
        # Explicitly assign each attribute of the model, so various
        # attributes can be ignored.
        avenger = Avenger(url=record.url,
                          name=record.name,
                          appearances=record.appearances,
                          current=record.current == "YES",
                          gender=record.gender,
                          probationary=parse_date(record.probationary),
                          full_reserve=parse_date(record.full_reserve, record.year),
                          year=record.year,
                          honorary=record.honorary,
                          notes=record.notes)
        # The source data tracks up to five deaths per character as numbered
        # attribute pairs (death1/return1 ... death5/return5).
        for sequence in range(1, 6):
            if not getattr(record, f"death{sequence}", None):
                # Deaths are contiguous; the first empty slot ends the list.
                break
            avenger.deaths.append(Death(
                death=getattr(record, f"death{sequence}") == "YES",       # string -> boolean
                returned=getattr(record, f"return{sequence}") == "YES",   # string -> boolean
                sequence=sequence,  # order is important!
            ))
        avengers.append(avenger)
    return avengers
5,333,925
def pytest_configure(config):
    """Scan for test files.

    Done here because other hooks tend to run once *per test*, and there's no
    reason to do this work more than once.
    """
    global test_file_tuples
    global test_file_ids

    include_ruby = config.getoption('include_ruby')
    raw_filter = config.getoption('test_file_filter')
    file_filters = (
        [re.compile(pat) for pat in raw_filter.split(',')] if raw_filter else []
    )

    # Tuples are 3-tuples of the form (scss filename, css filename, pytest
    # marker). That last one is used to carry xfail/skip, and is None for
    # regular tests.
    # "ids" are just names for the tests, in a parallel list. We just use
    # relative paths to the input file.
    test_file_tuples = []
    test_file_ids = []
    for fn in glob.glob(os.path.join(FILES_DIR, '*/*.scss')):
        relfn = os.path.relpath(fn, FILES_DIR)

        pytest_trigger = None
        if not include_ruby and relfn.startswith(('from-sassc/', 'from-ruby/')):
            pytest_trigger = pytest.skip
        elif relfn.startswith('xfail/'):
            pytest_trigger = pytest.xfail
        # A filter mismatch overrides any marker decided above.
        if file_filters and not any(rx.search(relfn) for rx in file_filters):
            pytest_trigger = pytest.skip

        test_file_tuples.append((fn, fn[:-5] + '.css', pytest_trigger))
        test_file_ids.append(fn)
5,333,926
def aggregate_points(point_layer,
                     bin_type=None,
                     bin_size=None,
                     bin_size_unit=None,
                     polygon_layer=None,
                     time_step_interval=None,
                     time_step_interval_unit=None,
                     time_step_repeat_interval=None,
                     time_step_repeat_interval_unit=None,
                     time_step_reference=None,
                     summary_fields=None,
                     output_name=None,
                     gis=None,
                     future=False):
    """
    Aggregates a point layer into areas and computes statistics about the
    points that fall within each area.

    The areas are either the polygons of ``polygon_layer`` or square/hexagonal
    bins generated at run time from ``bin_type``/``bin_size``/``bin_size_unit``.
    The count of points per area is always returned; additional statistics can
    be requested via ``summary_fields``. If the input points are time-enabled
    (instant type), the aggregation can additionally be sliced into time steps.

    ==============================  =========================================================
    **Argument**                    **Description**
    ------------------------------  ---------------------------------------------------------
    point_layer                     Required point feature layer to aggregate.
    bin_type                        Optional string, 'Square' (default) or 'Hexagon'.
                                    Required when ``polygon_layer`` is not given. For Square
                                    the size is the cell height/length; for Hexagon it is the
                                    distance between parallel sides.
    bin_size                        Optional float; bin dimension (required with ``bin_type``).
    bin_size_unit                   Optional string; one of 'Feet', 'Yards', 'Miles',
                                    'Meters', 'Kilometers', 'NauticalMiles'.
    polygon_layer                   Optional polygon feature layer to aggregate into. Either
                                    this or the bin parameters must be supplied.
    time_step_interval              Optional int; duration of each time slice.
    time_step_interval_unit         Optional string; 'Years' ... 'Milliseconds'.
    time_step_repeat_interval       Optional int; how often the time step repeat occurs.
    time_step_repeat_interval_unit  Optional string; unit of the repeat interval.
    time_step_reference             Optional datetime; reference time used to align time
                                    slices (default: epoch 0, Jan 1 1970 00:00).
    summary_fields                  Optional list of dicts, e.g.
                                    ``[{"statisticType": "Sum", "onStatisticField": "f1"}]``.
                                    Numeric statistics: Count, Sum, Mean, Min, Max, Range,
                                    Stddev, Var. String statistics: Count, Any.
    output_name                     Optional string; name of the created feature service.
    gis                             Optional GIS; defaults to the active GIS.
    future                          Optional bool; if True a GPJob is returned instead of
                                    blocking for results.
    ==============================  =========================================================

    NOTE(review): a ``context`` setting (extent/processSR/outSR/dataStore) is
    part of the underlying tool's parameter set -- it is injected into
    ``params`` by ``_set_context`` rather than accepted as a parameter here;
    confirm how callers are expected to supply it.

    :returns: result_layer : Output Features as feature layer item.

    .. code-block:: python

        # Usage Example: aggregate 911 calls into 1-km bins, yearly slices.
        agg_result = aggregate_points(calls, bin_size=1,
                                      bin_size_unit='Kilometers',
                                      time_step_interval=1,
                                      time_step_interval_unit="Years",
                                      summary_fields=[{"statisticType": "Count",
                                                       "onStatisticField": "Day"}],
                                      output_name='testaggregatepoints01')
    """
    # Snapshot of all arguments; None-valued ones are dropped below.
    kwargs = locals()

    gis = _arcgis.env.active_gis if gis is None else gis
    url = gis.properties.helperServices.geoanalytics.url

    params = {}
    for key, value in kwargs.items():
        if value is not None:
            params[key] = value

    if output_name is None:
        output_service_name = 'Aggregate Points Analysis_' + _id_generator()
        output_name = output_service_name.replace(' ', '_')
    else:
        output_service_name = output_name.replace(' ', '_')

    # Create the (initially empty) hosted service that will hold the result.
    output_service = _create_output_service(gis, output_name, output_service_name, 'Aggregate Points')

    params['output_name'] = _json.dumps({
        "serviceProperties": {"name": output_name, "serviceUrl": output_service.url},
        "itemProperties": {"itemId": output_service.itemid}})

    # The GP endpoint expects summary_fields serialized as a JSON string.
    if isinstance(summary_fields, list):
        import json
        summary_fields = json.dumps(summary_fields)

    _set_context(params)

    # Maps python parameter names to (type, server-side camelCase name).
    param_db = {
        "point_layer": (_FeatureSet, "pointLayer"),
        "bin_type": (str, "binType"),
        "bin_size": (float, "binSize"),
        "bin_size_unit": (str, "binSizeUnit"),
        "polygon_layer": (_FeatureSet, "polygonLayer"),
        "time_step_interval": (int, "timeStepInterval"),
        "time_step_interval_unit": (str, "timeStepIntervalUnit"),
        "time_step_repeat_interval": (int, "timeStepRepeatInterval"),
        "time_step_repeat_interval_unit": (str, "timeStepRepeatIntervalUnit"),
        "time_step_reference": (_datetime, "timeStepReference"),
        "summary_fields": (str, "summaryFields"),
        "output_name": (str, "outputName"),
        "context": (str, "context"),
        "output": (_FeatureSet, "Output Features"),
    }
    return_values = [
        {"name": "output", "display_name": "Output Features", "type": _FeatureSet},
    ]

    try:
        _execute_gp_tool(gis, "AggregatePoints", params, param_db, return_values, _use_async, url, True, future=future)
        return output_service
    except:
        # Best-effort cleanup: drop the pre-created service on any failure,
        # then surface the original error.
        output_service.delete()
        raise
5,333,927
def scattered_embedding_lookup(params, values, dimension, name=None, hash_key=None):
  """Look up embeddings for `values` via parameter hashing ("feature hashing").

  The i-th embedding component of a value v is the weight whose index is a
  fingerprint of the pair (v, i), following the model-compression scheme of
  http://arxiv.org/pdf/1504.04788.pdf. This removes the need for a
  pre-determined vocabulary and keeps memory fixed regardless of the number
  of distinct features; unlike shared out-of-vocabulary hash buckets, each
  token almost surely gets a unique embedding. The cost is one hash per
  scalar of the embedding instead of one per token.

  If `params` is a list, it is treated as a partition of the embedding
  parameters: all tensors must have the same length, except the leading
  ones, which may hold one extra element (e.g. 10 parameters may be split
  into 4 tensors of lengths [3, 3, 2, 2]).

  Args:
    params: A `Tensor`, `list` of `Tensors`, or `PartitionedVariable`.
      Each tensor must be rank 1 with a fully-defined shape.
    values: `Tensor` of values to embed, shape `[d0, ..., dn]`.
    dimension: Embedding dimension (must not be None).
    name: Optional op name.
    hash_key: Optional hash key forwarded to `FingerprintCat64` when
      combining cross fingerprints in SparseFeatureCrossOp.

  Returns:
    A `Tensor` of shape `[d0, ..., dn, dimension]`.

  Raises:
    ValueError: if `dimension` is None (or, downstream, if the partition
      size is invalid).
  """
  if dimension is None:
    raise ValueError("You must specify dimension.")
  # Delegate to the sampled variant with no candidate sampling.
  return _sampled_scattered_embedding_lookup(
      params,
      values,
      dimension=dimension,
      sampled_candidates=None,
      hash_key=hash_key,
      name=name)
5,333,928
def supported_camera_list():
    """Return the camera models that gphoto2 reports as supported."""
    # Abort up-front if the gphoto2 binary is unavailable.
    check_gphoto2()
    # TODO: Error checking/Handling
    proc = subprocess.run("gphoto2 --list-cameras",
                          shell=True, capture_output=True)
    # Entries are tab-indented, one per line; the first chunk is a header.
    raw_entries = proc.stdout.decode("utf-8").split("\n\t")
    cleaned = [entry.strip("\n").strip('"') for entry in raw_entries]
    return cleaned[1:]
5,333,929
def register_multiple_fake_users(user_number: int, plans_number: int):
    """Register several randomly generated users.

    :param user_number: number of fake users to create
    :param plans_number: number of plans per user
    """
    # Imported lazily so the faker dependency is only needed when faking.
    from faker import Faker
    from app.models.fake.profile import ProfileProvider

    fake = Faker()
    fake.add_provider(ProfileProvider)

    for _ in range(user_number):
        register_fake_user(fake, plans_number)
5,333,930
def get_features_and_labels(
    instances: "Iterable[NewsHeadlineInstance]",
    feature_generator: "Callable[[NewsHeadlineInstance], dict[str, Any]]",
) -> "tuple[list[dict[str, Any]], list[int]]":
    """Return a tuple of the features and labels for each instance within the dataset.

    Iterates the dataset exactly once (so generators are fine), applying
    ``feature_generator`` to each instance and collecting its ``label``.

    FIX: the annotations used the malformed one-argument form ``dict[str]``
    (should be ``dict[str, Any]``) and were evaluated eagerly; they are now
    correct and quoted (lazy), so the function also imports cleanly when the
    annotated project types are not in scope.
    """
    features = []
    labels = []
    for instance in instances:
        features.append(feature_generator(instance))
        labels.append(instance.label)
    return features, labels
5,333,931
def countBarcodeStats(bcseqs,chopseqs='none',bcs = ["0","1"],use_specific_beginner=None):
    """Count per-barcode statistics over the decoded barcode sequences.

    This function uses edlib-decoded sequences to count matches to the given
    barcode symbols. ``chopseqs`` can be 'left', 'right', 'both', or 'none'
    and tells the program to chop one barcode off the corresponding end(s)
    of each sequence before counting.

    Args:
        bcseqs: mapping of barcode name -> list of decoded sequences. Each
            element of a sequence may be an int (-1 = undecoded) or a
            string; markers containing "B"/"E" denote begin/end sentinels.
            The special key "conditions" is skipped.
        chopseqs: which end(s) to trim one symbol from ('left', 'right',
            'both', 'none').
        bcs: the two barcode symbols counted, e.g. ["0", "1"]
            (referred to below as P = bcs[0] and J = bcs[1]).
        use_specific_beginner: if given, only sequences containing this
            element are kept.

    Returns:
        Tuple of four dicts keyed by barcode name:
        - all_lists: per-sequence [P count, J count]
        - run_lists: per-sequence longest run of the P+J pattern
          (computed via longestRun -- defined elsewhere in this module)
        - switch_lists: per-sequence [PJ count, JP count] switch counts
        - first_last: (count of J-first, count of J-last, total sequences
          of length > 1)
    """
    # NOTE(review): x, o1list, o2list are never used, and pcount/jcount/
    # pjcount/jpcount are accumulated but never returned.
    x=[]
    o1list = []
    o2list = []
    pcount = []
    jcount = []
    pjcount = []
    jpcount = []
    all_lists = {}
    switch_lists = {}
    run_lists = {}
    first_last = {}
    for bc in bcseqs:
        if(bc=="conditions"):
            continue
        seqs = []
        for seq in bcseqs[bc]:
            # for every sequence we want to eliminate where it turns to -1
            curseq = ""
            if(len(seq)==0):
                continue
            elif((use_specific_beginner is not None) and (use_specific_beginner not in seq)):
                continue
            elif("B" in str(seq[0]) or "E" in str(seq[-1])):
                # this sequence is already forwards
                for element in seq:
                    if("B" in str(element)):
                        continue
                    elif(element == -1):
                        continue
                    elif('E' in str(element)):
                        break
                    else:
                        curseq+=str(element)
                seqs += [curseq]
            elif("E" in str(seq[0]) or "B" in str(seq[-1])):
                # turn the seq forwards (it was read in reverse)
                for element in seq[::-1]:
                    if("B" in str(element)):
                        continue
                    elif(element == -1):
                        continue
                    elif('E' in str(element)):
                        break
                    else:
                        curseq+=str(element)
                seqs += [curseq]
        seqschop = []
        curpcount = 0
        curjcount = 0
        curjpcount = 0
        curpjcount = 0
        curbclist = []
        curswlist = []
        currunslist = []
        curfirstlast = [0,0,0]
        for a in seqs:
            # Trim one symbol per requested end before counting.
            anew = a
            if(chopseqs=='right'):
                anew = a[:-1]
            elif(chopseqs == 'left'):
                anew = a[1:]
            elif(chopseqs == 'both'):
                anew = a[1:-1]
            #if(len(anew)>0):
            seqschop+=[anew]
            pct = anew.count(bcs[0])
            jct = anew.count(bcs[1])
            curbclist+=[[pct,jct]]
            curpcount+=pct
            curjcount+=jct
            # Switch counts: occurrences of "PJ" and "JP" substrings.
            pjct = anew.count("".join(bcs))
            jpct = anew.count("".join(bcs[::-1]))
            curswlist += [[pjct,jpct]]
            curpjcount+=pjct
            curjpcount+=jpct
            # NOTE(review): longest run is computed on the *untrimmed* a.
            currunslist += [longestRun(a,"".join(bcs))]
            if(len(anew)>1):
                if(anew[0]==bcs[1]):
                    curfirstlast[0]+=1 # J in the first position
                if(anew[-1]==bcs[1]):
                    curfirstlast[1]+=1 # J in the last position
                curfirstlast[2]+=1 # this one counts all seqs
        first_last.update({bc:tuple(curfirstlast)})
        run_lists.update({bc:currunslist})
        all_lists.update({bc:curbclist})
        switch_lists.update({bc:curswlist})
        pcount+=[curpcount]
        jcount+=[curjcount]
        jpcount +=[curjpcount]
        pjcount +=[curpjcount]
    return all_lists,run_lists,switch_lists,first_last
5,333,932
def update_world():
    """Update function for our world.

    Called once per frame: accumulates elapsed time and updates the cursor.
    Relies on module-level ``clock``, ``cursor`` and ``millis_elapsed``.
    """
    global millis_elapsed
    # clock.get_time() is the time since the last tick, so millis_elapsed
    # is the total time elapsed since the world began.
    millis_elapsed += clock.get_time()
    cursor.update()
5,333,933
def on_post_message(data, token):
    """Clients send this event to when the user posts a message.

    All messages here are broadcasted to all room members.

    Tags the message with the session's room id (0 when none), forwards it
    to the messages API, and refreshes the stored token unless the server
    says it has expired (401).
    """
    try:
        data['roomid'] = session['roomid']
    # FIX: was a bare ``except:`` which also swallowed KeyboardInterrupt /
    # SystemExit; only a missing session key should fall back to room 0.
    except KeyError:
        data['roomid'] = 0
    # NOTE(review): ``raise_for_status`` is not a kwarg of stock
    # requests.post -- this presumably is a project wrapper; confirm.
    rv = requests.post('/api/messages', json=data,
                       headers={'Authorization': 'Bearer ' + token},
                       raise_for_status=False)
    if rv.status_code != 401:
        session['token'] = token  # save token, disconnect() might need it
    else:
        emit('expired_token')
5,333,934
def test_DiskCache():
    """Unit tests for DiskCache class"""
    workdir = tempfile.mkdtemp()
    try:
        # Deep path on purpose: exercises automatic creation of subfolders.
        db_path = os.path.join(
            workdir, 'subfolder1/subfolder2/DiskCache.sqlite'
        )
        cache = DiskCache(db_fpath=db_path, max_depth=3, is_lru=False)
        assert 0 not in cache
        cache[0] = 'zero'
        assert cache[0] == 'zero'
        cache[1] = 'one'
        cache[2] = 'two'
        cache[3] = 'three'
        # max_depth=3 means the oldest entry (key 0) must have been evicted.
        assert 0 not in cache
        assert cache[3] == 'three'
    finally:
        shutil.rmtree(workdir, ignore_errors=True)
    logging.info('<< PASS : test_DiskCache >>')
5,333,935
def is_underflow(bin_nd, hist):
    """Return True when global bin number *bin_nd* is an underflow bin.

    Works for any number of dimensions.
    """
    # In the flattened 1-D numbering, the underflow bin is always index 0.
    return get_flat1d_bin(bin_nd, hist, False) == 0
5,333,936
def evaluate_speed(model, dataloader):
    """Measure (and print) only the inference speed of the given model."""
    with torch.no_grad():
        start = time.time()
        for inputs, _ in dataloader:
            model(inputs)
        elapsed = time.time() - start
    minutes, seconds = elapsed // 60, elapsed % 60
    print('Evaluation complete in {:.0f}m {:.0f}s'.format(minutes, seconds))
    # Per-sample latency is total time over the full dataset size.
    print('Inference speed was {:.5f}s per sample at batch size {:d}'.format(
        elapsed / float(len(dataloader.dataset)), dataloader.batch_size))
5,333,937
def deprecated_func_docstring(foo=None):
    """DEPRECATED. Deprecated function."""
    # Identity pass-through, retained only for backward compatibility.
    return foo
5,333,938
def inv_recv_attr(status):
    """
        Set field attributes for inv_recv table

        Adjusts readability/writability of the received-shipment form
        fields according to the shipment workflow status (in-process,
        sent, or closed). Mutates the shared table definition in-place.

        @param status: one of the SHIP_STATUS_* workflow constants
    """
    s3db = current.s3db
    settings = current.deployment_settings

    table = s3db.inv_recv

    # Fields hidden/locked regardless of status.
    table.sender_id.readable = table.sender_id.writable = False
    table.grn_status.readable = table.grn_status.writable = False
    table.cert_status.readable = table.cert_status.writable = False
    table.eta.readable = False
    table.req_ref.writable = True
    if status == SHIP_STATUS_IN_PROCESS:
        # Draft shipment: reference fields are still editable.
        if settings.get_inv_recv_ref_writable():
            f = table.recv_ref
            f.writable = True
            f.widget = lambda f, v: \
                StringWidget.widget(f, v, _placeholder = current.T("Leave blank to have this autogenerated"))
        else:
            # Reference will be autogenerated, so don't show it yet.
            table.recv_ref.readable = False
        table.send_ref.writable = True
        table.sender_id.readable = False
    else:
        # Shipment already dispatched/closed:
        # Make all fields writable False
        for field in table.fields:
            table[field].writable = False
        if settings.get_inv_recv_req():
            s3db.inv_recv_req.req_id.writable = False
        if status == SHIP_STATUS_SENT:
            # Receiving side may still record date, recipient and comments.
            table.date.writable = True
            table.recipient_id.readable = table.recipient_id.writable = True
            table.comments.writable = True
5,333,939
async def test_departures_error_server(httpx_mock):
    """Test server error handling."""
    # Every HTTP request made during the test returns a 500 with body "error".
    httpx_mock.add_response(data="error", status_code=500)
    rmv = RMVtransport()
    station_id = "3006904"
    # NOTE(review): there is no exception assertion here, so the test only
    # verifies that get_departures does not raise on a 500 -- presumably
    # the client handles server errors internally. Confirm that this is
    # the intended contract (vs. a missing ``pytest.raises`` block).
    await rmv.get_departures(station_id)
5,333,940
def get_all_nodes(starting_node : 'NodeDHT') -> 'list[NodeDHT]':
    """Return all nodes in the DHT ring, starting from *starting_node*.

    Walks the circular ``succ`` (successor) pointers until the traversal
    arrives back at the starting node.
    """
    nodes = [starting_node]
    # BUGFIX: previously ``node`` was initialised to ``starting_node``
    # itself, so the while-condition was immediately false and only the
    # starting node was ever returned. Start the walk at the successor.
    node = starting_node.succ
    while node != starting_node:
        nodes.append(node)
        node = node.succ
    return nodes
5,333,941
def reversed_lines(fin):
    """Generate the lines of file in reverse order."""
    # Characters of the current (reversed) line are accumulated here.
    pending = ''
    for block in reversed_blocks(fin):
        if PY3PLUS:
            block = block.decode("utf-8")
        for ch in reversed(block):
            # A newline terminates the line accumulated so far (unless we
            # haven't collected anything yet, e.g. consecutive newlines at
            # a block boundary).
            if ch == '\n' and pending:
                yield pending[::-1]
                pending = ''
            pending += ch
    # Emit the first line of the file, which has no preceding newline.
    if pending:
        yield pending[::-1]
5,333,942
def get_uvj(field, v4id): """Get the U-V and V-J for a given galaxy Parameters: field (str): field of the galaxy v4id (int): v4id from 3DHST Returns: uvj_tuple (tuple): tuple of the form (U-V, V-J) for the input object from mosdef """ # Read the file uvj_df = ascii.read(imd.loc_uvj).to_pandas() # Get the object from mosdef_df, since we need id and not v4id mosdef_obj = get_mosdef_obj(field, v4id) # Get the input object obj = uvj_df[np.logical_and( uvj_df['field'] == field, uvj_df['id'] == mosdef_obj['ID'])] # Get the U-V and V-J for that object try: u_v = obj['u_v'].iloc[0] v_j = obj['v_j'].iloc[0] uvj_tuple = (u_v, v_j) except IndexError: sys.exit(f'Could not find object ({field}, {v4id}) in uvj_df') return uvj_tuple
5,333,943
def import_data_from_folder2(folder_path):
    """
    Import 1-minute historical futures data from CSV files under a folder.

    Walks *folder_path* recursively, dispatches every ``.csv`` file to a
    pool of Consumer worker processes via a JoinableQueue, waits for all
    tasks to finish, then tallies successes/failures from the result queue.

    :param folder_path: root directory to scan for ``.csv`` files
    :return: None (results are logged)
    """
    logger.info("导入历史数据 开始 %s", folder_path)
    datetime_start = datetime.now()
    # Collect the list of CSV files to import.
    file_path_list = []
    for dir_path, sub_dir_path_list, file_name_list in os.walk(folder_path):
        for file_name in file_name_list:
            file_base_name, file_extension = os.path.splitext(file_name)
            if file_extension.lower() != '.csv':
                continue
            file_path = os.path.join(dir_path, file_name)
            file_path_list.append(file_path)
    # Total number of files (used for progress reporting).
    file_count_tot = len(file_path_list)
    # Build the consumer (worker process) pool.
    task_queue = JoinableQueue()
    result_queue = Queue()
    consumer_list = []
    worker_num = 4  # cpu_count() would be 8 cores -- too heavy on this machine
    logger.info("%d workers will be created", worker_num)
    for n in range(worker_num):
        consumer = Consumer(task_queue, result_queue, next_trade_date_dic)  # , str(n)
        consumer.start()
        consumer_list.append(consumer)
    # Enqueue one task per file.
    for file_count, file_path in enumerate(file_path_list):
        # data_count = load_csv_2_db(file_count, file_count_tot, file_path)
        task_params = (file_count, file_count_tot, file_path)
        task_queue.put(task_params)
    # Wait until every queued task has been processed.
    logging.info("等待全部任务执行结束")
    task_queue.join()
    logging.info("全部任务执行结束,开始结束进程")
    # One None sentinel per worker tells each consumer to shut down.
    for n in range(worker_num):
        task_queue.put(None)
    task_queue.join()
    # Check whether any file failed and accumulate imported-row counts.
    logger.info("统计执行结果")
    err_count = 0
    data_count_tot = 0
    while True:
        try:
            result = result_queue.get(timeout=1)
            file_count, file_count_tot, file_path, data_count, exp = result
            if exp is None:
                data_count_tot += data_count
            else:
                logger.exception("%d) %5d/%4d 处理文件错误:%s \n%s",
                                 err_count, file_count, file_count_tot, file_path, exp)
                err_count += 1
        except Empty:
            # Queue drained: all results consumed.
            break
    datetime_end = datetime.now()
    logger.info("导入历史数据 结束。 %d 数据文件 %d 数据被导入 %d 导入失败,耗时:%s",
                file_count_tot, data_count_tot, err_count, datetime_end - datetime_start)
5,333,944
def by_regex(regex_tuples, default=True):
    """Only call function if regex_tuples is a list of (regex, filter?)

    where if the regex matches the requested URI, then the flow is applied
    or not based on if filter? is True or False.

    For example:

        from aspen.flows.filter import by_regex

        @by_regex( ( ("/secret/agenda", True), ( "/secret.*", False ) ) )
        def use_public_formatting(request):
            ...

    would call the 'use_public_formatting' flow step only on /secret/agenda
    and any other URLs not starting with /secret.

    The first matching rule decides; when no rule matches, ``default``
    determines whether the flow step runs.
    """
    # BUGFIX: regex_tuples is documented (and used) as a sequence of
    # (regex, disposition) pairs; the old code called .iteritems() -- a
    # Python-2 dict method -- on it, which fails for the documented input.
    regex_res = [(re.compile(regex), disposition)
                 for regex, disposition in regex_tuples]

    def filter_function(function):
        def function_filter(request, *args):
            for regex, disposition in regex_res:
                # BUGFIX: compiled patterns have .match(), not .matches().
                if regex.match(request.line.uri):
                    if disposition:
                        # BUGFIX: forward `request` to the flow step; it was
                        # previously dropped (function(*args)).
                        return function(request, *args)
                    # First matching rule is an exclusion: skip the step.
                    # (Previously execution fell through to the default,
                    # making exclusion rules ineffective.)
                    return None
            if default:
                return function(request, *args)
        algorithm._transfer_func_name(function_filter, function)
        return function_filter
    return filter_function
5,333,945
def deserialize(name):
    """Get the activation from name.

    :param name: name of the method. among the implemented Keras
        activation function.
    :return: the corresponding backward activation function.
    """
    name = name.lower()
    # Name -> backward function dispatch table (RELU and RELU_ share one).
    backward_by_name = {
        SOFTMAX: backward_softmax,
        ELU: backward_elu,
        SELU: backward_selu,
        SOFTPLUS: backward_softplus,
        SOFTSIGN: backward_softsign,
        SIGMOID: backward_sigmoid,
        TANH: backward_tanh,
        RELU: backward_relu,
        RELU_: backward_relu,
        EXPONENTIAL: backward_exponential,
        LINEAR: backward_linear,
    }
    if name in backward_by_name:
        return backward_by_name[name]
    raise ValueError("Could not interpret "
                     "activation function identifier:", name)
5,333,946
def expected_improvement_search(features, genotype):
    """ implementation of CATE-DNGO-LS on the DARTS search space

    Bayesian-optimization loop: seed a pool of evaluated architectures,
    then repeatedly fit a DNGO surrogate on the pool, score all candidate
    encodings with an Expected-Improvement acquisition, evaluate the
    proposed architectures, and track the incumbent. Results are dumped
    to ``<dataset>/<output_path>/dim<dim>/run_<seed>.json``.

    :param features: tensor of candidate architecture encodings (N x dim)
    :param genotype: candidate genotypes aligned with ``features``
    """
    # Incumbent bookkeeping.
    CURR_BEST_VALID = 0.
    CURR_BEST_TEST = 0.
    CURR_BEST_GENOTYPE = None
    PREV_BEST = 0
    MAX_BUDGET = args.max_budgets
    window_size = 1024
    # NOTE(review): ``round`` shadows the builtin; it counts stagnant
    # iterations since the last incumbent improvement.
    round = 0
    counter = 0
    visited = {}
    best_trace = defaultdict(list)
    trainer = Train()
    # Initial random pool of evaluated architectures.
    feat_samples, geno_samples, valid_label_samples, test_label_samples, visited = get_samples(features, genotype, visited, trainer)
    for feat, geno, acc_valid, acc_test in zip(feat_samples, geno_samples, valid_label_samples, test_label_samples):
        counter += 1
        if acc_valid > CURR_BEST_VALID:
            CURR_BEST_VALID = acc_valid
            CURR_BEST_TEST = acc_test
            CURR_BEST_GENOTYPE = geno
        best_trace['validation_acc'].append(float(CURR_BEST_VALID))
        best_trace['test_acc'].append(float(CURR_BEST_TEST))
        best_trace['genotype'].append(str(CURR_BEST_GENOTYPE))
        best_trace['counter'].append(counter)

    while counter < MAX_BUDGET:
        # Restart: after args.rounds iterations without improvement,
        # inject a fresh batch of random samples into the pool.
        if round == args.rounds:
            feat_samples, geno_samples, valid_label_samples, test_label_samples, visited = get_samples(features, genotype, visited, trainer)
            for feat, geno, acc_valid, acc_test in zip(feat_samples, geno_samples, valid_label_samples, test_label_samples):
                counter += 1
                if acc_valid > CURR_BEST_VALID:
                    CURR_BEST_VALID = acc_valid
                    CURR_BEST_TEST = acc_test
                    CURR_BEST_GENOTYPE = geno
                best_trace['validation_acc'].append(float(CURR_BEST_VALID))
                best_trace['test_acc'].append(float(CURR_BEST_TEST))
                best_trace['genotype'].append(str(CURR_BEST_GENOTYPE))
                best_trace['counter'].append(counter)
            round = 0

        print("current counter: {}, best validation acc.: {}, test acc.: {}".format(counter, CURR_BEST_VALID, CURR_BEST_TEST))
        print("current best genotype: {}".format(CURR_BEST_GENOTYPE))

        # Fit the DNGO surrogate on the current pool.
        model = DNGO(num_epochs=30, n_units=128, do_mcmc=False, normalize_output=False)
        model.train(X=feat_samples.numpy(), y=valid_label_samples.view(-1).numpy(), do_optimize=True)
        print(model.network)

        # Predict mean/variance for all candidates in window_size chunks
        # to bound memory usage.
        m = []
        v = []
        chunks = int(features.shape[0] / window_size)
        if features.shape[0] % window_size > 0:
            chunks += 1
        features_split = torch.split(features, window_size, dim=0)
        for i in range(chunks):
            m_split, v_split = model.predict(features_split[i].numpy())
            m.extend(list(m_split))
            v.extend(list(v_split))
        mean = torch.Tensor(m)
        sigma = torch.Tensor(v)
        u = (mean - torch.Tensor([args.objective]).expand_as(mean)) / sigma
        # Expected-Improvement acquisition.
        # NOTE(review): textbook EI is sigma * (u*cdf(u) + pdf(u)); the
        # extra "+ 1" term here deviates from that -- confirm intent.
        ei = sigma * (u * stats.norm.cdf(u) + 1 + stats.norm.pdf(u))

        feat_next, geno_next, label_next_valid, label_next_test, visited = \
            propose_location(ei, features, genotype, visited, trainer)

        # add proposed networks to the pool
        for feat, geno, acc_valid, acc_test in zip(feat_next, geno_next, label_next_valid, label_next_test):
            if acc_valid.item() > CURR_BEST_VALID:
                print('FIND BEST VALID FROM DNGO')
                CURR_BEST_VALID = acc_valid.item()
                CURR_BEST_TEST = acc_test.item()
                CURR_BEST_GENOTYPE = geno
            feat_samples = torch.cat((feat_samples, feat.view(1, -1)), dim=0)
            geno_samples.append(geno)
            valid_label_samples = torch.cat((valid_label_samples.view(-1, 1), acc_valid.view(1, 1)), dim=0)
            test_label_samples = torch.cat((test_label_samples.view(-1, 1), acc_test.view(1, 1)), dim=0)
            counter += 1
            best_trace['validation_acc'].append(float(CURR_BEST_VALID))
            best_trace['test_acc'].append(float(CURR_BEST_TEST))
            best_trace['genotype'].append(str(CURR_BEST_GENOTYPE))
            best_trace['counter'].append(counter)
            if counter > MAX_BUDGET:
                break

        # Optional local search around the best proposals.
        if args.computation_aware_search:
            feat_samples, valid_label_samples, test_label_samples, visited, best_trace, counter, CURR_BEST_VALID, CURR_BEST_TEST, CURR_BEST_GENOTYPE = \
                computation_aware_search(label_next_valid, feat_samples, valid_label_samples, test_label_samples, visited, best_trace, counter, args.topk, features, genotype, CURR_BEST_VALID, CURR_BEST_TEST, CURR_BEST_GENOTYPE, MAX_BUDGET, trainer)

        # Track stagnation: no improvement increments the restart counter.
        if PREV_BEST < CURR_BEST_VALID:
            PREV_BEST = CURR_BEST_VALID
        else:
            round += 1

    # Persist the best-so-far trace as JSON.
    res = dict()
    res['validation_acc'] = best_trace['validation_acc']
    res['test_acc'] = best_trace['test_acc']
    res['genotype'] = best_trace['genotype']
    res['counter'] = best_trace['counter']
    save_path = args.dataset + '/' + args.output_path + '/' + 'dim{}'.format(args.dim)
    if not os.path.exists(save_path):
        os.makedirs(save_path, exist_ok=True)
    print('save to {}'.format(save_path))
    fh = open(os.path.join(save_path, 'run_{}.json'.format(args.seed)), 'w')
    json.dump(res, fh)
    fh.close()
5,333,947
def delete_item_image(itemid, imageid):
    """Delete an image from an item.

    Args:
        itemid (int) - item's id
        imageid (int) - image's id

    Status Codes:
        204 No Content - when image deleted successfully
    """
    endpoint = f'/items/{itemid}/images/{imageid}'
    return delete(endpoint, auth=True, accepted_status_codes=[204])
5,333,948
def plot_results_unordered(predicted_data, true_data, plt_file):
    """Plot actual vs predicted results and save the figure to *plt_file*.

    The commented-out lines below are an alternative styling for the
    "DWS" plot variant; toggle them (and comment out the defaults) to
    switch.
    """
    fig = plt.figure(facecolor='white', figsize=(20,5))
    # fig = plt.figure(facecolor='white', figsize=(20,15)) # uncomment for DWS plot
    axis = fig.add_subplot(111)
    axis.plot(true_data, label='Truth') # comment for non-DWS plot
    plt.plot(predicted_data, label='Predicted') # comment for non-DWS plot
    # plt.plot(predicted_data, label='Modelled flow', color='black', linewidth=1) # uncomment for DWS plot
    # plt.plot(true_data, label='Recorded flow', color='green', linewidth=0.75) # uncomment for DWS plot
    plt.ylabel('$Flow (m^3/s)$', fontsize=14)
    plt.xlabel('Time (hours)', fontsize=14)
    plt.tick_params(axis='both', labelsize=14)
    # plt.ylim(0, 150) # uncomment for DWS plot
    plt.legend(prop={'size': 14})
    # NOTE(review): the ``papertype`` kwarg was deprecated and removed in
    # recent matplotlib versions -- confirm the pinned matplotlib supports it.
    plt.savefig(plt_file, bbox_inches='tight', dpi=200, papertype='a3')
    plt.close()
5,333,949
def bytes_(s, encoding='utf-8', errors='strict'): # pragma: no cover
    """Coerce *s* toward a byte string.

    Text (``text_type``) input is encoded with *encoding*/*errors*;
    any other value is returned unchanged.
    """
    return s.encode(encoding, errors) if isinstance(s, text_type) else s
5,333,950
def create_job_id(success_file_path):
    """Create job id prefix with a consistent naming convention based on the
    success file path to give context of what caused this job to be
    submitted.

    The rules for success file name -> job id are:
    1. slashes become dashes
    2. any remaining character that is not alphanumeric, dash or
       underscore is replaced with underscore
    3. a uuid is appended for uniqueness

    Note, gcf-ingest- can be overridden with environment variable JOB_PREFIX.
    """
    prefix = os.getenv('JOB_PREFIX', constants.DEFAULT_JOB_PREFIX)
    sanitized_path = constants.NON_BQ_JOB_ID_REGEX.sub(
        '_', success_file_path.replace('/', '-'))
    # The uuid suffix adds uniqueness in case we have to "re-process" a
    # success file that is republished (e.g. to fix a bad batch of data)
    # or handle multiple load jobs for a single success file.
    job_id = prefix + sanitized_path + str(uuid.uuid4())
    # BigQuery job ids are limited to 1024 characters.
    return job_id[:1024]
5,333,951
def test_stochatreat_within_strata_no_probs(n_treats, stratum_cols, df):
    """
    Tests that within strata treatment assignment counts are only as far
    from the required counts as misfit assignment randomization allows
    with equal treatment assignment probabilities but a differing number
    of treatments
    """
    # With k equally likely treatments the LCM of the probability
    # denominators is simply k.
    equal_probs = [1 / n_treats] * n_treats
    misfit_bound = n_treats

    assignments = stochatreat(
        data=df,
        stratum_cols=stratum_cols,
        treats=n_treats,
        idx_col="id",
        random_state=42,
    )
    comp = compute_count_diff(assignments, equal_probs)

    assert_msg = """The counts differences exceed the bound that misfit allocation should not exceed"""
    assert (comp["count_diff"] < misfit_bound).all(), assert_msg
5,333,952
def normal_transform(matrix):
    """Compute the 3x3 matrix which transforms normals given an affine vector transform.

    Normals transform by the inverse-transpose of the linear (3x3) part of
    the affine matrix, so that they stay perpendicular to surfaces under
    non-uniform scaling/shear.
    """
    linear_part = matrix[:3,:3]
    return inv(numpy.transpose(linear_part))
5,333,953
async def async_unload_entry(hass, config_entry):
    """Unload OMV config entry.

    Unloads all platforms for the entry; on success, resets the controller
    and drops it from hass.data. Returns whether the unload succeeded.
    """
    unload_ok = await hass.config_entries.async_unload_platforms(
        config_entry, PLATFORMS
    )
    if unload_ok:
        controller = hass.data[DOMAIN][config_entry.entry_id]
        await controller.async_reset()
        hass.data[DOMAIN].pop(config_entry.entry_id)
    # BUGFIX: previously returned True unconditionally, which told Home
    # Assistant the unload succeeded even when platform unload failed,
    # leaving platforms attached to a detached entry.
    return unload_ok
5,333,954
def test_scenario_delete_meta_warning(mp):
    """
    Scenario.delete_meta works but raises a deprecation warning.

    This test can be removed once Scenario.delete_meta is removed.
    """
    scen = ixmp.Scenario(mp, **DANTZIG)
    sample_meta = {"sample_int": 3, "sample_string": "string_value"}
    key_to_remove = "sample_string"
    scen.set_meta(sample_meta)

    # The deprecated API must still work, but must warn.
    with pytest.warns(DeprecationWarning):
        scen.delete_meta(key_to_remove)

    remaining = copy.copy(sample_meta)
    del remaining[key_to_remove]
    assert scen.get_meta() == remaining
5,333,955
def create_prediction_data(validation_file: typing.IO) -> dict:
    """Create a dictionary object suitable for prediction.

    Reads the validation CSV and groups entries per race id. Each entry
    holds the numeric feature row (columns 5 onward, empty cells become 0),
    an empty prediction slot, and the recorded finishing position. Horses
    with a finishing position below 1 (did not run) are skipped, though
    their race key is still created.
    """
    reader = csv.DictReader(validation_file)
    races = {}

    for row in reader:
        race_id = row["EntryID"]
        finish_pos = float(row["Placement"])
        races.setdefault(race_id, [])

        # Skip horses that didn't run.
        if finish_pos < 1:
            continue

        # Features start at the fifth column; blank cells count as zero.
        raw_feats = list(row.values())[4:]
        feats = np.array(
            [float(v if len(str(v)) > 0 else 0) for v in raw_feats]
        )
        races[race_id].append(
            {"data": feats.reshape(1, -1),
             "prediction": None,
             "finish_pos": finish_pos}
        )

    return races
5,333,956
def username(request):
    """ Returns ESA FTP username """
    # pytest fixture-style accessor: reads the --username command-line
    # option (presumably registered via pytest_addoption in conftest.py --
    # confirm).
    return request.config.getoption("--username")
5,333,957
def complete_data(df):
    """Add some temporal columns to the dataset

    - day of the week
    - hour of the day
    - minute

    Parameters
    ----------
    df : pandas.DataFrame
        Input data ; must contain a `ts` column

    Returns
    -------
    pandas.DataFrame
        Data with additional columns `day`, `hour` and `minute`
    """
    logger.info("Complete some data")
    # Work on a copy so the caller's frame is untouched.
    df = df.copy()
    # One derived column per timestamp attribute.
    extractors = (
        ('day', lambda x: x.weekday()),
        ('hour', lambda x: x.hour),
        ('minute', lambda x: x.minute),
    )
    for column, extract in extractors:
        df[column] = df['ts'].apply(extract)
    return df
5,333,958
# Compiled once at import time; 12 lowercase hex digits, no separators.
_NORMALIZED_MAC_RE = re.compile(r'^[a-f0-9]{12}$')

def is_valid_mac_address_normalized(mac):
    """Validates that the given MAC address has what we call a normalized
    format.

    We've accepted the HEX only format (lowercase, no separators) to be
    generic.

    FIX: uses ``fullmatch`` instead of ``match`` with ``$`` -- with
    ``match``, ``$`` also matches just before a trailing newline, so
    "aabbccddeeff\\n" was wrongly accepted. The pattern is also compiled
    once instead of on every call.
    """
    return _NORMALIZED_MAC_RE.fullmatch(mac) is not None
5,333,959
def cleanup():
    """Resource cleanup."""
    # Close the module-level `mega` session/handle before terminating.
    mega.close()
    print('Resource cleanup completed.')
    # exit(0) terminates the interpreter: this helper is intended as the
    # final call (e.g. from a signal/atexit handler), not a reusable utility.
    exit(0)
5,333,960
def get_Y(data):
    """
    Function: convert pandas data table to sklearn Y variable

    Arguments
    ---------
    data: pandas data table with "H" and "sigma" columns

    Result
    ------
    Y[:,:]: float
        sklearn Y variable of shape (n_samples, 2): column 0 is H,
        column 1 is sigma
    """
    # Stack the two columns as rows, then transpose to (n_samples, 2).
    return np.vstack((data["H"], data["sigma"])).T
5,333,961
def get_bbox(mask, show=False):
    """
    Get the bbox for a binary mask

    Args:
        mask: a binary mask (2-D array; non-zero pixels are foreground)
        show: if True, draw the box on the mask, display it, and exit()

    Returns:
        bbox: (row_min, row_max, col_min, col_max)
            Note: np.where(mask)[0] yields row indices (axis 0), so the
            first two entries are rows -- the previous docstring claimed
            columns first, which contradicted the code below.
    """
    area_obj = np.where(mask != 0)
    bbox = np.min(area_obj[0]), np.max(area_obj[0]), np.min(area_obj[1]), np.max(area_obj[1])
    if show:
        # cv2 points are (x, y) == (col, row), hence (bbox[2], bbox[0]).
        cv2.rectangle(mask, (bbox[2], bbox[0]), (bbox[3], bbox[1]), (255, 255, 255), 1)
        mmcv.imshow(mask, "test", 10)
        exit()
    return bbox
5,333,962
def check_mobile_mode() -> bool:
    """
    Return if you are working in mobile mode.

    Mobile mode is implied by the platform (android/ios); on any other
    platform it is read from the local settings.

    @return True or False.
    """
    from pineboolib.core import settings

    if QtCore.QSysInfo().productType() in ("android", "ios"):
        return True
    return settings.CONFIG.value(u"ebcomportamiento/mobileMode", False)
5,333,963
def check_for_overflow_candidate(node):
    """
    Checks if the node contains an expression which can potentially produce
    an overflow, meaning an expression which is not wrapped by any cast and
    which involves the operator +, ++, *, ** (or -, --).

    Note, the expression can have several sub-expressions. It is the case of
    the expression (a + 3 > 0 && a * 3 > 5). In this case, the control is not
    just done for the first expression (which is the &&), but is applied
    recursively to all the subexpressions, until it finds an expression with
    one of the whitelisted operators.

    :param node: Node could be an Expression or AstNode (Tuple or Literal);
        in both cases, they have a dictionary called 'dic'.
    :return: List of tuples [(AstNode, {exp_id: expression})], where the
        AstNode is a node of type Identifier referring to a newly created
        variable called exp_id. The second object of the tuple is the map
        between the name of the variable added and its expression.
        Returns None when no candidate is found.
    """
    # Check if anywhere in the expression (also in depth) there is an
    # arithmetic operation worth flagging.
    expression_candidates = []
    whitelist_operators = ['+', '++', '*', '**', '-', '--']
    logic_operators = ['||', '&&', '>', '>=', '<', '<=', '==', '!=']
    # Detach the parent to let find_parent work on this subtree only.
    if not node:
        return None
    if node.parent:
        node.parent = None
    first_expression = asthelper.find_node(node.dic, {'nodeType': r'.*Operation'})
    if not first_expression:
        # No expression: it is either an identifier or a literal.
        return None
    if asthelper.find_parent(first_expression, {'kind': 'typeConversion'}) is not None:
        # The expression is wrapped by a cast; if wrapped, it can't be a
        # candidate.
        return None
    if first_expression['operator'] in whitelist_operators:
        exp_map = {}
        if 'name' not in first_expression.dic:
            # If there is no name, it is not a variable declaration, so the
            # expression must be turned into a synthetic identifier.
            exp_name = 'exp_{}'.format(first_expression.dic['id'])
            exp_map[exp_name] = expressionhelper.Expression(first_expression.dic)
            # Override the node in-place so downstream code sees an
            # Identifier referring to the synthetic variable.
            first_expression.dic['name'] = exp_name
            first_expression.dic['nodeType'] = 'Identifier'
        return [(first_expression, exp_map)]
    # Recursive case: logical/comparison operators -- descend into both
    # operands and merge whatever candidates they yield.
    if first_expression['operator'] in logic_operators:
        left_candidates = check_for_overflow_candidate(expressionhelper.Expression(first_expression['leftExpression']))
        right_candidates = check_for_overflow_candidate(expressionhelper.Expression(first_expression['rightExpression']))
        if left_candidates is not None:
            expression_candidates += left_candidates
        if right_candidates is not None:
            expression_candidates += right_candidates
        return expression_candidates
    return None
5,333,964
def parse_monitor_message(msg):
    """decode zmq_monitor event messages.

    Parameters
    ----------
    msg : list(bytes)
        zmq multipart message that has arrived on a monitor PAIR socket.

        First frame is::

            16 bit event id
            32 bit event value
            no padding

        Second frame is the endpoint as a bytestring

    Returns
    -------
    event : dict
        event description as dict with the keys `event`, `value`, and
        `endpoint`.

    Raises
    ------
    RuntimeError
        if the message does not have exactly two frames or the first
        frame is not 6 bytes ("=hi": 2 + 4 bytes, native order, no padding).
    """
    if len(msg) != 2 or len(msg[0]) != 6:
        raise RuntimeError("Invalid event message format: %s" % msg)
    # FIX: unpack once instead of twice (the previous code called
    # struct.unpack on the same frame for each field).
    event_id, value = struct.unpack("=hi", msg[0])
    return {
        'event': event_id,
        'value': value,
        'endpoint': msg[1],
    }
5,333,965
def load_config(fpath):
    """
    Load configuration from fpath and return as AttrDict.

    :param fpath: configuration file path, either TOML or JSON file
    :return: configuration object
    :raises ValueError: if the file is neither ``.toml`` nor ``.json``
    """
    if fpath.endswith(".toml"):
        data = toml.load(fpath)
    elif fpath.endswith(".json"):
        with open(fpath, "rt", encoding="utf-8") as infp:
            data = json.load(infp)
    else:
        # FIX: raise the specific ValueError instead of the bare Exception
        # base class (still caught by any existing `except Exception`).
        raise ValueError(f"Cannot load config file {fpath}, must be .toml or json file")
    return AttrDict(data)
5,333,966
def find_packages(name, pkg_dir):
    """Locate pre-built packages in the _packages directory.

    Yields (type_code, package_path, cache_path) for every builder whose
    package file exists on disk.
    """
    builders = (FileSystemPackageBuilder, ZipPackageBuilder, ExcelPackageBuilder)
    for builder in builders:
        package_path, cache_path = builder.make_package_path(pkg_dir, name)
        if not package_path.exists():
            continue
        yield builder.type_code, package_path, cache_path
5,333,967
def softmax_layer(inputs, n_hidden, random_base, drop_rate, l2_reg, n_class, scope_name='1'):
    """
    Method adapted from Trusca et al. (2020). Encodes the sentence
    representation into an n_class-dimensional vector (sentiment
    classification) using a softmax function.

    :param inputs: batch of sentence representations, shape [batch, n_hidden]
    :param n_hidden: size of the input representation
    :param random_base: bound for the uniform weight initializer
    :param drop_rate: dropout rate applied to the inputs
    :param l2_reg: L2 regularization strength for w and b
    :param n_class: number of output classes
    :param scope_name: suffix appended to the variable names so several
        softmax layers can coexist in one graph
    :return: (predict, w) -- the softmax class probabilities and the
        weight variable (returned so callers can add their own penalties)
    """
    # Weight matrix, uniformly initialized in [-random_base, random_base].
    w = tf.get_variable(
        name='softmax_w' + scope_name,
        shape=[n_hidden, n_class],
        # initializer=tf.random_normal_initializer(mean=0., stddev=np.sqrt(2. / (n_hidden + n_class))),
        initializer=tf.random_uniform_initializer(-random_base, random_base),
        regularizer=tf.keras.regularizers.L2(l2_reg)
    )
    # Per-class bias vector.
    b = tf.get_variable(
        name='softmax_b' + scope_name,
        shape=[n_class],
        # initializer=tf.random_normal_initializer(mean=0., stddev=np.sqrt(2. / (n_class))),
        initializer=tf.random_uniform_initializer(-random_base, random_base),
        regularizer=tf.keras.regularizers.L2(l2_reg)
    )
    with tf.name_scope('softmax'):
        # Dropout -> affine projection -> softmax over classes.
        outputs = tf.nn.dropout(inputs, rate=drop_rate)
        predict = tf.matmul(outputs, w) + b
        predict = tf.nn.softmax(predict)
    return predict, w
5,333,968
def node2freqt(docgraph, node_id, child_str='', include_pos=False,
               escape_func=FREQT_ESCAPE_FUNC):
    """convert a docgraph node into a FREQT string."""
    node_attrs = docgraph.node[node_id]

    if not istoken(docgraph, node_id):
        # Non-terminal: use its label, falling back to the node ID.
        label_str = escape_func(node_attrs.get('label', node_id))
        return u"({label}{child})".format(label=label_str, child=child_str)

    token_str = escape_func(node_attrs[docgraph.ns+':token'])
    if not include_pos:
        return u"({token}{child})".format(token=token_str, child=child_str)
    # Wrap the token in its POS tag (empty string when missing).
    pos_str = escape_func(node_attrs.get(docgraph.ns+':pos', ''))
    return u"({pos}({token}){child})".format(
        pos=pos_str, token=token_str, child=child_str)
5,333,969
def build_arg_parser():
    """Build the ArgumentParser.

    Options: FRITZ!Box host (default fritz.box), username (default
    dslf-config) and a required password.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    options = (
        (("-f", "--fritzbox"), {"default": "fritz.box"}),
        (("-u", "--username"), {"default": "dslf-config"}),
        (("-p", "--password"), {"required": True}),
    )
    for flags, kwargs in options:
        parser.add_argument(*flags, **kwargs)
    return parser
5,333,970
def get_marathon_url():
    """Get Marathon URL from the environment.

    This is optional, default: http://leader.mesos:8080.
    """
    try:
        return os.environ["MARATHON_URL"]
    except KeyError:
        # Variable unset: warn and fall back to the cluster default.
        logger.warning("Unable to parse MARATHON_URL environment variable, using default: http://leader.mesos:8080")
        return "http://leader.mesos:8080"
5,333,971
def _load_dataset(name, split, return_X_y, extract_path=None):
    """Load time series classification datasets (helper function).

    Looks the dataset up under the local extract path, downloads it from
    timeseriesclassification.com when missing, then loads the requested
    split(s) from the .ts files.

    Parameters
    ----------
    name : str
        Dataset name as listed on timeseriesclassification.com.
    split : str or None
        "TRAIN" or "TEST" (case-insensitive), or None to load and
        concatenate both splits.
    return_X_y : bool
        If True return (X, y); otherwise return a single DataFrame with the
        class values appended as a "class_val" column.
    extract_path : str, optional
        Non-standard location to store/look up the downloaded data.

    Returns
    -------
    X : pd.DataFrame (and y : np.ndarray when return_X_y is True)

    Raises
    ------
    ValueError
        If the dataset cannot be downloaded or `split` is invalid.
    """
    # Allow user to have non standard extract path
    if extract_path is not None:
        local_module = os.path.dirname(extract_path)
        local_dirname = extract_path
    else:
        local_module = MODULE
        local_dirname = DIRNAME

    if not os.path.exists(os.path.join(local_module, local_dirname)):
        os.makedirs(os.path.join(local_module, local_dirname))
    if name not in _list_downloaded_datasets(extract_path):
        url = "http://timeseriesclassification.com/Downloads/%s.zip" % name
        # This also tests the validity of the URL; can't rely on the html
        # status code as it always returns 200.
        try:
            _download_and_extract(
                url,
                extract_path=extract_path,
            )
        except zipfile.BadZipFile as e:
            raise ValueError(
                "Invalid dataset name. ",
                extract_path,
                "Please make sure the dataset "
                + "is available on http://timeseriesclassification.com/.",
            ) from e

    if isinstance(split, str):
        split = split.upper()

    if split in ("TRAIN", "TEST"):
        fname = name + "_" + split + ".ts"
        abspath = os.path.join(local_module, local_dirname, name, fname)
        X, y = load_from_tsfile_to_dataframe(abspath)
    # if split is None, load both train and test set
    elif split is None:
        X = pd.DataFrame(dtype="object")
        y = pd.Series(dtype="object")
        for split in ("TRAIN", "TEST"):
            fname = name + "_" + split + ".ts"
            abspath = os.path.join(local_module, local_dirname, name, fname)
            result = load_from_tsfile_to_dataframe(abspath)
            X = pd.concat([X, pd.DataFrame(result[0])])
            y = pd.concat([y, pd.Series(result[1])])
        # BUG FIX: `np.str` was removed in NumPy 1.24; the builtin `str` is
        # the documented replacement and behaves identically here.
        y = pd.Series.to_numpy(y, dtype=str)
    else:
        raise ValueError("Invalid `split` value =", split)

    # Return appropriately
    if return_X_y:
        return X, y
    X["class_val"] = pd.Series(y)
    return X
5,333,972
def run_cc_net_nmf(run_parameters):
    """ wrapper: call sequence to perform network based stratification with consensus clustering
        and write results.

    Args:
        run_parameters: parameter set dictionary.  Must contain the keys read
            below ('processing_method', 'number_of_clusters',
            'number_of_bootstraps', 'gg_network_name_full_path',
            'spreadsheet_name_full_path'; plus 'cluster_ip_address' when
            processing_method == 'distribute').

    Raises:
        ValueError: if 'processing_method' is not one of
            'serial' / 'parallel' / 'distribute'.
    """
    # All bootstrap intermediates go into a run-specific temporary directory.
    tmp_dir = 'tmp_cc_net_nmf'
    run_parameters = update_tmp_directory(run_parameters, tmp_dir)

    processing_method = run_parameters['processing_method']
    number_of_clusters = run_parameters['number_of_clusters']
    number_of_bootstraps = run_parameters['number_of_bootstraps']
    gg_network_name_full_path = run_parameters['gg_network_name_full_path']
    spreadsheet_name_full_path = run_parameters['spreadsheet_name_full_path']

    # Build the gene-gene network and its Laplacian (diagonal + positive parts).
    network_mat, \
    unique_gene_names = kn.get_sparse_network_matrix(gg_network_name_full_path)
    network_mat = kn.normalize_sparse_mat_by_diagonal(network_mat)
    lap_diag, lap_pos = kn.form_network_laplacian_matrix(network_mat)

    # Load the spreadsheet and align its gene rows to the network's gene set.
    spreadsheet_df = kn.get_spreadsheet_df(spreadsheet_name_full_path)
    spreadsheet_df = kn.update_spreadsheet_df(spreadsheet_df, unique_gene_names)
    spreadsheet_mat = spreadsheet_df.values
    number_of_samples = spreadsheet_mat.shape[1]
    sample_names = spreadsheet_df.columns

    # Run the bootstrapped net-NMF clustering with the requested execution mode.
    if processing_method == 'serial':
        for sample in range(0, number_of_bootstraps):
            run_cc_net_nmf_clusters_worker(network_mat, spreadsheet_mat,
                                           lap_diag, lap_pos,
                                           run_parameters, sample)
    elif processing_method == 'parallel':
        find_and_save_cc_net_nmf_clusters_parallel(network_mat, spreadsheet_mat,
                                                   lap_diag, lap_pos,
                                                   run_parameters,
                                                   number_of_bootstraps)
    elif processing_method == 'distribute':
        func_args = [network_mat, spreadsheet_mat, lap_diag, lap_pos,
                     run_parameters]
        # Functions the remote workers need shipped alongside the job.
        dependency_list = [run_cc_net_nmf_clusters_worker,
                           save_a_clustering_to_tmp,
                           dstutil.determine_parallelism_locally]
        cluster_ip_address = run_parameters['cluster_ip_address']
        dstutil.execute_distribute_computing_job(
            cluster_ip_address,
            number_of_bootstraps,
            func_args,
            find_and_save_cc_net_nmf_clusters_parallel,
            dependency_list)
    else:
        raise ValueError('processing_method contains bad value.')

    # Aggregate the bootstrap runs into a consensus matrix and cluster it.
    consensus_matrix = form_consensus_matrix(run_parameters, number_of_samples)
    distance_matrix = pairwise_distances(consensus_matrix,
                                         n_jobs=-1)  # [n_samples, n_samples] use all available cores
    labels = kn.perform_kmeans(consensus_matrix, number_of_clusters)

    # Persist all results, then clean up the temporary directory.
    save_consensus_clustering(consensus_matrix, sample_names, labels,
                              run_parameters)
    calculate_and_save_silhouette_scores(distance_matrix, sample_names, labels,
                                         run_parameters)
    save_final_samples_clustering(sample_names, labels, run_parameters)
    save_spreadsheet_and_variance_heatmap(spreadsheet_df, labels,
                                          run_parameters, network_mat)

    kn.remove_dir(run_parameters["tmp_directory"])
5,333,973
def search(news_name):
    """Render search results for the given news query.

    The query is reformatted with '+' separators (the shape the news API
    expects), searched, and rendered together with the available sources.
    """
    # "climate change" -> "climate+change" for the API query string.
    search_name_format = "+".join(news_name.split(" "))
    searched_results = search_news(search_name_format)
    sources = get_source_news()
    title = f'search results for {news_name}'
    # Bug fix: `title` was computed but never passed to the template.
    return render_template('search.html', results=searched_results,
                           my_sources=sources, title=title)
5,333,974
def encrypt_document(document):
    """
    Useful method to encrypt a document using a random cipher
    """
    # NOTE(review): this delegates to decrypt_document(); presumably the
    # cipher operation is symmetric, so "decrypting" with a fresh random
    # cipher effectively encrypts -- confirm against decrypt_document's
    # implementation before relying on this.
    cipher = generate_random_cipher()
    return decrypt_document(document, cipher)
5,333,975
def do_width_file(width, filename):
    """Print a "<symbol>\\t<width>" line for every known symbol in a range file.

    The file contains pairs of hexadecimal unicode values per line; each pair
    is an inclusive [start, end] range of codepoints that all have the given
    width.  Lines beginning with '#' are skipped as comments.  Codepoints
    whose UTF-8 key is missing from SYMBOLS are silently skipped.

    Args:
        width: integer width reported for each symbol.
        filename: path of the range file to read.
    """
    # `with` guarantees the file handle is closed (the original leaked it).
    with open(filename) as range_file:
        for line in range_file:
            if line.startswith("#"):
                continue
            vals = line.split()
            # Consume hex values two at a time; a trailing unpaired value is
            # ignored, matching the original `while len(vals) > 1` behavior.
            for start_hex, end_hex in zip(vals[0::2], vals[1::2]):
                start = int(start_hex, 16)
                end = int(end_hex, 16)
                for val in range(start, end + 1):
                    sym = SYMBOLS.get(u8_str(val))
                    if sym is None:
                        continue
                    print("%s\t%d" % (sym, width))
5,333,976
def test_open_run(
    # Default fixture rows: (subject_id, greenish, is_animal, is_vegetable, is_mineral).
    data=(
        (0, 0.75, True, False, False),
        (1, 0.25, True, False, False),
        (2, 0.75, False, True, False),
        (3, 0.25, False, True, False),
        (4, 0.75, False, False, True),
        (5, 0.25, False, False, True),
    ),
    in_columns=(
        "subject_id",
        "greenish",
        "is_animal",
        "is_vegetable",
        "is_mineral",
    ),
    # Query used to read the persisted predictions back out for comparison.
    check="""
select
    run_id, subject_id, score,
    greenish, is_animal, is_vegetable, is_mineral
from predictions
natural join features id
where run_id = %(run_id)s""",
):
    """Test that predictions assigned inside open_run() round-trip through the DB.

    Builds a tiny feature frame, computes a toy score, stores it on the run,
    then reads it back with `check` and asserts frame equality.
    """
    batch = Batch(
        as_of=None,
        duration=None,
        microservice_version="1.0.0",
        time_zone=None,
    )
    persistor = Persistor()
    model_batch = ModelBatch(model_version="1.0.0", parent=batch)
    with persistor.open_run(parent=model_batch) as run:
        df = DataFrame(data=list(data), columns=in_columns)
        # NOTE(review): set_index without inplace=True returns a new frame and
        # is discarded here -- presumably harmless, but confirm intent.
        df.set_index("subject_id")
        # Toy score: animals like green, vegetables dislike it, minerals score 0.
        df["score"] = ~df["is_mineral"] * (
            (df["is_animal"] * df["greenish"])
            + (df["is_vegetable"] * (1.0 - df["greenish"]))
        )
        run.predictions = df
    # Read everything back inside a rolled-back transaction so the test
    # leaves no trace in the database.
    with persistor.rollback() as cur:
        cur.execute(persistor.sql.schema)
        df = read_sql_query(
            sql=check, con=cur.connection, params={"run_id": run.id}
        )
    df.set_index("subject_id")
    # reorder columns to match run.predictions
    df = df[run.predictions.columns]
    # logger.error(df.head(10))
    # logger.error(run.predictions.head(10))
    assert df.equals(run.predictions)
5,333,977
def bootstrap_alert(visitor, items):
    """Render items as Bootstrap alert <div> blocks.

    Format:

    [[alert(class=error)]]:
        message

    Each item's optional ``class`` kwarg becomes an ``alert-<class>`` CSS
    class; a ``close`` kwarg adds a dismiss button.  The item body is run
    through ``visitor.parse_text``.
    """
    parts = []
    for item in items:
        css = item['kwargs'].get('class', '')
        if css:
            css = 'alert-%s' % css
        parts.append('<div class="alert %s">' % css)
        if 'close' in item['kwargs']:
            parts.append('<button class="close" data-dismiss="alert">&times;</button>')
        parts.append(visitor.parse_text(item['body'], 'article'))
        parts.append('</div>')
    return '\n'.join(parts)
5,333,978
def masked_mean(x, *, mask, axis, paxis_name, keepdims):
    """Mean of `x` over `axis`, counting only entries where `mask` is True.

    Args:
      x: Tensor to take the mean of.
      mask: Boolean array of the same shape as `x`; True entries are included,
        False entries are excluded.
      axis: Axis (or axes) of `x` to reduce over.
      paxis_name: Optional parallel-axis name; when not None the reduction is
        also taken across devices along that axis.
      keepdims: As in `numpy.mean` -- whether reduced axes are kept (size 1)
        or squeezed out.

    Returns:
      The masked mean of `x` reduced over `axis`.
    """
    assert x.shape == mask.shape
    # Sum of the selected entries ...
    numerator = masked_sum(
        x, mask=mask, axis=axis, paxis_name=paxis_name, keepdims=keepdims)
    # ... divided by how many entries were selected (mask summed as 0/1).
    denominator = masked_sum(
        x=mask, mask=None, axis=axis, paxis_name=paxis_name, keepdims=keepdims)
    return numerator / denominator
5,333,979
def clients_ping():
    """
    Ping bountytools clients to test connectivity

    NOTE(review): currently a stub -- no ping is performed and nothing is
    returned; the implementation is still TODO.

    :return:
    """
5,333,980
def dbsession(engine, tables):
    """Returns an sqlalchemy session, and after the test tears down everything properly.

    Yields a Session bound to a connection holding an open outer transaction;
    everything the test does is rolled back afterwards, keeping tests
    isolated from each other.  `tables` is unused here but presumably a
    fixture dependency that ensures the schema exists -- confirm upstream.
    """
    connection = engine.connect()
    # begin the nested transaction
    transaction = connection.begin()
    # use the connection with the already started transaction
    session = Session(bind=connection)

    yield session

    session.close()
    # roll back the broader transaction
    transaction.rollback()
    # put back the connection to the connection pool
    connection.close()
5,333,981
def run_project_patcher_internal(context, identifier, dry_run, should_log, deployment_name=None):
    """Internal call point for post project update"""
    if identifier is None:
        # Fall back to the default patch when none was requested.
        identifier = DEFAULT_PATCH_IDENTIFIER
    if identifier != DEFAULT_PATCH_IDENTIFIER:
        __output_message("No patch selected", should_log)
    else:
        # Patch project stack if it exists
        __run_032020_project_patch(context, dry_run, should_log, deployment_name)
5,333,982
def log_error(error, file_path="logs/", message=None):
    """Append an exception (with optional context message) to an error log.

    The entry is echoed to stdout and appended to the file at ``file_path``.

    Args:
        error (Exception | str): output from an except statement.
        file_path (str): path of the error log file to append to.
            NOTE(review): the default "logs/" looks like a directory, which
            open(..., "a") cannot append to -- callers appear expected to
            pass a full file path; confirm the intended default.
        message (str, optional): custom message. Defaults to None.
    """
    msg = f"{date_time()} - {message}. {error}"
    print(msg)
    with open(f"{file_path}", "a") as f:
        f.write(msg + "\n\n")
    return
5,333,983
def aspectRatioFix(preserve, anchor, x, y, width, height, imWidth, imHeight):
    """Position (and optionally scale) an image inside a box.

    The box is first normalized: a None width/height defaults to the image
    size, and negative dimensions are flipped (moving x/y so that x,y stays
    the lower-left corner).  When `preserve` is true, the image is scaled
    uniformly to fit the box and re-anchored at the given compass point
    ('n','s','e','w','ne',... or 'c' for centre).

    Returns the tuple (x, y, width, height, scale) to pass to
    canvas.drawImage / drawInlineImage.
    """
    scale = 1.0

    # --- normalize the requested box ---
    if width is None:
        width = imWidth
    if height is None:
        height = imHeight
    if width < 0:
        # Flip a negative width, shifting x so the corner stays lower-left.
        width, x = -width, x + width
    if height < 0:
        height, y = -height, y + height

    if preserve:
        imWidth, imHeight = abs(imWidth), abs(imHeight)
        # Uniform scale that fits the image inside the box.
        scale = min(width / float(imWidth), height / float(imHeight))
        box_w, box_h = width, height
        width = scale * imWidth - 1e-8
        height = scale * imHeight - 1e-8
        # Distribute the leftover horizontal space according to the anchor.
        if anchor not in ('nw', 'w', 'sw'):
            dx = box_w - width
            x += dx / 2. if anchor in ('n', 'c', 's') else dx
        # ... and the leftover vertical space likewise.
        if anchor not in ('sw', 's', 'se'):
            dy = box_h - height
            y += dy / 2. if anchor in ('w', 'c', 'e') else dy

    return x, y, width, height, scale
5,333,984
def __do_core(SM, ToDB):
    """RETURNS: Acceptance trace database:

                map: state_index --> MergedTraces

    ___________________________________________________________________________
    This function walks down almost each possible path trough a given state
    machine. During the process of walking down the paths it develops for
    each state its list of _Trace objects.
    ___________________________________________________________________________
    IMPORTANT:

    There is NO GUARANTEE that the paths from acceptance to 'state_index' or
    the paths from input position storage to 'state_index' are complete! The
    calling algorithm must walk these paths on its own.

    This is due to a danger of exponential complexity with certain setups.
    Any path analysis is dropped as soon as a state is reached with an
    equivalent history.
    ___________________________________________________________________________
    """
    def print_path(x):
        # Debugging helper: prints the chain of state indices from 'x' back
        # to the path's root (not used in normal operation).
        print(x.state_index, " ", end=' ')
        if x.parent is not None:
            print_path(x.parent)
        else:
            print()

    class TraceFinder(TreeWalker):
        """Determines _Trace objects for each state. The heart of this function is
           the call to '_Trace.next_step()' which incrementally develops the
           acceptance and position storage history of a path.

           Recursion Terminal: When state has no target state that has not yet
                               been handled in the 'path' in the same manner.
                               That means, that if a state appears again in the
                               path, its trace must be different or the recursion
                               terminates.
        """
        def __init__(self, state_machine, ToDB):
            self.sm = state_machine
            self.to_db = ToDB
            # One (initially empty) trace list per state of the machine.
            self.result = dict((i, []) for i in self.sm.states.keys())
            self.path = []
            # Under some circumstances, the init state may accept!
            # (E.g. the appendix state machines of the 'loopers')
            TreeWalker.__init__(self)

        def on_enter(self, Args):
            PreviousTrace = Args[0]
            StateIndex = Args[1]
            # (*) Update the information about the 'trace of acceptances'
            dfa_state = self.sm.states[StateIndex]

            if not self.path:
                # Root of the walk: start a fresh trace at the init state.
                trace = _Trace(self.sm.init_state_index, dfa_state)
            else:
                # Extend the parent's trace by this transition.
                trace = PreviousTrace.next_step(StateIndex, dfa_state)

            target_index_list = self.to_db[StateIndex]

            # (*) Recursion Termination:
            #
            # If a state has been analyzed before with the same trace as result,
            # then it is not necessary dive into deeper investigations again. All
            # of its successor paths have been walked along before. This catches
            # two scenarios:
            #
            # (1) Loops: A state is reached through a loop and nothing
            #     changed during the walk through the loop since
            #     the last passing.
            #
            #     There may be connected loops, so it is not sufficient
            #     to detect a loop and stop.
            #
            # (2) Knots: A state is be reached through different branches.
            #     However, the traces through those branches are
            #     indifferent in their positioning and accepting
            #     behavior. Only one branch needs to consider the
            #     subsequent states.
            #
            #     (There were cases where this blew the computation time
            #      see bug-2257908.sh in $QUEX_PATH/TEST).
            #
            existing_trace_list = self.result.get(StateIndex)
            if existing_trace_list:
                end_of_road_f = (len(target_index_list) == 0)
                for pioneer in existing_trace_list:
                    if not trace.is_equivalent(pioneer, end_of_road_f):
                        continue
                    elif trace.has_parent(pioneer):
                        # Loop detected -- Continuation unnecessary.
                        # Nothing new happened since last passage.
                        # If trace was not equivalent, the loop would have to
                        # be stepped through again.
                        return None
                    else:
                        # Knot detected -- Continuation abbreviated.
                        # A state is reached twice via two separate paths with
                        # the same positioning_states and acceptance states. The
                        # analysis of subsequent states on the path is therefore
                        # complete. Almost: There is no alternative paths from
                        # store to restore that must added later on.
                        return None

            # (*) Mark the current state with its acceptance trace
            self.result[StateIndex].append(trace)

            # (*) Add current state to path
            self.path.append(StateIndex)

            # (*) Recurse to all (undone) target states.
            return [(trace, target_i) for target_i in target_index_list]

        def on_finished(self, Args):
            # self.done_set.add(StateIndex)
            self.path.pop()

    trace_finder = TraceFinder(SM, ToDB)
    trace_finder.do((None, SM.init_state_index))

    return trace_finder.result
5,333,985
def circle_area(radius: int, n: int = 1000) -> float:
    """Estimate the area of a circle of the given radius via Monte Carlo.

    Points are sampled uniformly in the bounding square
    [-radius, radius] x [-radius, radius]; the fraction landing inside the
    circle, times the square's area, estimates the circle's area.  The
    decimal precision grows roughly like log10(n), so pass n=10**3 for
    about three decimal places.

    :param radius: the radius of the circle
    :param n: number of random samples to draw (default 1000)
    :return: the estimated area of the circle
    """
    hits = 0
    side = 2 * radius
    for _ in range(n):
        # Sample uniformly over the full bounding square.  (The previous code
        # only offset by `radius * random()`, which sampled a single quadrant;
        # it gave an unbiased estimate only by accidental symmetry.)
        x = -radius + random() * side
        y = -radius + random() * side
        # Count points that fall inside the circle.
        if sqrt((x ** 2) + (y ** 2)) < radius:
            hits += 1
    return (hits / n) * (side ** 2)
5,333,986
def Quantized_MLP(pre_model, args):
    """Prepare loss function and SGD optimizer for a ternary-quantized MLP.

    Parameters are split into three optimizer groups: the full-precision
    first/last layer weights, the ternary (quantized) layer parameters, and
    the full-precision output-layer bias.

    :param pre_model: model whose parameter names distinguish 'fp_layer' and
        'ternary' layers (must expose `fp_layer2.bias`)
    :param args: namespace carrying the learning rate `lr`
    :return: (pre_model, loss_fun, optimizer)
    """
    # Full-precision first and last layer weights form their own group.
    fp_weights = [p for n, p in pre_model.named_parameters()
                  if 'fp_layer' in n and 'weight' in n]
    fp_biases = [pre_model.fp_layer2.bias]

    # Parameters of the layers that get ternarized.
    ternary_weights = [p for n, p in pre_model.named_parameters()
                       if 'ternary' in n]

    optimizer = optim.SGD(
        [{'params': fp_weights},
         {'params': ternary_weights},
         {'params': fp_biases}],
        lr=args.lr)

    return pre_model, nn.CrossEntropyLoss(), optimizer
5,333,987
def _super_tofrom_choi(q_oper):
    """
    We exploit that the basis transformation between Choi and supermatrix
    representations squares to the identity, so that if we munge Qobj.type,
    we can use the same function.

    Since this function doesn't respect :attr:`Qobj.type`, we mark it as
    private; only those functions which wrap this in a way so as to preserve
    type should be called externally.
    """
    data = q_oper.data.toarray()
    dims = q_oper.dims
    # Swap the roles of the input/output dimension pairs; because the
    # transformation is an involution, the same reshuffle maps in both
    # directions (Choi -> super and super -> Choi).
    new_dims = [[dims[1][1], dims[0][1]], [dims[1][0], dims[0][0]]]
    d0 = np.prod(np.ravel(new_dims[0]))
    d1 = np.prod(np.ravel(new_dims[1]))
    s0 = np.prod(dims[0][0])
    s1 = np.prod(dims[1][1])
    # View the flat matrix as a 4-index tensor, permute the indices, then
    # flatten back to a (d0, d1) matrix in the new basis.
    return Qobj(dims=new_dims,
                inpt=data.reshape([s0, s1, s0, s1]).
                transpose(3, 1, 2, 0).reshape((d0, d1)))
5,333,988
def main(args):
    """Entry point of the repo runner; dispatches on the parsed CLI flags.

    Installs dependencies, sets up the build environment when the current
    directory lacks the build files, then runs whichever build/list actions
    the flags request.
    """
    dependency.installCommon()
    dependency.installFullRepo()

    if not checks.inCorrectDirectory("repo"):
        logger.log("Could not detect build files in the current directory, Setting up environment")
        download.downloadRepo("repo")
        logger.log("Build files should be here: {}".format(os.getcwd()))

    if args.all:
        # --all builds everything; no further flags are consulted.
        BuildFullRepo(args.upload)
        return

    # Individual build steps; any combination may be requested at once.
    build_steps = ((args.packages, buildBase),
                   (args.fonts, buildFonts),
                   (args.kernel, buildKernel),
                   (args.sync, syncRepo))
    for requested, build in build_steps:
        if requested:
            build(args.upload)

    # Listing flags are mutually exclusive, first match wins.
    if args.list:
        listAllPackages()
    elif args.list_fonts:
        listFonts()
    elif args.list_packages:
        listPackages()
5,333,989
def get_attention_weights(data):
    """Get the attention weights of the given function.

    Args:
        data: dict containing the 'tokeninteraction' interaction log and the
            list of 'tokens' (each token dict carries an 'id').

    Returns:
        list: one attention value per token, in token order.
    """
    # USE INTERACTIONS
    df_token_interaction = pd.DataFrame(data['tokeninteraction'])

    # NOTE: the original also computed the indices of clicked tokens from
    # data['finalclickedtokens'], but never used them -- dead code removed.

    # COMPUTE ATTENTION: one value per token, derived from the interactions.
    return [
        get_attention(index_token=token['id'],
                      df_interaction=df_token_interaction)
        for token in data['tokens']
    ]
5,333,990
def save_ecg_example(gen_data: np.array, image_name, image_title='12-lead ECG'):
    """
    Save 12-lead ecg signal in fancy .png

    One subplot per lead (4x3 grid), written to out/<image_name>.png.

    :param gen_data: array of shape (samples, n_leads)
    :param image_name: output file name without extension
    :param image_title: figure-level title
    :return: the created matplotlib figure (already closed)
    """
    fig = plt.figure(figsize=(12, 14))
    n_leads = gen_data.shape[1]
    for lead_idx in range(n_leads):
        plt.subplot(4, 3, lead_idx + 1)
        plt.plot(gen_data[:, lead_idx], label=f'lead_{lead_idx + 1}')
        plt.title(f'lead_{lead_idx + 1}')

    fig.suptitle(image_title)
    plt.savefig(f'out/{image_name}.png', bbox_inches='tight')
    plt.close(fig)
    return fig
5,333,991
def zip_files(name_of_zip: str, files_to_zip: str) -> None:
    """Zip files.

    Examples
    --------
    .. code-block:: robotframework

        ZipFiles            my_zip_file         rabbit.txt
        ZipFiles            my_zip_file_2       dog.txt
        ZipFiles            my_zip_file_3       rabbit.txt, dog.txt
        ZipFiles            my_zip_file_4       C:/Users/pace/secrets/cat.txt
        ZipFiles            my_zip_file_5       C:/Users/pace/secrets/cat.txt, C:/automation/kangaroo.txt

    Parameters
    ----------
    name_of_zip : str
        Name of the zip file created.
    files_to_zip : str
        Files to be zipped, separated by "," in case of multiple files.
    """
    if not name_of_zip.endswith('.zip'):
        name_of_zip += '.zip'
    files = files_to_zip.split(',')
    try:
        with ZipFile(name_of_zip, 'w') as archive:
            for entry in files:
                path = str(download.get_path(entry.strip()))
                if os.path.isdir(path):
                    # Directories are walked recursively; their files keep
                    # their full paths inside the archive.
                    for root, _, names in os.walk(path):
                        for name in names:
                            archive.write(os.path.join(root, name))
                else:
                    # Single files are stored under their base name only.
                    archive.write(path, _basename(path))
    except OSError as e:
        raise QWebValueError('\nFile name "{}" contained illegal characters.'
                             '\nError message: {}'.format(name_of_zip, str(e))) from e
    logger.info('Zipped files {} into the file {}'.format(str(files), name_of_zip),
                also_console=True)
5,333,992
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry by delegating to the domain's entity component."""
    return await hass.data[DOMAIN].async_unload_entry(entry)
5,333,993
def logging_init(log_path, log_filename, html=False):
    """
    Initializes the LOG object for global logging, which is a rotating
    log-handler: creates at most LOG_BACKUP_COUNT log files; older ones are
    deleted, with each log file capped at LOG_MAX_BYTES.

    Can be configured to log HTML or text; defaults to text.
    """
    global LOG

    # HTML mode wraps each record in <p> tags and uses an .html file name.
    if html:
        log_format = "<p>[%(asctime)s %(threadName)s, %(levelname)s] %(message)s</p>"
        file_name = "{0}/{1}.html".format(log_path, log_filename)
    else:
        log_format = "[%(asctime)s %(threadName)s, %(levelname)s] %(message)s"
        file_name = "{0}/{1}.log".format(log_path, log_filename)

    handler = logging.handlers.RotatingFileHandler(
        file_name, maxBytes=LOG_MAX_BYTES, backupCount=LOG_BACKUP_COUNT)
    handler.setFormatter(logging.Formatter(log_format))
    LOG.addHandler(handler)
5,333,994
def get_all_score_dicts(ref_punc_folder_name, res_punc_folder_name):
    """ Return a list of score dictionaries for a set of two folders.

    This function assumes the naming of the files in the folders follows the
    "<n>_reference_punc.txt" / "pr_<n>_asr_output.txt" scheme, so files with
    the same index pair up.  Both folders should be in the directory this
    script is also in.

    :param ref_punc_folder_name: Filename of the reference punctuation folder
    :param res_punc_folder_name: Filename of the restored punctuation folder
    :return: A list of score dictionaries
    """
    filenames_ref_punc = os.listdir(ref_punc_folder_name)
    filenames_res_punc = os.listdir(res_punc_folder_name)
    print(f"Number of reference punctuation files: {len(filenames_ref_punc)}")
    print(f"Number of restored punctuation files: {len(filenames_res_punc)}")

    counter = 0
    score_dicts_list = []
    start_timer = time.time()
    # NOTE: the index range 0..460 is inherited from the dataset layout.
    for i in tqdm(range(0, 461)):
        # Bug fix: paths were joined with a hard-coded "\\", which only works
        # on Windows; os.path.join is portable.
        ref_punc_filename = os.path.join(ref_punc_folder_name,
                                         f"{i}_reference_punc.txt")
        res_punc_filename = os.path.join(res_punc_folder_name,
                                         f"pr_{i}_asr_output.txt")
        # Score index i only when both the reference and restored files exist
        # (equivalent to the original `isfile(a) == isfile(b) and isfile(a)`).
        if os.path.isfile(ref_punc_filename) and os.path.isfile(res_punc_filename):
            counter += 1
            score_dicts_list.append(
                ref_and_res_to_scores(refPuncFileName=ref_punc_filename,
                                      resPuncFileName=res_punc_filename))

    print(f"--- Processed {counter} files in {time.time() - start_timer} seconds ---")
    return score_dicts_list
5,333,995
def run_random_climate(gdir, nyears=1000, y0=None, halfsize=15, bias=None, seed=None, temperature_bias=None, climate_filename='climate_monthly', climate_input_filesuffix='', output_filesuffix='', init_area_m2=None, unique_samples=False): """Runs the random mass balance model for a given number of years. This initializes a :py:class:`oggm.core.vascaling.RandomVASMassBalance`, and runs and stores a :py:class:`oggm.core.vascaling.VAScalingModel` with the given mass balance model. Parameters ---------- gdir : :py:class:`oggm.GlacierDirectory` the glacier directory to process nyears : int, optional length of the simulation, default = 1000 y0 : int, optional central year of the random climate period. The default is to be centred on t*. Default = None halfsize : int, optional the half-size of the time window (window size = 2 * halfsize + 1), default = 15 bias : float, optional bias of the mb model. Default is to use the calibrated one, which is often a better idea. For t* experiments it can be useful to set it to zero. Default = None seed : int seed for the random generator. If you ignore this, the runs will be different each time. Setting it to a fixed seed accross glaciers can be usefull if you want to have the same climate years for all of them temperature_bias : float, optional add a bias to the temperature timeseries, default = None climate_filename : str, optional name of the climate file, e.g. 
'climate_monthly' (default) or 'gcm_data' climate_input_filesuffix: str, optional filesuffix for the input climate file output_filesuffix : str, optional this add a suffix to the output file (useful to avoid overwriting previous experiments) init_area_m2: float, optional glacier area with which the model is initialized, default is RGI value unique_samples: bool, optional if true, chosen random mass-balance years will only be available once per random climate period-length if false, every model year will be chosen from the random climate period with the same probability (default) Returns ------- :py:class:`oggm.core.vascaling.VAScalingModel` """ # instance mass balance model mb_mod = RandomVASMassBalance(gdir, y0=y0, halfsize=halfsize, bias=bias, seed=seed, filename=climate_filename, input_filesuffix=climate_input_filesuffix, unique_samples=unique_samples) if temperature_bias is not None: # add given temperature bias to mass balance model mb_mod.temp_bias = temperature_bias # where to store the model output diag_path = gdir.get_filepath('model_diagnostics', filesuffix='vas', delete=True) # instance the model min_hgt, max_hgt = get_min_max_elevation(gdir) if init_area_m2 is None: init_area_m2 = gdir.rgi_area_m2 model = VAScalingModel(year_0=0, area_m2_0=init_area_m2, min_hgt=min_hgt, max_hgt=max_hgt, mb_model=mb_mod) # specify path where to store model diagnostics diag_path = gdir.get_filepath('model_diagnostics', filesuffix=output_filesuffix, delete=True) # run model model.run_until_and_store(year_end=nyears, diag_path=diag_path) return model
5,333,996
def export_action(modeladmin, request, queryset):
    """Admin action to launch the export process.

    Kicks off an asynchronous export task for every selected model instance,
    then notifies the admin user.
    """
    for model_instance in queryset:
        model_instance.export_data(async_process=True)
    modeladmin.message_user(request, _("Launched export tasks..."))
5,333,997
def sync_get_ami_arch_from_instance_type(instance_type: str, region_name: Optional[str]=None) -> str:
    """For a given EC2 instance type, returns the AMI architecture associated
    with the instance type.

    Args:
        instance_type (str): An EC2 instance type; e.g., "t2.micro"
        region_name (Optional[str], optional): AWS region to use for the query,
            or None to use the default region. Defaults to None.

    Returns:
        str: The AMI architecture associated with instance_type
    """
    # Resolve the instance type to processor architectures, then map those
    # to the single AMI architecture name.
    arches = sync_get_processor_arches_from_instance_type(
        instance_type, region_name=region_name)
    return sync_get_ami_arch_from_processor_arches(arches)
5,333,998
def atan2(y, x):
    """Return the angle (in radians) of the 2D point (x, y) in the XY plane."""
    angle = math.atan2(y, x)
    return angle
5,333,999