content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def reference(t, viscosity, density, concentration_init, concentration_init_dev,
              solutes, permittivity, enable_NS, enable_EC, **namespace):
    """Build the analytical reference expressions for convergence analysis.

    Returns a dict mapping field name ("u", "p", "V", or a solute name) to a
    df.Expression, restricted to the fields that are active under the
    enable_NS / enable_EC flags.
    """
    params = dict(
        t=t,
        mu=viscosity[0],
        rho_0=density[0],
        K=solutes[0][2],  # same diffusivity assumed for all species
        c0=concentration_init,
        chi=concentration_init_dev,
        veps=permittivity[0],
    )
    ns_fields = ["u", "p"]
    ec_fields = ["V"] + [solute[0] for solute in solutes]
    expr = dict()
    for key, code_string in reference_code(solutes).items():
        active = (enable_NS and key in ns_fields) or (enable_EC and key in ec_fields)
        if active:
            expr[key] = df.Expression(code_string, degree=2, **params)
    return expr
388e74271c17029e4477407d0048e931e756f48b
3,629,600
def std_err_mean(data):
    """Return the standard error of the mean.

    Computes the per-column (axis 0) standard deviation of ``data``, averages
    those deviations, and divides by sqrt(number of rows).

    data: all spikes (2D array, rows = observations)
    """
    n_rows = data.shape[0]
    per_column_std = data.std(0)
    return per_column_std.mean() / np.sqrt(n_rows)
53f6aecf543617f5d85ddca88c193c41605ab668
3,629,601
import copy
from unittest.mock import patch


def mock_history(initial):
    """Mock the commands that interact with the .history file, to fake
    history management.

    Yields the in-memory ``releases`` list that stands in for the history
    file, so tests can inspect mutations.

    Fix: the original used ``contextlib.nested``, which was removed in
    Python 3; a single ``with`` statement with multiple managers is the
    direct replacement.
    """
    releases = copy.copy(initial)
    project = 'project'

    def list_releases():
        return releases

    def _append_to_history(release):
        releases.append(release)

    def _replace_history(release_list):
        # Replace contents in place so existing references stay valid.
        releases[:] = release_list

    with patch('gonzo.tasks.release.list_releases', list_releases), \
         patch('gonzo.tasks.release._append_to_history', _append_to_history), \
         patch('gonzo.tasks.release._replace_history', _replace_history), \
         patch('gonzo.tasks.release.get_project') as get_project:
        get_project.return_value = project
        with disable_external_commands():
            yield releases
ae65684fe3c1fda6bbecc73a84bb767aa5d92713
3,629,602
def _EpoOff(opts, node_list, inst_map):
  """Does the actual power off.

  @param opts: The command line options selected by the user
  @param node_list: The list of nodes to operate on (all need to support OOB)
  @param inst_map: A dict of inst -> nodes mapping
  @return: The desired exit status

  """
  # Stop all instances first; refuse to power off nodes otherwise.
  instances_stopped = _InstanceStart(opts, inst_map.keys(), False,
                                     no_remember=True)
  if not instances_stopped:
    ToStderr("Please investigate and stop instances manually before continuing")
    return constants.EXIT_FAILURE

  if not node_list:
    return constants.EXIT_SUCCESS

  powered_off = _OobPower(opts, node_list, False)
  return constants.EXIT_SUCCESS if powered_off else constants.EXIT_FAILURE
9e23d1979813fadb54b168b2f57a546275b3cf8b
3,629,603
def available_numbers(request):
    """Uses the Twilio API to generate a list of available phone numbers"""
    form = AreaCodeForm(request.POST)

    if not form.is_valid():
        # Our area code was invalid - flash a message and redirect back home
        bad_area_code = form.data['area_code']
        messages.error(request, '{0} is not a valid area code. Please search again.'
                       .format(bad_area_code))
        return redirect('call_tracking:home')

    # We received a valid area code - query the Twilio API
    area_code = form.cleaned_data['area_code']
    available_numbers = search_phone_numbers(area_code=area_code)

    # Check if there are no numbers available in this area code
    if not available_numbers:
        messages.error(
            request, 'There are no Twilio numbers available for area code {0}. Search for numbers in a different area code.'.format(area_code))
        return redirect('call_tracking:home')

    context = {'available_numbers': available_numbers}
    return render(request, 'call_tracking/available_numbers.html', context)
b00cabe2a2c276f09d5185b46f20d0640e66a672
3,629,604
def get_julia_version(path):
    """Return version of the Julia installed in *path*"""
    # `julia.exe -v` prints e.g. "julia version 1.6.2"; keep the last token
    # of the first output line.
    first_line = exec_shell_cmd('julia.exe -v', path).splitlines()[0]
    return first_line.rsplit(" ", 1)[-1]
8421a6de683b7c364e835c6070c8de9f17a4a3c0
3,629,605
def get_pvc_file(flags, spa, pvc_name):
    """Get pvc config.

    Fetches the PVC YAML for *spa* via ``spa get``, patches its metadata
    (namespace, name, app label), rewrites the file in place and returns
    its path.

    :param flags: parsed CLI flags (account_arg, namespace, app_label, no_login)
    :param spa: SPA record dict; ``spa['meta']['id']`` selects the object
    :param pvc_name: name to assign to the PVC (also used for the temp file)
    :return: path of the rewritten YAML file

    Fixes: Python 2 ``print`` statements (syntax errors on Python 3),
    ``yaml.load`` without a Loader (unsafe / warns) -> ``yaml.safe_load``,
    and manual open/close -> context managers.
    """
    outfile = os.path.join(os.getenv('TMPDIR', '/tmp'), pvc_name + '.yaml')
    args = [
        'spa', 'get',
        '-A', flags.account_arg,
        '--id', spa['meta']['id'],
        '-K', 'k8sPvcYaml',
        '-O', outfile
    ]
    print('Getting PVC file:', args)
    nvctl(*args, json=False, no_login=flags.no_login)

    with open(outfile, "r") as file_object:
        pvc_yaml = yaml.safe_load(file_object.read())

    pvc_yaml['metadata']['namespace'] = flags.namespace
    pvc_yaml['metadata']['name'] = pvc_name
    pvc_yaml['metadata'].update({'labels': {'app': flags.app_label}})

    with open(outfile, "w") as file_object:
        file_object.write(yaml.dump(pvc_yaml))
    return outfile
611f3f9878db66b30d4119364c754a3c11af41c6
3,629,606
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    frame_stream = attendence()
    mjpeg_mimetype = 'multipart/x-mixed-replace; boundary=frame'
    return Response(frame_stream, mimetype=mjpeg_mimetype)
0ba1d98a4287406f364900eda2b12faeac491237
3,629,607
def current(ticker):
    """ Return the current price for a stock """
    # get_latest_price returns a list; the first entry is the quote we want.
    prices = rh.get_latest_price(ticker)
    return prices[0]
abfa290c79267981571ae25ee7bd1827b4182141
3,629,608
import os


def _file(*args):
    """ Wrapper around os.path.join and os.makedirs."""
    joined_path = os.path.join(*args)
    # Ensure the parent directories exist before handing the path back.
    _makedirs_for_file(joined_path)
    return joined_path
c8cb9e7a05b09ada4d0d5ac682a4fbdb394a2475
3,629,609
import os


def get_root_data_dir() -> str:
    """ Return the directory where the mephisto data is expected to go """
    # Module-level cache: resolve the directory once per process.
    global loaded_data_dir
    if loaded_data_dir is None:
        default_data_dir = os.path.join(get_root_dir(), "data")
        actual_data_dir = get_config_arg(CORE_SECTION, DATA_STORAGE_KEY)
        if actual_data_dir is None:
            # Not configured yet: interactively ask the user where to store data.
            data_dir_location = input(
                "Please enter the full path to a location to store Mephisto run data. By default this "
                f"would be at '{default_data_dir}'. This dir should NOT be on a distributed file "
                "store. Press enter to use the default: "
            ).strip()
            if len(data_dir_location) == 0:
                data_dir_location = default_data_dir
            data_dir_location = os.path.expanduser(data_dir_location)
            os.makedirs(data_dir_location, exist_ok=True)
            # Check to see if there is existing data to possibly move to the data dir:
            database_loc = os.path.join(default_data_dir, "database.db")
            if os.path.exists(database_loc) and data_dir_location != default_data_dir:
                should_migrate = (
                    input(
                        "We have found an existing database in the default data directory, do you want to "
                        f"copy any existing data from the default location to {data_dir_location}? (y)es/no: "
                    )
                    .lower()
                    .strip()
                )
                # Empty answer defaults to "yes".
                if len(should_migrate) == 0 or should_migrate[0] == "y":
                    copy_tree(default_data_dir, data_dir_location)
                    print(
                        "Mephisto data successfully copied, once you've confirmed the migration worked, "
                        "feel free to remove all of the contents in "
                        f"{default_data_dir} EXCEPT for `README.md`."
                    )
            # Persist the choice so the prompt is skipped on later runs.
            add_config_arg(CORE_SECTION, DATA_STORAGE_KEY, data_dir_location)
        # Re-read from config so the cached value reflects what was stored.
        loaded_data_dir = get_config_arg(CORE_SECTION, DATA_STORAGE_KEY)
        if not os.path.isdir(loaded_data_dir):
            raise NotADirectoryError(
                f"The provided Mephisto data directory {loaded_data_dir} as set in "
                f"{DEFAULT_CONFIG_FILE} is not a directory! Please locate your Mephisto "
                f"data directory and update {DEFAULT_CONFIG_FILE} to point to it."
            )
    return loaded_data_dir
ef0d97940c7086918e34e8d63ee3e6e36e8de439
3,629,610
from typing import Optional
from typing import Tuple


def fetch_indicators_command(
    client,
    initial_interval,
    limit,
    last_run_ctx,
    fetch_full_feed: bool = False,
    filter_args: Optional[dict] = None,
) -> Tuple[list, dict]:
    """
    Fetch indicators from TAXII 2 server
    :param client: Taxii2FeedClient
    :param initial_interval: initial interval in parse_date_range format
    :param limit: upper limit of indicators to fetch
    :param last_run_ctx: last run dict with {collection_id: last_run_time string}
    :param fetch_full_feed: when set to true, will ignore last run, and try
                            to fetch the entire feed
    :param filter_args: filter args requested by the user
    :return: indicators in cortex TIM format
    """
    if initial_interval:
        # Normalize the human-readable interval (e.g. "24 hours") into a
        # TAXII-formatted timestamp; only the start of the range is needed.
        initial_interval, _ = parse_date_range(
            initial_interval, date_format=TAXII_TIME_FORMAT
        )

    if filter_args is None:
        filter_args = {}

    last_fetch_time = (
        last_run_ctx.get(client.collection_to_fetch.id)
        if client.collection_to_fetch
        else None
    )

    filter_args["added_after"] = get_added_after(
        fetch_full_feed, initial_interval, last_fetch_time, filter_args
    )

    if client.collection_to_fetch is None:
        # fetch all collections
        if client.collections is None:
            raise DemistoException(ERR_NO_COLL)
        indicators: list = []
        for collection in client.collections:
            # NOTE: the client object is mutated to point at each collection
            # in turn; build_iterator reads collection_to_fetch.
            client.collection_to_fetch = collection
            filter_args["added_after"] = get_added_after(
                fetch_full_feed, initial_interval, last_run_ctx.get(collection.id)
            )
            fetched_iocs = client.build_iterator(limit, **filter_args)
            indicators.extend(fetched_iocs)
            if limit >= 0:
                # A negative limit means "no limit"; otherwise spend the
                # remaining budget and stop once it is exhausted.
                limit -= len(fetched_iocs)
                if limit <= 0:
                    break
            last_run_ctx[collection.id] = client.last_fetched_indicator__modified
    else:
        # fetch from a single collection
        indicators = client.build_iterator(limit, **filter_args)
        last_run_ctx[client.collection_to_fetch.id] = (
            client.last_fetched_indicator__modified
            if client.last_fetched_indicator__modified
            else filter_args.get("added_after")
        )

    return indicators, last_run_ctx
e527391a0055c7604d4423e0565cf04b017e8038
3,629,611
def aggregate(loss, weights=None, mode='mean'):
    """Aggregates an element- or item-wise loss to a scalar loss.

    Parameters
    ----------
    loss : Theano tensor
        The loss expression to aggregate.
    weights : Theano tensor, optional
        Per-element or per-item weights, broadcastable to the shape of
        `loss`. Omitted means uniform weighting. Binary weights (0/1) can
        be used as a mask; masked entries must still hold valid values, as
        NaNs propagate through.
    mode : {'mean', 'sum', 'normalized_sum'}
        Aggregation: average, sum, or sum divided by the total weight
        (the last requires `weights`).

    Returns
    -------
    Theano scalar
        A scalar loss expression suitable for differentiation.
    """
    weighted = loss if weights is None else loss * weights
    if mode == 'mean':
        return weighted.mean()
    if mode == 'sum':
        return weighted.sum()
    if mode == 'normalized_sum':
        if weights is None:
            raise ValueError("require weights for mode='normalized_sum'")
        return weighted.sum() / weights.sum()
    raise ValueError("mode must be 'mean', 'sum' or 'normalized_sum', "
                     "got %r" % mode)
6d888d1854cfa78e13fcd5eba412e224164386d7
3,629,612
def mirror_ud(image):
    """Mirrors an image between up and down

    :param image: An input image to convert
    :type image: array_like
    :return: Mirrored image. Same dimensions as input.
    :rtype: ndarray
    """
    # Reverse the row order (axis 0); columns are untouched.
    flipped = np.flipud(image)
    return flipped
8a8113fcc4d7335fcaa7bd5a259b19e856408814
3,629,613
import torch


def ifftn(input, s=None, dim=-1, norm='backward', real=None):
    """N-dimensional discrete inverse Fourier transform.

    Parameters
    ----------
    input : tensor
        Input signal.
        If torch <= 1.5, the last dimension must be of length 2 and
        contain the real and imaginary parts of the signal, unless
        `real is True`.
    s : sequence[int], optional
        Signal size in the transformed dimensions.
        If given, each dimension dim[i] will either be zero-padded or
        trimmed to the length s[i] before computing the IFFT.
        If a length -1 is specified, no padding is done in that dimension.
        Default: s = [input.size(d) for d in input.dim()]
    dim : sequence[int], optional
        Dimensions to be transformed.
        Default: all dimensions, or the last len(s) dimensions if s is given.
        If torch <= 1.5, the dimension encoding the real and imaginary
        parts are not taken into account in dimension indexing.
    norm : {'forward', 'backward', 'ortho'}, default='backward'
        forward : no normalization
        backward : normalize by 1/n
        ortho : normalize by 1/sqrt(n) (making the IFFT orthonormal)
    real : bool, default=False
        Only used if torch <= 1.5.
        If True, the input signal has no imaginary component and the
        dimension encoding the real and imaginary parts does not exist.

    Returns
    -------
    output : tensor
        Inverse Fourier transform of the input signal.
        Complex tensor. If torch <= 1.5, the last dimension is of length 2
        and contain the real and imaginary parts of the signal.
    """
    # Fast path: modern torch (>= 1.8) ships torch.fft as a module.
    if _torch_has_fft_module:
        return fft_mod.ifftn(input, s, dim, norm=norm)

    # --- Legacy fallback for torch <= 1.5 below ---
    # Output shape. `oldcomplex` is 1 when the trailing dim of size 2
    # encodes (real, imag) and must be excluded from dimension counting.
    oldcomplex = not (real or _torch_has_complex)
    if dim:
        ndim = len(dim)
    elif s:
        ndim = len(s)
    else:
        ndim = input.dim() - oldcomplex
    s = s or [-1] * ndim
    dim = dim or list(range(input.dim()-oldcomplex-len(s),
                            input.dim()-oldcomplex))
    # Normalize negative dim indices to absolute positions.
    dim = [input.dim()-oldcomplex+d if d < 0 else d for d in dim]
    ndim = len(dim)
    input = utils.movedim(input, dim, -1-oldcomplex)

    # Make real and move processed dimension to the right
    if _torch_has_complex:
        if input.is_complex():
            input = torch.view_as_real(input)
            real = False
        else:
            real = True

    # Crop/pad each transformed dimension to the requested size s[j].
    newdim = list(range(-ndim-(not real), -(not real)))
    for j, (s1, d1) in enumerate(zip(s, newdim)):
        if s1 is None or s1 < 0:
            s[j] = input.shape[d1]
        else:
            if input.shape[d1] > s1:
                input = utils.slice_tensor(input, slice(s1), d1)
            elif input.shape[d1] < s1:
                pad = [0] * (d1-1) + [s1 - input.shape[d1]]
                input = utils.pad(input, pad, side='post')

    # do fft — legacy torch.rfft/ifft handle at most 3 dimensions at once.
    if real:
        fft_fn = lambda x, d: torch.rfft(x, d, normalized=(norm == 'ortho'),
                                         onesided=False)
        output = fft_fn(input, min(ndim, 3))
        output[..., -1].neg_()  # conjugate (rfft gives the forward transform)
        if norm == 'backward':
            output /= py.prod(s[-3:])
    else:
        fft_fn = lambda x, d: torch.ifft(x, d, normalized=(norm == 'ortho'))
        output = fft_fn(input, min(ndim, 3))
        if norm == 'forward':
            output *= py.prod(s[-3:])

    # remaining dimensions: transform one at a time beyond the first three.
    fft_fn = lambda x, d: torch.ifft(x, d, normalized=(norm == 'ortho'))
    for j in range(max(0, ndim-3)):
        output = utils.movedim(output, -j-ndim-1, -2)
        output = fft_fn(output, 1)
        output = utils.movedim(output, -2, -j-ndim-1)
    if norm == 'forward' and ndim > 3:
        output *= py.prod(s[:-3])

    # Make complex and move back dimension to its original position
    newdim = list(range(-ndim-1, -1))
    output = utils.movedim(output, newdim, dim)
    if _torch_has_complex:
        # NOTE(review): Tensor appears to have no `.view_as_complex()` method;
        # this may need to be `torch.view_as_complex(output)` — confirm.
        output = output.view_as_complex()
    return output
0fa3c678a6747d25634f5a68e78fa3d4597ac587
3,629,614
def list_del():
    """Demonstrate removing a list element by index using the ``del`` statement."""
    animals = ['dog', 'lion', 'snake', 'elephant',
               'cow', 'donkey', 'goat', 'duck']
    # Drop the third entry ('snake'); remaining items shift left.
    del animals[2]
    return animals
716fbb915bc477f4232e66b6b7fcb720bb0520f4
3,629,615
def _get_group_definition(group):
    """Get an instance of the group definition for the specified item.
    This definition can be used to clone or download the group.

    Keyword arguments:
    group - The arcgis.GIS.Group to get the definition for."""
    # dict(group) snapshots the group's properties for the definition.
    properties = dict(group)
    return _GroupDefinition(properties, thumbnail=None, portal_group=group)
7a00dc3117ec725bbd9eef2dbafe39e1d39d9de7
3,629,616
import os
import urllib


def fetch_bike_dataset(years, data_dir="data"):
    """ Dowload bike dataset for a given year and return the list of files. """
    base_url = "https://s3.amazonaws.com/capitalbikeshare-data/"
    files = []
    for year in years:
        archive_name = str(year) + "-capitalbikeshare-tripdata.zip"
        archive_path = os.path.join(data_dir, archive_name)
        # Skip the download when the archive is already cached locally.
        if not os.path.isfile(archive_path):
            urllib.request.urlretrieve(base_url + archive_name, archive_path,
                                       reporthook=show_progress)
        with ZipFile(archive_path) as myzip:
            files.extend(os.path.join(data_dir, member)
                         for member in myzip.namelist())
            myzip.extractall(data_dir)
    print("Files extracted: " + str(files))
    return files
55733e7fa65299c3f4f23125dde80c83fb7caae2
3,629,617
def assert_rank(x, rank, data=None, summarize=None, message=None, name=None):
  """Assert `x` has rank equal to `rank`.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.compat.v1.assert_rank(x, 2)]):
    output = tf.reduce_sum(x)
  ```

  Args:
    x: Numeric `Tensor`.
    rank: Scalar integer `Tensor`.
    data: The tensors to print out if the condition is False. Defaults to error
      message and the shape of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_rank".

  Returns:
    Op raising `InvalidArgumentError` unless `x` has specified rank. If static
    checks determine `x` has correct rank, a `no_op` is returned.

  Raises:
    ValueError: If static checks determine `x` has wrong rank.
  """
  with ops.name_scope(name, 'assert_rank', (x, rank) + tuple(data or [])):
    x = ops.convert_to_tensor(x, name='x')
    rank = ops.convert_to_tensor(rank, name='rank')
    message = message or ''
    # Static check compares known ranks; dynamic check builds an equality op.
    static_condition = lambda actual_rank, given_rank: actual_rank == given_rank
    dynamic_condition = math_ops.equal
    if context.executing_eagerly():
      # Eager tensors have no graph name.
      name = ''
    else:
      name = x.name
    if data is None:
      data = [
          message, 'Tensor %s must have rank' % name, rank, 'Received shape: ',
          array_ops.shape(x)
      ]
    try:
      assert_op = _assert_rank_condition(x, rank, static_condition,
                                         dynamic_condition, data, summarize)
    except ValueError as e:
      # Re-raise static-rank failures with a friendlier message; propagate
      # anything else untouched.
      if e.args[0] == 'Static rank condition failed':
        raise ValueError(
            '%s. Tensor %s must have rank %d. Received rank %d, shape %s' %
            (message, name, e.args[2], e.args[1], x.get_shape()))
      else:
        raise
  return assert_op
447f95b909b21aa5c918bae2eccd327fb3bdb463
3,629,618
def get_lo_hi_from_CI(s, exclude=None):
    """
    Parse the confidence interval from CI.

    The string holds two dash-separated ranges joined by ``|``; the result is
    the max of the two lower bounds and the max of the two upper bounds.
    If `exclude` matches a lower or upper bound, that bound is dropped
    before taking the max.

    Fix: the doctest previously showed a "/" separator, but the code splits
    on "|"; the documented example would have crashed.

    >>> get_lo_hi_from_CI("20-20|40-60")
    (40, 60)
    """
    a, b = s.split("|")
    ai, aj = a.split("-")
    bi, bj = b.split("-")
    los = [int(ai), int(bi)]
    his = [int(aj), int(bj)]
    if exclude and exclude in los:
        los.remove(exclude)
    if exclude and exclude in his:
        his.remove(exclude)
    return max(los), max(his)
69c0fb14afd18444465cb7b0f8b23990d044a2b9
3,629,619
import transformers


def get_pretrained_model(path='saved_models/checkpoint-1480'):
    """Gets a BERT classification model from a given pretrained model checkpoint
    (supplied with a filepath).

    Args:
        path (str): a valid filepath that contains the checkpoint of a
            DistilBert model

    Returns:
        transformers.DistilBertForSequenceClassification: a DistilBert model
    """
    # AutoModel* resolves the concrete architecture from the checkpoint config.
    return transformers.AutoModelForSequenceClassification.from_pretrained(path)
49a6a8603a7dc221f4ec9b85ef24ff2c422d0b51
3,629,620
from typing import Type
from typing import Iterable
from typing import Optional


def get_docstring_summary(
    cls: Type,
    *,
    fallback_to_ancestors: bool = False,
    ignored_ancestors: Iterable[Type] = (object,)
) -> Optional[str]:
    """Get the summary line(s) of docstring for a class.

    If the summary spans more than one line, the lines are flattened into a
    single space-joined line. Returns None when no docstring is found.
    """
    # get_docstring fixes indentation and strips unnecessary whitespace.
    full_docstring = get_docstring(
        cls,
        fallback_to_ancestors=fallback_to_ancestors,
        ignored_ancestors=ignored_ancestors
    )
    if full_docstring is None:
        return None
    # Collect lines up to (but excluding) the first blank line.
    summary_lines = []
    for line in full_docstring.splitlines():
        if line.strip() == "":
            break
        summary_lines.append(line)
    return " ".join(summary_lines)
0fbd076962dd08e4b537d00dd39e473a94331a5c
3,629,621
import logging


def get_input_evaluation_tensors(reader, data_pattern, batch_size=1024,
                                 num_readers=1):
  """Creates the section of the graph which reads the evaluation data.

  Args:
    reader: A class which parses the training data.
    data_pattern: A 'glob' style path to the data files.
    batch_size: How many examples to process at a time.
    num_readers: How many I/O threads to use.

  Returns:
    A tuple containing the features tensor, labels tensor, and optionally a
    tensor containing the number of frames per video. The exact dimensions
    depend on the reader being used.

  Raises:
    IOError: If no files matching the given pattern were found.
  """
  logging.info("Using batch size of " + str(batch_size) + " for evaluation.")
  with tf.name_scope("eval_input"):
    files = gfile.Glob(data_pattern)
    if not files:
      raise IOError("Unable to find the evaluation files.")
    logging.info("number of evaluation files: " + str(len(files)))
    # Single epoch, no shuffling: evaluation must see every example once,
    # in a stable order.
    filename_queue = tf.train.string_input_producer(
        files, shuffle=False, num_epochs=1)
    # One reader op per I/O thread, all pulling from the same queue.
    eval_data = [
        reader.prepare_reader(filename_queue) for _ in range(num_readers)
    ]
    return tf.train.batch_join(
        eval_data,
        batch_size=batch_size,
        capacity=4 * batch_size,
        allow_smaller_final_batch=True,
        enqueue_many=True)
6f677a368ab55b8ecc4816f104484444ed8bfa3c
3,629,622
def load_extensions(app):
    """
    To load navitaire extension
    :param app:
    :param name:
    :return:
    """
    # Return the registered extension when present; otherwise signal a
    # precondition failure.
    if 'stargate' in app.extensions:
        return app.extensions['stargate']
    raise GeneralError({
        "code": "General Error",
        "description": "{name} not exist".format(name='stargate')
    }, 'info', http_code.HTTP_428_PRECONDITION_REQUIRED)
6239bc9cafdc1a948634b1b7329cfd6f1bbc1a4f
3,629,623
def blur(img):
    """
    This function will blur the original image
    ---------------------------------------------
    :param img: SimpleImage, the original image
    :return: SimpleImage, the blurred image

    Each output pixel is the integer average of itself and every in-bounds
    neighbor in its 3x3 window: 4 pixels at the corners, 6 along the edges,
    9 in the interior — the same sums and divisors as the original
    case-by-case version, expressed as one generic neighborhood loop.
    """
    new_img = SimpleImage.blank(img.width, img.height)
    for x in range(img.width):
        for y in range(img.height):
            total_red = 0
            total_green = 0
            total_blue = 0
            count = 0
            # Visit the 3x3 window centered on (x, y), skipping positions
            # that fall outside the image.
            for dx in (-1, 0, 1):
                for dy in (-1, 0, 1):
                    nx = x + dx
                    ny = y + dy
                    if 0 <= nx < img.width and 0 <= ny < img.height:
                        neighbor = img.get_pixel(nx, ny)
                        total_red += neighbor.red
                        total_green += neighbor.green
                        total_blue += neighbor.blue
                        count += 1
            out_pixel = new_img.get_pixel(x, y)
            out_pixel.red = total_red // count
            out_pixel.green = total_green // count
            out_pixel.blue = total_blue // count
    return new_img
b41784396ff4f4402a7a8604796a32089f328b30
3,629,624
import requests
import logging


def check_virustotal(domain, api_key, threshold):
    """
    Checks VirusTotal to see if the domain is malicious.

    :param domain: domain/URL to look up
    :param api_key: VirusTotal API key
    :param threshold: max positive scans tolerated before flagging malicious
    :return: True when the domain is considered benign (unknown to VT, or
             positives <= threshold); False when malicious, malformed, or
             the lookup failed.

    Fixes: the bare ``except:`` (which also swallowed KeyboardInterrupt /
    SystemExit) is narrowed to request and payload errors, and the implicit
    ``None`` fall-through on a non-OK HTTP status is now an explicit False
    (same truthiness for callers).
    """
    url = 'https://www.virustotal.com/vtapi/v2/url/report'
    params = {'resource': domain, 'apikey': api_key, 'allinfo': 1}
    try:
        response = requests.get(url, params=params)
        if response.status_code == requests.codes.ok:
            response_json = response.json()
            logging.info("\tSubmitted domain {0} to VirusTotal for verification, response was {1}".format(domain, response_json.get('verbose_msg', '')))
            if response_json['response_code'] == 0:
                # VT has never seen the domain: treat as benign.
                logging.info("\tVT: Has not seen {0} before, assuming domain is benign".format(domain))
                return True
            elif response_json['response_code'] == -1:
                logging.debug("\tVT: Reporting that domain {0} is malformed, assuming malicious".format(domain))
                return False
            elif response_json['response_code'] == 1:
                total = int(response_json.get('total', 0))
                positive = int(response_json.get('positives', 0))
                additionalinfo = response_json.get('additional_info', '')
                if additionalinfo:
                    logging.info("\tVT: Category is: {0}".format(additionalinfo.get('categories', '')))
                logging.info("\tVT: Positive scans: {0} out of {1} total scans".format(positive, total))
                if positive > int(threshold):
                    logging.info("\tVT: Threshold exceeded, skipping domain")
                    return False
                else:
                    logging.info("\tVT: Under threshold, domain is benign")
                    return True
    except (requests.RequestException, ValueError, KeyError):
        # Network failure, non-JSON body, or unexpected payload shape.
        logging.debug("Exception caught from VirusTotal when receiving report")
        return False
    # Non-OK HTTP status or unrecognized response_code: fail closed.
    return False
1e7330a41c95eec7372aa001093c872ee97b633e
3,629,625
def describe_db_instances(
    name=None,
    filters=None,
    jmespath="DBInstances",
    region=None,
    key=None,
    keyid=None,
    profile=None,
):
    """
    Return a detailed listing of some, or all, DB Instances visible in the
    current scope. Arbitrary subelements or subsections of the returned
    dataset can be selected by passing in a valid JMSEPath filter as well.

    Fix: the ``args.update(...) if name else None`` conditional-expression
    statements are replaced with plain ``if`` statements — same behavior,
    idiomatic form.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_rds.describe_db_instances jmespath='DBInstances[*].DBInstanceIdentifier'
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    pag = conn.get_paginator("describe_db_instances")
    args = {}
    if name:
        args["DBInstanceIdentifier"] = name
    if filters:
        args["Filters"] = filters
    pit = pag.paginate(**args)
    if jmespath:
        pit = pit.search(jmespath)
    try:
        return [p for p in pit]
    except ClientError as e:
        code = getattr(e, "response", {}).get("Error", {}).get("Code")
        # "not found" is an expected empty result, not an error worth logging.
        if code != "DBInstanceNotFound":
            log.error(__utils__["boto3.get_error"](e))
        return []
6393a969fd421fe966b593213acf8edc47520665
3,629,626
import imp
import os
import os.path as osp


def get_parent_until(path):
    """
    Given a file path, determine the full module path.

    e.g. '/usr/lib/python2.7/dist-packages/numpy/core/__init__.pyc' yields
    'numpy.core'

    Returns None when the file itself is not importable from its directory.

    Fix: the function body uses ``osp.*`` throughout but only ``imp`` and
    ``os`` were imported — ``osp`` was undefined; add the standard
    ``import os.path as osp`` alias.

    NOTE(review): the ``imp`` module is deprecated and removed in Python
    3.12; migrating to ``importlib`` would change the exception-driven
    control flow here, so it is left as-is for now — confirm target
    Python version.
    """
    dirname = osp.dirname(path)
    try:
        mod = osp.basename(path)
        mod = osp.splitext(mod)[0]
        imp.find_module(mod, [dirname])
    except ImportError:
        return
    items = [mod]
    # Walk upward while each parent directory is itself a package
    # (i.e. contains an __init__ module).
    while 1:
        items.append(osp.basename(dirname))
        try:
            dirname = osp.dirname(dirname)
            imp.find_module('__init__', [dirname + os.sep])
        except ImportError:
            break
    return '.'.join(reversed(items))
abf283df8ead744b4f1fc7df93fb721064e9268d
3,629,627
import os


def DSHAPE(tmpdir_factory):
    """Run DSHAPE example."""
    input_path = ".//tests//inputs//DSHAPE"
    vmec_nc_path = ".//tests//inputs//wout_DSHAPE.nc"

    # Per-test output directory and expected artifact paths.
    output_dir = tmpdir_factory.mktemp("result")
    desc_h5_path = output_dir.join("DSHAPE_out.h5")
    desc_nc_path = output_dir.join("DSHAPE_out.nc")
    booz_nc_path = output_dir.join("DSHAPE_bx.nc")

    cwd = os.path.dirname(__file__)
    exec_dir = os.path.join(cwd, "..")
    input_filename = os.path.join(exec_dir, input_path)

    print("Running DSHAPE test.")
    print("exec_dir=", exec_dir)
    print("cwd=", cwd)

    # Run the solver with verbose output, writing the HDF5 result.
    main(["-o", str(desc_h5_path), input_filename, "-vv"])

    return {
        "input_path": input_path,
        "desc_h5_path": desc_h5_path,
        "desc_nc_path": desc_nc_path,
        "vmec_nc_path": vmec_nc_path,
        "booz_nc_path": booz_nc_path,
    }
f5d07a9f4f97ccb90281f6c4c9d8b205b051d11e
3,629,628
def get_control_changes(midi, use_drums=True):
    """Retrieves a list of control change events from a given MIDI song.

    Each event is returned as a tuple of
    (instrument program, instrument index, is_drum flag, control change).
    Drum instruments are skipped unless ``use_drums`` is True.

    Arguments:
        midi (PrettyMIDI): The MIDI song.
    """
    return [
        (instrument.program, index, instrument.is_drum, control_change)
        for index, instrument in enumerate(midi.instruments)
        if use_drums or not instrument.is_drum
        for control_change in instrument.control_changes
    ]
c3c264c11f9ef38aa79c24cd795e35145139beb1
3,629,629
def is_workinprogress(change):
    """Return True if the patchset is WIP

    :param dict change: De-serialized dict of a gerrit change

    :return: True if one of the votes on the review sets it to WIP.
    """
    # This indicates WIP for older Gerrit versions
    if change['status'] != 'NEW':
        return True

    # Gerrit 2.8 WIP: a negative Workflow vote on the latest patch set.
    latest_patchset = change['patchSets'][-1]
    try:
        votes = latest_patchset['approvals']
    except KeyError:
        # Means no one has voted on the latest patch set yet
        return False
    return any(vote['type'] == 'Workflow' and int(vote['value']) < 0
               for vote in votes)
ac2f5ba1ab8d5fd432ef7b13c5b033e0c3710fd4
3,629,630
def get_bot() -> NoneBot:
    """Return the global NoneBot object.

    Useful e.g. inside scheduled-task callbacks that need the current
    NoneBot instance.

    Returns:
        NoneBot: the global NoneBot object.

    Raises:
        ValueError: the global NoneBot object has not been initialized yet.

    Usage:
        ```python
        bot = nonebot.get_bot()
        ```
    """
    bot = _bot
    if bot is None:
        raise ValueError('NoneBot instance has not been initialized')
    return bot
175c53c0b3bc73d303e1b766df94fcfb074f3de0
3,629,631
def nbr(nir_agg: xr.DataArray, swir2_agg: xr.DataArray, name='nbr'):
    """
    Computes Normalized Burn Ratio. Used to identify burned areas and
    provide a measure of burn severity.

    Parameters
    ----------
    nir_agg : xr.DataArray
        2D array of near-infrared band.
    swir2_agg : xr.DataArray
        2D array of shortwave infrared band.
        (Landsat 4-7: Band 6)
        (Landsat 8: Band 7)
    name : str, default='nbr'
        Name of output DataArray.

    Returns
    -------
    nbr_agg : xr.DataArray of the same type as inputs
        2D array of nbr values.
        All other input attributes are preserved.

    References
    ----------
        - USGS: https://www.usgs.gov/land-resources/nli/landsat/landsat-normalized-burn-ratio # noqa
    """
    validate_arrays(nir_agg, swir2_agg)

    # Dispatch to the implementation matching the backing array type
    # (numpy / dask / cupy / dask+cupy).
    mapper = ArrayTypeFunctionMapping(
        numpy_func=_normalized_ratio_cpu,
        dask_func=_run_normalized_ratio_dask,
        cupy_func=_run_normalized_ratio_cupy,
        dask_cupy_func=_run_normalized_ratio_dask_cupy,
    )
    out = mapper(nir_agg)(nir_agg.data, swir2_agg.data)

    # Preserve the input's coordinates, dims and attributes on the result.
    return DataArray(out,
                     name=name,
                     coords=nir_agg.coords,
                     dims=nir_agg.dims,
                     attrs=nir_agg.attrs)
7e83911382f484a201df93e1eb31510860100c41
3,629,632
def white_noise(sigma, T, seed):
    """Generate a length-T realization of independent white noise, eps ~ N(0, sigma^2).

    :param sigma: standard deviation of the noise
    :param T: number of samples
    :param seed: seed for numpy's global RNG (makes the draw reproducible)
    :return: ndarray of shape (1, T)
    """
    np.random.seed(seed)
    return np.random.normal(0, sigma, size=(1, T))
e6c88bd9f6857ff6aa0cb6f4af916fecb7aa04a7
3,629,633
def register_serializable(cls):
    """A class decorator registering the class for serialization.

    Stores *cls* in the module-level ``__types`` registry keyed by its
    string name (as produced by ``type2str``), then returns the class
    unchanged so the decorator can be stacked.
    """
    __types[type2str(cls)] = cls
    return cls
f6f65288235a291b9cb064b179e454580ecff280
3,629,634
import io
from datetime import timedelta


def generate_realized_trips_from_gtfs(gtfs_path):
    """Transforms a GTFS feed to realized_trips format (see README for
    specification). It can either read a feed zip file or a folder.

    Parameters
    ----------
    gtfs_path : str
        GTFS feed zip file or folder path

    Returns
    -------
    pd.DataFrame
        realized_trips data structure (see README for specification)
    """
    # NOTE(review): `io` is presumably a project-local module exposing
    # read_gtfs (it shadows the stdlib io) -- confirm against the package.
    gtfs = io.read_gtfs(gtfs_path, "km")

    # Generates all exits
    calendar_dates_by_trip_id = (
        pd.merge(
            gtfs.trips[["service_id", "trip_id"]], gtfs.calendar, on=["service_id"]
        )
    ).drop_duplicates(subset=["service_id", "trip_id"])
    realized_trips = []
    for i, row in gtfs.frequencies.iterrows():
        realized_trips.append(calculate_exits(row, calendar_dates_by_trip_id))
    realized_trips = pd.concat(realized_trips)

    # Adds statistics
    realized_trips = pd.merge(
        realized_trips,
        gtfs.compute_trip_stats()[
            ["trip_id", "duration", "distance", "speed", "start_stop_id", "end_stop_id"]
        ],
        on="trip_id",
    ).rename(
        columns={
            "duration": "elapsed_time",
            "speed": "average_speed",
            "start_stop_id": "departure_id",
            "end_stop_id": "arrival_id",
        }
    )

    # Adds arrival time.
    # BUG FIX: the original called ``datetime.timedelta`` after
    # ``from datetime import datetime`` which raises AttributeError;
    # import and use ``timedelta`` directly instead.
    realized_trips["arrival_datetime"] = realized_trips.apply(
        lambda row: row["departure_datetime"]
        + timedelta(hours=row["elapsed_time"]),
        1,
    )

    # Adds trajectory type
    realized_trips["trajectory_type"] = "complete_trip"

    # creates missing columns
    for c in tables.realized_trips_cols:
        if c not in realized_trips.columns:
            realized_trips[c] = None

    return realized_trips[tables.realized_trips_cols]
4a5050ebf63d39cb3d800adbc42d98410857e302
3,629,635
def get_stockdata_from_sql(mode, begin, end, name):
    """Get stock market data from SQL.

    Available columns include: Open, High, Low, Close, Pctchg, Vol,
    Amount, total_shares, free_float_shares, Vwap.

    :param mode: 0 -> single-day snapshot keyed by stock id;
        otherwise -> a date range with a (date, ID) MultiIndex.
    :param begin: start trade date (the snapshot date when mode == 0).
    :param end: end trade date (ignored when mode == 0).
    :param name: column name to select.
    :return: pd.DataFrame with the requested column.
    """
    conn = None  # BUG FIX: keep `finally` safe if connect() itself raises
    try:
        conn = pymysql.connect(**config)
        cursor = conn.cursor()
        # NOTE(security): `name`, `begin` and `end` are interpolated directly
        # into the SQL text; they must come from trusted code (column names
        # cannot be bound parameters, but the dates could be parameterized).
        if mode == 0:
            query = "SELECT stock_id,%s FROM stock_market_data WHERE trade_date='%s';" % (name, begin)
        else:
            query = "SELECT trade_date,stock_id,%s FROM stock_market_data WHERE trade_date >='%s' \
                AND trade_date <= '%s';" % (name, begin, end)
        cursor.execute(query)
        date = pd.DataFrame(list(cursor.fetchall()))
        if mode == 0:
            date.columns = ['ID', 'name']
        else:
            date.columns = ['date', 'ID', 'name']
            date = date.set_index('ID')
            date.columns = ['date', name]
            date = date.set_index([date['date'], date.index], drop=True)
            del date['date']
        return date
    finally:
        if conn:
            conn.close()
a9456019e51e1049d1bcedb1be45e40304de373f
3,629,636
def full_isomorphism(gra1, gra2):
    """ full graph isomorphism

    Both inputs must already be explicit graphs
    (``explicit(gra) == gra`` is asserted).

    :param gra1: first graph
    :param gra2: second graph
    :return: the isomorphism mapping produced by the networkx backend
        (presumably a dict of atom keys, falsy when no isomorphism
        exists -- confirm against ``_networkx.isomorphism``).
    """
    assert gra1 == explicit(gra1) and gra2 == explicit(gra2)

    nxg1 = _networkx.from_graph(gra1)
    nxg2 = _networkx.from_graph(gra2)
    iso_dct = _networkx.isomorphism(nxg1, nxg2)
    return iso_dct
086b0f85e72beb4b30f405706c0d0896cb65c002
3,629,637
def likelihood_overlap(lk1, lk2):
    """
    Returns overlap area of two likelihood functions.

    Parameters
    ----------
    lk1 : numpy.ndarray
        First likelihood function.
    lk2 : numpy.ndarray
        Second likelihood function.

    Returns
    -------
    overlap : float
        Overlap area (sum of the pointwise minimum of the two curves).
    """
    pointwise_minimum = np.minimum(lk1, lk2)
    return pointwise_minimum.sum()
d5af7b90ad7eac5fe47d7444e284317b573d8780
3,629,638
import resource


def get_total_cpu_time_and_memory_usage():
    """
    Gives the total cpu time of itself and all its children, and the maximum
    RSS memory usage of itself and its single largest child.
    """
    usages = [resource.getrusage(who)
              for who in (resource.RUSAGE_SELF, resource.RUSAGE_CHILDREN)]
    total_cpu_time = sum(u.ru_utime + u.ru_stime for u in usages)
    total_memory_usage = sum(u.ru_maxrss for u in usages)
    return total_cpu_time, total_memory_usage
2073440a0ef6e9185b5b4c7613a56c902a722dc3
3,629,639
from functools import wraps


def require_single_skillet(func):
    """Commands decorated with this require one skillet to be uniquely specified.

    Resolves the target skillet (from the ``--name``/``-n`` option or, when
    exactly one skillet is loaded, that skillet) and stores it on
    ``command.sli.skillet`` before invoking *func*.

    Raises:
        InvalidArgumentsException: if several skillets are loaded and no
            ``--name`` option was given.
        SkilletLoaderException: if the named skillet cannot be loaded.
    """
    @wraps(func)  # preserve the wrapped command's name/docstring
    def wrap(command):
        if not command.sli.options.get("name") and len(command.sli.skillets) > 1:
            raise InvalidArgumentsException(
                f"Specify a skillet to run with --name or -n when more than 1 is present for command {command.sli_command}"
            )
        target_name = (
            command.sli.options.get("name")
            if command.sli.options.get("name")
            else command.sli.skillets[0].name
        )
        command.sli.skillet = command.sli.sl.get_skillet_with_name(target_name)
        if command.sli.skillet is None:
            raise SkilletLoaderException(f"Unable to load skillet {target_name} by name")
        return func(command)

    return wrap
98b79706d1b2281a30bfa4a8dc10120a9631f620
3,629,640
def get_ecf_player(database, key):
    """Return ECFrefDBrecordECFplayer instance for dbrecord[key].

    Fetches the primary record for the ECF player file, then hydrates a
    typed record object from it.
    """
    p = database.get_primary_record(filespec.ECFPLAYER_FILE_DEF, key)
    pr = ECFrefDBrecordECFplayer()
    pr.load_record(p)
    return pr
189ab0219c8a730d7f416aa8419d3ef00352d6f5
3,629,641
import pickle


def pickle_load(namefile: str):
    """Load Python variable, given name of file.

    :param namefile: Path of the pickle file to read.
    :return: The unpickled object.
    """
    # NOTE(security): only unpickle trusted files -- pickle can execute code.
    with open(namefile, 'rb') as fh:
        return pickle.load(fh)
425e53b8daf69bf832abc45a4270cc01f383c50e
3,629,642
import pkg_resources


def load_preset_linelist(name):
    """
    Returns one of our preset line lists, loaded into an astropy QTable.

    Parameters
    ----------
    name : str
        Key into the line-list metadata from ``get_linelist_metadata``.

    Returns
    -------
    QTable
        Table with columns ``linename``, ``rest`` (with units applied)
        and ``listname``.

    Raises
    ------
    ValueError
        If *name* is not one of the available preset line lists.
    """
    metadata = get_linelist_metadata()
    if name not in metadata.keys():
        raise ValueError("Line name not in available set of line lists. " +
                         "Valid list names are: {}".format(list(metadata.keys())))
    fname_base = metadata[name]["filename_base"]
    fname = pkg_resources.resource_filename("jdaviz",
                                            "data/linelists/{}.csv".format(fname_base))
    units = metadata[name]["units"]
    linetable = QTable.read(fname)

    # Add units
    linetable['Rest Value'].unit = units

    # Add column with list name reference
    linetable['listname'] = name

    # Rename remaining columns
    linetable.rename_columns(('Line Name', 'Rest Value'),
                             ('linename', 'rest'))

    return linetable
ea435e63f30eaab8748fb01a7214050349cd89df
3,629,643
def scale_matrix(matrix):
    """Rescale 8-bit pixel values from [0, 255] into [0.01, 1.0].

    The neural net works best with values between 0.01 and 1.
    """
    normalized = matrix / 255
    return normalized * 0.99 + 0.01
b4c0d34a21724ee5712caf8dca131b3e1e1d0753
3,629,644
import os


def load_record_set(path):
    """Load a training/test set from *path*.

    :param path: path of the record file; each line becomes one raw entry.
    :raises IOError: if *path* is not an existing regular file.
    :return: records produced by ``encapsule`` from the file's lines.
    """
    if not os.path.isfile(path):
        raise IOError("File not Found!")
    with open(path, "r") as f:
        content = list(f)
    # the dead `record_list = []` pre-initialization was removed; the
    # return value comes straight from encapsule()
    return encapsule(content)
148ec3300b5afe122bc794e0a96fbec467481d7c
3,629,645
def negate(condition):
    """
    Returns a CPP conditional that is the opposite of the conditional
    passed in: a leading ``!`` is stripped, otherwise one is prepended.
    """
    return condition[1:] if condition.startswith('!') else "!" + condition
5f31ed3ee2f16a53674f830402fdec890af25032
3,629,646
import maya.utils as utils
import hdefereval
import multiprocessing


def __worker(func):
    """Run *func* on the host DCC application's main thread.

    Dispatch depends on the detected environment (Maya / Houdini / Nuke /
    3ds Max); the callable itself is returned afterwards.

    Args:
        func: callable to execute; for Max it also doubles as a lock
            object (acquire / locked / release).

    Returns:
        func
    """
    if env.Maya():
        # Maya: defer execution to the main thread's idle queue.
        utils.executeDeferred(func)
    # https://forums.odforce.net/topic/22570-execute-in-main-thread-with-results/
    elif env.Houdini():
        # NOTE(review): executes the callable once per CPU core (+1) --
        # looks intentional but worth confirming.
        n = 0
        while n < multiprocessing.cpu_count() + 1:
            hdefereval.executeInMainThreadWithResult(func)
            n += 1
    elif env.Nuke():
        meta.executeInMainThreadWithResult(func)
    elif env.Max():
        try:
            func.acquire()
            with meta.mxstoken():
                func()
        except:
            raise
        finally:
            # only release if the lock is still held
            if func.locked():
                func.release()
    return func
a6d7635a09fe02975ed802cd4b59d70d2e163090
3,629,647
def _compute_n50_and_n95_np(readlengths): """ Numpy implementation of N50/N95 calculation. """ if isinstance(readlengths, list): readlengths = np.array(readlengths) readlengths[::-1].sort() # = np.sort(readlengths) total_length = np.sum(readlengths) csum = np.cumsum(readlengths) half_length = total_length // 2 csum_n50 = csum[csum > half_length].min() i_n50 = np.where(csum == csum_n50)[0] n50 = readlengths[i_n50[0]] n95_length = int(total_length * 0.05) csum_n95 = csum[csum > n95_length].min() i_n95 = np.where(csum == csum_n95)[0] n95 = readlengths[i_n95[0]] return (n50, n95)
9a3ac1ccdd3a2af10e76524cc6c628b8868d758b
3,629,648
def get_dominant_horizontal_line_colour(ip):
    """Return the set of colours of rows painted in a single non-background colour.

    A row qualifies when it contains exactly one unique colour and that
    colour is not 0 (the background). More than one such colour may occur.

    >>> get_dominant_horizontal_line_colour(np.array([[0,0,0], [1,1,1], [1,0,0], [2,2,2],[0,0,0]]))
    {1, 2}
    """
    solid_colours = set()
    for row in ip:
        colours = np.unique(row)
        if len(colours) == 1 and colours[0] != 0:
            solid_colours.add(colours[0])
    return solid_colours
e955b76940f85bf448e959e29f5134b1247d45ab
3,629,649
def _filter_out_disabled(d): """ Helper to remove Nones (actually any false-like type) from the scrubbers. This is needed so we can disable global scrubbers in a per-model basis. """ return {k: v for k, v in d.items() if v}
64a4577d6e5998e647ef82f126c50360388aba9a
3,629,650
def main():
    """See fplutil/disttools/push_package.py.

    Builds the argument set for ``disttools.push_package`` from this
    project's constants and delegates to it.

    Returns:
        0 if successful, non-zero otherwise.
    """
    return disttools.push_package.main(disttools.push_package.parse_arguments(
        project_dir=PROJECT_DIR, config_json=CONFIG_JSON))
9e6ccf0d7654d980361e0ef446a0674344a8d3a5
3,629,651
import glob
import os


def extractRotationInteractively(input_dir, zmax_identifier):
    """
    Rotation based on zmax image.

    Loads the z-max projection image, then asks the user to draw a line
    along the worm's long axis; the rotation is the angle of that line.

    :param input_dir: directory searched for the z-max image.
    :param zmax_identifier: substring identifying the z-max file.
    :return: angle in degrees, or None when no z-max image is found or
        the user's selection is not a straight line.
    """
    # find & open image with zmax projection
    try:
        fn_zmax = glob.glob(input_dir + os.sep + "*" + zmax_identifier + "*")[0]
    except:
        IJ.log("No ZMax image found. Skipping derotation.")
        return None
    imp = BF.openImagePlus(fn_zmax)[0]
    imp.show()
    WaitForUserDialog("User input required",
                      "Draw a straight line along the centerline of the worm (from tail to head).\nThen click ok.").show()
    roi = imp.getRoi()
    imp.close()
    if roi is not None and roi.getType() == Roi.LINE:
        angle = roi.getAngle()
        IJ.log("Detected angle (interactive mode): " + str(angle))
        return angle
    else:
        IJ.log("A Straight Line selection is required for derotation. Skipping derotation.")
        return None
e4dbcc9ad945a2e7d95cf214c16a071f66760c88
3,629,652
def _check_supports_private_deps(repository_ctx, swiftc_path, temp_dir):
    """Returns True if `swiftc` supports implementation-only imports.

    Probes the compiler by compiling a scratch source that uses
    `@_implementationOnly import`; success implies support.

    Args:
        repository_ctx: The repository rule context.
        swiftc_path: Path to the `swiftc` executable.
        temp_dir: Scratch directory for the probe source file.

    Returns:
        True if the probe compiles, False otherwise.
    """
    source_file = _scratch_file(
        repository_ctx,
        temp_dir,
        "main.swift",
        """\
@_implementationOnly import Foundation
print("Hello")
""",
    )
    return _swift_succeeds(
        repository_ctx,
        swiftc_path,
        source_file,
    )
4971d84914b957f80e467b6956395806e06117bf
3,629,653
import os


def can_reuse(fpath, cmp_f, silent=False):
    """Check if a file `fpath` exists, is non-empty and is more recent
    than `cmp_f`.

    Reuse can be globally disabled by setting the environment variable
    ``REUSE=0``.

    :param fpath: candidate file to reuse (may be None or empty).
    :param cmp_f: file to compare freshness against (via ``verify_file``).
    :param silent: suppress the "Reusing ..." debug message.
    :return: True when `fpath` can be reused, False otherwise.
    """
    do_reuse = os.environ.get('REUSE', '1')
    if do_reuse == '0':
        return False
    if not fpath or not isfile(fpath):
        return False
    elif verify_file(fpath, cmp_f=cmp_f, silent=True):
        if not silent:
            debug('Reusing ' + fpath)
        return True
    else:
        return False
5d9cfae7994b19265327cdc0da9988835621d20a
3,629,654
def createNullOperator(context, domain, range, dualToRange, label=None):
    """
    Create and return a null (zero-valued) operator.

    *Parameters:*
       - context (Context)
            A Context object to control the assembly of the weak form of the
            newly constructed operator.
       - domain (Space)
            Function space to be taken as the domain of the operator.
       - range (Space)
            Function space to be taken as the range of the operator.
       - dualToRange (Space)
            Function space to be taken as the dual to the range of the operator.
       - label (string)
            Textual label of the operator. If set to None (default), a unique
            label will be generated automatically.

    *Returns* a newly constructed
    BoundaryOperator_BasisFunctionType_ResultType object, with
    BasisFunctionType and ResultType determined automatically from the
    context argument and equal to either float32, float64, complex64 or
    complex128.
    """
    # Delegates to the generic factory keyed by the C++ operator name.
    return _constructOperator(
        "nullOperator", context, domain, range, dualToRange, label)
149ee689c30f6da1ac47c418de41533dc1706663
3,629,655
def stsb(dataset):
    """Convert STSB examples to text2text format.

    STSB maps two sentences to a floating point number between 1 and 5
    representing their semantic similarity. Since we are treating all tasks as
    text-to-text tasks we need to convert this floating point number to a
    string. The vast majority of the similarity score labels in STSB are in the
    set [0, 0.2, 0.4, ..., 4.8, 5.0]. So, we first round the number to the
    closest entry in this set, and then we convert the result to a string
    (literally e.g. "3.4"). This converts STSB roughly into a 26-class
    classification dataset.

    For example, a typical example from STSB might look like
    {
        "sentence1": "Three more US soldiers killed in Afghanistan",
        "sentence2": "NATO Soldier Killed in Afghanistan",
        "label": 1.8,
    }
    This example would be transformed to
    {
        "inputs": (
            "stsb sentence1: Three more US soldiers killed in Afghanistan "
            "sentence2: NATO Soldier Killed in Afghanistan"
        ),
        "targets": "1.8",
    }

    Args:
        dataset: a tf.data.Dataset to process.

    Returns:
        a tf.data.Dataset
    """

    def my_fn(x):
        """Collapse an example into a text2text pair."""
        strs_to_join = [
            'stsb sentence1:', x['sentence1'], 'sentence2:', x['sentence2']
        ]
        # round(label*5)/5 snaps the score to the nearest 0.2 increment;
        # precision=1 renders it with a single decimal place (e.g. "1.8").
        label_string = tf.as_string(tf.round(x['label'] * 5) / 5, precision=1)
        joined = tf.strings.join(strs_to_join, separator=' ')
        return {'inputs': joined, 'targets': label_string, 'idx': x['idx']}

    return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
a19664d2fd7b24efa132852fbb39dd9300b73f59
3,629,656
from typing import Optional, Union


def getString(data: Union[str, list, None] = None, separater: str = " ") -> Optional[str]:
    """Join a value into a single string.

    :param data: The value, either a ``str`` (returned as-is) or a ``list``
        of strings (joined with *separater*). Any other value, including the
        default ``None``, yields ``None``.
        (The original annotation ``dict`` was wrong -- the function handles
        str and list.)
    :param separater: The separator placed between list items.
    :return: The resulting string, or ``None`` for unsupported input.
    """
    if isinstance(data, str):
        return data
    if isinstance(data, list):
        return separater.join(data)
    return None  # explicit: unsupported types (and None) produce None
026dd1838213a48c3bf65c33ce1820a0e2867b79
3,629,657
import os
import re


def make_spectrograms_old(spectros=None, overwrite=False, cmap='magma',
                          subdirs=['no_sax', 'sax_sec', 'sax_solo']
                          ):
    """
    *** VERSION 1, USES OLD FILE FORMAT ***

    Makes spectrograms for all audio files, or for those for which
    spectrograms have not yet been made.
    ---
    IN
    spectros: dictionary of pre-existing spectrograms, or None (dict)
    overwrite: if True, will overwrite any pre-existing spectrograms (bool)
    cmap: matplotlib colormap, usually 'magma' or 'gray' (str)
    subdirs: audio sub-directories to scan (list of str)
    OUT
    spectros: dictionary of spectrograms with root filename as key
    """
    # NOTE(review): mutable default for `subdirs` is shared across calls --
    # harmless here because it is never mutated, but worth knowing.
    file_ids = set()
    if not spectros:
        spectros = {}
    if not overwrite:
        # collect the ids of spectrograms that already exist on disk so
        # they are skipped below
        for root, dirs, fnames in os.walk('../specs'):
            for fname in fnames:
                if re.match(r'\w{2}\d{4}', fname):
                    file_ids.add(fname[:6])
        print(file_ids)

    for subdir in subdirs:
        for fname in os.listdir('../audio/' + subdir):
            f_id = fname[:6]
            if f_id not in file_ids and re.match(r'\w{2}\d{4}', f_id):
                print(f_id)
                fp = subdir + '/' + f_id
                spectros[f_id] = make_spectro(fp,
                                              show=False,
                                              save=True,
                                              cmap=cmap
                                              )
    return spectros
8e9ef34b35158eecf947823a3c4d6a35f6fe35ed
3,629,658
def scatter_matrix(df, theme=None, bins=10, color='grey', size=2):
    """
    Displays a matrix with scatter plot for each pair of
    Series in the DataFrame.
    The diagonal shows a histogram for each of the Series

    Parameters:
    -----------
        df : DataFrame
            Pandas DataFrame
        theme : string
            Theme to be used (if not the default)
        bins : int
            Number of bins to use for histogram
        color : string
            Color to be used for each scatter plot
        size : int
            Size for each marker on the scatter plot
    """
    if not theme:
        theme = auth.get_config_file()['theme']

    figs = []
    # histogram on the diagonal, pairwise scatter everywhere else
    for i in df.columns:
        for j in df.columns:
            if i == j:
                fig = df.iplot(kind='histogram', keys=[i], asFigure=True, bins=bins)
                figs.append(fig)
            else:
                figs.append(df.iplot(kind='scatter', mode='markers', x=j, y=i,
                                     asFigure=True, size=size, colors=[color]))
    layout = getLayout(theme)
    # grids off for a cleaner matrix
    layout['xaxis'].update(showgrid=False)
    layout['yaxis'].update(showgrid=False)
    sm = subplots(figs, shape=(len(df.columns), len(df.columns)),
                  shared_xaxes=False, shared_yaxes=False,
                  horizontal_spacing=.05, vertical_spacing=.07,
                  base_layout=layout)
    sm['layout'].update(bargap=.02, showlegend=False)
    return sm
db92ed3b03ecb081364e7f50ddbb9b0a777ce820
3,629,659
def _gr1_sorted_(graph, reverse=False):
    """Make sorted graph (points reordered by their y-value)

    >>> graph = ...
    >>> s = graph.sorted()
    """
    # sort the (index, point) pairs by each point's y-value
    oitems = (i for i in graph.iteritems())
    sitems = sorted(oitems, key=lambda s: s[1].value(), reverse=reverse)
    # NOTE(review): relies on ostap-style ROOT TGraphErrors helpers
    # (iteritems / item assignment / copy_graph_attributes) -- confirm.
    new_graph = ROOT.TGraphErrors(len(graph))
    copy_graph_attributes(graph, new_graph)
    ip = 0
    for item in sitems:
        new_graph[ip] = item[1:]
        ip += 1
    return new_graph
147edad4505c9d7ab5df181d6e731e8c7f6713b3
3,629,660
def compute_features_paa(filename, with_timebase=False, verbose=False):
    """compute_features_paa

    Compute a bag of standard audio features (pyAudioAnalysis short-term
    and mid-term features) to be used for some downstream task.

    :param filename: audio file to load.
    :param with_timebase: also return time axes for both feature sets.
    :param verbose: print progress and array shapes.
    :return: (F, F_names, G) or, when with_timebase is True,
        (F, F_names, G, F_time, G_time), where F are short-term features,
        G mid-term features and F_names the feature labels.
    """
    if verbose:
        print('compute_features_paa loading from {0}'.format(filename))
    [Fs, x_] = audioBasicIO.read_audio_file(filename)
    if verbose:
        print('compute_features_paa: loaded {1} samples from {0}'.format(filename, x_.shape))

    # mix down to mono when the file is stereo/multichannel
    if len(x_.shape) > 1 and x_.shape[1] > 1:
        x = audioBasicIO.stereo_to_mono(x_)
    else:
        x = x_

    x_duration = x.shape[0]/Fs
    if verbose:
        print(f'compute_features_paa: {x_duration} seconds of audio at {Fs}Hz')

    # mid-term windows of 1.0 s with 0.5 s step; short-term 50 ms / 25 ms
    mt_win = 1.0*Fs
    mt_step = 0.5*Fs
    st_win = 0.050*Fs
    st_step = 0.025*Fs
    G, F, F_names = mF.mid_feature_extraction(x, Fs, mt_win, mt_step, st_win, st_step)

    if with_timebase:
        # time axes matching the mid-term (0.5 s) and short-term (25 ms) steps
        G_time = np.linspace(0, G.shape[1] * 0.5, G.shape[1] + 1)
        F_time = np.linspace(0, F.shape[1] * 0.025, F.shape[1] + 1)
    else:
        G_time = None
        F_time = None

    if verbose:
        print(f'compute_features_paa: F = {F.shape} {F}')
        print(f'compute_features_paa: {F_time}')
        print(f'compute_features_paa: G = {G.shape} {G}')
        print(f'compute_features_paa: {G_time}')

    if with_timebase:
        return F, F_names, G, F_time, G_time
    else:
        return F, F_names, G
2a91c4dfc64bcae6b79c6a5f0bfd5f7fdeac2db6
3,629,661
def str_match_end(name, strip):
    """Return True if *name* ends with the suffix *strip*.

    :param name: String to test; ``None`` never matches.
    :param strip: Suffix to look for (the empty suffix always matches).
    :return: bool
    """
    if name is None:
        return False
    # idiom: str.endswith replaces the manual slice comparison
    return name.endswith(strip)
ba9c84644d22f0b2ce68f7cb6efb1279209084f8
3,629,662
import subprocess


def run_post_script(logger, post_script, vm, mac_ip, custom_mac):
    """Runs a post script for a vm.

    :param logger: logger used for info/debug output.
    :param post_script: path of the script to execute.
    :param vm: VM object; ``vm.config.name`` is passed to the script.
    :param mac_ip: optional (mac, ip) pair appended to the script arguments.
    :param custom_mac: optional custom MAC appended when mac_ip is absent.
    :return: the script's return code.
    """
    if mac_ip:
        logger.info('Running post-script command: %s %s %s %s' %
                    (post_script, vm.config.name, mac_ip[0], mac_ip[1]))
        retcode = subprocess.call([post_script, vm.config.name, mac_ip[0], mac_ip[1]])
        logger.debug('Received return code %s for command: %s %s %s %s' %
                     (retcode, post_script, vm.config.name, mac_ip[0], mac_ip[1]))
    elif custom_mac:
        logger.info('Running post-script command: %s %s %s' %
                    (post_script, vm.config.name, custom_mac))
        # BUG FIX: the custom MAC was logged but never passed to the script;
        # pass it through like the mac_ip case.
        retcode = subprocess.call([post_script, vm.config.name, custom_mac])
        logger.debug('Received return code %s for command: %s %s %s' %
                     (retcode, post_script, vm.config.name, custom_mac))
    else:
        logger.info('Running post-script command: %s %s' % (post_script, vm.config.name))
        retcode = subprocess.call([post_script, vm.config.name])
        logger.debug('Received return code %s for command: %s %s' %
                     (retcode, post_script, vm.config.name))
    return retcode
4e089308b60524839acec0138245664a2d37e572
3,629,663
def points(start, end):
    """ Bresenham's Line Drawing Algorithm in 2D.

    Returns the list of integer (x, y) points from `start` to `end`
    inclusive.
    """
    x0, y0 = start
    x1, y1 = end
    dx, dy = abs(x1 - x0), abs(y1 - y0)
    step_x = 1 if x0 < x1 else -1
    step_y = 1 if y0 < y1 else -1
    error = dx - dy

    result = [(x0, y0)]
    while (x0, y0) != (x1, y1):
        doubled = 2 * error
        if doubled > -dy:
            # overshot in the y direction
            error -= dy
            x0 += step_x
        if doubled < dx:
            # overshot in the x direction
            error += dx
            y0 += step_y
        result.append((x0, y0))
    return result
ffa8be5eb09e2b454242e4095883bfee239e5319
3,629,664
import re


def ExtractIconReps(icon_file_name):
    """Reads the contents of the given icon file and returns a dictionary of
    icon sizes to vector commands for different icon representations stored
    in that file.

    Args:
        icon_file_name: The file path of the icon file to read.
    """
    with open(icon_file_name, "r") as icon_file:
        icon_file_contents = icon_file.readlines()

    # The first representation has no CANVAS_DIMENSIONS line and is
    # implicitly at the reference size.
    current_icon_size = REFERENCE_SIZE_DIP
    icon_sizes = []
    current_icon_representation = []
    icon_representations = {}
    for line in icon_file_contents:
        # Strip comments and empty lines.
        line = line.partition(CPP_COMMENT_DELIMITER)[0].strip()
        if not line:
            continue
        # Retrieve sizes specified by CANVAS_DIMENSIONS to ensure icons are
        # added in sorted order by size descending.
        if line.startswith(CANVAS_DIMENSIONS):
            sizes = re.findall(r"\d+", line)
            if len(sizes) != 1:
                Error("Malformed {} line in {} - it should specify exactly one size."
                      .format(CANVAS_DIMENSIONS, icon_file_name))
            icon_sizes.append(int(sizes[0]))

            # All icons except the first / default icon must start with
            # "CANVAS_DIMENSIONS", so rely on it here as a icon delimiter.
            if current_icon_representation:
                icon_representations = AddIconToDictionary(
                    icon_file_name, current_icon_representation,
                    current_icon_size, icon_representations)
                current_icon_representation = []
            current_icon_size = icon_sizes[-1]

        current_icon_representation.append(line)
    # Flush the final (or only) representation.
    if current_icon_representation:
        icon_representations = AddIconToDictionary(
            icon_file_name, current_icon_representation, current_icon_size,
            icon_representations)

    if not icon_representations:
        Error("Didn't find any icons in {}.".format(icon_file_name))

    # Account for the implicit reference-size icon that has no
    # CANVAS_DIMENSIONS line of its own.
    if len(icon_representations) != len(icon_sizes):
        icon_sizes.insert(0, REFERENCE_SIZE_DIP)

    if sorted(icon_sizes, reverse=True) != icon_sizes:
        Error("The icons in {} should be sorted in descending order of size."
              .format(icon_file_name))

    return icon_representations
14cbe3a9d8ee107fd60643dc61648cfeba3167ae
3,629,665
def _is_device_list_local(devices):
    """Checks whether the devices list is for local or multi-worker.

    Args:
        devices: a list of device strings, either local or remote devices.

    Returns:
        a boolean indicating whether these device strings are for local or for
        remote.

    Raises:
        ValueError: if device strings are not consistent.
    """
    all_local = None
    for d in devices:
        d_spec = tf_device.DeviceSpec().parse_from_string(d)
        is_local = d_spec.job in (None, "localhost")

        if all_local is None:  # Determine all_local from first device.
            all_local = is_local

        if all_local:
            if not is_local:
                raise ValueError("Local device string cannot have job specified other "
                                 "than 'localhost'")
        else:
            if is_local:
                raise ValueError("Remote device string must have job specified.")
            if d_spec.task is None:
                raise ValueError("Remote device string must have task specified.")
    return all_local
adb1c414f8e22ecab3a31a0cd61666d96af1acfd
3,629,666
def ab_from_mv(m, v):
    """
    Estimate beta parameters (a, b) from given mean and variance;
    return (a, b).

    Note, for the uniform distribution on [0, 1], (m, v) = (0.5, 1/12).
    """
    # total concentration a + b (z = 2 for the uniform distribution)
    concentration = m * (1 - m) / v - 1
    return (concentration * m, concentration * (1 - m))
0326c165e44c1ab9df091e0344f12b9fab8c0e19
3,629,667
def get_credentials():
    """Get the Google credentials needed to access our services.

    Uses the Application Default Credentials flow; the credentials are
    scoped with the module-level SCOPES when scoping is required.
    """
    credentials = GoogleCredentials.get_application_default()
    if credentials.create_scoped_required():
        credentials = credentials.create_scoped(SCOPES)
    return credentials
196013b7e49a87ca43a6824a02cf72db830e5420
3,629,668
def mtrax_mat_to_big_arrays(data):
    """translation of code sent to me by alice

    Convert per-frame mtrax tracking data into per-fly trajectory arrays.

    :param data: dict-like with keys 'identity', 'ntargets', 'x_pos',
        'y_pos', 'angle' (flattened per-frame target lists).
    :return: (X, Y, THETA, ids) where X/Y/THETA are (nframes, nflies)
        arrays padded with NaN and ids is the list of remapped identities.
    """
    if np.any(~np.isfinite(data['identity'])):  # make sure no funny numbers
        raise ValueError('cannot handle non-finite data on identity')
    identity = np.array(data['identity'], dtype=int)  # cast to int
    assert np.allclose(identity, data['identity'])  # double check
    # NOTE(review): `unique` is presumably a project helper returning three
    # values (unlike np.unique's default); the third remaps identities to a
    # dense 0..n-1 range -- confirm.
    idscurr, tmp, identity = unique(identity)

    if np.any(~np.isfinite(data['ntargets'])):  # make sure no funny numbers
        raise ValueError('cannot handle non-finite data on ntargets')
    ntargets = np.asarray(data['ntargets'], dtype=int)  # cast to int
    assert np.allclose(ntargets, data['ntargets'])
    nframes = len(ntargets)
    nflies = max(identity) + 1

    def nan(m, n):
        # helper: (m, n) array filled with NaN
        return np.nan * np.ones((m, n))

    X = nan(nflies, nframes)
    Y = nan(nflies, nframes)
    THETA = nan(nflies, nframes)
    startframe = nan(nflies, 1)
    stopframe = nan(nflies, 1)

    j = 0
    for i in range(len(ntargets)):
        # targets present in frame i occupy the flat slice [j, j+ntargets[i])
        idx = j + np.arange(ntargets[i])
        id = identity[idx]
        X[id, i] = data['x_pos'][idx]
        Y[id, i] = data['y_pos'][idx]
        THETA[id, i] = data['angle'][idx]
        j += ntargets[i]

    # transpose so rows are frames and columns are flies
    X = X.T
    Y = Y.T
    THETA = THETA.T
    return X, Y, THETA, list(idscurr)
f7c7b2d9c3e731cf7af05aa771e3739ddc66df95
3,629,669
from typing import Union


def normalize_class(c: Union[str, int], ensure_scored: bool = False) -> str:
    """ finished, checked,

    normalize the class name to its abbr., facilitating the computation
    of the `load_weights` function

    Parameters
    ----------
    c: str or int,
        abbr. or SNOMEDCTCode of the class
    ensure_scored: bool, default False,
        ensure that the class is a scored class,
        if True, `ValueError` would be raised if `c` is not scored

    Returns
    -------
    nc: str,
        the abbr. of the class
    """
    # unknown codes fall through unchanged (the lookup default is str(c))
    nc = snomed_ct_code_to_abbr.get(str(c), str(c))
    if ensure_scored and nc not in df_weights_abbr.columns:
        raise ValueError(f"class `{c}` not among the scored classes")
    return nc
320710e59ba585c1de276be4b0195335127c08c8
3,629,670
def appearance_kernel(x_1: int, y_1: int, p_1: np.ndarray, x_2: int, y_2: int,
                      p_2: np.ndarray, theta_alpha: float,
                      theta_beta: float) -> float:
    """Compute appearance kernel.

    Gaussian kernel over both pixel position and pixel colour.

    Args:
        x_1: X coordinate of first pixel.
        y_1: Y coordinate of first pixel.
        p_1: Color vector of first pixel.
        x_2: X coordinate of second pixel.
        y_2: Y coordinate of second pixel.
        p_2: Color vector of second pixel.
        theta_alpha: Standard deviation for the position.
        theta_beta: Standard deviation for the color.

    Returns:
        The output of the appearance kernel.
    """
    spatial_sq = (x_1 - x_2)**2.0 + (y_1 - y_2)**2.0
    colour_sq = np.sum((p_1 - p_2)**2.0)
    exponent = -spatial_sq / (2 * theta_alpha**2.0) - colour_sq / (2.0 * theta_beta**2.0)
    return np.exp(exponent)
4d9f70268baba752352de5e3c692e0bc2ceaac64
3,629,671
def update_game_log_tables(player_id, season):
    """
    Loads player's game logs for a specific season as pandas DataFrames.

    :param player_id: string player identifier ('player_id' is the
        placeholder value meaning "not selected yet")
    :param season: season string such as '2019-20' ('year' is the
        placeholder value)
    :return: column names and table data for the regular-season and
        playoff 'games-table' objects, or dash ``no_update`` when the
        inputs are still placeholders
    """
    # NOTE(review): mutates module-level df_reg_games / df_po_games /
    # df_ids caches -- presumably intended (dash callback pattern); confirm.
    global df_reg_games, df_po_games, df_ids
    if player_id != 'player_id' and season != 'year':
        # get reg. season and playoff (if available) game logs
        df_reg_games = get_gamelogs(player_id, int(season.split('-')[0]) + 1)
        df_po_games = get_gamelogs(player_id, int(season.split('-')[0]) + 1, True)

        # create dictionary for table and list with column names
        table_data_rs = df_reg_games.to_dict('records')
        col_names_rs = [{"name": i, "id": i} for i in df_reg_games.columns]

        # create dictionary for table and list with column names
        table_data_po = df_po_games.to_dict('records')
        col_names_po = [{"name": i, "id": i} for i in df_po_games.columns]

        return col_names_rs, table_data_rs, col_names_po, table_data_po
    else:
        # no updates if input is not full
        return no_update
0627694434e26118b93587265bde710840f8d4a2
3,629,672
from datetime import datetime def _convert_relative_time(relative_time): """ Convert a Cb Response relative time boundary (i.e., start:-1440m) to a device_timestamp: device_timestamp:[2019-06-02T00:00:00Z TO 2019-06-03T23:59:00Z] """ time_format = "%Y-%m-%dT%H:%M:%SZ" minus_minutes = relative_time.split(':')[1].split('m')[0].split('-')[1] end_time = datetime.now() start_time = end_time - timedelta(minutes=int(minus_minutes)) device_timestamp = 'device_timestamp:[{0} TO {1}]'.format(start_time.strftime(time_format), end_time.strftime(time_format)) return device_timestamp
533b2c6b53d9f34754f8d5f1452c9b4caf93108c
3,629,673
def NTFSolve_conv(M, Mmis, Mt0, Mw0, Mb0, nc, tolerance, LogIter, Status0, MaxIterations,
                  NMFFixUserLHE, NMFFixUserRHE, NMFFixUserBHE, NMFSparseLevel,
                  NTFUnimodal, NTFSmooth, NTFLeftComponents, NTFRightComponents,
                  NTFBlockComponents, NBlocks, NTFNConv, NMFPriors, myStatusBox):
    """Estimate NTF matrices (HALS)

    Input:
        M: Input matrix
        Mmis: Define missing values (0 = missing cell, 1 = real cell)
        Mt0: Initial left hand matrix
        Mw0: Initial right hand matrix
        Mb0: Initial block hand matrix
        nc: NTF rank
        tolerance: Convergence threshold
        LogIter: Log results through iterations
        Status0: Initial displayed status to be updated during iterations
        MaxIterations: Max iterations
        NMFFixUserLHE: = 1 => fixed left hand matrix columns
        NMFFixUserRHE: = 1 => fixed right hand matrix columns
        NMFFixUserBHE: = 1 => fixed block hand matrix columns
        NMFSparseLevel : sparsity level (as defined by Hoyer); +/- = make RHE/LHe sparse
        NTFUnimodal: Apply Unimodal constraint on factoring vectors
        NTFSmooth: Apply Smooth constraint on factoring vectors
        NTFLeftComponents: Apply Unimodal/Smooth constraint on left hand matrix
        NTFRightComponents: Apply Unimodal/Smooth constraint on right hand matrix
        NTFBlockComponents: Apply Unimodal/Smooth constraint on block hand matrix
        NBlocks: Number of NTF blocks
        NTFNConv: Half-Size of the convolution window on 3rd-dimension of the tensor
        NMFPriors: Elements in Mw that should be updated (others remain 0)

    Output:
        Mt : if NTFNConv > 0 only otherwise empty. Contains sub-components for each phase in convolution window
        Mt_simple: Left hand matrix (sum of columns Mt_conv for each k)
        Mw_simple: Right hand matrix
        Mb_simple: Block hand matrix
        diff: objective cost

    Note:
        This code extends HALS to allow for shifting on the 3rd dimension of the tensor.
        Suffix '_simple' is added to the non-convolutional components.
        Convolutional components are named the usual way.
    """
    cancel_pressed = 0

    n, p0 = M.shape
    n_Mmis = Mmis.shape[0]
    nc = int(nc)
    NBlocks = int(NBlocks)
    NTFNConv = int(NTFNConv)
    # p: number of columns per block in the unfolded tensor
    p = int(p0 / NBlocks)
    nxp = int(n * p)
    nxp0 = int(n * p0)
    Mt_simple = np.copy(Mt0)
    Mw_simple = np.copy(Mw0)
    Mb_simple = np.copy(Mb0)
    # StepIter = math.ceil(MaxIterations/10)
    StepIter = 1
    pbar_step = 100 * StepIter / MaxIterations
    # Starting column index of each block within the unfolded matrix
    IDBlockp = np.arange(0, (NBlocks - 1) * p + 1, p)
    A = np.zeros(n)
    B = np.zeros(p)
    C = np.zeros(NBlocks)
    MtMw = np.zeros(nxp)
    # Full convolution window size = 2 * half-window + 1
    NTFNConv2 = 2*NTFNConv + 1
    #Initialize Mt, Mw, Mb: each simple component is expanded into NTFNConv2
    #convolutional sub-components (one per shift in the window)
    Mt = np.repeat(Mt_simple, NTFNConv2, axis=1) / NTFNConv2
    Mw = np.repeat(Mw_simple, NTFNConv2, axis=1)
    Mb = np.repeat(Mb_simple, NTFNConv2, axis=1)
    for k3 in range(0, nc):
        n_shift = -NTFNConv - 1
        for k2 in range(0, NTFNConv2):
            n_shift += 1
            k = k3*NTFNConv2+k2
            # Block loadings are the simple loadings shifted along the 3rd dim
            Mb[:,k] = shift(Mb_simple[:, k3], n_shift)

    # Initialize Residual tensor
    Mfit = np.zeros((n, p0))
    for k3 in range(0, nc):
        for k2 in range(0, NTFNConv2):
            k = k3*NTFNConv2+k2
            for iBlock in range(0, NBlocks):
                # Rank-1 contribution of sub-component k to block iBlock
                Mfit[:, IDBlockp[iBlock]:IDBlockp[iBlock] + p] += Mb[iBlock,k] * \
                    np.reshape(Mt[:, k], (n, 1)) @ np.reshape(Mw[:, k], (1, p))

    denomt = np.zeros(n)
    denomw = np.zeros(p)
    denomBlock = np.zeros((NBlocks, nc))
    Mt2 = np.zeros(n)
    Mw2 = np.zeros(p)
    denomCutoff = .1

    if n_Mmis > 0:
        Mres = (M - Mfit) * Mmis
    else:
        Mres = M - Mfit

    myStatusBox.init_bar(delay=1)

    # Loop
    cont = 1
    iIter = 0
    diff0 = 1.e+99
    Mpart = np.zeros((n, p0))
    alpha = NMFSparseLevel
    # NOTE(review): alpha_blocks is assigned but never used in this function.
    alpha_blocks = 0
    PercentZeros = 0
    iterSparse = 0

    while (cont > 0) & (iIter < MaxIterations):
        for k3 in range(0, nc):
            for k2 in range(0, NTFNConv2):
                k = k3*NTFNConv2+k2
                # One HALS update of sub-component k; NTFUpdate returns its full
                # working state, which is re-bound here in the same order.
                NBlocks, Mpart, IDBlockp, p, Mb, k, Mt, n, Mw, n_Mmis, Mmis, Mres, \
                    NMFFixUserLHE, denomt, Mw2, denomCutoff, alpha ,\
                    NTFUnimodal, NTFLeftComponents, NTFSmooth, A, NMFFixUserRHE, \
                    denomw, Mt2, NTFRightComponents, B, NMFFixUserBHE, MtMw, nxp, \
                    denomBlock, NTFBlockComponents, C, Mfit, NMFPriors = \
                    NTFUpdate(NBlocks, Mpart, IDBlockp, p, Mb, k, Mt, n, Mw, n_Mmis, Mmis, Mres, \
                              NMFFixUserLHE, denomt, Mw2, denomCutoff, alpha, \
                              NTFUnimodal, NTFLeftComponents, NTFSmooth, A, NMFFixUserRHE, \
                              denomw, Mt2, NTFRightComponents, B, NMFFixUserBHE, MtMw, nxp, \
                              denomBlock, NTFBlockComponents, C, Mfit, NMFPriors)

            #Update Mt_simple, Mw_simple & Mb_simple from the central (zero-shift)
            #sub-component of the window
            k = k3*NTFNConv2+NTFNConv
            Mt_simple[:, k3] = Mt[:, k]
            Mw_simple[:, k3] = Mw[:, k]
            Mb_simple[:, k3] = Mb[:, k]

            # Update Mw & Mb: re-expand simple components into shifted copies
            Mw[:,:] = np.repeat(Mw_simple, NTFNConv2, axis=1)
            n_shift = -NTFNConv - 1
            for k2 in range(0, NTFNConv2):
                n_shift += 1
                k = k3*NTFNConv2+k2
                Mb[:,k] = shift(Mb_simple[:, k3], n_shift)

        if iIter % StepIter == 0:
            # Check convergence on the relative decrease of the mean squared residual
            diff = np.linalg.norm(Mres) ** 2 / nxp0
            if (diff0 - diff) / diff0 < tolerance:
                cont = 0
            else:
                diff0 = diff

            Status = Status0 + 'Iteration: %s' % int(iIter)
            if NMFSparseLevel != 0:
                Status = Status + '; Achieved sparsity: ' + str(round(PercentZeros, 2)) + '; alpha: ' + str(
                    round(alpha, 2))
                if LogIter == 1:
                    myStatusBox.myPrint(Status)

            myStatusBox.update_status(delay=1, status=Status)
            myStatusBox.update_bar(delay=1, step=pbar_step)
            if myStatusBox.cancel_pressed:
                cancel_pressed = 1
                # NOTE: early-return omits `diff` compared to the normal return
                return [Mt, Mt_simple, Mw_simple, Mb_simple, cancel_pressed]

            if LogIter == 1:
                myStatusBox.myPrint(Status0 + " Iter: " + str(iIter) + " MSR: " + str(diff))

        iIter += 1

        if (cont == 0) | (iIter == MaxIterations):
            if NMFSparseLevel > 0:
                # Measure achieved sparsity on the right-hand matrix and, if below
                # target, raise alpha and restart the iteration loop.
                SparseTest = np.zeros((p, 1))
                for k in range(0, nc):
                    SparseTest[np.where(Mw[:, k] > 0)] = 1

                PercentZeros0 = PercentZeros
                n_SparseTest = np.where(SparseTest == 0)[0].size
                PercentZeros = max(n_SparseTest / p, .01)
                if PercentZeros == PercentZeros0:
                    iterSparse += 1
                else:
                    iterSparse = 0

                if (PercentZeros < 0.99 * NMFSparseLevel) & (iterSparse < 50):
                    alpha *= min(1.01 * NMFSparseLevel / PercentZeros, 1.01)
                    if alpha < .99:
                        iIter = 1
                        cont = 1
            elif NMFSparseLevel < 0:
                # Same scheme, applied to the left-hand matrix for negative levels.
                SparseTest = np.zeros((n, 1))
                for k in range(0, nc):
                    SparseTest[np.where(Mt[:, k] > 0)] = 1

                PercentZeros0 = PercentZeros
                n_SparseTest = np.where(SparseTest == 0)[0].size
                PercentZeros = max(n_SparseTest / n, .01)
                if PercentZeros == PercentZeros0:
                    iterSparse += 1
                else:
                    iterSparse = 0

                if (PercentZeros < 0.99 * abs(NMFSparseLevel)) & (iterSparse < 50):
                    alpha *= min(1.01 * abs(NMFSparseLevel) / PercentZeros, 1.01)
                    if abs(alpha) < .99:
                        iIter = 1
                        cont = 1

    if (n_Mmis > 0) & (NMFFixUserBHE == 0):
        # NOTE(review): denomBlock has shape (NBlocks, nc) while Mb has
        # NTFNConv2*nc columns — confirm the intended broadcasting here.
        Mb *= denomBlock

    return [Mt, Mt_simple, Mw_simple, Mb_simple, diff, cancel_pressed]
a84e5e561ac692f7996c675a26b0e8eaf791d50c
3,629,674
def fds_remove_crc_gaps(rom):
    """Strip the 2-byte CRC gap that follows each FDS disk block.

    The result is a raw 65500-byte disk side playable by FDS emulators.
    https://wiki.nesdev.org/w/index.php/FDS_disk_format
    """
    pos = 0x0

    def take(size, gap=2):
        # Read `size` bytes at the cursor, then skip the CRC gap.
        nonlocal pos
        chunk = rom[pos:pos + size]
        pos += size + gap
        return chunk

    info_block = take(0x38)
    amount_block = take(0x2)
    assert amount_block[0] == 0x02
    n_files = amount_block[1]

    pieces = [info_block, amount_block]
    for _ in range(n_files):
        header = take(0x10)
        assert header[0] == 3
        pieces.append(header)
        # File size lives at offset 13..14 of the header, little-endian.
        data_len = int.from_bytes(header[13:13 + 2], "little")
        pieces.append(take(data_len + 1))

    joined = b"".join(pieces)
    # Zero pad to be 65500 bytes long
    return joined + b"\x00" * (65500 - len(joined))
935ecb4ac01c1256ec074f6888704fdd1db63ea4
3,629,675
def _apply_categorical_projection_naive(y, y_probs, z):
    """Naively implemented categorical projection for checking results.

    See (7) in https://arxiv.org/abs/1802.08163.
    """
    batch_size, n_atoms = y.shape
    assert z.shape == (n_atoms,)
    assert y_probs.shape == (batch_size, n_atoms)
    v_min = z[0]
    v_max = z[-1]
    xp = chainer.cuda.get_array_module(z)
    projected = xp.zeros((batch_size, n_atoms), dtype=np.float32)
    for b in range(batch_size):
        for atom in range(n_atoms):
            sample = y[b, atom]
            prob = y_probs[b, atom]
            if sample <= v_min:
                # Clamp below the support: all mass goes to the first atom.
                projected[b, 0] += prob
            elif sample > v_max:
                # Clamp above the support: all mass goes to the last atom.
                projected[b, -1] += prob
            else:
                # Split the mass linearly between the two bracketing atoms.
                for j in range(n_atoms - 1):
                    if z[j] < sample <= z[j + 1]:
                        delta_z = z[j + 1] - z[j]
                        projected[b, j] += (z[j + 1] - sample) / delta_z * prob
                        projected[b, j + 1] += (sample - z[j]) / delta_z * prob
                        break
                else:
                    assert False
    return projected
3674e57a75508a22a3b12cda52e4fc06fa947d20
3,629,676
def calculate_xdf(
    arr,
    method="truncate",
    methodparam="adaptive",
    truncate_extrema=True,
):
    """Calculate xDF-corrected statistics for correlation coefficients.

    Parameters
    ----------
    arr : numpy.ndarray of shape (S, T)
        S is features, T is time points
    method : {"truncate", "tukey"}, optional
        Regularization method.
    methodparam : str or int or float, optional
        "adaptive", "", an integer, or a float.
        "" and float are only valid for method == "tukey"
        "adaptive" and int are only valid for method == "truncate"
    truncate_extrema : bool, optional
        If an estimate exceeds the theoretical variance of a white noise
        then it curbs the estimate back to (1-rho^2)^2/n_samples.
        If you want it off, set to False.
        Default is True.

    Returns
    -------
    xDFOut : dict
        Keys: "p" (two-tailed p-values), "z" (xDF-corrected z-scores),
        "z_naive" (uncorrected z-scores), "v" (variance estimates),
        "truncate_extrema" (theoretical variance floor),
        "extrema_idx" (indices where the estimate fell below the floor).
    """
    n_features, n_timepoints = arr.shape

    assert isinstance(
        methodparam, (str, int, float)
    ), f"methodparam must be str, int, or float, not {type(methodparam)}"

    arr = arr.copy()

    # variance-normalize the time series
    arr /= arr.std(axis=1, ddof=1, keepdims=True)
    LGR.info("calculate_xdf: Time series standardised by their standard deviations.")

    # Estimate crosscorrelation and autocorrelation
    # Correlation
    rho = np.corrcoef(arr)
    z_naive = np.arctanh(rho) * np.sqrt(n_timepoints - 3)
    np.fill_diagonal(rho, val=0)
    np.fill_diagonal(z_naive, val=0)
    rho = np.round(rho, 7)
    z_naive = np.round(z_naive, 7)

    # Autocorrelation (NOTE(review): CI is unused)
    autocorr, CI = utils.autocorr_fft(arr, n_timepoints)
    autocorr = autocorr[:, 1 : n_timepoints - 1]
    # The last element of ACF is rubbish, the first one is 1, so why bother?!
    nLg = n_timepoints - 2

    # Cross-correlation (NOTE(review): lid is unused)
    crosscorr, lid = utils.crosscorr_fft(arr, n_timepoints)

    # positive-lag xcorrs
    xc_p = crosscorr[:, :, 1 : n_timepoints - 1]
    xc_p = np.flip(xc_p, axis=2)
    # negative-lag xcorrs
    xc_n = crosscorr[:, :, n_timepoints:-1]

    # Start of Regularisation
    # NOTE(review): an unrecognized `method` silently skips regularisation.
    if method.lower() == "tukey":
        if methodparam == "":
            # Default Tukey window width when none is supplied
            window = np.sqrt(n_timepoints)
        else:
            window = methodparam

        window = int(np.round(window))
        LGR.debug(f"calculate_xdf: AC Regularisation: Tukey tapering of M = {window}")
        autocorr = utils.tukeytaperme(autocorr, nLg, window)
        xc_p = utils.tukeytaperme(xc_p, nLg, window)
        xc_n = utils.tukeytaperme(xc_n, nLg, window)
    elif method.lower() == "truncate":
        if isinstance(methodparam, str):
            # Adaptive Truncation
            if methodparam.lower() != "adaptive":
                raise ValueError(
                    "What?! Choose adaptive as the option, or pass an integer for truncation"
                )

            LGR.debug("calculate_xdf: AC Regularisation: Adaptive Truncation")
            autocorr, bp = utils.shrinkme(autocorr, nLg)

            # truncate the cross-correlations, by the breaking point found from the ACF.
            # (choose the largest of two)
            for i in np.arange(n_features):
                for j in np.arange(n_features):
                    maxBP = np.max([bp[i], bp[j]])
                    xc_p[i, j, :] = utils.curbtaperme(xc_p[i, j, :], nLg, maxBP)
                    xc_n[i, j, :] = utils.curbtaperme(xc_n[i, j, :], nLg, maxBP)
        elif isinstance(methodparam, int):
            # Non-Adaptive Truncation
            LGR.debug(
                f"calculate_xdf: AC Regularisation: Non-adaptive Truncation on M = {methodparam}"
            )
            autocorr = utils.curbtaperme(autocorr, nLg, methodparam)
            xc_p = utils.curbtaperme(xc_p, nLg, methodparam)
            xc_n = utils.curbtaperme(xc_n, nLg, methodparam)
        else:
            raise ValueError(
                "calculate_xdf: methodparam for truncation method should be either str or int."
            )
    # Start of Regularisation

    # Start of the Monster Equation
    # Lag weights (nLg, nLg-1, ..., 1) broadcast over every feature pair
    wgt = np.arange(nLg, 0, -1)
    wgtm2 = np.tile((np.tile(wgt, [n_features, 1])), [n_features, 1])
    wgtm3 = np.reshape(wgtm2, [n_features, n_features, np.size(wgt)])
    # this is shit, eats all the memory!
    Tp = n_timepoints - 1

    # Da Equation!
    var_hat_rho = (
        (Tp * ((1 - (rho ** 2)) ** 2))
        + (
            (rho ** 2)
            * np.sum(
                wgtm3 * (matrix.SumMat(autocorr ** 2, nLg) + (xc_p ** 2) + (xc_n ** 2)), axis=2
            )
        )
        - (2 * rho * np.sum(wgtm3 * (matrix.SumMat(autocorr, nLg) * (xc_p + xc_n)), axis=2))
        + (2 * np.sum(wgtm3 * (matrix.ProdMat(autocorr, nLg) + (xc_p * xc_n)), axis=2))
    ) / (n_timepoints ** 2)
    # End of the Monster Equation

    # Truncate to Theoretical Variance
    truncation_value = (1 - rho ** 2) ** 2 / n_timepoints
    np.fill_diagonal(truncation_value, val=0)
    extrema_idx = np.where(var_hat_rho < truncation_value)
    # Each off-diagonal extremum appears twice in the symmetric matrix
    n_extrema = np.shape(extrema_idx)[1] / 2
    if n_extrema > 0 and truncate_extrema:
        LGR.debug("Variance truncation is ON.")
        # Assuming that the variance can *only* get larger in presence of autocorrelation.
        var_hat_rho[extrema_idx] = truncation_value[extrema_idx]
        FGE = n_features * (n_features - 1) / 2
        LGR.debug(
            f"calculate_xdf: {n_extrema} ({str(round((n_extrema / FGE) * 100, 3))}%) "
            "edges had variance smaller than the textbook variance!"
        )
    else:
        LGR.debug("calculate_xdf: NO truncation to the theoretical variance.")

    # Sanity Check:
    # for ii in np.arange(n_extrema):
    #     LGR.info( str( extrema_idx[0][ii]+1 ) + '  ' + str( extrema_idx[1][ii]+1 ) )

    # Start of Statistical Inference
    rf = np.arctanh(rho)
    # delta method; make sure the n_features is correct! So they cancel out.
    sf = var_hat_rho / ((1 - rho ** 2) ** 2)
    rzf = rf / np.sqrt(sf)
    f_pval = 2 * sp.norm.cdf(-np.abs(rzf))  # both tails

    # diagonal is rubbish
    np.fill_diagonal(var_hat_rho, val=0)
    # NaN screws up everything, so get rid of the diag, but be careful here.
    np.fill_diagonal(f_pval, val=0)
    np.fill_diagonal(rzf, val=0)
    # End of Statistical Inference

    xDFOut = {
        "p": f_pval,
        "z": rzf,
        "z_naive": z_naive,
        "v": var_hat_rho,
        "truncate_extrema": truncation_value,
        "extrema_idx": extrema_idx,
    }

    return xDFOut
d0863b6680bf09926013458c1dc006120ce12a01
3,629,677
def truncatewords(base, length, ellipsis="..."):
    """Truncate *base* to at most *length* words, appending *ellipsis* when cut.

    When no truncation is needed the original string is returned verbatim,
    internal whitespace included.
    """
    words = base.split()
    if len(words) <= length:
        # Short enough: hand back the original untouched.
        return base
    return " ".join(words[:length]) + ellipsis
f6472c7511e7e9abf03d4da3ed10c94ef070f78a
3,629,678
def normalising_general(data, scoreMAX=(20, 27, 100, 21), scoreMIN=(0, 0, 0, 0), cumsum=True):
    """Normalises the data of the patient with missing count.

    Parameters
    ----------
    data : numpy array, [number of observations, number of features]
    scoreMAX : max scores for asrm and qids (one per normalised column)
    scoreMIN : min scores for asrm and qids (one per normalised column)
    cumsum : if True, the normalised columns are cumulated over time

    Returns
    -------
    normalised_data : data that are normalised and cumulated if cumsum=True.
        Columns beyond len(scoreMAX) are left as zeros.
    """
    # BUGFIX(idiom): defaults were mutable lists shared across calls; tuples
    # carry the same values with no aliasing risk.
    n_scores = len(scoreMAX)
    normalised_data = np.zeros((data.shape[0], data.shape[1]))
    for i in range(n_scores):
        normalised_data[:, i] = standardise_sym(data[:, i], scoreMIN[i], scoreMAX[i])
    if cumsum:
        # Cumulate only the score columns, over the time axis.
        normalised_data[:, :n_scores] = np.cumsum(normalised_data[:, :n_scores], axis=0)
    return normalised_data
560e63c79777ab3c3cb4550142ee68bd0ea3c621
3,629,679
import os
import json


def extract_csv_from_t2out(json_output=False):
    """It writes the parameter for every block from the last output file of
    TOUGH2 simulation on csv or json

    Parameters
    ----------
    json_output : bool
        If True a json file is save on ../output/PT/json/

    Returns
    -------
    file
        PT.csv: on ../output/PT/csv/

    Attention
    ---------
    The file ELEME.json needs to be updated
    """
    eleme_dict = {}
    ELEME_file = '../mesh/ELEME.json'
    if os.path.isfile(ELEME_file):
        with open(ELEME_file) as file:
            blocks_position = json.load(file)
        for block in blocks_position:
            eleme_dict[block] = [blocks_position[block]['X'],
                                 blocks_position[block]['Y'],
                                 blocks_position[block]['Z']]
    else:
        return "The file %s does not exist" % ELEME_file

    last = ""
    if not os.path.isfile("../model/t2/t2.out"):
        return "Theres is not t2.out file on t2/t2.out"

    # It finds the last section where OUTPUT DATA AFTER was printed and uses it
    # to know where to start to extract data. Also converts every line of the
    # file into an element in an array.
    # BUGFIX: the file handle was opened without a context manager and could
    # leak on error; `with` closes it deterministically.
    cnt = 0
    t2string = []
    with open("../model/t2/t2.out", "r") as t2file:
        for linet2 in t2file:
            cnt += 1
            t2string.append(linet2.rstrip())
            if "OUTPUT DATA AFTER" in linet2.rstrip():
                last = linet2.rstrip().split(",")
                line = cnt

    # Iteration count of the last output section (kept for reference).
    high_iteration = [int(s) for s in last[0].split() if s.isdigit()]

    for elementx in eleme_dict:
        cnt2 = 0
        for lineout in t2string[line + cnt2:-1]:
            if " @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@" == lineout:
                cnt2 += 1
            elif cnt2 > 2:
                # Past the block data section for this output: stop scanning.
                break
            elif elementx in lineout:
                lineselect = lineout.split()
                eleme_dict[elementx].extend(lineselect)
                break

    csv_columns = ['X', 'Y', 'Z', 'ELEM', 'INDEX', 'P', 'T', 'SG', 'SW',
                   'X(WAT1)', 'X(WAT2)', 'PCAP', 'DG', 'DW']

    if json_output:
        eleme_pd = pd.DataFrame.from_dict(eleme_dict, orient='index', columns=csv_columns)
        dtype = {'X': 'float',
                 'Y': 'float',
                 'Z': 'float',
                 'ELEM': 'str',
                 'INDEX': 'float',
                 'P': 'float',
                 'T': 'float',
                 'SG': 'float',
                 'SW': 'float',
                 'X(WAT1)': 'float',
                 'X(WAT2)': 'float',
                 'PCAP': 'float',
                 'DG': 'float',
                 'DW': 'float'}
        eleme_pd = eleme_pd.astype(dtype)
        eleme_pd.to_json("../output/PT/json/PT_json.txt", orient="index", indent=2)

    with open("../output/PT/csv/PT.csv", 'w') as file:
        file.write(','.join(csv_columns))
        file.write('\n')
        for key in eleme_dict.keys():
            # BUGFIX: the original concatenated "%s," per value and wrote
            # string[0:-2], which stripped the trailing comma AND the last
            # character of the final value on every row.
            file.write(','.join("%s" % d for d in eleme_dict[key]))
            file.write('\n')
1f6d457bc8b752d342db57a6a88e0d89b201cc06
3,629,680
import tokenize
import io
import token


def fix_lazy_json(in_text):
    """Rewrite JavaScript-style "lazy" JSON into valid JSON.

    Posted in http://stackoverflow.com/questions/4033633/handling-lazy-json-\
    in-python-expecting-property-name by Pau Sánchez (codigomanso.com)
    """
    valid_tokens = ['true', 'false', 'null', '-Infinity', 'Infinity', 'NaN']
    fixed = []
    for tokid, tokval, _, _, _ in tokenize.generate_tokens(io.StringIO(in_text).readline):
        if tokid == token.NAME:
            # fix unquoted strings (JSON literals in valid_tokens excepted)
            tokid, tokval = fix_unquoted((tokid, tokval), valid_tokens)
        elif tokid == token.STRING:
            # fix single-quoted strings
            tokval = fix_single_quoted(tokval)
        elif (tokid == token.OP) and ((tokval == '}') or (tokval == ']')):
            # remove invalid commas before a closing bracket
            fixed = remove_invalid_commas(fixed)
        fixed.append((tokid, tokval))
    return tokenize.untokenize(fixed)
9d46297d9beb21fc2368322fe69cd9ab266f733b
3,629,681
def get_project_family(repo, namespace=None, username=None):
    """Return the family of projects for the specified project

    {
        code: 'OK',
        family: [
        ]
    }
    """
    # Optional form flags restricting the family to projects that accept
    # pull requests or issues ("1"/"true", case-insensitive).
    allows_pr = flask.request.form.get("allows_pr", "").lower().strip() in [
        "1",
        "true",
    ]
    allows_issues = flask.request.form.get(
        "allows_issues", ""
    ).lower().strip() in ["1", "true"]

    # CSRF confirmation form: reject anything not validly submitted.
    form = pagure.forms.ConfirmationForm()
    if not form.validate_on_submit():
        response = flask.jsonify(
            {"code": "ERROR", "message": "Invalid input submitted"}
        )
        response.status_code = 400
        return response

    repo = pagure.lib.query.get_authorized_project(
        flask.g.session, repo, user=username, namespace=namespace
    )

    if not repo:
        response = flask.jsonify(
            {
                "code": "ERROR",
                "message": "No repo found with the information provided",
            }
        )
        response.status_code = 404
        return response

    if allows_pr:
        family = [
            p.url_path
            for p in pagure.lib.query.get_project_family(flask.g.session, repo)
            if p.settings.get("pull_requests", True)
        ]
    elif allows_issues:
        family = [
            p.url_path
            for p in pagure.lib.query.get_project_family(flask.g.session, repo)
            if p.settings.get("issue_tracker", True)
        ]
    else:
        family = [
            p.url_path
            for p in pagure.lib.query.get_project_family(flask.g.session, repo)
        ]

    return flask.jsonify({"code": "OK", "family": family})
3a208818e3a30911cd58e459de139aceb485ceb0
3,629,682
import warnings


def transform(
    f,
    apply_rng=False,
    state=False,
) -> Transformed:
  """Transforms a function using Haiku modules into a pair of pure functions.

  The first thing to do is to define a `Module`. A module encapsulates some
  parameters and a computation on those parameters:

  >>> class MyModule(hk.Module):
  ...   def __call__(self, x):
  ...     w = hk.get_parameter("w", [], init=jnp.zeros)
  ...     return x + w

  Next, define some function that creates and applies modules. We use
  `hk.transform` to transform that function into a pair of functions that allow
  us to lift all the parameters out of the function (`f.init`) and apply the
  function with a given set of parameters (`f.apply`):

  >>> def f(x):
  ...   a = MyModule()
  ...   b = MyModule()
  ...   return a(x) + b(x)

  >>> f = hk.transform(f)

  To get the initial state of the module call the `init_fn` with an example
  input:

  >>> params = f.init(None, 1)
  >>> params
  frozendict({
    'my_module': frozendict({'w': DeviceArray(0., dtype=float32)}),
    'my_module_1': frozendict({'w': DeviceArray(0., dtype=float32)}),
  })

  You can then apply the function with the given parameters by calling
  `f.apply`:

  >>> f.apply(params, 1)
  DeviceArray(2., dtype=float32)

  It is expected that your program will at some point produce updated
  parameters and you will want to re-apply `f.apply`. You can do this by
  calling `f.apply` with different parameters:

  >>> new_params = {"my_module": {"w": jnp.array(2.)},
  ...               "my_module_1": {"w": jnp.array(3.)}}
  >>> f.apply(new_params, 2)
  DeviceArray(9., dtype=float32)

  If your transformed function needs to maintain internal state (e.g. moving
  averages in batch norm) then see :func:`transform_with_state`.

  Args:
    f: A function closing over `Module` instances.
    apply_rng: Whether `apply` should accept `rng` as an argument.
    state: *Deprecated:* use `hk.transform_with_state`.

  Returns:
    A named tuple with `init` and `apply` pure functions.
  """
  analytics.log_once("transform")

  # Both keyword flags are on a deprecation path; warn but honour them.
  if state:
    warnings.warn(
        "Prefer using hk.transform_with_state(f) vs. passing state=True.",
        DeprecationWarning)

  if apply_rng:
    warnings.warn("Apply_rng will soon be removed and defaulted to True",
                  DeprecationWarning)

  # Build the fully general (rng + state) pair, then strip the features the
  # caller did not ask for.
  pair = transform_with_state(f)  # type: Transformed
  if not apply_rng:
    pair = without_apply_rng(pair)
  if not state:
    pair = without_state(pair)
  return pair
422fe9850df87ac3db738e889461405b36443413
3,629,683
def direct_to_waypoint(aircraft_id, waypoint_name):
    """
    Request aircraft to change heading toward a waypoint.

    Parameters
    ----------
    aircraft_id : str
        A string aircraft identifier. For the BlueSky simulator, this has to be
        at least three characters.
    waypoint_name : str
        A string waypoint identifier. The waypoint to direct the aircraft to.

    Returns
    -------
    TRUE if successful. Otherwise an exception is thrown.

    Notes
    -----
    The waypoint must exist on the aircraft route.

    Examples
    --------
    >>> pydodo.direct_to_waypoint("BAW123", waypoint_name = "TESTWPT")
    """
    # Fail fast on malformed identifiers before hitting the simulator API.
    utils._validate_id(aircraft_id)
    utils._validate_string(waypoint_name, "waypoint name")

    # Request body keys come from the shared endpoint configuration.
    body = {config_param("query_aircraft_id"): aircraft_id, "waypoint": waypoint_name}
    return post_request(config_param("endpoint_direct_to_waypoint"), body)
0c26bc72a994f14dc4df4c56de24b4637dd5f383
3,629,684
import re


def username_allowed(username):
    """Return True if the given username is not a blatant bad word.

    The blacklist file is read once and cached; empty/None usernames are
    rejected outright.
    """
    if not username:
        return False
    blacklist = cache.get(USERNAME_CACHE_KEY)
    if blacklist is None:
        # BUGFIX: the file handle was opened without ever being closed;
        # the context manager releases it deterministically.
        with open(settings.USERNAME_BLACKLIST, "r") as f:
            blacklist = [w.strip() for w in f.readlines()]
        cache.set(USERNAME_CACHE_KEY, blacklist, settings.CACHE_SHORT_TIMEOUT)  # 1 hour
    # Lowercase
    username = username.lower()
    # Add lowercased and non alphanumerics to start.
    usernames = {username, re.sub(r"\W", "", username)}
    # Add words split on non alphanumerics.
    for name in re.findall(r"\w+", username):
        usernames.add(name)
    # Do any match the bad words?
    return not usernames.intersection(blacklist)
42f856baf3d5e704af0ce0780a9ecbb0984b465b
3,629,685
def satisfiability(p=5, ratio=2.0, pr_edge=0.5, pr_exo=0.3, dsp_scm=True):
    """
    Count the number of attempts to generate a linear P-SCM that can be uniquely
    recovered (i.e., satisfies the derived conditions).

    :param p: Number of observed nodes
    :param ratio: Source to observed node ratio. (Number of sources / Number of
        observed variables.)
    :param pr_edge: Average node degree (expected number of causal connections
        of an observed variable).
    :param pr_exo: Average source degree (expected number of exogenous
        connections of an observed variable).
    :param dsp_scm: If the model is a DSP-SCM.
    :return: Number of attempts count, Generated linear P-SCM (A,B)
    """
    # Degrees >= 1 are interpreted as average degrees and converted to
    # connection probabilities.
    if pr_edge >= 1:
        pr_edge = pr_edge / (p - 1)
    if pr_exo >= 1:
        pr_exo = pr_exo / p

    attempts = 0
    while True:
        if dsp_scm:
            A, B, _ = generate_dsp_scm(p=p, ratio=ratio, pr_edge=pr_edge, pr_exo=pr_exo)
        else:
            A, B, _ = generate_pscm(p=p, ratio=ratio, pr_edge=pr_edge, pr_exo=pr_exo)
        attempts += 1
        if PSCMRecovery(a=A, b=B).check_uniqueness():
            return attempts, A, B
596b8267b915f8b0dcf0e7bc993fa5ff9a62e1b9
3,629,686
import os


def get_uri(env_var='DATABASE_URL'):
    """Grab and parse the url from the environment."""
    # A missing env var yields the int 1337, which makes urlparse raise —
    # a deliberate fail-fast trick kept from the original.
    parsed = urlparse(os.environ.get(env_var, 1337))
    return {
        'scheme': parsed.scheme,
        'username': unquote(parsed.username or ''),
        'password': unquote(parsed.password or ''),
        'hostname': parsed.hostname,
        'port': parsed.port,
        'path': unquote(parsed.path or '/'),
    }
91cad8b1a2fef783ce9f33066c461db1b0f87fd2
3,629,687
def next_update_time(m):
    """Return the next update time as a date.

    Computed as Root.Modified plus the Root.UpdateFrequency period (an
    ISO-8601 duration). Returns False when either Root.Modified or
    Root.UpdateFrequency is missing from the document.
    """
    last_mod = m.doc['Root'].find_first_value('Root.Modified')

    if last_mod:
        # presumably dateutil.parser.parse — TODO confirm import
        last_mod = parse(last_mod)
    else:
        return False

    uf = m.doc['Root'].find_first_value('Root.UpdateFrequency')

    if uf:
        # Convert the ISO-8601 duration (e.g. 'P1M') into a timedelta.
        uf = timedelta(seconds=iso8601_duration_as_seconds(uf))
    else:
        return False

    return (last_mod + uf).date()
8bd149b95719691a83563111a196e2b8ab900f99
3,629,688
import random


def random_permutation(iterable, r=None):
    """Random selection from itertools.permutations(iterable, r)"""
    pool = tuple(iterable)
    size = len(pool) if r is None else r
    return list(random.sample(pool, size))
09e9f22def2c1125bf0ffc50db73659eaac65105
3,629,689
def rabin_karp_pattern_set(test_file_text, k):
    """
    Given a document to detect matches for, creates a set of the "rolling"
    hashcodes of each shingle.

    Runtime: O(len(test_file_text)) with a very small constant factor

    @param test_file_text: string of file to detect matches for
    @param k: length of shingles
    @return: set of "rolling" hashes for all shingles
    """
    # note -- for a formal analysis a bloom filter would give guaranteed
    # constant insertion/lookup; a plain set is fine in practice.
    digests = set()
    roller = RollingHash(test_file_text, k)
    for _ in range(len(test_file_text) - k + 1):
        digests.add(roller.hash)
        roller.update()
    return digests
dda851974207490717f9dcd1449cc83905fe7e83
3,629,690
def get_hms(t_sec):
    """Converts time in seconds to hours, minutes, and seconds.

    :param t_sec: time in seconds
    :return: time as (hours, minutes, seconds)
    :rtype: tuple
    """
    # BUGFIX(doc): the docstring claimed a list was returned; a tuple is.
    # divmod expresses the same floor-division/modulo pair in one step.
    h, rem = divmod(t_sec, 3600)
    m, s = divmod(rem, 60)
    return h, m, s
f873ea04905ebcc5b41a394a4dd880a566623c83
3,629,691
from typing import Dict, Text


def check_model_version_name(model_name: Text, model_version: Dict) -> bool:
    """Check model version name.

    Args:
        model_name {Text}: model name
        model_version {Dict}: model version dictionary

    Returns:
        True if model names are matches, otherwise false
    """
    # BUGFIX: Text was annotated but never imported, so defining this function
    # raised NameError.
    version_info = model_version.get('model_version', {})
    # BUGFIX: a missing 'registered_model' key returned None and the following
    # .get() raised AttributeError; fall back to an empty dict instead.
    registered_model = version_info.get('registered_model') or {}
    return registered_model.get('name') == model_name
eed0b9a8a9b25fac8c1818ed18fa59a01a55b5f3
3,629,692
import logging


def current_page_name(webdriver: webdriver, valid_url: str = "tricount.com") -> str:
    """
    Checks current page for content, determines name.

    Returns:
        page name, according to names in PAGE_NAV_ORDER
    Raises:
        InvalidUrlError - webdriver.current_url doesn't match expected valid_url
        InvalidPageError - none of the expected page layouts detected
    """
    logging.debug(
        "current_page_name: validating webdriver.current_url "
        f"'{webdriver.current_url}' against '{valid_url}'"
    )
    if webdriver.current_url.find(valid_url) == -1:
        raise InvalidUrlError(expected_url=valid_url, current_url=webdriver.current_url)

    # Probe the DOM with the page-specific detectors, most specific first.
    if is_on_expense_form(webdriver):
        return "expense_form"
    elif is_on_expenses_list(webdriver):
        return "expenses_list"
    elif is_on_users_list(webdriver):
        return "users_list"
    elif is_on_pre_users_list(webdriver):
        return "pre_users_list"
    else:
        raise InvalidPageError
652133e53883eaa839441dfbd15ce6fddd99f383
3,629,693
def task_jshat_app_watch():
    """JsHat application - build all on change"""
    return {
        'actions': ['yarn run --silent watch --config webpack.app.config.js'],
        'task_dep': ['jshat_deps'],
    }
96d9d42e5145cfb2aef3cc91362eb3c9da244ff7
3,629,694
from etools_permissions.models import Realm


def get_realm(request):
    """
    Currently not setting realm in session, so using user to get realm
    Expect tenant attribute to be set on request in Workspace in use,
    if user not set or user is superuser, then no tenant
    """
    if request.user is None or request.user.is_superuser:
        return None

    lookup = {'user__pk': request.user.pk}
    if hasattr(request, "tenant"):
        # Scope the realm to the active workspace when a tenant is present.
        lookup['workspace'] = request.tenant

    try:
        return Realm.objects.get(**lookup)
    except Realm.DoesNotExist:
        return None
b3c0a678553d2cd1da982a44a8376686a6f0f7af
3,629,695
def _marked_merging(A, criterion_fn, node_weights=None): """ Method of paper 'Weighted Graph Cuts without Eigenvectors: A Multilevel Approach' """ if node_weights is None: node_weights = np.ones(A.shape[0]) unmarked_vertices = list(np.arange(A.shape[0])) edges_to_merge = np.zeros_like(A) while len(unmarked_vertices) > 0: v = np.random.choice(unmarked_vertices) neighbors_v = np.where(A[v, :] != 0) unmarked_neighbors = np.intersect1d(unmarked_vertices, neighbors_v) if len(unmarked_neighbors) == 0: unmarked_vertices.remove(v) else: w_idx = criterion_fn(edge_weights=A[v, unmarked_neighbors], neighbor_weights=node_weights[unmarked_neighbors], v_weight=node_weights[v]) w = unmarked_neighbors[w_idx] edges_to_merge[v, w] = 1 unmarked_vertices.remove(v) unmarked_vertices.remove(w) return edges_to_merge + edges_to_merge.T
11fe427a8cd0697c123a8c503afdde28dba3b824
3,629,696
def fast_aggregate(X, Y):
    """Per-step dot product of X and Y, both of shape (T, ...).

    Returns a length-T vector whose t-th entry is the dot product of the
    flattened X[t] and Y[t]. Identical to np.sum(X*Y, axis=(1,...,X.ndim-1))
    but avoids the costly creation of the X*Y intermediate, useful for
    speeding up aggregation in td by a factor of 4 to 5.
    """
    steps = X.shape[0]
    flat_x = X.reshape(steps, -1)
    flat_y = Y.reshape(steps, -1)
    out = np.empty(steps)
    for t in range(steps):
        out[t] = flat_x[t, :] @ flat_y[t, :]
    return out
dfcb15bdb9555fead4d37ab03c5112a13f944f52
3,629,697
def GroupConv2D(filters,
                kernel_size,
                strides=(1, 1),
                groups=32,
                kernel_initializer='he_uniform',
                use_bias=True,
                activation='linear',
                padding='valid',
                **kwargs):
    """Grouped Convolution Layer implemented as Slice, Conv2D and Concatenate layers.

    Splits the input channels into ``groups`` slices, applies an
    independent Conv2D to each slice, and concatenates the results back
    along the channel axis.

    Args:
        filters: Integer, the dimensionality of the output space (i.e. the
            number of output filters in the convolution).
        kernel_size: An integer or tuple/list of integers, specifying the
            size of the convolution window.
        strides: An integer or tuple/list of integers, specifying the
            stride of the convolution.
        groups: Integer, number of groups to split the input filters into.
        kernel_initializer: Initializer applied to the kernel weights matrix.
        use_bias: Boolean, whether the layer uses a bias vector.
        activation: Activation function to use. If you don't specify
            anything, no activation is applied (ie. "linear": a(x) = x).
        padding: one of "valid" or "same" (case-insensitive).

    Input shape:
        4D tensor with shape (batch, rows, cols, channels) for
        "channels_last" data format, or (batch, channels, rows, cols)
        for "channels_first".

    Output shape:
        4D tensor with the channel axis equal to ``filters``; rows and
        cols values might have changed due to padding.
    """
    backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
    # Channel axis depends on the data format: last axis for
    # "channels_last", axis 1 for "channels_first".
    slice_axis = 3 if backend.image_data_format() == 'channels_last' else 1

    def layer(input_tensor):
        # BUGFIX: read the channel count from slice_axis rather than the
        # hard-coded [-1]; with "channels_first" data the last axis is the
        # spatial width, not the channels.
        inp_ch = int(backend.int_shape(input_tensor)[slice_axis] // groups)  # input grouped channels
        out_ch = int(filters // groups)  # output grouped channels

        blocks = []
        for c in range(groups):
            slice_arguments = {
                'start': c * inp_ch,
                'stop': (c + 1) * inp_ch,
                'axis': slice_axis,
            }
            x = layers.Lambda(slice_tensor, arguments=slice_arguments)(input_tensor)
            x = layers.Conv2D(out_ch,
                              kernel_size,
                              strides=strides,
                              kernel_initializer=kernel_initializer,
                              use_bias=use_bias,
                              activation=activation,
                              padding=padding)(x)
            blocks.append(x)

        x = layers.Concatenate(axis=slice_axis)(blocks)
        return x

    return layer
16f4162859bd2186f508800fae04b1a493aa7a33
3,629,698
def shift(seq: Sequent) -> Sequent:
    """The :math:`\\textbf{Opt${}^?$}` :math:`\\texttt{shift}` rule.

    Given an :math:`n`-opetope :math:`\\omega`, produces the globular
    :math:`(n+1)`-opetope :math:`\\lbrace []: \\omega`.
    """
    dim = seq.source.dimension

    # Lift every node address of the source one dimension up.
    context = Context(dim + 1)
    for addr in seq.source.nodeAddresses():
        context += (addr.shift(), addr)

    target = Preopetope.fromDictOfPreopetopes(
        {Address.epsilon(dim): seq.source})
    return Sequent(context, target, seq.source)
d306604f6f809a2b38fb147e68f76b7d6e733435
3,629,699