content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
from clinica.utils.dwi import merge_volumes_tdim import os.path as op import os def merge_noddi_ped(in_file, in_bvec, in_bval, alt_file, alt_bvec, alt_bval): """ This is to merge the two ped images and also concatenate the bvecs and bvals :return: """ out_bvals_tab = op.abspath('merged.bval') out_bvals = op.abspath('merged_bvals.bval') cmd_bval = 'paste ' + in_bval + ' ' + alt_bval + ' > ' + out_bvals_tab os.system(cmd_bval) with open(out_bvals_tab, 'r+') as fin, open(out_bvals, 'w') as fout: for line in fin: fout.write(line.replace('\t', ' ')) out_bvecs_tab = op.abspath('merged.bvec') out_bvecs = op.abspath('merged_bvecs.bvec') cmd_bvec = 'paste ' + in_bvec + ' ' + alt_bvec + ' > ' + out_bvecs_tab os.system(cmd_bvec) with open(out_bvecs_tab, 'r+') as fin, open(out_bvecs, 'w') as fout: for line in fin: fout.write(line.replace('\t', ' ')) out_file = merge_volumes_tdim(in_file, alt_file) return out_file, out_bvals, out_bvecs
07c0a49fcf6d0362625b5361a5ee4800d1824b71
3,634,100
import numpy def _xml_column_name_orig_to_new(column_name_orig): """Converts name of XML column from original (segmotion) to new format. :param column_name_orig: Column name in original format. :return: column_name: Column name in new format. """ orig_column_flags = [c == column_name_orig for c in XML_COLUMN_NAMES_ORIG] orig_column_index = numpy.where(orig_column_flags)[0][0] return XML_COLUMN_NAMES[orig_column_index]
4f5302381d6a94d74d311355e1aad8c79e1e0cbe
3,634,101
import os def copy_from_file(conn, df, table): """ Here we are going save the dataframe on disk as a csv file, load the csv file and use copy_from() to copy it to the table """ # Save the dataframe to disk tmp_df = "./tmp_dataframe.csv" df.to_csv(tmp_df, header=False) f = open(tmp_df, 'r') cursor = conn.cursor() try: cursor.copy_from(f, table, null='', sep=',') conn.commit() except (Exception, psycopg2.DatabaseError) as error: os.remove(tmp_df) print("Error: %s" % error) conn.rollback() cursor.close() return 1 print("copy_from_file() done") cursor.close() os.remove(tmp_df)
f49cddc4cc38fa3797eac91514a00ad4dd805d1f
3,634,102
import os def get_ocp_repo(rhel_major_version=None): """ Get ocp repo file, name will be generated dynamically based on ocp version. Args: rhel_major_version (int): Major version of RHEL. If not specified it will take major version from config.ENV_DATA["rhel_version"] Returns: string : Path to ocp repo file """ rhel_version = ( rhel_major_version or Version.coerce(config.ENV_DATA["rhel_version"]).major ) repo_path = os.path.join( constants.REPO_DIR, f"ocp_{get_ocp_version('_')}_rhel{rhel_version}.repo" ) path = os.path.expanduser(repo_path) assert os.path.exists(path), f"OCP repo file {path} doesn't exists!" return path
3997f513b59e8acc3290d9907b14d80f46552cc6
3,634,103
def ndwi(raster): """ Normalized Difference Water Index (NDWI) NDWI := factor * (Green - NIR1) / (Green + NIR1) :param raster: xarray or numpy array object in the form (c, h, w) :return: new band with SI calculated """ nir1, green = _get_band_locations( raster.attrs['band_names'], ['nir1', 'green']) index = ( (raster['band_data'][green, :, :] - raster['band_data'][nir1, :, :]) / (raster['band_data'][green, :, :] + raster['band_data'][nir1, :, :]) ).compute() return index.expand_dims(dim="band", axis=0).fillna(0).chunk(chunks=CHUNKS)
64e8d553b8ace8c3fc3ea2a7fed98c43fcadf22a
3,634,104
def get_settings(): """Utility function to retrieve settings.py values with defaults""" return { "DJANGO_WYSIWYG_MEDIA_URL": getattr(settings, "DJANGO_WYSIWYG_MEDIA_URL", urljoin(settings.STATIC_URL, "ckeditor/")), "DJANGO_WYSIWYG_FLAVOR": getattr(settings, "DJANGO_WYSIWYG_FLAVOR", "yui"), }
1028431d5facd406020892cb5019b26de55f7193
3,634,105
def einsum_via_matmul(input_tensor, w, num_inner_dims): """Implements einsum via matmul and reshape ops. Args: input_tensor: float Tensor of shape [<batch_dims>, <inner_dims>]. w: float Tensor of shape [<inner_dims>, <outer_dims>]. num_inner_dims: int. number of dimensions to use for inner products. Returns: float Tensor of shape [<batch_dims>, <outer_dims>]. """ input_shape = get_shape_list(input_tensor) w_shape = get_shape_list(w) batch_dims = input_shape[: -num_inner_dims] inner_dims = input_shape[-num_inner_dims:] outer_dims = w_shape[num_inner_dims:] inner_dim = np.prod(inner_dims) outer_dim = np.prod(outer_dims) if num_inner_dims > 1: input_tensor = tf.reshape(input_tensor, batch_dims + [inner_dim]) if len(w_shape) > 2: w = tf.reshape(w, [inner_dim, outer_dim]) ret = tf.matmul(input_tensor, w) if len(outer_dims) > 1: ret = tf.reshape(ret, batch_dims + outer_dims) return ret
acc672a84661e11444a452393587d0dfc164b636
3,634,106
def cost(guess, tdoa, array): """ Calculate the sum of the squares of the loss function of hyperbolic least squares problem guess : 1D or 2D row ndarray with one or more guesses coordinates tdoa : column 2D ndarray with the TDOA from some reference sensor receptorsPositions : row 2D ndarray with the coordinates of all receptors return 2D ndarray of shape (number_of_guesses, 1) with the costs related to all guesses loss_ij = tdoa_ij * soundSpeed - h(receptorsPositions_i, receptorsPositions_j, guess) cost = sum in combinations of ij (loss_ij ** 2) """ l = array.shape[0] m = 1 if np.ndim(guess) == 1 else guess.shape[0] cost = np.zeros((m, 1)) dists = np.concatenate((np.zeros((1, 1)), tdoa), axis=0) * propSpeed for count, c in enumerate( comb(range(l), 2) ): (i, j) = (c[0], c[1]) r1 = array[i] r2 = array[j] dij = dists[j] - dists[i] cost += (dij - h(r1, r2, guess))**2 return cost
27b08a1a6966d5f18974244f4db53db5c6c14fee
3,634,107
def from_literal(tup): """Convert from simple literal form to the more uniform typestruct.""" def expand(vals): return [from_literal(x) for x in vals] def union(vals): if not isinstance(vals, tuple): vals = (vals,) v = expand(vals) return frozenset(v) if not isinstance(tup, tuple): return ('prim', tup) elif isinstance(tup[0], str): tag, *vals = tup if tag == 'prim': return tup elif tag == 'tuple': params = tuple(expand(vals)) return (tag, params) elif tag == 'map': k, v = vals return (tag, (union(k), union(v))) else: vals, = vals # pylint: disable=self-assigning-variable return (tag, union(vals)) else: return tuple(expand(tup))
a06d35e27512bfeae030494ca6cad7ebac5c7d2c
3,634,108
def distill_resnet_32_to_15_cifar20x5(): """Set of hyperparameters.""" hparams = distill_base() hparams.teacher_model = "resnet" hparams.teacher_hparams = "resnet_cifar_32" hparams.student_model = "resnet" hparams.student_hparams = "resnet_cifar_15" hparams.optimizer_momentum_nesterov = True # (base_lr=0.1) * (batch_size=128*8 (on TPU, or 8 GPUs)=1024) / (256.) hparams.teacher_learning_rate = 0.25 * 128. * 8. / 256. hparams.student_learning_rate = 0.2 * 128. * 8. / 256. hparams.learning_rate_decay_scheme = "piecewise" hparams.add_hparam("learning_rate_boundaries", [40000, 60000, 80000]) hparams.add_hparam("learning_rate_multiples", [0.1, 0.01, 0.001]) hparams.task_balance = 0.28 hparams.distill_temperature = 2.0 hparams.num_classes = 20 return hparams
503b49f0e61191eb87516b8c83ca88fbc0313be2
3,634,109
from datetime import datetime def timestamp() -> datetime.datetime: """ Returns a datetime object representing the current UTC time. The last 3 digits of the microsecond frame are set to zero. :return: a UTC timestamp """ # Get tz-aware datetime object. dt = arrow.utcnow().naive # Set the last three ms digits to 0. dt = dt.replace(microsecond=int(str(dt.microsecond)[0:3] + "000")) return dt
e1fb7fbe39bef103704af0a7bfece4038506bbdc
3,634,110
def next_fake_batch(): """ Return random seeds for the generator. """ batch = np.random.uniform( -1.0, 1.0, size=[FLAGS.batch_size, FLAGS.seed_size]) return batch.astype(np.float32)
80c4b32fd145430dad06b16fd90273fd9aa944f1
3,634,111
def print_result(error, real_word): """" print_result""" if error == 5: print("You lost!") print("Real word is:", real_word) else: print("You won!") return 0
598814ac64ac767c102080a0a82541d3b888843c
3,634,112
def read_cpu_info(): """Return the CPU model number & number of CPUs.""" try: with open('/proc/cpuinfo') as f: models = [line[line.index(':')+2:] for line in f if line.startswith('model name')] return models[0].strip(), len(models) except: log.exception('Failed to read CPU info') return '', 0
68ce0de7a36d01fc18f3be7f182b37735ec5683a
3,634,113
import yaml def _yaml_parse(s): """Uses yaml module to parse s to a Python value. First tries to parse as an unnamed flag function with at least two args and, if successful, returns s unmodified. This prevents yaml from attempting to parse strings like '1:1' which it considers to be timestamps. """ try: name, args = parse_function(s) except ValueError: pass else: if name is None and len(args) >= 2: return s return yaml.safe_load(s)
52a788b63ade60bed879b5d0a14e21177902af2e
3,634,114
def mongo_convert(sch): """Converts a schema dictionary into a mongo-usable form.""" out = {} for k in sch.keys(): if k == 'type': out["bsonType"] = sch[k] elif isinstance(sch[k], list): out["minimum"] = sch[k][0] out["maximum"] = sch[k][1] elif isinstance(sch[k], dict): out[k] = mongo_convert(sch[k]) return out
0208ceda058042a9f44249a1b724c4b7883afec1
3,634,115
def files_identical(a, b): """Return a tuple (file a == file b, index of first difference)""" a_bytes = open(a, "rb").read() b_bytes = open(b, "rb").read() return bytes_identical(a_bytes, b_bytes)
a8e392f5b2682459525c329d1bd8ab64104628b6
3,634,116
import math def discounted_cumulative_gain(rank_list): """Calculate the discounted cumulative gain based on the input rank list and return a list.""" discounted_cg = [] discounted_cg.append(rank_list[0]) for i in range(1, len(rank_list)): d = rank_list[i]/math.log2(i+1) dcg = d + discounted_cg[i-1] discounted_cg.append(dcg) return discounted_cg
eaa5ad6185e2abb239097be5399dffd82d143fd3
3,634,117
def adapter(js_constructor, base=Adapter): """ Allows a class to implement its adapting logic with a `js_args()` method on the class itself. This just helps reduce the amount of code you have to write. For example: @adapter('wagtail.mywidget') class MyWidget(): ... def js_args(self): return [ self.foo, ] Is equivalent to: class MyWidget(): ... class MyWidgetAdapter(Adapter): js_constructor = 'wagtail.mywidget' def js_args(self, obj): return [ self.foo, ] """ def _wrapper(cls): ClassAdapter = type( cls.__name__ + "Adapter", (base,), { "js_constructor": js_constructor, "js_args": lambda self, obj: obj.js_args(), }, ) register(ClassAdapter(), cls) return cls return _wrapper
e808a4a8dd50fa61157f45a014ec390cc8ee1370
3,634,118
import sys import os def ancienne_fonction_chemin_absolu(relative_path): """ Donne le chemin absolu d'un fichier. PRE : - POST : Retourne ''C:\\Users\\sacre\\PycharmProjects\\ProjetProgra\\' + 'relative_path'. """ base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__))) print("b : ", base_path) correct = base_path.index("ProjetProgra") print("lll : ", base_path[:correct + 13]) return os.path.join(base_path[:-15], relative_path)
a33db91a2bd72273acc14caea415181297c16318
3,634,119
import warnings import inspect def data(input, name_func=None, doc_func=None, skip_on_empty=False, **legacy): """ A "brute force" method of parameterizing test cases. Creates new test cases and injects them into the namespace that the wrapped function is being defined in. Useful for parameterizing tests in subclasses of 'UnitTest', where Nose test generators don't work. >> @data([("foo", 1, 2)]) ... def test_add1(name, input, expected): ... actual = add1(input) ... assert_equal(actual, expected) ... >> locals() ... 'test_add1_foo_0': <function ...> ... >> """ input = _check_data(input) if "testcase_func_name" in legacy: warnings.warn("testcase_func_name= is deprecated; use name_func=", DeprecationWarning, stacklevel=2) if not name_func: name_func = legacy["testcase_func_name"] if "testcase_func_doc" in legacy: warnings.warn("testcase_func_doc= is deprecated; use doc_func=", DeprecationWarning, stacklevel=2) if not doc_func: doc_func = legacy["testcase_func_doc"] doc_func = doc_func or default_doc_func name_func = name_func or default_name_func def parameterized_expand_wrapper(f, instance=None): frame_locals = inspect.currentframe().f_back.f_locals parameters = parameterized.input_as_callable(input)() if not parameters: if not skip_on_empty: raise ValueError( "Parameters iterable is empty (hint: use " "`parameterized.expand([], skip_on_empty=True)` to skip " "this test when the input is empty)" ) return wraps(f)(skip_on_empty_helper) digits = len(str(len(parameters) - 1)) for num, p in enumerate(parameters): name = name_func(f, "{num:0>{digits}}".format(digits=digits, num=num), p) # If the original function has patches applied by 'mock.patch', # re-construct all patches on the just former decoration layer # of param_as_standalone_func so as not to share # patch objects between new functions nf = reapply_patches_if_need(f) frame_locals[name] = parameterized.param_as_standalone_func(p, nf, name) frame_locals[name].__doc__ = doc_func(f, num, p) # Delete original 
patches to prevent new function from evaluating # original patching object as well as re-constructed patches. delete_patches_if_need(f) f.__test__ = False return parameterized_expand_wrapper
2d38bb642a1e5a020f7c07de8348c5621c8cbccb
3,634,120
def get_form_field_names(form_class): """Return the list of field names of a WTForm. :param form_class: A `Form` subclass """ unbound_fields = form_class._unbound_fields if unbound_fields: return [f[0] for f in unbound_fields] field_names = [] # the following logic has been taken from FormMeta.__call__ for name in dir(form_class): if not name.startswith('_'): unbound_field = getattr(form_class, name) if hasattr(unbound_field, '_formfield'): field_names.append(name) return field_names
27c91a1e3c1b71f69d44747955d59cee525aa50e
3,634,121
def gen_empty_structure_data_array(number_of_atoms): """ Generate an array data structure to contain structure data. Parameters ---------- number_of_atoms : int The number of atoms in the structure. Determines the size of the axis 0 of the structure array. Returns ------- np.ndarray of (N, :attr:`libpdb.atom_slicers), dtype = '<U8' Where N is the ``number_of_atoms``. """ # require assert isinstance(number_of_atoms, int), \ f'`number_of_atoms` is not int, {type(number_of_atoms)} ' assert number_of_atoms > 0, \ f'or number is less than zero: {number_of_atoms}.' return np.empty( (number_of_atoms, len(libpdb.atom_slicers)), dtype='<U8', )
3602483721bd5573cfa9bb605db52511999349ac
3,634,122
from datetime import datetime def actives_alerts_table(strategy, style='', offset=None, limit=None, col_ofs=None, group=None, ordering=None, datetime_format='%y-%m-%d %H:%M:%S'): """ Returns a table of any active alerts. """ COLUMNS = ('Symbol', '#', 'Label', 'TF', 'Created', 'Expiry', 'Countdown', 'Condition', 'Cancellation', 'Message') columns = tuple(COLUMNS) total_size = (len(columns), 0) data = [] with strategy._mutex: alerts = get_all_active_alerts(strategy) total_size = (len(columns), len(alerts)) if offset is None: offset = 0 if limit is None: limit = len(alerts) limit = offset + limit if group: # in alpha order then by created alerts.sort(key=lambda x: x['sym']+str(x['ts']), reverse=True if ordering else False) else: # by created timestamp descending alerts.sort(key=lambda x: x['ts'], reverse=True if ordering else False) alerts = alerts[offset:limit] for t in alerts: row = [ t['sym'], t['id'], t['name'], t['tf'], datetime.fromtimestamp(t['ts']).strftime(datetime_format) if t['ts'] > 0 else "?", datetime.fromtimestamp(t['expiry']).strftime(datetime_format) if t['expiry'] > 0 else "never", t['ctd'], t['cond'], t['cancel'], t['msg'], ] data.append(row[0:3] + row[3+col_ofs:]) return columns[0:3] + columns[3+col_ofs:], data, total_size
729c9e7f30c1c73c4f41cb8ced18820b79639db7
3,634,123
from django.contrib.auth import logout def signout(request): """Logs out user""" logout(request) return redirect('/')
9b10ffc58f066affc915baf926815c032e041409
3,634,124
def _get_cached_item(cache_key): """Returns an item from memcache if cached """ return memcache.get()
0265025d0599a7ec887c866ce5712d1f89c9832b
3,634,125
def format_dnb_company_investigation(data): """ Format DNB company investigation payload to something DNBCompanyInvestigationSerlizer can parse. """ data['dnb_investigation_data'] = { 'telephone_number': data.pop('telephone_number', None), } return data
9c27990bad98b36649b42c20796caabeaae1e21b
3,634,126
def audio_feat(id): """ Return audio features of a track. search_id[0]['insert_feature_here'] id="4nb8OcZG8lpnHi5DmkEnY2" #Sample ID audio_feat("4nb8OcZG8lpnHi5DmkEnY2") :param id: :return: """ return sp.audio_features(tracks=id)
168801f4b0e01bf5fb4d416162bde5bfdd0d592e
3,634,127
import os def get_results_dir(tomo_path): """Return/Create the results directory""" res_dir = os.path.abspath(tomo_path)+"_results" common.mkdir_p(res_dir) return res_dir
f1be2d27e7e0170384f4859db85106baa0f406a0
3,634,128
def params_v2_biasless(exp_name, convtype='chebyshev5', pooltype='max', nmaps=16, activation_func='relu', stat_layer=None, input_channels=1, gc_depth=8, nfilters=64, const_k=5, var_k=None, filters=None, batch_norm_output=False, var_batch_norm=None, fc_layers=[], num_outputs=1, reg_factor=0, dropout_rate=0.8, verbose=True, num_epochs=1, learning_rate=1e-4, decay_factor=0.999, decay_freq=1, decay_staircase=False, loss_func="l1", nside=NSIDE, nsides=None, order=ORDER, batch_size=16): """ Returns params dict for biasless 1 architectures :param convtype: Type of graph convolution performed ("chebyshev5" or "monomials"). :param num_outputs: 1 for just sigma_8, 2 for sigma_8 and predicted log-variance q :param nsides: List of NSIDES for graph convolutional layers. Length = gc_depth :param nside: NSIDE of input maps. Should be 1024. :param loss_func: Choice of loss function ("l1", "custom1", "custom2", "l2"). Must be implemented in DeepSphere codebase. :param decay_staircase: If true, performs integer division in lr decay, decaying every decay_freq steps. :param decay_freq: If decay_staircase=true, acts to stagger decays. Otherwise, brings down decay factor. :param dropout_rate: Percentage of neurons kept. :param reg_factor: Multiplier for L2 Norm of weights. :param fc_layers: List of sizes of hidden fully connected layers (excluding the output layer). :param var_batch_norm: List of True/False values turning batch normalization on/off for each GC layer. :param batch_norm_output: Batch normalization value for the output layer (True/False). Ununsed if var_batch_norm is not None. :param var_k: List of GC orders K for each layer. Length = gc_depth. :param const_k: Constant K value for each GC layer. Unused if var_k is not None. :param stat_layer: Type of statistical layer applied for invariance. Can be None, mean, meanvar, var, or hist. :param pooltype: Type of pooling used for GC layers (max or avg). 
:param activation_func: Type of activation function applied for all GC and FC layers (relu, leaky_relu, elu, etc.). :param gc_depth: Number of GC layers in the network. Fixed at eight if NSIDE=1024 and pooling by two every layer. :param filters: List of # of filters for each GC layer. Length = gc_depth. :param batch_size: Batch size for training the network. Ideally a power of two. :param nfilters: Constant # of filters for each GC layer. Unused if filters is not None. :param exp_name: Experiment ID to define and track directories. :param order: HEALPIX order for partial-sky maps. Fixed at 2. :param decay_factor: Decay factor by which learning rate gets multiplied every decay_freq steps depending on decay_staircase. :param nmaps: Number of full-sky maps from which the training data is being generated. :param input_channels: Number of input partial-sky maps. 1 for convergence, 2 for shear, +1 for counts-in-cells. :param learning_rate: Initial learning rate to use during training :param num_epochs: Number of epochs for training the model :param verbose: Outputs information on model config :return: Params dict for DeepSphere model trained on FLASK v2 data without galaxy-matter bias. Doesn't allow for sophisticated regularization or loss functions. """ params = dict() params['dir_name'] = "flaskv2-biasless-{}".format(exp_name) # Types of layers. params['conv'] = convtype # Graph convolution: chebyshev5 or monomials. params['pool'] = pooltype # Pooling: max or average. params['activation'] = activation_func # Non-linearity: relu, elu, leaky_relu, softmax, tanh, etc. params['statistics'] = stat_layer # Statistics (for invariance): None, mean, var, meanvar, hist. # Architecture. 
if filters is None: filters = [nfilters] * gc_depth if var_k is None: var_k = [const_k] * gc_depth if var_batch_norm is None: var_batch_norm = [True] * (gc_depth - 1) + [batch_norm_output] if nsides is None: nsides = [nside // (2 ** i) for i in range(gc_depth + 1)] params['F'] = filters # Graph convolutional layers: number of feature maps. params['K'] = var_k # Polynomial orders. params['batch_norm'] = var_batch_norm # Batch normalization. params['M'] = fc_layers + [num_outputs] # Fully connected layers: output dimensionalities. params['input_channel'] = input_channels # Two channels (spherical maps) per sample. # Pooling. params['nsides'] = nsides params['indexes'] = utils.nside2indexes(nsides, order) # params['batch_norm_full'] = [] # Regularization (to prevent over-fitting). params[ 'regularization'] = reg_factor # Amount of L2 regularization over the weights # (will be divided by the number of weights). params['dropout'] = dropout_rate # Percentage of neurons to keep. # Training. params['num_epochs'] = num_epochs # Number of passes through the training data. params['batch_size'] = batch_size # Constant quantity of information (#pixels) per step (invariant to sample size). # Optimization: learning rate schedule and optimizer. params['scheduler'] = lambda step: tf.train.exponential_decay(learning_rate, step, decay_steps=decay_freq, decay_rate=decay_factor, staircase=decay_staircase) # params['scheduler'] = lambda step: learning_rate params['optimizer'] = lambda lr: tf.train.AdamOptimizer(lr, beta1=0.9, beta2=0.999, epsilon=1e-8) params['loss'] = loss_func # Regression loss. # Number of model evaluations during training (influence training time). params['eval_frequency'] = (12 * order * order * nmaps / batch_size) / 3 # Thrice per epoch if verbose: print('#sides: {}'.format(nsides)) print('#pixels: {}'.format([(nside // order) ** 2 for nside in nsides])) # Number of pixels on the full sphere: 12 * nsides**2. 
print('#samples per batch: {}'.format(params['batch_size'])) print('=> #pixels per batch (input): {:,}'.format(params['batch_size'] * (NSIDE // order) ** 2)) print('=> #pixels for training (input): {:,}'.format( params['num_epochs'] * 12 * order * order * nmaps * (NSIDE // order) ** 2)) return params
b5db72cd020115283f06d768920b55161a333139
3,634,129
import io def get_ccd_pos(filename, radec=None, verbose=True): """ Parameters ---------- """ astrom_file = io.filename_to_guider(filename) if len(astrom_file)==0: if verbose: print("No astrom file found for %s"%filename) return [np.NaN,np.NaN] astrom_file = astrom_file[0] with fits.open(filename) as f: # Get target coordinates header = f[0].header if radec is None: try: radec = coordinates.SkyCoord(header["OBJRA"],header["OBJDEC"], unit=(units.hourangle, units.deg)) # Format changed at some point except KeyError: radec = coordinates.SkyCoord(header["OBRA"], header["OBDEC"], unit=(units.hourangle, units.deg)) del header elif type(radec) != coordinates.SkyCoord: radec = coordinates.SkyCoord(*radec, unit=units.deg) # Convert it into ccd pixels with fits.open(astrom_file) as f: header = f[0].header wcs_ = wcs.WCS(header) xy = np.asarray(radec.to_pixel(wcs_)) del header del wcs_ del radec return xy
e112e6ccbda8b28d2ac23bba1ab5b23fc7a2aab9
3,634,130
def mode2(data,v0,v1, dmin=0.0): """ v0..v1 (both inclusive) are channel selections threshold on dmin for odd number of channels, center line in mode2 will be same as mode1 @todo the frequency axis is not properly calibrated here @todo a full 2D is slow, we only need the 1D version """ print "PVCorr mode2: v0,1=",v0,v1,"dmin=",dmin smin = data.min() s = data[:,v0:v1+1] if dmin==0.0: logging.warning("Using all data in crosscorr") f = s else: f = np.where(s>dmin,s,0) print "PVCorr dmin:",dmin f0 = np.where(s>smin,1,0) f1 = np.where(s>dmin,1,0) fmax = f.max() ffsum = (f*f).sum() print "PVCorr mode2:",f1.sum(),'/',f0.sum(),'min/max',smin,fmax out = scipy.signal.correlate2d(data,f,mode='same')/ffsum print 'PVCorr min/max:',out.min(),out.max() n1,m1,s1,n2,m2,s2 = stats.mystats(out.flatten()) print "PVCorr stats", n1,m1,s1,n2,m2,s2 rms_est = s2/np.sqrt(f1.sum()) return out,rms_est
b6d361cacfba244cc906a60ed5f4f5b637451554
3,634,131
def learning_curve(estimator, X, y, groups=None, train_sizes=np.linspace(0.1, 1.0, 5), cv=None, scoring=None, exploit_incremental_learning=False, n_jobs=1, pre_dispatch="all", verbose=0, shuffle=False, random_state=None): """Learning curve. Determines cross-validated training and test scores for different training set sizes. A cross-validation generator splits the whole dataset k times in training and test data. Subsets of the training set with varying sizes will be used to train the estimator and a score for each training subset size and the test set will be computed. Afterwards, the scores will be averaged over all k runs for each training subset size. Read more in the :ref:`User Guide <learning_curve>`. Parameters ---------- estimator : object type that implements the "fit" and "predict" methods An object of that type which is cloned for each validation. X : array-like, shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples) or (n_samples, n_features), optional Target relative to X for classification or regression; None for unsupervised learning. groups : array-like, with shape (n_samples,), optional Group labels for the samples used while splitting the dataset into train/test set. train_sizes : array-like, shape (n_ticks,), dtype float or int Relative or absolute numbers of training examples that will be used to generate the learning curve. If the dtype is float, it is regarded as a fraction of the maximum size of the training set (that is determined by the selected validation method), i.e. it has to be within (0, 1]. Otherwise it is interpreted as absolute sizes of the training sets. Note that for classification the number of samples usually have to be big enough to contain at least one sample from each class. 
(default: np.linspace(0.1, 1.0, 5)) cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. exploit_incremental_learning : boolean, optional, default: False If the estimator supports incremental learning, this will be used to speed up fitting for different training set sizes. n_jobs : integer, optional Number of jobs to run in parallel (default 1). pre_dispatch : integer or string, optional Number of predispatched jobs for parallel execution (default is all). The option can reduce the allocated memory. The string can be an expression like '2*n_jobs'. verbose : integer, optional Controls the verbosity: the higher, the more messages. shuffle : boolean, optional Whether to shuffle training data before taking prefixes of it based on``train_sizes``. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Used when ``shuffle`` == 'True'. 
------- train_sizes_abs : array, shape = (n_unique_ticks,), dtype int Numbers of training examples that has been used to generate the learning curve. Note that the number of ticks might be less than n_ticks because duplicate entries will be removed. train_scores : array, shape (n_ticks, n_cv_folds) Scores on training sets. test_scores : array, shape (n_ticks, n_cv_folds) Scores on test set. Notes ----- See :ref:`examples/model_selection/plot_learning_curve.py <sphx_glr_auto_examples_model_selection_plot_learning_curve.py>` """ if exploit_incremental_learning and not hasattr(estimator, "partial_fit"): raise ValueError("An estimator must support the partial_fit interface " "to exploit incremental learning") X, y, groups = indexable(X, y, groups) cv = check_cv(cv, y, classifier=is_classifier(estimator)) # Store it as list as we will be iterating over the list multiple times cv_iter = list(cv.split(X, y, groups)) scorer = check_scoring(estimator, scoring=scoring) n_max_training_samples = len(cv_iter[0][0]) # Because the lengths of folds can be significantly different, it is # not guaranteed that we use all of the available training data when we # use the first 'n_max_training_samples' samples. 
train_sizes_abs = _translate_train_sizes(train_sizes, n_max_training_samples) n_unique_ticks = train_sizes_abs.shape[0] if verbose > 0: print("[learning_curve] Training set sizes: " + str(train_sizes_abs)) parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose) if shuffle: rng = check_random_state(random_state) cv_iter = ((rng.permutation(train), test) for train, test in cv_iter) if exploit_incremental_learning: classes = np.unique(y) if is_classifier(estimator) else None out = parallel(delayed(_incremental_fit_estimator)( clone(estimator), X, y, classes, train, test, train_sizes_abs, scorer, verbose) for train, test in cv_iter) else: train_test_proportions = [] for train, test in cv_iter: for n_train_samples in train_sizes_abs: train_test_proportions.append((train[:n_train_samples], test)) out = parallel(delayed(_fit_and_score)( clone(estimator), X, y, scorer, train, test, verbose, parameters=None, fit_params=None, return_train_score=True) for train, test in train_test_proportions) out = np.array(out) n_cv_folds = out.shape[0] // n_unique_ticks out = out.reshape(n_cv_folds, n_unique_ticks, 2) out = np.asarray(out).transpose((2, 1, 0)) return train_sizes_abs, out[0], out[1]
e8109033f2be16494c29c7d8626a3ad23646b9ed
3,634,132
def directed_dfs(digraph, start, end, max_total_dist, max_buildings):
    """Find the shortest path from start to end via a directed depth-first search.

    The total distance traveled on the path must not exceed max_total_dist,
    and the number of buildings visited must not exceed max_buildings.

    Parameters:
        digraph: instance of Digraph or one of its subclasses
            The graph on which to carry out the search
        start: string
            Building number at which to start
        end: string
            Building number at which to end
        max_total_dist: int
            Maximum total distance on a path
        max_buildings: int
            Maximum number of buildings a path can visit

    Returns:
        The shortest path from start to end as a list of building-number
        strings.

    Raises:
        ValueError: if no path satisfies both constraints.
    """
    # Initial search state: [path-so-far, distance traveled, buildings seen].
    initial_state = [[start], 0, 1]
    _, shortest = get_best_path(digraph, start, end, initial_state,
                                max_buildings, max_total_dist, None)
    if shortest is None:
        raise ValueError("No path from {} to {}".format(start, end))
    return shortest
1a88077d9441fc246f3d93194bb1ce53e0e48383
3,634,133
def zeta(z, x, beta2): """ Eq. (6) from Ref[1] (constant term) Note that 'x' here corresponds to 'chi = x/rho', and 'z' here corresponds to 'xi = z/2/rho' in the paper. """ return 3 * (4* z**2 - beta2 * x**2) / 4 / beta2 / (1+x)
aa623a1876fbb13128132960840ea388dac67e85
3,634,134
def getCountry(user): """ Returns the object the view is displaying. """ # get users country from django cosign module user_countries = TolaUser.objects.all().filter(user__id=user.id).values('countries') get_countries = Country.objects.all().filter(id__in=user_countries) return get_countries
6d5825cf28729326626585ae566a221b6e90db47
3,634,135
def calculate_relative_enrichments(results, total_pathways_by_resource):
    """Calculate relative enrichment of pathways (enriched pathways/total pathways).

    :param dict results: mapping of resource name -> collection of enriched pathways
    :param dict total_pathways_by_resource: mapping of resource name -> total pathway count
    :rtype: dict
    """
    relative = {}
    for resource, enriched_pathways in results.items():
        relative[resource] = len(enriched_pathways) / total_pathways_by_resource[resource]
    return relative
7060e032f2a619929cfcf123cf0946d7965b86de
3,634,136
def _import_config(config_dict, validating=False):
    """Applies a previously exported configuration to the current system.

    This method only exists to decouple the import logic from the atomic
    transaction so that this method can be reused for validation without
    making any permanent changes.

    :param config_dict: A dictionary of configuration changes to import.
    :type config_dict: dict
    :param validating: Flag to determine if running a validate or commit transaction.
    :type validating: bool
    :returns: A list of warnings discovered during the import.
    :rtype: list[:class:`port.schema.ValidationWarning`]

    :raises :class:`port.schema.InvalidConfiguration`: If any part of the
        configuration violates the specification.
    """
    warnings = []

    # Validate the top-level configuration structure
    config = Configuration(config_dict)

    # Build a map of row-locked models to import
    # Note the locking order matters here and must be high-level to low-level
    recipe_type_map = None
    if config.recipe_types:
        recipe_type_map = _build_recipe_type_map(config.recipe_types)
    job_type_map = None
    if config.job_types:
        job_type_map = _build_job_type_map(config.job_types)
    error_map = None
    if config.errors:
        error_map = _build_error_map(config.errors)

    # Attempt to create/edit the models
    # NOTE(review): imports run lowest-level first (errors, then job types,
    # then recipe types), presumably so referenced models exist before the
    # models that depend on them — confirm against the importer helpers.
    if error_map:
        for error_dict in config.errors:
            error = error_map.get(error_dict.get('name'))
            warnings.extend(_import_error(error_dict, error))
    if job_type_map:
        for job_type_dict in config.job_types:
            # Job types are keyed by (name, version).
            job_type_key = (job_type_dict.get('name'), job_type_dict.get('version'))
            job_type = job_type_map.get(job_type_key)
            warnings.extend(_import_job_type(job_type_dict, job_type, validating))
    if recipe_type_map:
        for recipe_type_dict in config.recipe_types:
            # Recipe types are keyed by (name, version).
            recipe_type_key = (recipe_type_dict.get('name'), recipe_type_dict.get('version'))
            recipe_type = recipe_type_map.get(recipe_type_key)
            warnings.extend(_import_recipe_type(recipe_type_dict, recipe_type))

    return warnings
f891437d68a05a0d98e489ac07fef4dbc77cfd25
3,634,137
def clean_name(name):
    """Normalize a personal name to ``"First Last"`` form.

    Accepts either ``"Last, First"`` or ``"First Last"`` input (any extra
    middle-name tokens are ignored).  A single-token name is returned
    capitalized as-is instead of raising ``ValueError`` as before.

    :param name: raw name string
    :return: cleaned ``"First Last"`` string (or single capitalized token)
    """
    # flip if in last name, first name format
    tokens = name.split(',')
    if len(tokens) == 2:
        first, last = tokens[1], tokens[0]
    else:
        parts = name.split(' ')
        if len(parts) < 2:
            # Single-token name (e.g. a mononym): nothing to flip.
            return name.strip().capitalize()
        first, last = parts[:2]

    # remove punctuation / surrounding whitespace and normalize case
    first_clean = first.strip().capitalize()
    last_clean = last.strip().capitalize()

    return f'{first_clean} {last_clean}'
ef5fe3e53ba1134c45c30f4b6342a0641e85f114
3,634,138
def find_prime(num_bits: int) -> int:
    """Find a prime represented with given number of bits.

    Generates random numbers of given size until one of them is deemed
    prime by a probabilistic primality check.

    Args:
        num_bits: size of the prime in terms of bits required for
            representing it

    Returns:
        a (probably) prime number with given number of bits
    """
    candidate = generate_prime_candidate(num_bits)
    # Keep drawing fresh candidates until the primality test accepts one.
    while not is_prime(candidate):
        candidate = generate_prime_candidate(num_bits)
    return candidate
91b09571720fb181d4be135c767c9a9f7ca4c6e0
3,634,139
import struct

def UnpackS8(buf, offset=0, endian='big'):
  """ Unpack an 8-bit signed integer into 1 byte.

      Parameters:
        buf     - Input packed buffer.
        offset  - Offset in buffer.
        endian  - Byte order ('big' or similar key of the module-level
                  _EndianCode table).

      Return:
        2-tuple of unpacked value, new buffer offset.
  """
  try:
    # 'b' is the struct code for a signed char (1 byte); endianness has no
    # effect on a single byte but is kept for interface uniformity.
    return (struct.unpack_from(_EndianCode[endian]+'b', buf, offset)[0], offset+1)
  except (KeyError, TypeError, DeprecationWarning, struct.error) as inst:
    # NOTE(review): _UnpackException presumably raises; if it merely reports,
    # this function implicitly returns None — confirm against its definition.
    _UnpackException('s8', offset, endian, inst)
e610b09e5e080634fbcbe3c37b65c8f12988db7e
3,634,140
def estimator(data):
    """Provide the estimate calculations based on the data received.

    :param data: dict with keys 'reportedCases', 'periodType',
        'timeToElapse', 'totalHospitalBeds' and a 'region' sub-dict with
        'avgDailyIncomeInUSD' and 'avgDailyIncomePopulation'.
    :return: dict with the original 'data' plus 'impact' and 'severeImpact'
        estimate dicts.
    """
    #Collect required data from data imput
    impact = {}
    severeImpact = {}
    reportedCases = data['reportedCases']
    periodType = data['periodType']
    timeToElapse = data['timeToElapse']
    totalHospitalBeds = data['totalHospitalBeds']
    avgDailyIncomeInUSD = data['region']['avgDailyIncomeInUSD']
    avgDailyIncomePopulation = data['region']['avgDailyIncomePopulation']

    # Conversion of the duration of the time elapse in day
    duration = get_duration_in_day(periodType, timeToElapse)
    # Infections are modeled as doubling every 3 days.
    factor = duration//3
    #-----
    #Result for Impact
    #CHALLENGE 1
    impact['currentlyInfected'] = reportedCases * 10
    impact['infectionsByRequestedTime'] = impact['currentlyInfected'] * (2**factor)
    #CHALLENGE 2
    # 15% of infections are assumed severe; 5% need ICU; 2% need ventilators.
    impact['severeCasesByRequestedTime'] = int(impact['infectionsByRequestedTime'] * 0.15)
    impact['hospitalBedsByRequestedTime'] = number_beds_avail(totalHospitalBeds, impact['severeCasesByRequestedTime'])
    #CHALLENGE 3
    impact['casesForICUByRequestedTime'] = int(impact['infectionsByRequestedTime'] * 0.05)
    impact['casesForVentilatorsByRequestedTime'] = int(impact['infectionsByRequestedTime'] * 0.02)
    impact['dollarsInFlight'] =int((impact['infectionsByRequestedTime'] * avgDailyIncomePopulation * avgDailyIncomeInUSD)/ duration)
    #--------------------------------------------------------------------------------------------------
    #Result for Severe Impact
    #CHALLENGE 1
    # Severe scenario assumes 50x the reported cases are currently infected.
    severeImpact['currentlyInfected'] = reportedCases * 50
    severeImpact['infectionsByRequestedTime'] = severeImpact['currentlyInfected']*(2**factor)
    #CHALLENGE 2
    severeImpact['severeCasesByRequestedTime'] = int(severeImpact['infectionsByRequestedTime'] * 0.15)
    severeImpact['hospitalBedsByRequestedTime'] = number_beds_avail(totalHospitalBeds, severeImpact['severeCasesByRequestedTime'])
    #CHALLENGE 3
    severeImpact['casesForICUByRequestedTime'] = int(severeImpact['infectionsByRequestedTime'] * 0.05)
    severeImpact['casesForVentilatorsByRequestedTime'] = int(severeImpact['infectionsByRequestedTime'] * 0.02)
    severeImpact['dollarsInFlight'] = int((severeImpact['infectionsByRequestedTime'] * avgDailyIncomePopulation * avgDailyIncomeInUSD) /duration)

    result = {'data':data, 'impact':impact, 'severeImpact': severeImpact}

    return result
82ad0497914ea038a365bc19357b37bf61af3fab
3,634,141
def diff_align(dfs, groupers):
    """ Align groupers to newly-diffed dataframes

    For groupby aggregations we keep historical values of the grouper along
    with historical values of the dataframes.  The dataframes are kept in
    historical sync with the ``diff_loc`` and ``diff_iloc`` functions above.
    This function copies that functionality over to the secondary list of
    groupers.

    :param dfs: sequence of dataframes that has already been trimmed.
    :param groupers: matching sequence of groupers; appears to be a deque
        (``popleft`` is used) — TODO confirm.  Mutated in place.
    :return: ``(old, groupers)`` where ``old`` collects the grouper pieces
        that were dropped to restore alignment.
    """
    old = []
    # Drop whole groupers from the front until the counts match.
    while len(dfs) < len(groupers):
        old.append(groupers.popleft())

    if dfs:
        # The first remaining grouper may still be longer than its dataframe;
        # trim the excess rows from its front.
        n = len(groupers[0]) - len(dfs[0])
        if n:
            old.append(groupers[0][:n])
            groupers[0] = groupers[0][n:]

    # Invariants: one grouper per dataframe, each the same length.
    assert len(dfs) == len(groupers)
    for df, g in zip(dfs, groupers):
        assert len(df) == len(g)
    return old, groupers
2a92476cd913404b737dc941d51083f64ef70978
3,634,142
def get_usage_data(
    es_client, start_date, end_date, match_terms=None, addl_cols=None, index="path-schedd-*"
):
    """Returns rows of usage data.

    :param es_client: Elasticsearch client passed through to ``query_usage``.
    :param start_date: start of the query window.
    :param end_date: end of the query window.
    :param match_terms: optional dict of extra match terms (default: none).
    :param addl_cols: optional list of extra columns to extract in addition
        to the defaults (default: none).
    :param index: index pattern to query.
    :return: list of dicts, one per hit, with missing columns set to None.
    """
    # Mutable defaults ({} / []) replaced with None sentinels to avoid the
    # shared-mutable-default pitfall; behavior for callers is unchanged.
    default_cols = [
        "Owner",
        "ScheddName",
        "GlobalJobId",
        "RecordTime",
        "RemoteWallClockTime",
        "RequestCpus",
        "CpusProvisioned",
        "RequestMemory",
        "MemoryProvisioned",
        "RequestGpus",
        "GpusProvisioned",
        "MachineAttrGLIDEIN_ResourceName0",
        "JobUniverse",
    ]
    cols = default_cols + (addl_cols or [])

    rows = []
    for usage_info in query_usage(
        es_client, start_date, end_date, match_terms=match_terms or {}, index=index
    ):
        source = usage_info["_source"]
        # dict.get(col) yields None for absent columns, matching the old
        # try/except KeyError behavior.
        rows.append({col: source.get(col) for col in cols})
    return rows
665601cab0ac7c12bbde5f702b99ac75bc679872
3,634,143
def check_email_address_validity(email_address):
    """Return True if *email_address* passes Django's validate_email(), else False."""
    try:
        validate_email(email_address)
    except ValidationError:
        # Django signals an invalid address by raising; translate to a bool.
        return False
    return True
809de97c1e87a08e2ebf68b50096fc6d11104c17
3,634,144
import collections

def _GetHostConfigs(lab_config_pool, hosts_or_clusters):
  """Get host configs for clusters.

  Each entry in *hosts_or_clusters* is resolved first as a host name and, if
  that fails, as a cluster name (expanding to all of the cluster's hosts).
  Entries that match neither are logged and skipped.

  Args:
    lab_config_pool: a lab config pool
    hosts_or_clusters: a list of hosts or clusters.
  Returns:
    a list of HostConfigs.
  """
  if not hosts_or_clusters:
    return lab_config_pool.GetHostConfigs()
  # OrderedDict keyed by hostname: preserves first-seen order while
  # de-duplicating hosts that appear both directly and via a cluster.
  host_configs = collections.OrderedDict()
  for host_or_cluster in hosts_or_clusters:
    host_config = lab_config_pool.GetHostConfig(host_or_cluster)
    if host_config:
      logger.debug('Found config for host %s.', host_or_cluster)
      host_configs[host_config.hostname] = host_config
      continue
    logger.debug('No host configured for %s.', host_or_cluster)
    host_configs_for_cluster = lab_config_pool.GetHostConfigs(
        host_or_cluster)
    if host_configs_for_cluster:
      logger.debug('Found config for cluster %s.', host_or_cluster)
      for host_config in host_configs_for_cluster:
        host_configs[host_config.hostname] = host_config
      continue
    logger.error('There is no config for %s, will skip.', host_or_cluster)
  return list(host_configs.values())
096ac69e70889ad4a9889caf09ee02457f309e22
3,634,145
def load_targets_file(input_file):
    """
    Read *input_file* and return its lines as a list, with all newline and
    carriage-return characters removed from each line.

    Precondition: input_file should exist in the file system.
    """
    with open(input_file, 'r') as handle:
        raw_lines = handle.readlines()
    # Strip both LF and CR so files with Unix or Windows line endings work.
    return [line.replace('\n', '').replace('\r', '') for line in raw_lines]
40d305e244264d6c3249bb9fb914cda3ebcda711
3,634,146
def plot_avg_profile(rad_dict, ylim=[0, None]): """ Function for plotting up average profiles including differences Parameters ---------- rad_dict : dict Dictionary of objects and variables to process. See example ylim : list ylimits to use Returns ------- ax : matplotlib ax handle Returns the axis handle for additional updates if needed """ # Set up figure fig, ax = plt.subplots(1, 2, figsize=(10, 5)) # Process each sub-dictionary in the dictionary passed in all_plat = [] all_mean = [] for d in rad_dict: all_plat.append(d) variable = rad_dict[d]['variable'] obj = rad_dict[d]['object'] # Get dimensions and use data from non-time dimension for y-axis dims = list(obj[variable].dims) height = [d for d in dims if 'time' not in d][0] # Plot average profiles ax[0].plot(obj[variable + '_avg_prof'], obj[height], label=d) # Add mean profiles to one array all_mean.append(obj[variable + '_avg_prof'].values) height_units = obj[height].attrs['units'] # Set up plot and add legend ax[0].set_ylim(ylim) ax[0].set_ylabel(height + ' (' + height_units + ')') ax[0].legend(fontsize=8) # Process and plot differences between each radar/object passed in diff_name = [] for i, p in enumerate(all_plat): for j, p2 in enumerate(all_plat): # If same object then continue if p == p2: continue # Track the comparisons and continue if already done diff_name.append([p, p2]) u, ind, ct = np.unique(diff_name, return_inverse=True, return_counts=True) if 1 not in ct: continue # Calculate differences, make label, and plot diff = all_mean[j] - all_mean[i] lab = ' '.join([p2, '-', p + ':', str(round(np.nanmean(diff), 2))]) ax[1].plot(diff, rad_dict[p]['object'][height], label=lab) # Set up plot and add legend ax[1].set_ylim(ylim) ax[1].legend(fontsize=8) plt.tight_layout() return ax
c79d28b289c3f403c49bc5695edc3b17215cf7f0
3,634,147
def localized_dt_string(dt, use_tz=None):
    """Convert datetime value to a string, localized for the specified timezone.

    :param dt: datetime to format; may be naive or timezone-aware.
    :param use_tz: optional tzinfo.  When given, a naive ``dt`` is assumed
        to already be in this zone (no shifting), while an aware ``dt`` is
        converted to it.
    :return: formatted string using the module-level DT_NAIVE format when no
        timezone information is available, DT_AWARE otherwise.
    """
    # Naive input and no target zone: format without offset information.
    if not dt.tzinfo and not use_tz:
        return dt.strftime(DT_NAIVE)
    # Naive input with a target zone: attach the zone without shifting time.
    if not dt.tzinfo:
        return dt.replace(tzinfo=use_tz).strftime(DT_AWARE)
    # Aware input: convert to the target zone if given, else format as-is.
    return dt.astimezone(use_tz).strftime(DT_AWARE) if use_tz else dt.strftime(DT_AWARE)
6ae8fd12d93c360e9f23ec3e4e5080474dc7ea59
3,634,148
import getopt
import sys
import pkg_resources
import os

def parse_command_line():
    """Parses the command line.

    Exits the process for --help, --version, a bad option, or a missing
    --input.  Otherwise returns a dict of the parsed settings (input paths,
    output file, search dirs and the various boolean flags).
    """
    try:
        opts, remaining_args = getopt.getopt(sys.argv[1:], "ihvosmdextn",
                                             ["input=", "help", "verbose", "version", "output=", "strict",
                                              "dump-missed-files=", "search=", "explicit-dependencies=",
                                              "match-executable-by-name-only", "strip-dirname", "no-empty-recipes"])
    except getopt.GetoptError as err:
        # print help information and exit:
        print(str(err))  # will print something like "option -a not recognized"
        usage()  # will exit program

    global verbose
    version = False
    input_rpm = ""
    output_dep = ""
    search_dirs = ""
    missed_list_outfile = ""
    explicit_deps = ""
    strict = False
    strip_dirname = False
    generate_empty_recipes = True
    match_exec_by_filename_only = False
    for o, a in opts:
        if o in ("-i", "--input"):
            input_rpm = a
        elif o in ("-h", "--help"):
            usage()
        elif o in ("-v", "--verbose"):
            verbose = True
        elif o in ("--version"):
            version = True
        elif o in ("-s", "--strict"):
            strict = True
        elif o in ("-o", "--output"):
            output_dep = a
        elif o in ("-t", "--strip-dirname"):
            strip_dirname = True
        elif o in ("-d", "--search"):
            search_dirs = a
        elif o in ("-m", "--dump-missed-files"):
            missed_list_outfile = a
        elif o in ("-e", "--explicit-dependencies"):
            explicit_deps = a
        elif o in ("-n", "--no-empty-recipes"):
            generate_empty_recipes = False
        elif o in ("-x", "--match-executable-by-name-only"):
            match_exec_by_filename_only = True
        else:
            assert False, "unhandled option " + o + a

    if version:
        # Report the installed package version and stop.
        version = pkg_resources.require("rpm_make_rules_dependency_lister")[0].version
        print("{}".format(version))
        sys.exit(0)

    if input_rpm == "":
        print("Please provide --input option (it is a required option)")
        # NOTE(review): os.EX_USAGE is POSIX-only; unavailable on Windows.
        sys.exit(os.EX_USAGE)

    # Normalize the input path to an absolute path as well.
    abs_input_rpm = input_rpm
    if not os.path.isabs(input_rpm):
        abs_input_rpm = os.path.join(os.getcwd(), input_rpm)

    return {'spec_files': remaining_args,
            'input_rpm' : input_rpm,
            'abs_input_rpm' : abs_input_rpm,
            'output_dep' : output_dep,
            'search_dirs' : search_dirs,
            'strict': strict,
            'strip_dirname': strip_dirname,
            'missed_list_outfile': missed_list_outfile,
            "explicit_dependencies":explicit_deps,
            "generate_empty_recipes":generate_empty_recipes,
            "nameonly_check_for_exec_files":match_exec_by_filename_only }
3d7f6771df17af7a3cb92a6dd83da1fbf5cc6d36
3,634,149
def dissolve_project_data(project_data):
    """
    Dissolve several single-part polygon geometries into one geometry.

    Builds a MultiPolygon from the WKT of every item in *project_data* and
    returns the result of OGR's UnionCascaded over it.
    """
    container = ogr.Geometry(ogr.wkbMultiPolygon)
    for entry in project_data:
        # Each entry carries its geometry as WKT under the "wkt" key.
        container.AddGeometry(ogr.CreateGeometryFromWkt(entry["wkt"]))
    return container.UnionCascaded()
1e657087a564e9e134b11bfa382f337c65efa6d2
3,634,150
import os

def get_visit_exposure_footprints(visit_file='j1000p0210_visits.npy', check_paths=['./', '../RAW'], simplify=1.e-6):
    """
    Add exposure-level footprints to the visit dictionary

    Parameters
    ----------
    visit_file : str
        File produced by `parse_visits` (`visits`, `all_groups`, `info`).
        NOTE: this file is overwritten in place with the updated visits.

    check_paths : list
        Look for the individual exposures in `visits[i]['files']` in these
        paths.
        NOTE(review): mutable default argument — only read here, so harmless.

    simplify : float
        Shapely `simplify` parameter the visit footprint polygon.

    Returns
    -------
    visits : dict
    """

    visits, all_groups, info = np.load(visit_file)

    fps = {}

    for visit in visits:
        visit['footprints'] = []
        visit_fp = None
        for file in visit['files']:
            fp_i = None
            # Use the first path where the exposure file is found.
            for path in check_paths:
                pfile = os.path.join(path, file)
                if os.path.exists(pfile):
                    fp_i = utils.get_flt_footprint(flt_file=pfile)

                    if visit_fp is None:
                        visit_fp = fp_i
                    else:
                        # Union with a small buffer (0.05 arcsec in degrees)
                        # to bridge tiny gaps between exposures.
                        visit_fp = visit_fp.union(fp_i).buffer(0.05/3600)
                    break

            # fp_i stays None when the exposure was not found on any path.
            visit['footprints'].append(fp_i)
            if visit_fp is not None:
                if simplify > 0:
                    visit['footprint'] = visit_fp.simplify(simplify)
                else:
                    visit['footprint'] = visit_fp

            fps[file] = fp_i

    ### ToDo: also update visits in all_groups with `fps`

    # Resave the file
    np.save(visit_file, [visits, all_groups, info])

    return visits
ea687e5dea3909874c5a70d259e198a91f7bf1a2
3,634,151
def pega_salada_sobremesa_suco(items):
    """Populate the salad, dessert and juice entries of the given meal menu.

    Scans *items* for entries tagged "SALADA:", "SUCO:" or "SOBREMESA:",
    builds a dict mapping each food kind to its capitalized value, and
    returns that dict together with the remaining untagged items.
    """
    remaining = list(items)
    cardapio = {}
    for kind in ("salada", "suco", "sobremesa"):
        # Tag used to locate this food kind inside the items list,
        # e.g. "SOBREMESA:".
        tag = "{}:".format(kind.upper())
        matches = [entry.replace(tag, "") for entry in remaining if tag in entry]
        # Capitalized form reads better when displayed.
        cardapio[kind] = matches[0].capitalize()
        remaining = [entry for entry in remaining if tag not in entry]
    return cardapio, remaining
4ccf2907a4e828d1357e16e827ad587e4a50a287
3,634,152
def readRGBImg(datapath, driver="GTiff"):
    """
    Reads image path and returns 3-D numpy matrix of RGB image.

    :param datapath: Path/String
        Image path that is opened by rasterio.
    :param driver: String
        GDAL driver for opening the image. Default: 'GTiff'.
    :return: rasterio data, Ndarray
        Tuple with opened rasterio data and 3-D numpy matrix of image.
        NOTE: the returned dataset is left open; the caller is responsible
        for closing it.
    """
    src_img = rasterio.open(datapath, driver=driver)
    # Bands 1..3 are assumed to hold R, G, B respectively — TODO confirm
    # for inputs whose band order differs.
    r = src_img.read(1)
    g = src_img.read(2)
    b = src_img.read(3)

    return src_img, np.dstack((r, g, b))
9f20ea8ca8c9594ddad570a650e5a496cf87e1ad
3,634,153
def show_hidden_word(secret_word, old_letters_guessed):
    """Return *secret_word* with unguessed letters masked.

    Each letter present in *old_letters_guessed* is shown as-is; every other
    letter is rendered as ' _ ' (underscore padded with spaces).

    :param secret_word: the word being guessed
    :param old_letters_guessed: letters already guessed
    :return: String of the hidden word except the letters already guessed
    """
    # str.join avoids the quadratic cost of repeated string concatenation.
    return ''.join(
        letter if letter in old_letters_guessed else ' _ '
        for letter in secret_word
    )
2b3618619dcde2875da9dc8600be334e7aaadaad
3,634,154
import logging
import os

def parse_args(args: list) -> dict:
    """
    Create a parser for command line attributes and parses them

    :param args: the arguments to parse
    :return: parsed arguments as a dict
    """
    parser = SmartArgumentParser(
        description="Triggers some bug in listed programs",
        parents=[arguments.get_verbosity_parser()]
    )
    plugin_parser = parser.add_subparsers(metavar="plugin")
    parser.add_argument("bugs", nargs="+", type=str,
                        help="one of {} or all".format(", ".join(PROGRAMS)), metavar="bug",
                        choices=PROGRAMS+["all"])
    register_for_trigger(parser=parser, subparser=plugin_parser)
    parsed_args = parser.parse_args(args)
    # noinspection PyUnresolvedReferences
    logging.getLogger().setLevel(parsed_args.logging_level)
    _bugs = parsed_args.bugs
    if "all" in _bugs:
        # Expand "all" to every program whose install directory exists.
        parsed_args.bugs = [
            program for program in PROGRAMS
            if os.path.exists(get_trigger_conf(program).get("install_directory"))
        ]
    else:
        # NOTE(review): argparse choices already enforce membership, so this
        # re-validation loop should be unreachable — confirm.
        for _ in _bugs:
            if _ not in PROGRAMS:
                parser.print_help()
                exit(PROGRAM_ARGUMENT_ERROR)
    return vars(parsed_args)
7e669dc3c9af1fb56b6f78b74393af1b0959e2e2
3,634,155
import uuid

def get_cas_user(tree):
    """
    Callback invoked by the CAS module that ensures that the user signing in via CAS has a valid Django User associated
    with them. Primary responsibility is to create a Django User / Participant if none existed, or to associate the CAS
    login id with the given User. This needs to be done *before* the CAS module creates a User object so that we don't
    end up creating duplicate users with a different username and the same email address.

    1. If no Django user exists with the given username (institutional username), get details from the ASU web directory
    (FIXME: this is brittle and specific to ASU, will need to update if we ever roll CAS login out for other
    institutions) and populate a Django user / vcweb Participant with those details

    2. If a Django user does exist with the given institutional username (e.g., asurite) there are a few corner cases to
    consider:
        a. the account could have been created before CAS was implemented, so there is no institutional username set (or
        it's set to the email address instead of the ASURITE id). In this case we need to set the username to the
        ASURITE id
        b. easy case, the account was created via CAS and all the fields are correct

    To make it working for any specific institution you'll need to change the CAS settings in the settings.py file
    Following settings are important and required by the VCWEB and are university specific

    1. CAS_UNIVERSITY_NAME - Institutional CAS provider name
    2. CAS_UNIVERSITY_URL - Institutional CAS provider URL
    3. WEB_DIRECTORY_URL - Web directory service URL providing basic user details based on institutional username
    4. CAS_SERVER_URL - CAS URL provided by the institution to centrally authenticate users
    5. CAS_REDIRECT_URL - The relative url where the user should be re-directed after successful authentication
    6. CAS_RESPONSE_CALLBACKS - Callback invoked after successful authentication by CAS
    """
    # The CAS response XML carries the institutional username as the first
    # nested text element; normalize to lowercase for lookup.
    username = tree[0][0].text.lower()
    logger.debug("cas tree: %s", tree)
    try:
        user = User.objects.get(username=username)
    except User.DoesNotExist:
        # First CAS login for this username: create the participant record.
        user = create_cas_participant(username, tree)
    if user:
        # Issue a fresh random authentication token on every login.
        set_authentication_token(user, uuid.uuid4().hex)
    return user
e30be40d4bd2ebb92bc264f769c853d11aac6bf3
3,634,156
def get_total_gap(data : np.ndarray) -> float:
    """
    Computes the total gap in time units for a given dataset
    :param data: dataset of the lightcurve
    :return: total gap in units of time
    """
    # Unique time differences, their occurrence counts, and the most common
    # difference (the nominal cadence) — helper defined elsewhere in the module.
    values,counts,most_common = get_diff_values_counts_most_common(data)
    # Zero out differences at (or near) the nominal cadence so only larger
    # differences contribute to the total gap.
    # NOTE(review): the condition is not abs(values - most_common) < 1e-5, so
    # differences *smaller* than the cadence are zeroed as well — confirm
    # this is intended.
    values[values - most_common < 10**-5] = 0
    return float(np.sum(values*counts))
9cba80b47c430b38236f9160e6b62a8eb2ba9cab
3,634,157
def print_srt_line(i, elms):
    """Render one subtitle entry in SRT format and return it as a string.

    *elms* holds (start-time, duration, html-text); the end timestamp is
    start + duration.
    """
    start_stamp = format_srt_time(elms[0])
    end_stamp = format_srt_time(float(elms[0]) + float(elms[1]))
    text = convert_html(elms[2])
    return "{}\n{} --> {}\n{}\n\n".format(i, start_stamp, end_stamp, text)
cd089bdc06417f3f0915f97272ba7ec0bdc7d153
3,634,158
def depthwise_separable_conv(inputs, num_pwc_filters, width_multiplier, scope, downsample=False):
  """Depth-wise separable convolution.

  A 3x3 depthwise convolution (optionally strided for downsampling) followed
  by a 1x1 pointwise convolution, each with batch normalization, in the
  MobileNet style.

  Args:
    inputs: input tensor.
    num_pwc_filters: base number of pointwise filters, scaled by
      `width_multiplier` and rounded.
    width_multiplier: channel-width scaling factor.
    scope: variable scope prefix for the layer names.
    downsample: when True, use stride 2 in the depthwise convolution.

  Returns:
    The batch-normalized output of the pointwise convolution.
  """
  num_pwc_filters = round(num_pwc_filters * width_multiplier)
  _stride = 2 if downsample else 1

  # skip pointwise by setting num_outputs=None
  depthwise_conv = slim.separable_convolution2d(
      inputs,
      num_outputs=None,
      stride=_stride,
      depth_multiplier=1,
      kernel_size=[3, 3],
      scope=scope + '/depthwise_conv')

  bn = slim.batch_norm(depthwise_conv, scope=scope + '/dw_batch_norm')
  pointwise_conv = slim.convolution2d(
      bn,
      num_pwc_filters,
      kernel_size=[1, 1],
      scope=scope + '/pointwise_conv')
  bn = slim.batch_norm(pointwise_conv, scope=scope + '/pw_batch_norm')
  return bn
d6de489b766800957dceba05b1ba76f337974b04
3,634,159
def filter_packages(packages: list, key: str) -> list:
    """Filter out packages based on the given category.

    Returns the packages whose "category" field equals *key*, preserving
    their original order.
    """
    matching = []
    for package in packages:
        if package["category"] == key:
            matching.append(package)
    return matching
46f11f5a8269eceb9665ae99bdddfef8c62295a2
3,634,160
def ptlinear(x, W, b=None, b2=None, is_pre_training=False, activation=None):
    """Pre trainable Linear function, or affine transformation.

    It accepts two or three arguments: an input minibatch ``x``, a weight
    matrix ``W``, and optionally a bias vector ``b``. It computes
    :math:`Y = xW^\top + b`.

    Args:
        x (~chainer.Variable): Input variable. Its first dimension is assumed
            to be the *minibatch dimension*. The other dimensions are treated
            as concatenated one dimension whose size must be ``N``.
        W (~chainer.Variable): Weight variable of shape ``(M, N)``.
        b (~chainer.Variable): Bias variable (optional) of shape ``(M,)``.
        b2 (~chainer.Variable): Second bias variable, forwarded alongside
            ``b`` (may be None).
        is_pre_training: boolean.
            NOTE(review): accepted but unused in this function — confirm
            whether it should influence the call.
        activation: function for pre-training auto encoder.
            NOTE(review): also unused here.

    Returns:
        ~chainer.Variable: Output variable.

    .. seealso:: :class:`~chainer.links.Linear`
    """
    if b is None:
        return PTLinearFunction()(x, W)
    else:
        # b2 is forwarded even when it is None.
        return PTLinearFunction()(x, W, b, b2)
2a3ae59b1c7e5e15506ec251fa336b89ff1e5f48
3,634,161
def cubic_bezier(pts, t):
    """
    Evaluate a cubic Bezier curve at parameter *t*.

    :param pts: sequence of four control points (p0, p1, p2, p3)
    :param t: curve parameter, typically in [0, 1]
    :return: the interpolated point as an array
    """
    p0, p1, p2, p3 = (pylab.array(p) for p in pts)
    u = 1 - t
    # Standard Bernstein form of the cubic Bezier.
    return (p0 * u ** 3
            + 3 * t * p1 * u ** 2
            + 3 * t ** 2 * u * p2
            + t ** 3 * p3)
3239f0afcda78605d3ea2cf3e77bd3ee3827b358
3,634,162
from typing import List

def _GetServerComponentArgs(config_path: str) -> List[str]:
  """Returns a set of command line arguments for server components.

  Args:
    config_path: Path to a config path generated by
      self_contained_config_writer.

  Returns:
    An iterable with command line arguments to use.
  """
  primary_config_path = package.ResourcePath(
      "grr-response-core", "install_data/etc/grr-server.yaml")
  secondary_config_path = package.ResourcePath(
      "grr-response-test", "grr_response_test/test_data/grr_test.yaml")
  # Reserve a free port for the monitoring HTTP endpoint; allow a small
  # range above it as fallback.
  monitoring_port = portpicker.pick_unused_port()
  return [
      "--config",
      primary_config_path,
      "--secondary_configs",
      ",".join([secondary_config_path, config_path]),
      "-p",
      f"Monitoring.http_port={monitoring_port}",
      "-p",
      f"Monitoring.http_port_max={monitoring_port+10}",
      "-p",
      "AdminUI.webauth_manager=NullWebAuthManager",
  ]
7f768c8eaa6dc47dc2be3da5297cf17550e26896
3,634,163
import random
import string


def random_string(length=4):
    """Generate a random string of uppercase letters and digits.

    Keyword Arguments:
        length {int} -- The amount of the characters to generate (default: {4})

    Returns:
        string
    """
    alphabet = string.ascii_uppercase + string.digits
    return "".join(random.choice(alphabet) for _ in range(length))
ad9816e22a898e1e7d17d1bc9f0e56265bb09ac6
3,634,164
def FindBySummaryName(name):
   """
   Find the first instance of a virtual machine with the specified name.

   Returns the VM object whose summary config name equals *name*, or None
   if no such VM exists.
   """
   vms = GetAll()
   for vm in vms:
      try:
         summary = vm.GetSummary()
         config = summary.GetConfig()
         if config != None and config.GetName() == name:
            return vm
      except Vmodl.Fault.ManagedObjectNotFound:
         # The VM was deleted between enumeration and inspection; skip it.
         pass
   return None
981d8e13b6bdf39302f5a192453bbb1ea1f4e943
3,634,165
# NOTE: Python 2 code (print statement below).
def get_potential_trace_fields(poly,sln=2):
    """Given a minimal polynomial of a trace field, returns a list of minimal
    polynomials of the potential invariant trace fields.

    poly: minimal polynomial as a string, passed to PARI.
    sln: degree-relation parameter forwarded to _knmiss.
    """
    pol = pari(poly)
    try:
        # Skip the trivial first subfield; keep those whose degrees satisfy
        # the _knmiss relation, reduced via polredabs.
        return [str(rec[0].polredabs()) for rec in pol.nfsubfields()[1:] if _knmiss(rec[0].poldegree(),pol.poldegree(),sln=sln)] # poldegree returns int
    except: # we want cypari.gen.PariError, but no idea how to reference; fortunately, anything else will just raise again
        try:
            pol = pol.polredabs()
        except: # actually except PariError again
            print 'When running trace field '+poly+' polredabs couldn\'t handle it.'
            return [poly] # between this return and the above print statement, we should know when the above error happened.
        # Retry once with the reduced polynomial.
        return get_potential_trace_fields(str(pol),sln=sln)
a952fefdd98f38b0c23d3ca3962b85584daa70be
3,634,166
import os

def get_movielens(path=None, variant="ml-25m"):
    """Gets the movielens dataset for use with merlin-models

    This function will return a tuple of train/test merlin.io.Dataset objects for the
    movielens dataset. This will download the movielens dataset locally if needed, and
    run a ETL pipeline with NVTabular to make this dataset ready for use with merlin-models

    Parameters
    ----------
    path : str
        The path to download the files locally to. If not set will default to
        the 'merlin-models-data` directory in your home folder
    variant : "ml-25m" or "ml-100k"
        Which variant of the movielens dataset to use. Must be either "ml-25m" or "ml-100k"

    Returns
    -------
    tuple
        A tuple consisting of a merlin.io.Dataset for the training dataset and validation dataset
    """
    if path is None:
        # INPUT_DATA_DIR env var overrides the default home-folder location.
        path = os.environ.get(
            "INPUT_DATA_DIR", os.path.expanduser("~/merlin-models-data/movielens/")
        )

    variant_path = os.path.join(path, variant)
    if not os.path.exists(variant_path):
        # First use: download the raw data and run the NVTabular ETL pipeline.
        os.makedirs(variant_path)
        movielens_download_etl(path, variant)

    train = merlin.io.Dataset(os.path.join(variant_path, "train"), engine="parquet")
    valid = merlin.io.Dataset(os.path.join(variant_path, "valid"), engine="parquet")
    return train, valid
e59a3fe560264ad43451cdd0e0ef457a25cbfbd6
3,634,167
def launch():
    """Initialize the module.

    Returns a UERRCMeasurementsWorker wired to the UERRCMeasurements app and
    the RRC-measurements response PT type (both defined elsewhere in this
    module).
    """
    return UERRCMeasurementsWorker(UERRCMeasurements, PRT_UE_RRC_MEASUREMENTS_RESPONSE)
d26a4e28b5e541eaba9543211b477ba3734a5082
3,634,168
def enlarge_histogram(file = None,filename = None):
    """
    Arguments:
        file: an image file that is going to be processed
        filename: a filename of the file to be processed
    Returns:
        The same input image but with its histogram enlarged.
        Also plots the original and enlarged images with their histograms.
    """
    if file is None and filename is None:
        return "Error, both arguments cant be None"
    if file is None:
        file = ImageOps.grayscale(Image.open(filename))

    #Creates a figure to plot the values
    fig = plt.figure(figsize=(10, 7))
    rows = 2
    columns = 2

    image = np.asarray(file)
    original_shape = image.shape
    backup = image
    image = image.ravel()
    s_values, bin_idx, s_counts = np.unique(image, return_inverse=True,return_counts=True)
    r_max = np.amax(image)
    r_min = np.amin(image)
    # NOTE(review): r_min is clamped to 1 when 0, but r_min is never used in
    # the scaling below — confirm whether the intended formula was
    # (backup - r_min) * 255 / (r_max - r_min).  Also: r_max == r_min would
    # divide by zero.
    if(r_min == 0):
        r_min = 1
    # NOTE(review): zeros_matrix is unused.
    zeros_matrix = np.zeros(original_shape)

    #Enlarges the histogram
    B = (255/(r_max - r_min)) * backup
    B = B.astype(np.uint8)

    #Calculates the new histogram
    b_values, bin_idx, b_counts = np.unique(B.ravel(), return_inverse=True,return_counts=True)

    #plots the images
    fig.add_subplot(rows, columns, 1)
    plt.imshow(backup, cmap = 'gray',vmin = 0,vmax = 255)
    plt.title("Original")
    plt.axis('off')

    fig.add_subplot(rows, columns, 2)
    # NOTE(review): vmax=300 here vs 255 above — confirm if intentional.
    plt.imshow(B, cmap = 'gray',vmin = 0,vmax = 300)
    plt.title("Enlarged image")
    plt.axis('off')

    fig.add_subplot(rows, columns, 3)
    plt.bar(s_values,s_counts,align='center')
    plt.title("Original Histogram")
    plt.xlabel("Grey scale")
    plt.ylabel("Repetitions")

    fig.add_subplot(rows, columns, 4)
    plt.bar(b_values,b_counts,align='center')
    plt.title("Enlarged Histogram")
    plt.xlabel("Grey scale")
    plt.ylabel("Repetitions")

    plt.show()
    return B
5997647820fb4cee16f0c4e3d6e588859ee2ea77
3,634,169
def task(n):
    """Return 2 raised to the power *n*."""
    return pow(2, n)
5780e22d4916664d66279d8ad8afed3b176d9adb
3,634,170
import os


def create_splits(dataframe, split_path, n_splits=10):
    """Split *dataframe* row-wise into *n_splits* gzip-compressed CSV files.

    Rows are divided into contiguous chunks of equal size; the final split
    absorbs any remainder.  Files are written as
    ``<split_path>split-<i>.csv.gz`` without the index, and with the
    spurious ``'Unnamed: 0'`` column (a leftover index from a previous
    ``to_csv``/``read_csv`` round-trip) dropped when present instead of
    crashing when absent.

    :param dataframe: pandas DataFrame to split.
    :param split_path: directory/prefix where the split files are written.
    :param n_splits: number of output files.
    :return: 0 if any target file already exists (nothing further is
        written); None on success.
    """
    length = int(dataframe.shape[0] / int(n_splits))
    for i in range(n_splits):
        if i == n_splits - 1:
            # Last split takes all remaining rows (including the remainder).
            frame = dataframe.iloc[i * length:]
        else:
            frame = dataframe.iloc[i * length:(i + 1) * length]
        name = split_path + f'split-{i}.csv.gz'
        if os.path.exists(name):
            print(f'File {name} already exists! Exiting...')
            return 0
        # errors='ignore' keeps frames without the stray column working;
        # the previous debug print of frame.columns has been removed.
        frame = frame.drop(columns=['Unnamed: 0'], errors='ignore')
        frame.to_csv(name, index=False, compression='gzip')
fd6c8e31fe271a957028ff7471a1294a84ee62be
3,634,171
def get_relationship(context, user_object):
    """caches the relationship between the logged in user and another user

    Reads the logged-in user from the template/request context, then returns
    the cached relationship name between that user and *user_object*,
    computing and caching it via get_or_set when absent.
    """
    user = context["request"].user
    # Cache key is ordered (viewer id, viewed id); timeout is 3 days
    # (259200 seconds).
    return get_or_set(
        f"relationship-{user.id}-{user_object.id}",
        get_relationship_name,
        user,
        user_object,
        timeout=259200,
    )
5c640f51b8319ad918e6c176be4ca5fab143de1c
3,634,172
def _ts_midpoint(x1, d: int):
    """moving midpoint: (ts_max + ts_min) / 2"""
    # Previous implementation returned ts_max + ts_min without halving,
    # contradicting the documented midpoint formula.
    return (_ts_max(x1, d) + _ts_min(x1, d)) / 2
b8916ea7bb347a828fc504bbd19bf5eeeed57e5c
3,634,173
import numbers

def maybe_delivery_mode(
        v, modes=DELIVERY_MODES, default=PERSISTENT_DELIVERY_MODE):
    """Get delivery mode by name (or none if undefined).

    :param v: delivery mode, given either as its integral value (returned
        unchanged) or as a name to look up in *modes*; falsy values select
        *default*.
    :param modes: mapping of delivery-mode names to integral values.
    :param default: mode returned when *v* is falsy.
    """
    if v:
        # Integral values pass straight through; anything else is treated
        # as a key into the modes mapping.
        return v if isinstance(v, numbers.Integral) else modes[v]
    return default
20221a11f9af378e2b877cf76941f2f05ff2c8da
3,634,174
def load_pairs(path: str) -> list:
    """
    Loads the pairs specified in a file in the format of "word1 word2" separated by new line.
    :param path: Path to the file containing the pairs.
    :return: The list of unique tuples contained in the file, but not their
        inverse counterpart as opposed to load_constraints.
    """
    pairs = list()
    with open(file=path, mode="r", encoding="utf-8") as pairs_file:
        for line in pairs_file:
            # str.split() with no argument strips the newline and tolerates
            # repeated whitespace; the old split(" ") raised ValueError on
            # double spaces and blank lines.
            parts = line.split()
            if not parts:
                continue  # skip blank lines
            w0, w1 = parts
            pairs.append((w0, w1))
    # NOTE: the redundant pairs_file.close() inside the `with` block was
    # removed; the context manager already closes the file.
    return unique(pairs)
7f98937c14315d00feb32db79c54e6c79fa32e3a
3,634,175
def load_image_into_numpy_array(path):
    """Load an image from file into a numpy array.

    Puts image into numpy array to feed into tensorflow graph. By
    convention the result has shape (height, width, channels), with
    channels=3 for RGB.

    Args:
        path: the file path to the image

    Returns:
        uint8 numpy array with shape (img_height, img_width, 3)
    """
    raw = tf.io.gfile.GFile(path, 'rb').read()
    img = Image.open(BytesIO(raw))
    width, height = img.size
    flat = np.array(img.getdata())
    return flat.reshape((height, width, 3)).astype(np.uint8)
b85dd2ee866231a0db53bfab8151fdad9e875ff8
3,634,176
def is_illumina_run(run_dir):
    """
    Detects signature files in the run directory (eg RunInfo.xml) to
    detemine if it's likely to be an Illumina sequencing run or not.

    :param run_dir: The path to the run.
    :type run_dir: str
    :return: True if it looks like an Illumina run.
    :rtype: bool
    """
    # Ideas: Detecting RunInfo.xml is probably good enough, but we could
    #        look for other files, and analyse the directory name for the
    #        {date}_{instrument_id}_{run_number}_{flowcell_id} pattern too

    # Idiom fix: `if cond: return True / return False` collapsed to the
    # boolean expression itself.
    return exists(join(run_dir, 'RunInfo.xml'))
7069e29c977da3d4f23bf6e3321ec3ebc7b44d9f
3,634,177
import os
def get_data_filepath(filename):
    """Construct filepath for a file in the test/data directory

    Args:
        filename: name of file

    Returns:
        full path to file
    """
    here = os.path.dirname(__file__)
    return os.path.join(here, 'data', filename)
d3d83cbf83d32b0252658f77b7bbb6fbdb99845f
3,634,178
from datetime import datetime
import requests
def polo_return_chart_data(currency_pair, start_unix=None, end_unix=None,
                           period_unix=14400, format_dates=True,
                           to_frame=True):
    """
    Returns public exchange data for the given currency_pair
    between the start_unix and end_unix timeframes.

    :param currency_pair: The curreny pair
    :param start_unix: (int, str)
        unix timestamp corresponding to the start date/time.
        Defaults to 10 years before now when omitted.
    :param end_unix: (int, str)
        unix timestamp corresponding to the end date/time.
        Defaults to now when omitted.
    :param period_unix: (int, str)
        unix timestamp corresponding to the start period.
    :param format_dates: (bool, default True)
        True formats the 'date' values into pandas.Timestamp
        rather than unix date/times.
    :param to_frame: (bool, default True)
        True returns a pandas.DataFrame with the data contained
        False returns a list of dictionary objects containing
        the data for each period.
    :raises Exception: when Poloniex returns an 'error' payload.
    :return:
        - close (float)
        - date (pd.Timestamp)
        - high (float)
        - low (float)
        - open (float)
        - quoteVolume (float)
        - volume (float)
        - weightedAverage (float)
    """
    # Fill in default time bounds: last 10 years up to now.
    if not start_unix:
        start_unix = timestamp_to_utc(datetime.now() - pd.DateOffset(years=10))
    if not end_unix:
        end_unix = timestamp_to_utc(datetime.now())

    params = {'currencyPair': currency_pair,
              'start': start_unix,
              'end': end_unix,
              'period': str(period_unix)}

    res = requests.get('https://poloniex.com/public?'
                       'command=returnChartData',
                       params=params).json()

    # A dict response (rather than a list of candles) indicates an API error.
    if hasattr(res, 'get'):
        error = res.get('error', None)
        if error:
            raise Exception('{}: {}'.format(error, currency_pair))

    # NOTE(review): a plain-string response is treated as "no data" and
    # returned unchanged -- confirm that is the intended contract.
    if format_dates is True and not isinstance(res, str):
        for r in res:
            r[DATE] = timestamp_from_utc(r[DATE])

    if to_frame is True and not isinstance(res, str):
        res = pd.DataFrame(data=res, index=range(len(res)))

    return res
51f8ecfce81359132ede56ff5b37f106e54183b2
3,634,179
def ergtoboatspeed(min, sec, ratio, crew, rigging, erg):
    """ Calculates boat speed, given an erg split for given crew, boat, erg """
    # First element of ergtopower's result is the power figure.
    power = ergtopower(min, sec, ratio, crew, erg)[0]
    return constantwatt(power, crew, rigging)
d192c0e9edc1ef46468dc4762c22d45320ad36a9
3,634,180
import argparse
def parse_args(args):
    """Parse command line arguments.
    """
    cli = argparse.ArgumentParser(description='Generate Shadow Hashes')
    cli.add_argument('-m', '--method', choices=shadow.HASH_METHODS,
                     default='SHA512',
                     help='Hashing method to use, default is SHA512')
    cli.add_argument('password', nargs='*',
                     help='Password to generate hashes for.')
    return cli.parse_args(args)
a64f037f10d7fad2a0231dd5f7d280bd5e14965a
3,634,181
def interval_range(
    start=None, end=None, periods=None, freq=None, name=None, closed="right",
) -> "IntervalIndex":
    """
    Returns a fixed frequency IntervalIndex.

    Parameters
    ----------
    start : numeric, default None
        Left bound for generating intervals.
    end : numeric , default None
        Right bound for generating intervals.
    periods : int, default None
        Number of periods to generate
    freq : numeric, default None
        The length of each interval. Must be consistent
        with the type of start and end
    name : str, default None
        Name of the resulting IntervalIndex.
    closed : {"left", "right", "both", "neither"}, default "right"
        Whether the intervals are closed on the left-side, right-side,
        both or neither.

    Returns
    -------
    IntervalIndex

    Examples
    --------
    >>> import cudf
    >>> import pandas as pd
    >>> cudf.interval_range(start=0,end=5)
    IntervalIndex([(0, 0], (1, 1], (2, 2], (3, 3], (4, 4], (5, 5]],
    ...closed='right',dtype='interval')
    >>> cudf.interval_range(start=0,end=10, freq=2,closed='left')
    IntervalIndex([[0, 2), [2, 4), [4, 6), [6, 8), [8, 10)],
    ...closed='left',dtype='interval')
    >>> cudf.interval_range(start=0,end=10, periods=3,closed='left')
    ...IntervalIndex([[0.0, 3.3333333333333335),
            [3.3333333333333335, 6.666666666666667),
            [6.666666666666667, 10.0)],
            closed='left',
            dtype='interval')
    """
    # Exactly three of the four parameters may be given; all four is an error.
    if freq and periods and start and end:
        raise ValueError(
            "Of the four parameters: start, end, periods, and "
            "freq, exactly three must be specified"
        )
    # Wrap the provided values in cudf Scalars so dtype inspection works.
    args = [
        cudf.Scalar(x) if x is not None else None
        for x in (start, end, freq, periods)
    ]
    if any(
        not _is_non_decimal_numeric_dtype(x.dtype) if x is not None else False
        for x in args
    ):
        raise ValueError("start, end, periods, freq must be numeric values.")
    *rargs, periods = args
    common_dtype = find_common_type([x.dtype for x in rargs if x])
    start, end, freq = rargs
    periods = periods.astype("int64") if periods is not None else None

    if periods and not freq:
        # Derive the step from (end - start) / periods.
        # if statement for mypy to pass
        if end is not None and start is not None:
            # divmod only supported on host side scalars
            quotient, remainder = divmod((end - start).value, periods.value)
            if remainder:
                # Uneven division: fall back to a floating-point step.
                freq_step = cudf.Scalar((end - start) / periods)
            else:
                freq_step = cudf.Scalar(quotient)
            if start.dtype != freq_step.dtype:
                start = start.astype(freq_step.dtype)
            # periods+1 bin edges give `periods` intervals.
            bin_edges = sequence(
                size=periods + 1,
                init=start.device_value,
                step=freq_step.device_value,
            )
            left_col = bin_edges[:-1]
            right_col = bin_edges[1:]
    elif freq and periods:
        # Derive the missing bound from freq * periods.
        if end:
            start = end - (freq * periods)
        if start:
            end = freq * periods + start
        if end is not None and start is not None:
            left_col = arange(
                start.value, end.value, freq.value, dtype=common_dtype
            )
            end = end + 1
            start = start + freq
            right_col = arange(
                start.value, end.value, freq.value, dtype=common_dtype
            )
    elif freq and not periods:
        if end is not None and start is not None:
            end = end - freq + 1
            left_col = arange(
                start.value, end.value, freq.value, dtype=common_dtype
            )
            end = end + freq + 1
            start = start + freq
            right_col = arange(
                start.value, end.value, freq.value, dtype=common_dtype
            )
    elif start is not None and end is not None:
        # Only the two bounds given: unit-width intervals.
        # if statements for mypy to pass
        if freq:
            left_col = arange(
                start.value, end.value, freq.value, dtype=common_dtype
            )
        else:
            left_col = arange(start.value, end.value, dtype=common_dtype)
        start = start + 1
        end = end + 1
        if freq:
            right_col = arange(
                start.value, end.value, freq.value, dtype=common_dtype
            )
        else:
            right_col = arange(start.value, end.value, dtype=common_dtype)
    else:
        raise ValueError(
            "Of the four parameters: start, end, periods, and "
            "freq, at least two must be specified"
        )
    # Degenerate case: no intervals were produced.
    if len(right_col) == 0 or len(left_col) == 0:
        dtype = IntervalDtype("int64", closed)
        data = column.column_empty_like_same_mask(left_col, dtype)
        return cudf.IntervalIndex(data, closed=closed)

    interval_col = column.build_interval_column(
        left_col, right_col, closed=closed
    )
    return IntervalIndex(interval_col)
a2873a34780da22c8955278c358f45d0432b1f53
3,634,182
def get_summary_description(node_def):
    """Given a TensorSummary node_def, retrieve its SummaryDescription.

    When a Summary op is instantiated, a SummaryDescription of associated
    metadata is stored in its NodeDef. This method retrieves the
    description.

    Args:
      node_def: the node_def_pb2.NodeDef of a TensorSummary op

    Returns:
      a summary_pb2.SummaryDescription

    Raises:
      ValueError: if the node is not a summary op.
    """
    if node_def.op != 'TensorSummary':
        raise ValueError("Can't get_summary_description on %s" % node_def.op)
    raw = _compat.as_str_any(node_def.attr['description'].s)
    description = SummaryDescription()
    _json_format.Parse(raw, description)
    return description
80df9bd63c23aa9f1f92d5d3a1f49dd54c4f0737
3,634,183
def _einsum_kronecker_product(*trans_mats):
    """Compute the Kronecker product of several matrices via :func:`numpy.einsum`.

    :func:`numpy.einsum` yields an array with one pair of dimensions per
    input matrix (each sized like that matrix's rows/columns), so the
    result is reshaped back to a two-dimensional matrix. The letter
    ordering in the generated signature guarantees that this reshape
    produces the correct Kronecker product.
    """
    total_groups = np.prod([mat.shape[0] for mat in trans_mats])
    signature = _generate_einsum_signature(len(trans_mats))
    product = np.einsum(
        signature,
        *trans_mats,
        dtype=DTYPE_GROUP_TRANSITION_PROBABILITIES,
        casting="same_kind",
    )
    return product.reshape(total_groups, total_groups)
39622c94ea9138b4cb2511922166178998231b2e
3,634,184
def calc_loss_class(true_box_conf, CLASS_SCALE, true_box_class, pred_box_class):
    """
    == input ==
    true_box_conf  : tensor of shape (N batch, N grid h, N grid w, N anchor)
    true_box_class : tensor of shape (N batch, N grid h, N grid w, N anchor),
                     containing class index
    pred_box_class : tensor of shape (N batch, N grid h, N grid w, N anchor, N class)
    CLASS_SCALE    : 1.0

    == output ==
    Scalar classification loss: per-cell cross entropy, weighted by
    L_{i,j}^obj * lambda_class and averaged over the weighted cells.
    """
    weight_mask = true_box_conf * CLASS_SCALE  # L_{i,j}^obj * lambda_class
    n_weighted = tf.reduce_sum(tf.cast(weight_mask > 0.0, tf.float32))
    xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=true_box_class, logits=pred_box_class)
    # 1e-6 guards against division by zero when no cell carries weight.
    return tf.reduce_sum(xent * weight_mask) / (n_weighted + 1e-6)
3971cdce266fc0af85f9a3d56e0266ecb31ff0da
3,634,185
def unmixGradProjMatrixNNLS(image, A, tolerance=1e-4, maxiter=100):
    """
    Performs NNLS via Gradient Projection of the primal problem.
    Terminates when the duality gap falls below ``tolerance`` or after
    ``maxiter`` iterations, whichever comes first. Returns an
    (n1, n2, k) array of non-negative abundances.
    """
    if image.ndim == 2:
        n1, n3 = image.shape
        n2 = 1
    elif image.ndim == 3:
        n1, n2, n3 = image.shape
    k = A.shape[1]

    # Work on an (n3, n1*n2) matrix of pixel spectra.
    Y = image.reshape(n1 * n2, n3).T

    # Precomputed quantities.
    ATA = np.dot(A.T, A)
    ATY = np.dot(A.T, Y)
    # Spectral norm of A'A bounds the gradient step size.
    step = np.linalg.norm(ATA, ord=2)

    # Start from the thresholded pseudo-inverse solution.
    X = np.dot(np.linalg.pinv(A), Y)
    X[X < 0] = 0

    def _gap_and_grad(current):
        """Duality gap and gradient at the current iterate."""
        g = np.dot(ATA, current) - ATY
        clipped = np.array(g)
        clipped[clipped < 0] = 0
        return np.tensordot(current, clipped) / (n1 * n2 * k), g

    gap, grad = _gap_and_grad(X)
    n_iter = 0
    while gap > tolerance and n_iter < maxiter:
        n_iter += 1
        # Gradient step followed by projection onto the non-negative orthant.
        X = X - grad / step
        X[X < 0] = 0
        gap, grad = _gap_and_grad(X)

    # Reshape back to an n1 x n2 x k image of abundances.
    return X.T.reshape(n1, n2, k)
ac6bb769e9343f49095166a751fa1c40e2e6ed06
3,634,186
def seismic():
    """Benchmark Seismic object."""
    axes = [("x", np.arange(10)), ("y", np.arange(10)), ("z", np.arange(100))]
    volume = segyio.tools.cube("../data/test.segy")
    return Seismic(volume, coords=axes)
556aa9407162951756754be9c24c80986e475cb5
3,634,187
import math
def sterrmean(s, n, N=None):
    """sterrmean(s, n [, N]) -> standard error of the mean.

    Return the standard error of the mean, optionally with a correction for
    finite population. Arguments given are:

    s: the standard deviation of the sample
    n: the size of the sample
    N (optional): the size of the population, or None

    If the sample size n is larger than (approximately) 5% of the
    population, it is necessary to make a finite population correction. To
    do so, give the argument N, which must be larger than or equal to n.

    >>> sterrmean(2, 16)
    0.5
    >>> sterrmean(2, 16, 21)
    0.25

    """
    # Validate inputs before any arithmetic.
    if N is not None and N < n:
        raise StatsError('population size must be at least sample size')
    if n < 0:
        raise StatsError('cannot have negative sample size')
    if s < 0.0:
        raise StatsError('cannot have negative standard deviation')
    if n == 0:
        # Empty sample: nan for an empty population, otherwise infinite.
        return float('nan') if N == 0 else float('inf')
    result = s / math.sqrt(n)
    if N is None:
        return result
    fpc_sq = (N - n) / (N - 1)  # finite population correction, squared
    assert 0 <= fpc_sq <= 1
    return result * math.sqrt(fpc_sq)
30ad7b9b184b1a86b8d9bf03ee515b34aab3b368
3,634,188
def change_device_status(dispatcher, device_name, status):
    """Set the status of a device in Nautobot.

    Chatbot command handler: prompts interactively (via *dispatcher*) for
    the device and/or status when either argument is a menu placeholder,
    validates the new status, persists it, and reports the result back to
    the chat client.

    Returns a CommandStatusChoices value (or a (status, message) tuple on
    validation failure), or False when the command did not run to
    completion and therefore should not be logged.
    """
    # No device selected yet: prompt and bail out.
    if menu_item_check(device_name):
        prompt_for_device(
            "nautobot change-device-status",
            "Change Nautobot Device Status",
            dispatcher,
            offset=menu_offset_value(device_name),
        )
        return False  # command did not run to completion and therefore should not be logged
    try:
        device = Device.objects.get(name=device_name)
    except Device.DoesNotExist:
        dispatcher.send_error(f"I don't know device '{device_name}'")
        prompt_for_device("nautobot change-device-status", "Change Nautobot Device Status", dispatcher)
        return False  # command did not run to completion and therefore should not be logged
    # No status selected yet: offer the full list, defaulting to the
    # device's current status.
    if menu_item_check(status):
        dispatcher.prompt_from_menu(
            f"nautobot change-device-status {device_name}",
            f"Change Nautobot Device Status for {device_name}",
            [(choice[1], choice[0]) for choice in DeviceStatusChoices.CHOICES],
            default=(device.status.name, device.status.slug),
            confirm=True,
            offset=menu_offset_value(status),
        )
        return False  # command did not run to completion and therefore should not be logged
    device.status = Status.objects.get_for_model(Device).get(slug=status)
    try:
        # Model-level validation of the new status value.
        device.clean_fields()
    except ValidationError:
        dispatcher.send_error(f"I'm sorry, but {status} is not a valid device status value.")
        return (CommandStatusChoices.STATUS_FAILED, f'Invalid status value "{status}"')
    device.save()
    # Report success back to the chat client.
    dispatcher.send_blocks(
        [
            *dispatcher.command_response_header(
                "nautobot",
                "change-device-status",
                [("Device name", device_name), ("Status", status)],
                "device status change",
                nautobot_logo(dispatcher),
            ),
            dispatcher.markdown_block(
                f"Nautobot status for {dispatcher.bold(device_name)} successfully changed to {dispatcher.monospace(status)}."
            ),
        ]
    )
    return CommandStatusChoices.STATUS_SUCCEEDED
6f8e7d7637fed71b501aa89791c638d80caa1eab
3,634,189
def text_to_vector(sentences):
    """Encode tokenised sentences as bag-of-words count vectors (one-hot style).

    Example (translated from the original Chinese comments):
        Sentence1  不 知道 你 在 说 什么 。
        Sentence2  我 就 知道 你 不 知道 。
        Vocabulary: 不 就 你 什么 我 说 知道 在 。
        S1 -> [1 0 1 1 0 1 1 1 1]
        S2 -> [1 1 1 0 1 0 2 0 1]

    Each distinct word (with total frequency >= the module-level
    ``min_freq``) gets one slot in the vocabulary, in order of first
    appearance; each sentence vector holds, at that slot, the number of
    times the word occurs in the sentence.

    :param sentences: list of sentences, each a list of word tokens.
    :return: list of count vectors, one per sentence.
    """
    # Flatten all sentences into a single token stream.
    # (Leftover debug prints of `sentences` were removed.)
    all_tokens = []
    for sentence in sentences:
        all_tokens += sentence

    # Count the frequency of every word.
    word_count = dict()
    for word in all_tokens:
        word_count[word] = word_count.get(word, 0) + 1

    # Assign an increasing id to each word in order of first appearance,
    # skipping words rarer than ``min_freq`` (low-frequency filter).
    word_to_id = dict()
    for word in all_tokens:
        if word in word_to_id:
            continue
        if word_count[word] < min_freq:
            continue
        word_to_id[word] = len(word_to_id)

    # Encode each sentence: one slot per vocabulary word.
    onehot_result = []
    for sentence in sentences:
        vector = [0] * len(word_to_id)
        for word in sentence:
            # BUG FIX: words filtered out by min_freq are absent from the
            # vocabulary; the old code did an unconditional lookup and
            # raised KeyError on them.
            idx = word_to_id.get(word)
            if idx is not None:
                vector[idx] += 1
        onehot_result.append(vector)
    return onehot_result
a065b6f99c0083473b76d20c13a3722326d29587
3,634,190
from typing import List
from typing import Tuple
import os
import random
import math
def _list_valid_filenames_in_directory(
    base_directory:str,
    search_class:str,
    white_list_formats:List[str],
    split:float,
    follow_links:bool,
    shuffle_index_directory:str
) -> Tuple[str, List[str]]:
    """File all files in the search directory for the specified class

    if shuffle_index_directory is None:
        then sort the filenames alphabetically
        and save to the list file: <base_directory>/.index/<search_class>.txt
    else:
        then randomly shuffle the files
        and save to the list file: <shuffle_index_directory>/.index/<search_class>.txt
    """
    file_list = []
    base_directory = base_directory.replace('\\', '/')

    if shuffle_index_directory is None:
        index_path = f'{base_directory}/.index/{search_class}.txt'
    else:
        index_path = f'{shuffle_index_directory}/.index/{search_class}.txt'

    # If the index file exists, then read it
    if os.path.exists(index_path):
        with open(index_path, 'r') as f:
            for line in f:
                filename = line.strip()
                # BUG FIX: the existence check previously tested a fixed
                # literal path instead of the indexed file, so stale index
                # entries were never detected.
                filepath = f'{base_directory}/{filename}'
                if not os.path.exists(filepath):
                    get_mltk_logger().warning(f'File {filepath} not found in existing index, re-generating index')
                    file_list = []
                    break
                file_list.append(filename)

    if len(file_list) == 0:
        get_mltk_logger().info(f'Generating index: {index_path} ...')
        # Else find all files for the given class in the search directory
        # NOTE: The dataset directory structure should be:
        # <dataset base dir>/<class1>/
        # <dataset base dir>/<class1>/sample1.jpg
        # <dataset base dir>/<class1>/sample2.jpg
        # <dataset base dir>/<class1>/subfolder1/sample3.jpg
        # <dataset base dir>/<class1>/subfolder2/sample4.jpg
        # <dataset base dir>/<class2>/...
        # <dataset base dir>/<class3>/...
        #
        # This will recursively return all sample files under
        # <dataset base dir>/<class x>
        class_base_dir = f'{base_directory}/{search_class}'
        for root, _, files in os.walk(base_directory, followlinks=follow_links):
            root = root.replace('\\', '/')
            # Only keep files that live under this class's directory.
            if not root.startswith(class_base_dir):
                continue
            for fname in files:
                if not fname.lower().endswith(white_list_formats):
                    continue
                abs_path = os.path.join(root, fname)
                rel_path = os.path.relpath(abs_path, base_directory)
                file_list.append(rel_path.replace('\\', '/'))

        # Randomly shuffle the list if necessary
        if shuffle_index_directory is not None:
            random.shuffle(file_list)
        else:
            # Otherwise sort it alphabetically
            file_list = sorted(file_list)

        # Write the file list file
        os.makedirs(os.path.dirname(index_path), exist_ok=True)
        with open(index_path, 'w') as f:
            for p in file_list:
                f.write(p + '\n')

    # Apply the (start, stop) fractional split, if given.
    if split:
        num_files = len(file_list)
        if split[0] == 0:
            start = 0
            stop = math.ceil(split[1] * num_files)
        else:
            start = math.ceil(split[0] * num_files)
            stop = num_files
        filenames = file_list[start:stop]
    else:
        filenames = file_list

    return search_class, filenames
96cfc428815cc5d0625c1bc66f92784dcf5c5b11
3,634,191
import os
def benchmark_memory(nb_registers, element_width, nb_elements, nb_operations,
                     write_op=False):
    """
    This method generate the P4 program to benchmark memory consumption

    :param nb_registers: the number of registers included in the program
    :type nb_registers: int
    :param element_width: the size of each register element
    :type element_width: int
    :param nb_elements: the number of elements in each register
    :type nb_elements: int
    :param nb_operations: the number of operations to the registers
    :type nb_operations: int
    :param write_op: generate a write-state pcap instead of a read-state one
    :type write_op: bool
    :returns: bool -- True if there is no error

    """
    udp_dport = 0x9091
    out_dir = 'output'
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    fwd_tbl = 'forward_table'
    # Base program: standard headers.
    program = p4_define(14) + ethernet() + ipv4() + tcp()
    header_type_name = 'memtest_t'
    header_name = 'memtest'
    parser_state_name = 'parse_memtest'
    # Custom header: 4-bit op code, 12-bit index, variable-width data.
    field_dec = add_header_field('register_op', 4, 14)
    field_dec += add_header_field('index', 12, 14)
    field_dec += add_header_field('data', element_width, 14)
    program += udp(select_case(udp_dport, parser_state_name))
    program += add_header(header_type_name, field_dec, 14)
    program += add_parser_without_select(header_type_name, header_name,
                                         parser_state_name, 'ingress')
    # metadata = 'mem_metadata'
    # program += add_metadata_instance(header_type_name, metadata)
    field = '%s.data' % header_name
    index = '%s.index' % header_name
    program += nop_action()
    program += add_registers(nb_registers, element_width, nb_elements,
                             nb_operations, field, index)
    # Table dispatching on the register op code (nop / get / put).
    match_field = '%s.register_op' % header_name
    matches = '%s : exact;' % match_field
    actions = 'get_value; put_value; _nop;'
    table_name = 'register_table'
    program += add_table(table_name, matches, actions, 3, 14)
    applies = apply_table(table_name)
    program += forward_table()
    program += control(fwd_tbl, applies)
    # Emit the generated program.
    with open ('%s/main.p4' % out_dir, 'w') as out:
        out.write(program)
    # Emit the CLI commands wiring op codes to actions.
    commands = ''
    commands += cli_commands(fwd_tbl)
    commands += add_rule(table_name, '_nop', 0)
    commands += add_rule(table_name, 'get_value', 1)
    commands += add_rule(table_name, 'put_value', 2)
    with open ('%s/commands.txt' % out_dir, 'w') as out:
        out.write(commands)
    copy_scripts(out_dir)
    # Generate the test traffic for either read or write benchmarking.
    if write_op:
        get_write_state_pcap(udp_dport, out_dir)
    else:
        get_read_state_pcap(udp_dport, out_dir)
    return True
e814328510f890cd5f9bb03421f047da0560fd66
3,634,192
from snntoolbox.utils.utils import binary_sigmoid, binary_tanh, ClampedReLU
def get_custom_activations_dict():
    """
    Import all implemented custom activation functions so they can be used
    when loading a Keras model.
    """
    # Todo: We should be able to load a different activation for each layer.
    #       Need to remove this hack:
    activation_str = 'relu_Q1.4'
    quantized = get_quantized_activation_function_from_string(activation_str)
    custom = {
        'binary_sigmoid': binary_sigmoid,
        'binary_tanh': binary_tanh,
        # Todo: This should work regardless of the specific attributes of the
        #       ClampedReLU class used during training.
        'clamped_relu': ClampedReLU(),
    }
    custom[activation_str] = quantized
    return custom
5ef380613087020815fcf9642caa6809ef8eaeff
3,634,193
import tqdm
def block_solve_agd(
    r_j,
    A_j,
    a_1_j,
    a_2_j,
    m,
    b_j_init,
    t_init=None,
    ls_beta=None,
    max_iters=None,
    rel_tol=1e-6,
    verbose=False,
    zero_thresh=1e-6,
    zero_fill=1e-3,
):
    """Solve the optimization problem for a single block with accelerated gradient descent.

    Nesterov-style AGD with optional backtracking line search (enabled by
    ``ls_beta``). Iterates until the relative change of the block falls
    below ``rel_tol`` (after at least two iterations) or ``max_iters`` is
    reached.

    NOTE(review): the operand types are not visible here; ``.norm(p=2)``,
    ``.abs()`` and ``.fill_()`` suggest torch tensors -- confirm. ``_f_j``
    and ``_grad_j`` (objective and gradient) are defined elsewhere.
    """
    b_j = b_j_init
    b_j_prev = b_j
    k = 1  # iteration number
    t = 1  # initial step length (used if t_init is None)
    pbar_stats = {}  # stats for the progress bar
    pbar = tqdm.tqdm(desc="Solving block with AGD", disable=not verbose, leave=False)
    while True:
        # Compute the v terms (momentum extrapolation point).
        mom = (k - 2) / (k + 1.0)
        v_j = b_j + mom * (b_j - b_j_prev)
        q_v_j = r_j - A_j @ v_j
        v_j_norm = v_j.norm(p=2)
        f_v_j = _f_j(q_v_j, v_j_norm, a_1_j, a_2_j, m)
        grad_v_j = _grad_j(q_v_j, A_j, v_j, v_j_norm, a_1_j, a_2_j, m)
        b_j_prev = b_j
        # Adjust the step size with backtracking line search.
        if t_init is not None:
            t = t_init
        while True:
            b_j = v_j - t * grad_v_j  # gradient descent update
            if ls_beta is None:
                # Don't perform line search.
                break
            # Line search: exit when f_j(b_j) <= f_j(v_j) + grad_v_j'@(b_j -
            # v_j) + (1/2t)||b_j - v_j||^2.
            q_b_j = r_j - A_j @ b_j
            b_j_norm = b_j.norm(p=2)
            f_b_j = _f_j(q_b_j, b_j_norm, a_1_j, a_2_j, m)
            b_v_diff = b_j - v_j
            c2 = grad_v_j @ b_v_diff
            c3 = b_v_diff @ b_v_diff / 2.0
            if t * f_b_j <= t * (f_v_j + c2) + c3:
                break
            t *= ls_beta  # shrink the step and retry
        # Make b_j non-zero if it is 0 (keeps norms well-defined).
        if len((b_j.abs() < zero_thresh).nonzero()) == len(b_j):
            b_j.fill_(zero_fill)
        b_j_norm = b_j.norm(p=2)
        b_diff_norm = (b_j - b_j_prev).norm(p=2)
        pbar_stats["t"] = "{:.2g}".format(t)
        pbar_stats["rel change"] = "{:.2g}".format(b_diff_norm / b_j_norm)
        pbar.set_postfix(pbar_stats)
        pbar.update()
        # Check max iterations exit criterion.
        if max_iters is not None and k == max_iters:
            break
        k += 1
        # Check tolerance exit criterion. Exit when the relative change is less
        # than the tolerance.
        # k > 2 ensures that at least 2 iterations are completed.
        if b_diff_norm <= rel_tol * b_j_norm and k > 2:
            break
    pbar.close()
    return b_j
168be883091592c86f7c2cd38f7d1980abd3f4c5
3,634,194
import os
def get_jars_location():
    """
    Return the location of the JAR files for installed library.
    """
    package_root = os.path.dirname(flexneuart.__file__)
    return os.path.join(package_root, 'resources/jars/')
d4d397c6031079c85954a5a36d699c67e5b2882d
3,634,195
def bin_array_max(arr, bin_size, pad_value=0):
    """
    Given a NumPy array, returns a binned version of the array along the
    last dimension, where each bin contains the maximum value of its
    constituent elements. If the array is not a length that is a multiple
    of the bin size, then the given pad will be used at the end.
    """
    length = arr.shape[-1]
    num_bins = -(-length // bin_size)  # ceiling division
    shortfall = num_bins * bin_size - length
    if shortfall:
        # Pad only the last axis up to a whole number of bins.
        pad_spec = [(0, 0)] * (arr.ndim - 1) + [(0, shortfall)]
        arr = np.pad(arr, pad_spec, constant_values=pad_value)
    binned_shape = arr.shape[:-1] + (num_bins, bin_size)
    return np.reshape(arr, binned_shape).max(axis=-1)
db16540a8d5e4ac91948dab6a0c94b86398635b5
3,634,196
def harmony(img, center, angle=None):
    """Harmonize the pattern by exploiting symmetry

    After an optional rotation, the pattern is padded with zero rows or
    columns so that its shape stays odd before harmonization.

    :param img: pattern
    :param center: center coordinates in pattern
    :param angle: rotate image by angle, in degrees in counter-clockwise direction
    :return: harmonized pattern
    """
    extended = extend_image(img, center)
    if angle is not None:
        pivot = midpnt(extended)
        extended['map'] = sitransform.rotate(
            extended['map'], angle, center=pivot,
            cval=0, resize=1, preserve_range=1)
        # Rotation with resize may yield even dimensions; prepend a zero
        # row/column so both dimensions are odd again.
        rows, cols = extended['map'].shape
        if rows % 2 == 0:
            extended['map'] = np.vstack((np.zeros((1, cols)), extended['map']))
        if cols % 2 == 0:
            extended['map'] = np.hstack(
                (np.zeros((extended['map'].shape[0], 1)), extended['map']))
    harmonized = harmonize_image(extended)
    harmonized['beam_position'] = midpnt(harmonized)
    return harmonized
e2a2ddab67d28d34210aaf8926595670a0e046d8
3,634,197
import os
import errno
def getClimateDataForStation(config, outputDir, outFilename, stationID, overwrite=True):
    """Fetch climate timeseries data for a GHCN daily station

        @param config A Python ConfigParser (not currently used)
        @param outputDir String representing the absolute/relative path of the directory into which output DEM should be written
        @param outFilename String representing the name of the DEM file to be written
        @param stationID String representing unique identifier of station
        @param overwrite Boolean value indicating whether or not the file indicated by filename should be overwritten.
            If False and filename exists, IOError exception will be thrown with errno.EEXIST

        @raise IOError if outputDir is not a writable directory
        @raise IOError if outFilename already exists and overwrite is False (see above)

        @return True if timeseries data were fetched and False if not
    """
    dataFetched = False
    # Validate the output directory before touching the network.
    if not os.path.isdir(outputDir):
        raise IOError(errno.ENOTDIR, "Output directory %s is not a directory" % (outputDir,))
    if not os.access(outputDir, os.W_OK):
        raise IOError(errno.EACCES, "Not allowed to write to output directory %s" % (outputDir,))
    outputDir = os.path.abspath(outputDir)
    outFilepath = os.path.join(outputDir, outFilename)
    if os.path.exists(outFilepath):
        if overwrite:
            os.unlink(outFilepath)
        else:
            raise IOError(errno.EEXIST, "File %s already exists" % outFilepath)
    # NOTE(review): `httplib` and the `buffering` kwarg are Python 2 APIs;
    # URL_PROTO, HOST and _BUFF_LEN are module-level constants defined
    # elsewhere -- confirm before porting to Python 3 (http.client).
    url = URL_PROTO.format(station_id=stationID)
    conn = httplib.HTTPConnection(HOST)
    conn.request('GET', url)
    res = conn.getresponse(buffering=True)
    # NOTE(review): `assert` is stripped under -O; consider raising instead.
    assert(200 == res.status)
    # Stream the response body to disk in _BUFF_LEN-sized chunks.
    data = res.read(_BUFF_LEN)
    if data:
        dataOut = open(outFilepath, 'wb')
        dataFetched = True
        while data:
            dataOut.write(data)
            data = res.read(_BUFF_LEN)
        dataOut.close()
    return dataFetched
31b8006bce6eb0abaf5f4a4af285130dadd1b291
3,634,198
def is_following(request, author) -> bool:
    """Checks if this author is in the user's subscriptions"""
    # .exists() issues a cheap EXISTS query instead of evaluating the full
    # queryset just to test its truthiness, and returns a bool directly.
    return Follow.objects.filter(user=request.user, author=author).exists()
32a81b0d6482d5fd99d6d1b55e518b59238d87bd
3,634,199