content : string (lengths 35 – 762k)
sha1 : string (length 40)
id : int64 (range 0 – 3.66M)
import torch

def spgr(pd, r1, r2s=None, mt=None, transmit=None, receive=None,
         gfactor=None, te=0, tr=25e-3, fa=20, sigma=None, device=None):
    """Simulate data generated by a Spoiled Gradient-Echo (SPGR/FLASH) sequence.

    Tissue parameters
    -----------------
    pd : tensor_like
        Proton density
    r1 : tensor_like
        Longitudinal relaxation rate, in 1/sec
    r2s : tensor_like, optional
        Transverse relaxation rate, in 1/sec. Mandatory if any `te > 0`.
    mt : tensor_like, optional
        MTsat. Mandatory if any `mtpulse == True`.

    Fields
    ------
    transmit : tensor_like, optional
        Transmit B1 field
    receive : tensor_like, optional
        Receive B1 field
    gfactor : tensor_like, optional
        G-factor map. If provided and `sigma` is not `None`, the g-factor
        map is used to sample non-stationary noise.

    Sequence parameters
    -------------------
    te : float, default=0
        Echo time, in sec
    tr : float, default=25e-3
        Repetition time, in sec
    fa : float, default=20
        Flip angle, in deg

    Noise
    -----
    sigma : float, optional
        Standard-deviation of the sampled Rician noise (no sampling if `None`)

    Returns
    -------
    sim : tensor
        Simulated SPGR image
    """
    pd, r1, r2s, mt, transmit, receive, gfactor \
        = utils.to_max_backend(pd, r1, r2s, mt, transmit, receive, gfactor)
    pd, r1, r2s, mt, transmit, receive, gfactor \
        = utils.to(pd, r1, r2s, mt, transmit, receive, gfactor, device=device)
    backend = utils.backend(pd)

    # Flip angle in radians, modulated by the transmit field if provided
    fa = fa * constants.pi / 180.
    if transmit is not None:
        fa = fa * transmit
    del transmit
    fa = torch.as_tensor(fa, **backend)

    # Receive sensitivity scales the effective proton density
    if receive is not None:
        pd = pd * receive
    del receive

    # Steady-state (Ernst) signal equation for a spoiled gradient echo
    pd = pd * fa.sin()
    fa = fa.cos()
    e1, r1 = r1.mul(tr).neg_().exp(), None
    signal = pd * (1 - e1)
    if mt is not None:
        omt = mt.neg().add_(1)
        signal *= omt
        signal /= (1 - fa * omt * e1)
        del omt
    else:
        signal /= (1 - fa * e1)

    # T2* decay at the echo time
    if r2s is not None:
        e2, r2s = r2s.mul(te).neg_().exp(), None
        signal *= e2
        del e2

    # noise
    signal = add_noise(signal, std=sigma)
    return signal
78cfa5f7b264fd85603a5e66ec77974558f05d21
3,634,900
import argparse

def setup_options():
    """
    Add logging options to the command line, but suppress them so that
    they don't clutter up the help messages.
    """
    my_argparser = argparse.ArgumentParser(add_help=False)
    my_argparser.add_argument("--LogLevel_Model", default="",
                              help=argparse.SUPPRESS)
    return my_argparser
dfbd38e14720f1bbe8806b1d9dab47694c7a0610
3,634,901
def create_subject(request):
    """ This is a form """
    template = loader.get_template('form.html')
    form = SubjectForm()
    return HttpResponse(template.render(
        {'form': form,
         'redirect': '/subject',
         'submit': 'Create Subject and add to class'},
        request))
8bea2cb4e6ba26a030a9b3008377c126af551491
3,634,902
def text2number(text):
    """
    Convert Arabic text into a number, for example convert
    تسعة و عشرون => 29.

    Example:
        >>> text2number(u"خمسمئة و ثلاث و عشرون")
        523

    @param text: input text
    @return: number extracted from text
    @rtype: integer
    """
    number_names = WORDS_NUMBERS.copy()
    # the result total is 0
    total = 0
    # the partial total for the current three-digit group
    partial = 0
    # Split the text into words and drop the conjunction 'و'.
    # (Removing items from a list while iterating over it skips elements,
    # so filter into a new list instead.)
    words = [word for word in text.split() if word != 'و']
    for word in words:
        if word in number_names:
            actualnumber = number_names[word]
            if actualnumber % 1000 == 0:
                # the case of 1000 or 1 million
                if partial == 0:
                    partial = 1
                total += partial * actualnumber
                # re-initiate the partial total
                partial = 0
            else:
                partial += number_names[word]
    # add the final partial to the total
    total += partial
    return total
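A minimal usage sketch; the WORDS_NUMBERS subset below is a hypothetical stand-in for the library's real lookup table:

WORDS_NUMBERS = {u"خمسمئة": 500, u"ثلاث": 3, u"عشرون": 20}  # illustrative subset, not the real table
print(text2number(u"خمسمئة و ثلاث و عشرون"))  # -> 523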
9a0b28810549efab956d720695534e82aecf2c4f
3,634,903
def parse_prototype(prototype, additional_definitions={}):
    """Given a single annotated C function prototype (see module docstring
    for syntax), and an optional dict of non-base type definitions, return
    the following information that ctypes needs to load and annotate a
    function:

    function_name: str
    return_type: str name of ctypes type
    arg_types: tuple of str names of ctypes types
    param_flags: tuple same length as arg_types with (arg_intent, arg_name)
        values (see ctypes documentation for 'paramflags')
    errcheck: errcheck function name (if provided via annotation, or if
        necessary to pack up output args and return values)
    errcheck_def: None, or the definition of an errcheck function to pack
        output args and return values
    in_args: list of (arg_name, arg_type) tuples, where arg_name is a string
        and arg_type is a str name of a ctypes type.
    out_args: as in_args, but for output arguments.
    """
    results = function_prototype.parseString(prototype)
    function_decl = results[0]
    args = results[1]

    # deal with the return type and error check function if present
    return_type, function_name = function_decl[0], function_decl[-1]
    py_return_type = resolve_type(return_type, additional_definitions)
    errcheck = None
    if len(function_decl) == 3:
        errcheck = function_decl[1]

    # process the arguments
    arg_types, param_flags = [], []
    in_args, out_args = [], []
    out_arg_indices = []
    for i, arg in enumerate(args):
        arg_type, arg_name = arg[:2]
        py_arg_type = resolve_type(arg_type, additional_definitions)
        arg_types.append(py_arg_type)
        if len(arg) == 3 and arg[2] == 'output':
            param_intent = 2  # ctypes flag for 'output' parameter
            out_arg_indices.append(i)
            # strip off a level of pointers
            output_arg_type = resolve_type([arg_type[0]] + arg_type[2:],
                                           additional_definitions)
            out_args.append((arg_name, output_arg_type))
        else:
            param_intent = 1  # ctypes flag for 'input' parameter
            in_args.append((arg_name, py_arg_type))
        param_flags.append((param_intent, arg_name))

    # deal with possibility of multiple outputs
    if out_args and py_return_type != 'None' and not errcheck:
        # if we have both pass-by-reference output arguments and a non-void
        # return type, AND if there's no custom error check function, we need
        # to make our own to bundle all the output arguments
        errcheck = '_errcheck_{}'.format(function_name)
        out_arg_unpack = ', '.join(['arguments[{}].value'.format(i)
                                    for i in out_arg_indices])
        errcheck_def = """def {}(result, func, arguments):
    return (result, {})""".format(errcheck, out_arg_unpack)
        out_args = [('c_return_value', py_return_type)] + out_args
    else:
        errcheck_def = None

    return (function_name, py_return_type, tuple(arg_types),
            tuple(param_flags), errcheck, errcheck_def, in_args, out_args)
93d79407bf0b4175f51dfef0fb39fa288eaa1512
3,634,904
def create_resource():
    """QonoS resource factory method."""
    return wsgi.Resource(SchedulesController())
caca6c4a6d8b3e67db79eee1937ae0479de1ab05
3,634,905
def compute_heatmap(model, saved_model, image, pred_index, last_conv_layer):
    """
    Construct our gradient model by supplying (1) the inputs to our
    pre-trained model, (2) the output of the (presumably) final 4D layer in
    the network, and (3) the output of the softmax activations from the model.
    """
    gradModel = Model(
        inputs=[model.inputs],
        outputs=[model.get_layer(last_conv_layer).output, model.output]
    )

    # record operations for automatic differentiation
    with tf.GradientTape() as tape:
        # cast the image tensor to a float-32 data type, pass the image
        # through the gradient model, and grab the loss associated with the
        # specific class index
        inputs = tf.cast(image, tf.float32)
        last_conv_layer_output, preds = gradModel(inputs)
        class_channel = preds[:, pred_index]

    # use automatic differentiation to compute the gradients
    grads = tape.gradient(class_channel, last_conv_layer_output)

    # This is a vector where each entry is the mean intensity of the
    # gradient over a specific feature map channel
    pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))

    # We multiply each channel in the feature map array by "how important
    # this channel is" with regard to the top predicted class, then sum all
    # the channels to obtain the heatmap class activation
    last_conv_layer_output = last_conv_layer_output[0]
    heatmap = last_conv_layer_output @ pooled_grads[..., tf.newaxis]
    heatmap = tf.squeeze(heatmap)

    # For visualization purposes, we also normalize the heatmap between 0 & 1
    heatmap = tf.maximum(heatmap, 0) / tf.math.reduce_max(heatmap)
    heatmap = heatmap.numpy()
    return heatmap
c412a1ddb7300434334b2276df257cda51072f96
3,634,906
def get_all_orders():
    """Returns a list of ALL the orders for rooms and AGs, with reader and
    approver specified for each."""
    # Get a list of all the orders of access groups of all responsible approvers.
    ag_relation = ApprovesAgRequest.query \
        .join(AccessGroupRequest, AccessGroupRequest.id == ApprovesAgRequest.ag_request_id) \
        .join(Reader, Reader.id == AccessGroupRequest.reader_id) \
        .join(AccessGroup, AccessGroup.id == AccessGroupRequest.ag_id).all()
    ag_orders = []
    for ag in ag_relation:
        # Gets all the rooms in the access group
        room_relation = Room.query \
            .join(CardReader, CardReader.room_b_id == Room.id) \
            .join(gives_access_to, gives_access_to.c.cr_id == CardReader.id) \
            .filter_by(ag_id=ag.ag_request.ag.id)
        json = {
            "type": "AG",
            "rooms": [room.text_id for room in room_relation],
            "reader": {
                "email": ag.ag_request.reader.email,
                "name": ag.ag_request.reader.name,
                "surname": ag.ag_request.reader.surname
            },
            "approver": {
                "email": ag.ag_request.request_approver.approver.email,
                "name": ag.ag_request.request_approver.approver.name,
                "surname": ag.ag_request.request_approver.approver.surname
            },
            "access_name": ag.ag_request.ag.name,
            "request_id": ag.ag_request.id,
            "ag_id": ag.ag_request.ag.id,
            "justification": ag.ag_request.justification,
            "requested_datetime": ag.ag_request.datetime_requested.strftime('%Y-%m-%d')
        }
        ag_orders.append(json)

    # Get a list of all the orders of rooms this approver is responsible for.
    room_relation = ApprovesRoomRequest.query \
        .join(RoomRequest, RoomRequest.id == ApprovesRoomRequest.room_request_id) \
        .join(Reader, Reader.id == RoomRequest.reader_id) \
        .join(Room, Room.id == RoomRequest.room_id).all()
    room_orders = [
        {
            "type": "Room",
            "reader": {
                "email": x.room_request.reader.email,
                "name": x.room_request.reader.name,
                "surname": x.room_request.reader.surname,
            },
            "approver": {
                "email": x.room_request.request_approver.approver.email,
                "name": x.room_request.request_approver.approver.name,
                "surname": x.room_request.request_approver.approver.surname
            },
            "access_name": x.room_request.room.name,
            "request_id": x.room_request.id,
            "room_id": x.room_request.room.text_id,
            "justification": x.room_request.justification,
            "requested_datetime": x.room_request.datetime_requested.strftime('%Y-%m-%d')
        }
        for x in room_relation]
    return ok({"orders": room_orders + ag_orders})
798c7347e0db7f3f7ba2f66364cbc55be3a641ed
3,634,907
def isAnomaly(lowBand, highBand, value):
    """Condition for anomaly on a certain row"""
    return value < lowBand or value > highBand
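A quick sanity check of the band logic (values are illustrative):

assert isAnomaly(10, 20, 25) is True   # above the high band
assert isAnomaly(10, 20, 5) is True    # below the low band
assert isAnomaly(10, 20, 15) is False  # inside the band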
552513115ec9c98c40cd6487b6a33f497c562c87
3,634,908
import numpy as np

def initWeights(positive_count, negative_count, trainingSamples):
    """
    Initialise weights.

    :param positive_count: Number of positive samples.
    :param negative_count: Number of negative samples.
    :param trainingSamples: Training samples.
    :return: Weights
    """
    positiveSampleInitialWeight = 1 / (2 * positive_count)
    negativeSampleInitialWeight = 1 / (2 * negative_count)
    weights = []
    for sample in trainingSamples:
        if sample[1] == 1:
            weights.append(positiveSampleInitialWeight)
        else:
            weights.append(negativeSampleInitialWeight)
    return np.array(weights)
640fb32b62fc461b9c638d3a647c53db69424fd9
3,634,909
def get_distance_calculator(name, *args, **kwargs):
    """Returns a pairwise distance calculator.

    name is converted to lower case."""
    name = name.lower()
    if "moltype" in kwargs and kwargs.get("moltype") is None:
        kwargs.pop("moltype")

    if name not in _calculators:
        raise ValueError('Unknown pairwise distance calculator "%s"' % name)

    calc = _calculators[name]
    return calc(*args, **kwargs)
c8d1d0fca18e7f71742fb8a45829ac4e8f84077b
3,634,910
import os

def git_ls_dirs(root=None):
    """
    List all folders tracked by git.
    """
    dirs = set()
    for fn in git_ls_files(root):
        dirs.add(os.path.dirname(fn))
    return list(dirs)
d746543c028e00ff05154d48d198c3f6117ccd36
3,634,911
import numpy as np

def compute_escape_peak(spectrum, ratio, params, escape_e=1.73998):
    """
    Calculate the escape peak for a given detector.

    Parameters
    ----------
    spectrum : array
        original, uncorrected spectrum
    ratio : float
        ratio of shadow to full spectrum
    params : dict
        fitting parameters
    escape_e : float
        Units: keV
        By default, 1.73998 (Ka1 line of Si)

    Returns
    -------
    array:
        x after shift, and adjusted y
    """
    x = np.arange(len(spectrum))
    x = (params['e_offset']['value'] +
         params['e_linear']['value'] * x +
         params['e_quadratic']['value'] * x**2)
    result = x - escape_e, spectrum * ratio
    return result
c6948547a2bb34f32cb423b5ab0cd8d9f9f695e4
3,634,912
def create_transaction(wallet, purchase):
    """
    Creates a transaction of crypto from the PSP wallet to a user who has
    paid in fiat.

    Note:
        Ideally this would be done for more than 1 tx at a time: keep a
        basket of txs ready to go, then send them all out in one TX with
        all the outputs. Then the system won't have to wait for the TX to
        clear to get its change back before doing the next batch.

    Args:
        wallet (neo.Implementations.Wallets.peewee.UserWallet.UserWallet):
            The user wallet of the PSP
        purchase (blockchain.models.Purchase): A purchase object
            representing a user wishing to purchase crypto

    Returns:
        neo.Core.TX.Transaction.Transaction: A transaction from the PSP to
        the user who has purchased crypto
    """
    try:
        asset_id = Blockchain.Default().SystemCoin().Hash
        amount = Fixed8.FromDecimal(purchase.amount)
        to_script_hash = Helper.AddrStrToScriptHash(purchase.neo_address)
        output = TransactionOutput(
            AssetId=asset_id,
            Value=amount,
            script_hash=to_script_hash,
        )
        tx = ContractTransaction()
        tx.outputs = [output]
        tx = wallet.MakeTransaction(tx)
        tx.Attributes = [
            TransactionAttribute(
                TransactionAttributeUsage.Remark1,
                b'Sent by Payment Service Provider'),
            TransactionAttribute(
                TransactionAttributeUsage.Remark2,
                ('Purchase Price USD %0.2f' % (purchase.total,)).encode('utf-8'))
        ]
        context = ContractParametersContext(tx)
        wallet.Sign(context)
        if context.Completed:
            tx.scripts = context.GetScripts()
            relayed = NodeLeader.Instance().Relay(tx)
            if relayed:
                wallet.SaveTransaction(tx)
                return tx
        return None
    except Exception as e:
        print("Could not create transaction %s " % e)
d6e31bb698782128e58ccf73e8b62e34f8937377
3,634,913
import numpy

def _create_globio_lulc_op(
        lulc_array, potential_vegetation_array, pasture_array, ffqi,
        globio_nodata, pasture_threshold, primary_threshold):
    """Construct GLOBIO lulc given relevant biophysical parameters."""
    result = numpy.empty_like(lulc_array, dtype=numpy.int16)
    result[:] = globio_nodata
    valid_mask = lulc_array != globio_nodata
    valid_result = result[valid_mask]

    # Split shrublands and grasslands into primary vegetation,
    # livestock grazing areas, and man-made pastures. Landcover
    # 131 represents grassland/shrubland in the GLOBIO classification.
    grass_shrub_mask = lulc_array[valid_mask] == 131
    grass_shrub_result = valid_result[grass_shrub_mask]
    # fill with livestock grazing, then re-assign to pasture, primary veg.
    grass_shrub_result[:] = 5

    # man-made pasture
    valid_pasture_mask = (
        potential_vegetation_array[valid_mask][grass_shrub_mask] <= 8)
    grass_shrub_result[valid_pasture_mask] = 6

    # primary vegetation
    valid_primary_veg_mask = ~valid_pasture_mask & (
        pasture_array[valid_mask][grass_shrub_mask] < pasture_threshold)
    grass_shrub_result[valid_primary_veg_mask] = 1
    valid_result[grass_shrub_mask] = grass_shrub_result

    # Outside of the grass/shrub categories, carry over the original codes:
    valid_result[~grass_shrub_mask] = lulc_array[valid_mask][~grass_shrub_mask]

    # Step 1.4a: Split forests into primary and secondary
    # 1 is primary forest
    # 3 is secondary forest
    valid_modis_forest_mask = lulc_array[valid_mask] == 130
    forest_result = valid_result[valid_modis_forest_mask]
    forest_result[:] = 1
    forest_result[
        ffqi[valid_mask][valid_modis_forest_mask] < primary_threshold] = 3
    valid_result[valid_modis_forest_mask] = forest_result

    # Classify all ag classes as a new LULC value "12" per our custom design
    # of agriculture. Landcover 132 represents agriculture landcover types
    # in the GLOBIO classification scheme.
    valid_ag_mask = lulc_array[valid_mask] == 132
    valid_result[valid_ag_mask] = 12

    result[valid_mask] = valid_result
    return result
1a6ca959e8da30767a264bc529f16ab6f2626851
3,634,914
import random

def temperature_dist():
    """Normally distributed random temperature around RT_MEAN with
    standard deviation RT_SIGMA"""
    return random.normalvariate(RT_MEAN, RT_SIGMA)
971b2296878780e9af568784a454d08dfead0e2c
3,634,915
def pair_keys_to_items(items, key):
    """
    Convert the list of key:value dicts (nics or disks) into a dict.

    The key for the new dict is one value of the current dict identified by
    the key parameter. If it does not exist, then the key is the order
    number in the list.
    """
    new_items = {}
    for i, item in enumerate(items):
        new_items[item.get(key, i)] = item
    return new_items
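A short usage sketch with made-up NIC dicts; note how the second entry, which lacks the key, falls back to its list index:

nics = [{'name': 'eth0', 'ip': '10.0.0.1'}, {'ip': '10.0.0.2'}]
pair_keys_to_items(nics, 'name')
# -> {'eth0': {'name': 'eth0', 'ip': '10.0.0.1'}, 1: {'ip': '10.0.0.2'}}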
92c66bfbb298e767b3fedbfcfd48ad87ac1162ef
3,634,916
import os

def load_results(model_no=0):
    """ Loads saved results if they exist """
    losses_path = "./data/test_losses_per_epoch_%d.pkl" % model_no
    accuracy_path = "./data/test_accuracy_per_epoch_%d.pkl" % model_no
    train_accuracy_path = "./data/train_accuracy_per_epoch_%d.pkl" % model_no
    if os.path.isfile(losses_path) and os.path.isfile(accuracy_path) \
            and os.path.isfile(train_accuracy_path):
        losses_per_epoch = load_pickle("test_losses_per_epoch_%d.pkl" % model_no)
        accuracy_per_epoch = load_pickle("test_accuracy_per_epoch_%d.pkl" % model_no)
        train_accuracy_per_epoch = load_pickle("train_accuracy_per_epoch_%d.pkl" % model_no)
        logger.info("Loaded results buffer")
    else:
        losses_per_epoch, train_accuracy_per_epoch, accuracy_per_epoch = [], [], []
    return losses_per_epoch, train_accuracy_per_epoch, accuracy_per_epoch
79406c231d548667afb24dee2f48809f83ea1a5a
3,634,917
def rotateStructure(structure):
    """ Rotate a structure randomly """
    angle_rad = _np.random.rand(1) * 2 * _np.pi
    newstructure = _np.array(
        [
            (structure[0, :]) * _np.cos(angle_rad)
            - (structure[1, :]) * _np.sin(angle_rad),
            (structure[0, :]) * _np.sin(angle_rad)
            + (structure[1, :]) * _np.cos(angle_rad),
            structure[2, :],
            structure[3, :],
        ]
    )
    return newstructure
9df411e82caa5827499ae5648a1ec7d24e27cff5
3,634,918
async def iamalive(name, **params):
    """ Accept service promotions """
    await state.alive_service(name)
    return OK
82e0e8243dd8eadfc27cab484157c2d1c6db63b1
3,634,919
def add_wind_rotation_info(res: str, ds: xr.Dataset) -> xr.Dataset:
    """
    Add wind rotation information to the dataset.

    Args:
        res: grid resolution, formatted as f'c{number of cells in tile}'
    """
    rotation = _load_wind_rotation_matrix(res).drop_vars("tile", errors="ignore")
    common_coords = {"x": ds["x"].values, "y": ds["y"].values}
    rotation = rotation.assign_coords(common_coords)
    return ds.merge(rotation, compat="override")
242293904998367fe75b112adc1298ead39152a7
3,634,920
def get_human_size(size):
    """Return a string describing the size in bytes"""
    if size < 1024:
        return '{} B'.format(size)
    if size < 1024 * 1024:
        return '{:.2f} KB'.format(float(size) / 1024)
    if size < 1024 * 1024 * 1024:
        return '{:.2f} MB'.format(float(size) / (1024 * 1024))
    if size < 1024 * 1024 * 1024 * 1024:
        return '{:.2f} GB'.format(float(size) / (1024 * 1024 * 1024))
    return '{:.2f} TB'.format(float(size) / (1024 * 1024 * 1024 * 1024))
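For example, the thresholds above produce these exact strings:

get_human_size(512)          # -> '512 B'
get_human_size(2048)         # -> '2.00 KB'
get_human_size(5 * 1024**2)  # -> '5.00 MB'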
48cee8ca55717d6fb48c5c1dc06becff71c58f0e
3,634,921
def do_authentication(environ, start_response, authn_context, key,
                      redirect_uri, headers=None):
    """
    Display the login form
    """
    logger.debug("Do authentication")
    auth_info = AUTHN_BROKER.pick(authn_context)

    if len(auth_info):
        method, reference = auth_info[0]
        logger.debug("Authn chosen: %s (ref=%s)", method, reference)
        return method(environ, start_response, reference, key, redirect_uri,
                      headers)
    else:
        resp = Unauthorized("No usable authentication method")
        return resp(environ, start_response)
ee3980e67d683b2038208780fc8920c4978e8c7e
3,634,922
def prepare_m2m_data(model, ctx_dict, fields, parent_ids):
    """
    Receives the arguments and returns a dictionary with the data needed
    to build the list.
    """
    name = ctx_dict.get('name')
    record = ctx_dict.get('record')
    edit_ok = ctx_dict.get('edit_ok')
    popup = False
    if ctx_dict.get('window_status') == 'popup':
        popup = True
    title = ctx_dict.get('title')
    rights = ctx_dict.get('rights')
    columns = []

    options = model.get_options()
    data = model.get()

    disabled = ''
    if edit_ok == False:
        disabled = 'disabled'

    numeric_field_types = ['integer_field', 'float_field', 'decimal_field',
                           'percent_field', 'currency_field', 'function_field']
    footer_vals_ordered = []
    if fields:
        # `fields` is used when only some columns of this many2many should be
        # shown; check later whether this is still needed for the list.
        for f in fields:
            for field in get_model_fields(model):
                if f == field[0]:
                    if field[1].onlist == True:
                        footer_vals_ordered.append(field[0])
                        if field[1].__class__.__name__ in numeric_field_types:
                            columns.append((field[0], field[1].name, 'right'))
                        else:
                            columns.append((field[0], field[1].name, 'left'))
    else:
        for field in get_model_fields(model):
            if field[1].onlist == True:
                footer_vals_ordered.append(field[0])
                if field[1].__class__.__name__ in numeric_field_types:
                    columns.append((field[0], field[1].name, 'right'))
                else:
                    columns.append((field[0], field[1].name, 'left'))

    footer_vals = {}
    count_of_parent_ids = {}
    for parent_id in set(parent_ids):
        count_of_parent_ids[parent_id] = parent_ids.count(parent_id)

    line_color = False
    rows = []
    for d in data:
        if d['id'] in count_of_parent_ids:
            count_parent = count_of_parent_ids[d['id']]
        else:
            count_parent = 0
        # count_parent is a trick so that an option that exists several times
        # appears only once; this should be improved later.
        for count in range(0, count_parent):
            text_color = 'black'
            text_weight = 'normal'
            if hasattr(model, '__record_colors__'):
                for rc in model.__record_colors__:
                    for color in rc[1]:
                        if d[rc[0]] == color:
                            if rc[1][color] not in ['bold']:
                                text_color = rc[1][color]
                            else:
                                text_weight = rc[1][color]
            fields = {}
            for field in get_model_fields(model):
                if hasattr(field[1], 'onlist') and field[1].onlist == True:
                    field_res = get_field_value(d, field, model)
                    field_value = field_res['field_value']
                    if isinstance(field_value, tuple):
                        if field[1].__class__.__name__ == 'parent_field':
                            field_value = '{a},{b}'.format(a=field_value[0],
                                                           b=field_value[1])
                        else:
                            field_value = field_value[1]
                    if hasattr(field[1], 'sum') and field[1].sum == True \
                            and field_value not in ['None', None, '']:
                        if field[0] in footer_vals:
                            footer_vals[field[0]] += float(field_value)
                        else:
                            footer_vals[field[0]] = float(field_value)
                    else:
                        footer_vals[field[0]] = ''
                    field_name = field[0]
                    percent = ''
                    if field[1].__class__.__name__ == 'percent_field':
                        percent = '%'
                    if field[1].__class__.__name__ in numeric_field_types:
                        text_align = 'right'
                    else:
                        text_align = 'left'
                    fields[field[0]] = (field_name, field_value, percent, text_align)
            row = [fields, (text_color, text_weight, d['id'])]
            rows.append(row)

    result = {
        'name': model.__name__,
        'field_name': title,
        'columns': columns,
        'rows': rows,
        'edit_ok': edit_ok,
        'footer_vals': footer_vals,
        'options': options
    }
    return result
87ec8e992e63ecf53a3e0c878d1254d70c957d11
3,634,923
def empty_when_none(_string=None):
    """If _string is None, return an empty string, otherwise return the
    string representation.
    """
    if _string is None:
        return ""
    else:
        return str(_string)
402186ee7b4ba9c3968f81bee23134067d0f260e
3,634,924
def visualizeFrame(x, y, z, translation, size=0.1):
    """
    Input:
        x - numpy array (x, y, z) or column vector
        y - numpy array (x, y, z) or column vector
        z - numpy array (x, y, z) or column vector
        translation - numpy array (x, y, z) or column vector,
            the center point of the frame
        size - float, length of lines in meters
    Output:
        lines - open3d.geometry.LineSet()
    """
    translation = translation.flatten()
    x = (x.flatten() * size) + translation
    y = (y.flatten() * size) + translation
    z = (z.flatten() * size) + translation

    points = [translation, x, y, z]
    lines_to_viz = [[0, 1], [0, 2], [0, 3]]
    # axes colored red (x), green (y), blue (z)
    colors = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]

    line_set = o3d.geometry.LineSet()
    line_set.points = o3d.utility.Vector3dVector(points)
    line_set.lines = o3d.utility.Vector2iVector(lines_to_viz)
    line_set.colors = o3d.utility.Vector3dVector(colors)
    return line_set
2ad8aaf1e16488d4b87d155c5115185a42f68fee
3,634,925
def baseline_dwt(
    array,
    max_iter,
    level=None,
    wavelet="sym6",
    background_regions=None,
    mask=None,
    mode="constant",
    axis=-1,
):
    """
    Iterative method of baseline determination, based on the discrete
    wavelet transform.

    Parameters
    ----------
    array : `~numpy.ndarray`
        Data with background.
    max_iter : int
        Number of iterations to perform.
    level : int or None, optional
        Decomposition level. A higher `level` will result in a coarser
        approximation of the input signal (read: a lower frequency
        baseline). If None (default), the maximum level possible is used.
    wavelet : PyWavelet.Wavelet object or str, optional
        Wavelet with which to perform the algorithm. See PyWavelet
        documentation for available values. Default is 'sym6'.
    background_regions : iterable or None, optional
        Indices of the array values that are known to be purely background.
        Depending on the dimensions of array, the format is different:

        * 1D signal: `background_regions` is a list of ints (indices) or
          slices, e.g. ``[0, 7, slice(534, 1000)]``.
        * 2D signal: `background_regions` is a list of tuples of ints
          (indices) or tuples of slices, e.g.
          ``[(14, 19), (slice(59, 82), slice(81,23))]``.

        Default is empty list.
    mask : `~numpy.ndarray`, dtype bool, optional
        Mask array that evaluates to True for pixels that are invalid.
        Useful to determine which pixels are masked by a beam block.
    mode : str, optional
        Signal extension mode, see pywt.Modes.
    axis : int or tuple, optional
        Axis over which to compute the wavelet transform. Can also be a
        2-tuple of ints for a 2D baseline.

    Returns
    -------
    baseline : `~numpy.ndarray`, shape (M,N)
        Baseline of the input array.

    References
    ----------
    .. [#] Galloway et al. 'An Iterative Algorithm for Background Removal in
       Spectroscopy by Wavelet Transforms', Applied Spectroscopy
       pp. 1370 - 1376, September 2009.
    """
    if isinstance(axis, int):
        axis = (axis,)

    approx_rec_func = {1: _dwt_approx_rec, 2: _dwt_approx_rec2}

    return _iterative_baseline(
        array,
        max_iter=max_iter,
        background_regions=background_regions,
        mask=mask,
        axes=axis,
        approx_rec_func=approx_rec_func[len(axis)],
        func_kwargs={"level": level, "wavelet": wavelet,
                     "axis": axis, "mode": mode},
    )
41edf5cc884592ee8c480881702fd02206cd55bd
3,634,926
def get_ego_polygon(ego_state: StateSE2, vehicle: VehicleParameters) -> Polygon:
    """
    Return the Shapely polygon corresponding to Ego.

    :param ego_state: x, y (center of rear axle) and heading
    :param vehicle: Parameters of the vehicle
    :return: Shapely polygon for ego
    """
    ego_rectangle = construct_ego_rectangle(ego_state, vehicle)
    # Convert points to a list of [x, y]
    xy_points = [[point.x, point.y] for point in ego_rectangle]
    return Polygon(xy_points)
eb9d3cee0a6c27fe76bbc5525c06c9e094584850
3,634,927
import hashlib

def file_hashes(f, bufsize=16000000):
    """
    Computes md5, sha1, and sha256 from a file object. Intended for large
    files. Returns a 3-tuple of hex strings.
    """
    md5 = hashlib.md5()
    sha1 = hashlib.sha1()
    sha256 = hashlib.sha256()
    while True:
        buf = f.read(bufsize)
        if len(buf) == 0:
            break
        md5.update(buf)
        sha1.update(buf)
        sha256.update(buf)
    return (md5.hexdigest(), sha1.hexdigest(), sha256.hexdigest())
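A self-contained usage sketch over an in-memory file object:

import io
md5_hex, sha1_hex, sha256_hex = file_hashes(io.BytesIO(b"hello"))
print(sha1_hex)  # -> 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'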
4e23a0d99cda07325ba3a14675bfb515c12d2950
3,634,928
def paginate_update(update):
    """ Attempts to get next and previous on updates """
    time = update.pub_time
    event = update.event
    try:
        next = Update.objects.filter(event=event, pub_time__gt=time) \
            .order_by('pub_time').only('title')[0]
    except IndexError:
        next = None
    try:
        previous = Update.objects.filter(event=event, pub_time__lt=time) \
            .order_by('-pub_time').only('title')[0]
    except IndexError:
        previous = None
    return {'next': next, 'previous': previous, 'event': event}
eda885cfdb538f6e609c097d146c3803b17fb1f8
3,634,929
def get_timestep(all_params, stuff_for_time_loop):
    """
    Gets the full VFP + logging timestep.

    :param all_params: (dictionary) contains input parameters for simulation
    :param stuff_for_time_loop: (dictionary) contains derived parameters for simulation
    :return: a function with the above values initialized as static variables
    """
    vp_step = get_vlasov_poisson_step(
        all_params=all_params, stuff_for_time_loop=stuff_for_time_loop
    )
    fp_step = get_collision_step(
        all_params=all_params, stuff_for_time_loop=stuff_for_time_loop
    )
    storage_step = get_storage_step(stuff_for_time_loop=stuff_for_time_loop)

    def timestep(temp_storage, i):
        """
        This function performs one single timestep. It does:
        1 - the VP step
        2 - the FP step
        3 - the storage step

        :param temp_storage: (dictionary) contains input distribution function and other stored quantities
        :param i: (int) iteration of the time-step
        :return: (dictionary) contains updated distribution function and other stored quantities
        """
        e = temp_storage["e"]
        f = temp_storage["f"]
        t = temp_storage["time_batch"][i]
        de = temp_storage["driver_array_batch"][i]

        e, f = vp_step(e=e, f=f, t=t)
        f = fp_step(f=f)
        temp_storage = storage_step(temp_storage=temp_storage, e=e, de=de, f=f, i=i)

        return temp_storage, i

    return timestep
0ca048c07b9fe230469c7e16e0a847b9b155fc50
3,634,930
from typing import Any

def _create_entities(
    device: hm_device.HmDevice,
    device_address: str,
    custom_entity_class: type,
    device_enum: EntityDefinition,
    device_def: dict[str, Any],
    entity_def: dict[int, set[str]],
    channel_no: int | None = None,
) -> list[hm_entity.BaseEntity]:
    """Create custom entities."""
    entities: list[hm_entity.BaseEntity] = []
    unique_id = generate_unique_id(
        domain=device.central.domain,
        instance_name=device.central.instance_name,
        address=f"{device_address}:{channel_no}",
    )
    if unique_id in device.central.hm_entities:
        _LOGGER.debug("make_custom_entity: Skipping %s (already exists)", unique_id)
        return entities
    entity = custom_entity_class(
        device=device,
        device_address=device_address,
        unique_id=unique_id,
        device_enum=device_enum,
        device_def=device_def,
        entity_def=entity_def,
        channel_no=channel_no,
    )
    if len(entity.data_entities) > 0:
        entity.add_to_collections()
        entities.append(entity)
    return entities
d4726593e2e2db6fdfb27e5f619b634aa1aa3ec0
3,634,931
def TDF_ComparisonTool_SourceUnbound(*args):
    """
    * Finds from <aRefDataSet> all the keys not bound into <aRelocationTable>
      and puts them into <aDiffDataSet>. Returns True if the difference
      contains at least one key. (A key is a source object.)
      <anOption> may take the following values:
      1 : labels treatment only;
      2 : attributes treatment only (default value);
      3 : both labels & attributes treatment.

    :param aRefDataSet:
    :type aRefDataSet: Handle_TDF_DataSet &
    :param aRelocationTable:
    :type aRelocationTable: Handle_TDF_RelocationTable &
    :param aFilter:
    :type aFilter: TDF_IDFilter &
    :param aDiffDataSet:
    :type aDiffDataSet: Handle_TDF_DataSet &
    :param anOption: default value is 2
    :type anOption: int
    :rtype: bool
    """
    return _TDF.TDF_ComparisonTool_SourceUnbound(*args)
4f1ad0bbb33ff34f5bbdac37a43bf8ab8c1bd6bd
3,634,932
def sorted_qubits(qbs: Qubits) -> Qubits:
    """Return a sorted tuple of unique qubits in canonical order.

    Qubits can be of different types, so we sort first by type (as a
    string), then within types.
    """
    return tuple(sorted(set(qbs), key=lambda x: (str(type(x)), x)))
fcff2d13d22875118348566ce6432834c31ba644
3,634,933
def lu(a: np.ndarray, permute_l: bool = False, overwrite_a: bool = False,
       check_finite: bool = True, is_lapack_piv: bool = True):
    """
    Compute pivoted LU decomposition of a matrix.
    """
    if overwrite_a:
        # In-place variant: _lu_internal returns the pivots and the packed LU
        pivot, LU = _lu_internal(a, permute_l=permute_l,
                                 overwrite_a=overwrite_a,
                                 is_lapack_piv=is_lapack_piv)
        return _convert_pivot_matrix(pivot), LU
    else:
        # Out-of-place variant: returns pivots and separate L/U factors
        pivot, lower, upper = _lu_internal(a, permute_l=permute_l,
                                           overwrite_a=overwrite_a)
        return _convert_pivot_matrix(pivot), lower, upper
c417a0ec96c92f6662f2dac78b9369ca7feb18f0
3,634,934
def compute_referendum_result_by_regions(referendum_and_areas):
    """Return a table with the absolute count for each region.

    The returned DataFrame should be indexed by `code_reg` and have columns:
    ['name_reg', 'Registered', 'Abstentions', 'Null', 'Choice A', 'Choice B']
    """
    tempdf = referendum_and_areas[["code_reg", "name_reg"]]
    tempdf = tempdf.drop_duplicates()
    df = referendum_and_areas.groupby("code_reg").sum()
    df = df.drop(["Town code"], axis="columns")
    df = df.merge(tempdf, on="code_reg")
    df = df.set_index("code_reg")
    return df
afd55a1c514c59cdac2a507b9f90eb382f2845ee
3,634,935
import logging
import os
import shutil

def get_nvsmi_win():
    """Function that probes for nvsmi on Windows.

    :return: the path of nvsmi or `None`
    """
    logging.info("Detected System: Windows")
    nvsmi_exe = nvsmi + ".exe"
    for p in WIN_PATHS:
        loc_path = os.path.join(p, nvsmi_exe)
        if shutil.which(loc_path) is not None:
            return loc_path
    return None
7d230e9a3f0b52dfa79c6bda4c447db7ab212bcf
3,634,936
from typing import List
import json

def get_edfi_payloads(context, dbt_run_result, table_reference: str) -> List:
    """
    Extract a BigQuery table and return the resulting JSON as a dict.
    """
    df = context.resources.warehouse.download_table(table_reference)
    df_json = df.to_json(orient="records", date_format="iso")
    df_dict = json.loads(df_json)
    return df_dict
c2ad0026ad4e56a256a824a4c1fae0762aaa51b7
3,634,937
from typing import List

import numpy as np

def deinterleaver(pseudo_rand_array: List[int], array: np.ndarray) -> np.ndarray:
    """Random permutations for deinterleaving an array according to a
    pseudo-random sequence"""
    dims = array.shape
    flat_array = array.flatten()
    matrix = np.zeros(array.size, int)
    for index, position in enumerate(pseudo_rand_array):
        matrix[position] = flat_array[index]
    matrix = matrix.reshape(dims)
    return matrix
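A round-trip sketch: interleave by fancy-indexing with the pseudo-random sequence, then recover the original with deinterleaver (the permutation here is illustrative):

original = np.arange(6).reshape(2, 3)
perm = [3, 0, 4, 1, 5, 2]  # illustrative pseudo-random sequence
interleaved = original.flatten()[perm].reshape(original.shape)
restored = deinterleaver(perm, interleaved)
assert (restored == original).all()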
8fc1a59921875de3e647f09ba93f6d645d2829c8
3,634,938
def add_rnn_encoder_arguments(group):
    """Define arguments for the RNN encoder."""
    group.add_argument(
        "--elayers",
        default=4,
        type=int,
        help="Number of encoder layers (for shared recognition part "
        "in multi-speaker asr mode)",
    )
    group.add_argument(
        "--eunits",
        "-u",
        default=300,
        type=int,
        help="Number of encoder hidden units",
    )
    group.add_argument(
        "--eprojs", default=320, type=int,
        help="Number of encoder projection units"
    )
    group.add_argument(
        "--subsample",
        default="1",
        type=str,
        help="Subsample input frames x_y_z means subsample every x frame "
        "at 1st layer, every y frame at 2nd layer etc.",
    )
    return group
64a65bd496402dedfe98c4bd0d5bbc516c87a398
3,634,939
def sum_sample(attrfx='merge'):
    """Returns a mapper that computes the sum sample of a dataset.

    Parameters
    ----------
    attrfx : 'merge' or callable, optional
        Callable that is used to determine the sample attributes of the
        computed sum samples. By default this will be a string
        representation of all unique values of a particular attribute in
        any sample group. If there is only a single value in a group it
        will be used as the new attribute value.

    Returns
    -------
    FxMapper instance.
    """
    return FxMapper('samples', np.sum, attrfx=attrfx)
3a86b1bd169d75a573261313181bb5ca75df3c89
3,634,940
def match_profile_to_preset(profile_lookup, colour_preset_lookup):
    """Get a list of preset names that match the current profile colour
    scheme.
    """
    match_list = []
    for preset_name, preset_colours in colour_preset_lookup.items():
        match = True
        for colour_name, colour_val in preset_colours.items():
            if colour_name in profile_lookup:
                if get_rgb_colour(colour_val) != get_rgb_colour(profile_lookup[colour_name]):
                    match = False
                    break
        if match:
            match_list.append(preset_name)
    return match_list
7ac13ee2d29970f284269c6ca0e32ed6de27e8f7
3,634,941
import json

def jsonpify(data, callback):
    """ Helper to support JSONP """
    try:
        output = json.dumps(data, sort_keys=True, indent=2)
        if callback:
            output = '%s(%s)' % (callback, output)
        return output
    except Exception as e:
        message = 'exception jsonifying output, ```%s```' % e
        log.error(message)
        raise Exception(message)
6b41ca031b45f835aa57bc08e738dbd6c68d59c5
3,634,942
def localize_gaspari_cohn(dist, c):
    """
    Gaspari-Cohn correlation function.

    Arguments:
    - dist: Distances at which to evaluate the function
    - c: Cutoff value
    """
    # Initialize localization array
    localization = np.zeros(dist.shape)

    # Mask for mid-distance points
    mid_mask = (dist <= 2 * c)
    # c for mid-distance points (array indexed by mid_mask, or
    # fallback to scalar)
    try:
        c_mid = c[mid_mask]
    except TypeError:
        c_mid = c
    # Compute weights for mid-distance points
    close_weights = gaspari_cohn_mid(dist[mid_mask], c_mid)
    localization[mid_mask] = close_weights

    # Mask for close points
    close_mask = (dist <= c) & (dist > 0)
    # c for close points (array indexed by close_mask, or
    # fallback to scalar)
    try:
        c_close = c[close_mask]
    except TypeError:
        c_close = c
    localization[close_mask] = gaspari_cohn_close(dist[close_mask], c_close)

    localization[dist == 0] = 1
    return localization
a817962324e53beb411c4f05cd263a6970dda873
3,634,943
def download_spectra(table, data_dir, save_raw=True, raw_dir=None):
    """
    Downloads SDSS spectra.

    Parameters
    ----------
    table : AstroPy.Table
        Table with coordinates
    data_dir : str
        Specifies directory where the data is saved
    save_raw : bool
        Specifies whether the raw spectra (i.e. sdss fits) should be saved
    raw_dir : str, optional
        Specifies save directory for the raw spectra. If raw_dir=None and
        save_raw is True it defaults to data_dir

    Returns
    -------
    spectra : list
        List of HDU objects
    filenames : list
        List of filenames to be used when saving line measurements
    """
    select = zip(table['mjd'], table['plate'], table['fiberID'])
    spectra = []
    filenames = []
    for mjd, plate, fiberID in select:
        # Get the spectrum from the SDSS
        spec = SDSS.get_spectra_async(mjd=mjd, plate=plate, fiberID=fiberID)
        # Load the HDU object
        fits_object = spec[0].get_fits()
        spectra.append(fits_object)
        # Save it to file
        filename = './temp/{}_{}_{}.fits'.format(mjd, plate, fiberID)
        filenames.append('{}_{}_{}'.format(mjd, plate, fiberID))
        try:
            fits_object.writeto(filename)
        except OSError:
            print('Spectrum already saved. Skipping')
            _log('Spectrum {} already saved. Skipping'.format(filename))
    return spectra, filenames
ef4cfc10f84fc3489ed5e129fa7e642bfd52e779
3,634,944
def serialize_gs_channel(gs_channel, exclude_fields=None):
    """JSON serializer.

    Serializes the given groundstation channel.

    :param gs_channel: The Ground Station channel object to be serialized
    :param exclude_fields: List of fields to be excluded from the object
    :return: JSON serialization
    """
    if not exclude_fields:
        exclude_fields = ['id', 'groundstation']

    return serialize(
        gs_channel,
        camelcase=True,
        exclude=exclude_fields,
        related={
            'band': {
                'fields': [
                    'IARU_allocation_minimum_frequency',
                    'IARU_allocation_maximum_frequency',
                    'uplink',
                    'downlink'
                ],
                'aliases': {
                    'min_freq': 'IARU_allocation_minimum_frequency',
                    'max_freq': 'IARU_allocation_maximum_frequency'
                }
            },
            'modulations': {'fields': ['modulation']},
            'bandwidths': {'fields': ['bandwidth']},
            'bitrates': {'fields': ['bitrate']},
            'polarizations': {'fields': ['polarization']}
        }
    )
a57dc6b67939e23b853f60e847ed9a4aac27338d
3,634,945
def get_is_valid_node_name(name):
    """get_is_valid_node_name(std::string name) -> bool"""
    return _RMF.get_is_valid_node_name(name)
0dda27e75692c876888b3c72135664d82dccbbdf
3,634,946
def split_multi_expr_clause(s):
    """
    Transforms "abc, (123 + 1) * 2, f(a,b)" into
    ["abc", "(123 + 1) * 2", "f(a,b)"]
    """
    sin = list(s)
    sep = [-1]
    rb = 0  # ()
    cb = 0  # {}
    sb = 0  # []
    for i in range(len(sin)):
        c = sin[i]
        if c == "(":
            rb = rb + 1
        elif c == ")":
            rb = rb - 1
        elif c == "{":
            cb = cb + 1
        elif c == "}":
            cb = cb - 1
        elif c == "[":
            sb = sb + 1
        elif c == "]":
            sb = sb - 1
        elif c == "," and rb == 0 and cb == 0 and sb == 0:
            sep.append(i)
    sep.append(None)
    parts = [s[sep[i] + 1 : sep[i + 1]].strip() for i in range(len(sep) - 1)]
    return parts
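For instance, the bracket counters keep inner commas from splitting the clause:

split_multi_expr_clause("abc, (123 + 1) * 2, f(a,b)")
# -> ['abc', '(123 + 1) * 2', 'f(a,b)']
split_multi_expr_clause("[1, 2], {3: 4}, x")
# -> ['[1, 2]', '{3: 4}', 'x']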
d0adb1334715afa63f7ad453492ab2ef524046c0
3,634,947
import os
import torch
import pickle

def load(experiment: Experiment, key: str) -> list:
    """Load kwarg from experiments.

    Parameters
    ----------
    experiment: Experiment.
        Experiment metadata.
    key: str.
        Key to load.

    Returns
    -------
    data: list of data.
    """
    save_dir = experiment.log_dir
    values = []
    files = list(filter(lambda x: key in x, os.listdir(save_dir)))
    for file_name in files:
        if key == 'model':
            if file_name[-2:] == 'pt':
                if experiment.configs == {}:
                    configs = load(experiment, 'experiment')[0].configs
                else:
                    configs = experiment.configs
                configs.get('model', {}).pop('name', {})
                dataset = get_dataset(experiment.dataset)
                model = get_model(experiment.model, dataset.dim_outputs,
                                  dataset.dim_inputs, **configs.get('model', {}))
                model.load_state_dict(torch.load(save_dir + file_name))
                values.append(model)
        else:
            with open(save_dir + file_name, 'rb') as file:
                val = pickle.load(file)
                values.append(val)
    return values
add72e7ecdaf59bda8a7f1c4aaa1b399b48c0a92
3,634,948
import numpy as np

def setRCmatrix(m, p):
    """Randomly generate matrix ids to set zeros.

    Parameters
    ----------
    m : integer
        Number of rows/columns of square matrix
    p : float (0. < p < 1.0)
        percent of entries in matrix that are set to zero.

    Returns
    -------
    rowsZeros, columnsZeros : Tuple
        Row and column ids of matrix entries that will be set to zero
    """
    idZeros = np.random.choice(np.arange(m * m), int(p * m * m), replace=False)
    rowsZeros = []
    columnsZeros = []
    for i in idZeros:
        # Row-major flat index -> (row, column). This also covers indices in
        # the first row (i < m), which the original special case handled
        # incorrectly (it appended row=i, column=1 for those indices).
        rowsZeros.append(i // m)
        columnsZeros.append(i % m)
    return (rowsZeros, columnsZeros)
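A usage sketch that applies the generated indices to zero out entries of a random square matrix:

np.random.seed(0)
rows, cols = setRCmatrix(4, 0.25)  # zero out 25% of a 4x4 matrix
mat = np.random.rand(4, 4)
mat[rows, cols] = 0.0
assert (mat == 0).sum() == 4  # four distinct entries were zeroed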
aa56003f7b916cd9fcffb3db71c0bcc4cf6f8f55
3,634,949
def sections(parsed):
    """Calculates the number of every type of section"""
    num_small_sections = 0
    num_medium_sections = 0
    num_big_sections = 0
    for fence in parsed.fences:
        if not fence.isRemoval:
            length_ft = fence.length / 12
            num_big_sections += length_ft // 8
            remainder = length_ft % 8
            # a leftover shorter than 6 ft is a small section, anything
            # longer is a medium section (a remainder of exactly 6 ft is
            # counted as neither)
            if 0 < remainder < 6:
                num_small_sections += 1
            if remainder > 6:
                num_medium_sections += 1
    num_sections = num_small_sections + num_medium_sections + num_big_sections
    return num_small_sections, num_medium_sections, num_big_sections, num_sections
67bf9328af627234d7dd2fc4bf6dfb11911f9985
3,634,950
def idn(sp):
    """ Identity channel sp -> sp on space sp; it does nothing """
    return Channel(np.eye(_prod(sp.shape)), sp, sp)
d74c6a413e89604dae70244d746018c0ebf749c8
3,634,951
def permutation_test(x1, x2, times=1000, sides=2, metrics="mean", seed=None):
    """
    Permutation test: whether group x1 has equal [mean|median] as group x2.

    Parameters
    ----------
    x1: array or list of int/float
        samples for variable x1
    x2: array or list of int/float
        samples for variable x2
    times: int
        number of iterations, better <= 10^5
    sides: int, 1 or 2
        one-sided p value or two-sided p-value
    metrics: str
        the metrics: mean, median
    seed: int or None
        the seed for permutation

    Returns
    -------
    (delta, pval): float, float
        the difference between two groups and the p value

    Examples
    --------
    >>> x1 = np.random.normal(0, 1, 100)
    >>> x2 = np.random.normal(1, 2, 100)
    >>> permutation_test(x1, x2)
    """
    x1 = np.array(x1)
    x2 = np.array(x2)
    # drop NaNs (NaN != NaN)
    x1 = x1[x1 == x1]
    x2 = x2[x2 == x2]

    if metrics == "median":
        diff_obs = np.median(x1) - np.median(x2)
    else:
        diff_obs = np.mean(x1) - np.mean(x2)

    if seed is not None:
        np.random.seed(seed)

    xx = np.append(x1, x2)
    diff_all = np.zeros(times)
    for i in range(times):
        xx = np.random.permutation(xx)
        if metrics == "median":
            diff_all[i] = np.median(xx[:len(x1)]) - np.median(xx[len(x1):])
        else:
            diff_all[i] = np.mean(xx[:len(x1)]) - np.mean(xx[len(x1):])

    if sides == 2:
        pval = np.mean(np.abs(diff_all) >= np.abs(diff_obs))
    else:
        pval = np.mean(diff_all >= np.abs(diff_obs))
    return diff_obs, pval
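A seeded usage sketch (assuming numpy is imported as np, as the docstring example does; the exact p-value depends on the permutation draw):

np.random.seed(1)
x1 = np.random.normal(0, 1, 100)
x2 = np.random.normal(1, 2, 100)
delta, pval = permutation_test(x1, x2, times=2000, seed=42)
print(delta, pval)  # delta should be near -1 for these distributions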
2faada99276d65bbf92beabd7ef54fa475731efd
3,634,952
from pathlib import Path

def build_tourism(image_set, args):
    """
    image_set: whether to return the train, val or test dataset
    """
    all_imagepaths = sorted(Path(args.image_folder).glob("*.jpg"))
    all_poses = np.load(args.c2w_path)
    all_kinvs = np.load(args.kinv_path)
    all_bounds = np.load(args.bound_path)

    # first 20 images are test, next 5 for validation and the rest for training.
    # https://github.com/tancik/learnit/issues/3
    splits = {
        "test": (all_imagepaths[:20], all_poses[:20], all_kinvs[:20],
                 all_bounds[:20]),
        "val": (all_imagepaths[20:25], all_poses[20:25], all_kinvs[20:25],
                all_bounds[20:25]),
        "train": (all_imagepaths[25:], all_poses[25:], all_kinvs[25:],
                  all_bounds[25:])
    }
    imagepaths, poses, kinvs, bounds = splits[image_set]
    dataset = TourismDataset(imagepaths, poses, kinvs, bounds)
    return dataset
186568140fb52ddc39d428f0abb748e87470b5af
3,634,953
def find_package(app_name, version, revision):
    """Check for a specific package version"""
    # NOTE: Originally this method also used 'pkg_type' (the 'builder'
    # column in the 'packages' table) to filter; this may need to be
    # re-added at some point.
    pkg_def = find_package_definition(app_name)

    if pkg_def is None:
        return None

    try:
        return (Session.query(Package)
                .filter_by(pkg_name=pkg_def.pkg_name)
                .filter_by(version=version)
                .filter_by(revision=revision)
                .one())
    except sqlalchemy.orm.exc.NoResultFound:
        return None
1d0865aa055640d541a4ce6cdcb78fa66a30785f
3,634,954
from itertools import groupby
from typing import List

def _group_by_internal_name(ports: List[WrapperPort]):
    """Group ports by their 'internal_name' attribute.

    Return a list of (internal_name, group) where group is a list of ports.
    """
    ports.sort(key=lambda x: x.internal_name)
    instances = [(name, list(group))
                 for name, group in groupby(ports, lambda x: x.internal_name)]
    return instances
112929ccaac00d6b6fdb6ae912c3ad37ed2d8470
3,634,955
def homography_warp(patch, dst_H_src, dsize, points=None, padding_mode='zeros'):
    """
    .. note::
        Functional API for :class:`torchgeometry.HomographyWarper`

    Warps patches by homographies.

    Args:
        patch (Tensor): The image or tensor to warp. Should be from source.
        dst_H_src (Tensor): The homography or stack of homographies from
                            source to destination.
        dsize (tuple): The height and width of the image to warp.
        points (Tensor): Tensor[3, N] of homogeneous points in normalized
                         image space [-1, 1] to sample. Optional parameter.
        padding_mode (string): Either 'zeros' to replace out of bounds with
                               zeros or 'border' to choose the closest
                               border data.

    Return:
        Tensor: Patch sampled at locations from source to destination.

    Shape:
        - Input: :math:`(N, C, H, W)` and :math:`(N, 3, 3)`
        - Output: :math:`(N, C, H, W)`

    Example:
        >>> input = torch.rand(1, 3, 32, 32)
        >>> homography = torch.eye(3).view(1, 3, 3)
        >>> output = tgm.homography_warp(input, homography, (32, 32))  # NxCxHxW
    """
    height, width = dsize
    warper = HomographyWarper(height, width, points)
    return warper(patch, dst_H_src, padding_mode)
2a984b0900ecba6e0754312d59e371e4dafc3b67
3,634,956
def html2text(value):
    """ Uses html2text to convert HTML to text. """
    return _html2text.html2text(value)
94d630eafc5433c702805e4dcdf189b6cbe8e318
3,634,957
def column_metadata(column, table_proxy, table_schema, chunks, exemplar_row=0):
    """
    Infers column metadata for the purposes of creating dask arrays
    that reference their contents.

    Parameters
    ----------
    column : string
        Table column
    table_proxy : TableProxy
        CASA Table proxy object
    table_schema : dict
        Table schema
    chunks : dict of tuple of ints
        :code:`{dim: chunks}` mapping
    exemplar_row : int, optional
        Table row accessed when inferring a shape and dtype
        from a getcol.

    Returns
    -------
    shape : tuple
        Shape of column (excluding the row dimension).
        For example :code:`(16, 4)`
    dims : tuple
        Dask dimension schema. For example :code:`("chan", "corr")`
    dim_chunks : list of tuples
        Dimension chunks. For example :code:`[chan_chunks, corr_chunks]`.
    dtype : :class:`numpy.dtype`
        Column data type (numpy)

    Raises
    ------
    ColumnMetadataError
        Raised if inferring metadata failed.
    """
    coldesc = table_proxy.getcoldesc(column).result()
    dtype = infer_dtype(column, coldesc)

    # missing ndim implies only row dimension
    ndim = coldesc.get('ndim', 'row')

    try:
        option = coldesc['option']
    except KeyError:
        raise ColumnMetadataError("Column '%s' has no option "
                                  "in the column descriptor" % column)

    # Each row is a scalar
    # TODO(sjperkins)
    # Probably could be handled by getCell/putCell calls,
    # but the effort may not be worth it
    if ndim == 0:
        raise ColumnMetadataError("Scalars in column '%s' "
                                  "(ndim == %d) are not currently handled"
                                  % (column, ndim))
    # Only row dimensions
    elif ndim == 'row':
        shape = ()
    # FixedShape
    elif option & 4:
        try:
            shape = tuple(coldesc['shape'])
        except KeyError:
            raise ColumnMetadataError("'%s' column descriptor option '%d' "
                                      "specifies a FixedShape but no 'shape' "
                                      "attribute was found in the "
                                      "column descriptor" % (column, option))
    # Variably shaped...
    else:
        try:
            # Get an exemplar row and infer the shape
            exemplar = table_proxy.getcell(column, exemplar_row).result()
        except Exception as e:
            raise ColumnMetadataError("Unable to infer shape of "
                                      "column '%s' due to:\n'%s'"
                                      % (column, str(e)))

        # Try figure out the shape
        if isinstance(exemplar, np.ndarray):
            shape = exemplar.shape

            # Double-check the dtype
            if dtype != exemplar.dtype:
                raise ColumnMetadataError("Inferred dtype '%s' does not match "
                                          "the exemplar dtype '%s'"
                                          % (dtype, exemplar.dtype))
        elif isinstance(exemplar, list):
            shape = (len(exemplar),)
            assert dtype == object
        else:
            raise ColumnMetadataError(f"Unhandled exemplar type "
                                      f"'{type(exemplar)}'")

        # NOTE(sjperkins)
        # -1 implies each row can be any shape whatsoever
        # Log a warning
        if ndim == -1:
            log.warning("The shape of column '%s' is unconstrained "
                        "(ndim == -1). Assuming shape is %s from "
                        "exemplar", column, shape)
        # Otherwise confirm the shape and ndim
        elif len(shape) != ndim:
            raise ColumnMetadataError("'ndim=%d' in column descriptor doesn't "
                                      "match shape of exemplar=%s"
                                      % (ndim, shape))

    # Extract dimension schema
    try:
        dims = table_schema[column]['dims']
    except KeyError:
        dims = tuple("%s-%d" % (column, i) for i in range(1, len(shape) + 1))

    dim_chunks = []

    # Infer chunking for each dimension
    for s, d in zip(shape, dims):
        try:
            dc = chunks[d]
        except KeyError:
            # No chunk for this dimension, set to the full extent
            dim_chunks.append((s,))
        else:
            dc = da.core.normalize_chunks(dc, shape=(s,))
            dim_chunks.append(dc[0])

    if not (len(shape) == len(dims) == len(dim_chunks)):
        raise ColumnMetadataError("The length of shape '%s' dims '%s' and "
                                  "dim_chunks '%s' do not agree."
                                  % (shape, dims, dim_chunks))

    return ColumnMetadata(shape, dims, dim_chunks, dtype)
bdee9efff59b404f5da37500b494b561b93552bb
3,634,958
from datetime import datetime
import pytz

def generate_setup():
    """Used to initially populate the database with sample data"""
    user1 = User(username="user", password=pbkdf2_sha256.hash("pass"))
    user2 = User(username="user2", password=pbkdf2_sha256.hash("pass"))
    user3 = User(username="user3", password=pbkdf2_sha256.hash("pass"))

    feed_dt = datetime(year=2020, month=9, day=1, hour=0, minute=0, second=0,
                       microsecond=0, tzinfo=pytz.utc)
    item_dt = datetime(year=2020, month=9, day=9, hour=0, minute=0, second=0,
                       microsecond=0, tzinfo=pytz.utc)

    feed1 = Feed(id=1, url="https://feeds.feedburner.com/tweakers/mixed",
                 parser="html5lib", time_format="%a, %d %b %Y %H:%M:%S %Z",
                 last_updated=feed_dt)
    feed2 = Feed(id=2, url="http://www.nu.nl/rss/Algemeen", parser="lxml",
                 time_format="%a, %d %b %Y %H:%M:%S %z", last_updated=feed_dt)

    follows1 = Follows(username=user1.username, feed_id=feed1.id)
    follows2 = Follows(username=user2.username, feed_id=feed2.id)

    item1 = FeedItem(
        id=1, url="https://stackoverflow.com/", title="Item 1",
        description="Desc 1", feed_id=1, published=item_dt)
    item2 = FeedItem(
        id=2, url="https://www.quora.com/", title="Item 2",
        description="Desc 2", feed_id=1, published=item_dt)
    item3 = FeedItem(
        id=3, url="https://stackoverflow.com/", title="Item 3",
        description="Desc 3", feed_id=2, published=item_dt)
    item4 = FeedItem(
        id=4, url="https://www.quora.com/", title="Item 4",
        description="Desc 4", feed_id=2, published=item_dt)

    unread1 = Unread(username=user1.username, item_id=1, feed_id=1)
    read1 = Read(username=user1.username, item_id=2, feed_id=1)
    unread2 = Unread(username=user2.username, item_id=3, feed_id=2)
    read2 = Read(username=user2.username, item_id=4, feed_id=2)

    return [
        user1, user2, user3, feed1, feed2, follows1, follows2,
        item1, item2, item3, item4, unread1, unread2, read1, read2]
5b8e5b405fde7c00e14a9f39a028b9cf59bdb036
3,634,959
def GPS_VO_Merge_plot(T_v_dict, utm_dict):
    """ Plot the VO and GPS trajectories. The GPS trajectory is rotated and
    translated to the origin in order to obtain a visual comparison between
    both trajectories."""
    k = list(T_v_dict.keys()) + list(utm_dict.keys())
    k = [i for i in unique_everseen([i for i in k if k.count(i) > 1])]
    T_v, gps_orig = [], []
    for key in k:
        T_v.append(T_v_dict[key])
        # Retrieving the GPS coordinates into a list
        gps_orig.append(utm_dict[key])

    # Shifting the trajectory to the origin
    utm_dx = gps_orig[0][0]
    utm_dy = gps_orig[0][1]
    gps = [(u[0] - utm_dx, u[1] - utm_dy) for u in gps_orig]

    # Scale factor from GPS to VO
    last_gps = gps[len(gps) - 1]
    last_vo = T_v[len(T_v) - 1]
    d_gps = math.sqrt((last_gps[0] ** 2) + (last_gps[1] ** 2))
    d_VO = math.sqrt((last_vo[0] ** 2) + (last_vo[1] ** 2))
    scale = d_gps / d_VO

    # Apply scale factor to the translation vectors
    T_v = [np.array(t) * scale for t in T_v]
    for i, t in enumerate(T_v):
        magn = 0
        if i != 0:
            magn = np.linalg.norm((t - T_v[i - 1]))
        print(i, t, math.sqrt((t[0] ** 2) + (t[1] ** 2)), magn)

    # Obtaining the angle between the first points of each list: VO list and GPS list
    rotate_idx = min(len(T_v) - 1, len(gps) - 1)
    VO_v = np.array(T_v[rotate_idx])
    GPS_v = np.array(gps[rotate_idx])
    # Distance between points.
    d1 = math.sqrt((VO_v[0] - GPS_v[0]) ** 2 + (VO_v[1] - GPS_v[1]) ** 2)
    # Obtain the angle assuming the two points are vectors
    angle = math.acos((VO_v.dot(GPS_v)) / (np.linalg.norm(VO_v) * np.linalg.norm(GPS_v)))
    # Rotates the VO point only for verification
    VO_v = rotateFunct([VO_v], angle)
    # Distance between points after rotation.
    d2 = math.sqrt((VO_v[0][0] - GPS_v[0]) ** 2 + (VO_v[0][1] - GPS_v[1]) ** 2)
    # Verify if points are closer after rotation; if not, rotate the other way
    if d2 < d1:
        sign = -1
    else:
        sign = 1

    # Rotating the VO trajectory so it aligns with the GPS trajectory
    T_v = rotateFunct(T_v, sign * angle)
    T_v = [np.array((t[0] + utm_dx, t[1] + utm_dy)) for t in T_v]

    # Dictionary key: image path, value: vo utm coordinates
    VO_dict = OrderedDict()
    for key, value in zip(k, T_v):
        VO_dict[key] = value

    # --------------------------------------------------
    # Plotting the VO and GPS trajectories
    plt.figure(1)
    GPS, = plt.plot(*zip(*gps_orig), color='red', marker='o', label='GPS')
    pyMVO, = plt.plot(*zip(*T_v), marker='o', color='b', label='py-MVO')
    plt.legend(handles=[pyMVO, GPS])

    # Set plot parameters and show it
    plt.axis('equal')
    plt.grid()
    plt.show()
    return VO_dict
5ee5425745219255d76c01aeb4a09384ac001642
3,634,960
import json

def serialize_payload(payload):
    """Serialize a payload to a JSON string."""
    return json.dumps(payload, default=_pack)
8b9392259532cb2e18bec201d9f4258a08ee26e7
3,634,961
def get_app_users(appname):
    """List users who have permissions to the specified app.

    .. todo::
        * write tests for this API
        * add example response
    """
    app = _get_app(appname)
    return app.list_users()
4fbaed9e19086a0c354fd9e1a660e39975ca4c61
3,634,962
def is_power(num, return_decomposition=False):
    """
    Check if num is a perfect power in O(n^3) time, n=ceil(logN)
    """
    b = 2
    while (2 ** b) <= num:
        # binary search for an integer base a with a ** b == num
        a = 1
        c = num
        while (c - a) >= 2:
            m = int((a + c) / 2)

            if (m ** b) < (num + 1):
                p = int(m ** b)
            else:
                p = int(num + 1)

            if int(p) == int(num):
                if return_decomposition:
                    return True, int(m), int(b)
                else:
                    return True

            if p < num:
                a = int(m)
            else:
                c = int(m)
        b = b + 1

    if return_decomposition:
        return False, num, 1
    else:
        return False
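For example:

is_power(27, return_decomposition=True)  # -> (True, 3, 3), since 3**3 == 27
is_power(64, return_decomposition=True)  # -> (True, 8, 2), since 8**2 == 64
is_power(12)                             # -> False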
f12a3d5559e68eb72d8a920ee1e3fdfb9c813d3f
3,634,963
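# Usage sketch for is_power above (added for illustration; not part of the
# original snippet). The search returns the first exponent b that works.
assert is_power(27, return_decomposition=True) == (True, 3, 3)     # 27 = 3 ** 3
assert is_power(1024, return_decomposition=True) == (True, 32, 2)  # 1024 = 32 ** 2
assert is_power(10) is False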
import ray from ray import tune from datetime import datetime import pytz def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" class Args: def __init__(self): self.horizon = 400 self.algo = 'PPO' self.randomize_vehicles = True args = Args() flow_params = submodule.make_flow_params(args, pedestrians=True) flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) policy_mapping_fn = getattr(submodule, "policy_mapping_fn", None) policies_to_train = getattr(submodule, "policies_to_train", None) alg_run, gym_name, config = setup_exps_rllib( flow_params, flags.num_cpus, flags.num_rollouts, flags, policy_graphs, policy_mapping_fn, policies_to_train) config['num_workers'] = flags.num_cpus config['env'] = gym_name # create a custom string that makes looking at the experiment names easier def trial_str_creator(trial): return "{}_{}".format(trial.trainable_name, trial.experiment_tag) if flags.local_mode: print("LOCAL MODE") ray.init(local_mode=True) else: ray.init() exp_dict = { "run_or_experiment": alg_run, "name": flags.exp_title, "config": config, "checkpoint_freq": flags.checkpoint_freq, "checkpoint_at_end": True, 'trial_name_creator': trial_str_creator, "max_failures": 0, "stop": { "training_iteration": flags.num_iterations, }, } date = datetime.now(tz=pytz.utc) date = date.astimezone(pytz.timezone('US/Pacific')).strftime("%m-%d-%Y") s3_string = "s3://i210.experiments/i210/" \ + date + '/' + flags.exp_title if flags.use_s3: exp_dict['upload_dir'] = s3_string tune.run(**exp_dict, queue_trials=False, raise_on_failed_trial=False)
7fce6f77d2a31b372be64c576f48dd9fb5e3430a
3,634,964
import re


def validate_bucket_name(bucket_name):
    """
    Validate bucket name

    Bucket name must be compatible with DNS name (RFC 1123):
     - Between 6 and 63 characters long
     - Valid character set [a-z0-9-]
     - Can not begin or end with "-"

    Returns True if valid, False otherwise
    """
    if len(bucket_name) < 6 or len(bucket_name) > 63:
        return False

    if bucket_name.startswith("-") or bucket_name.endswith("-"):
        return False

    pattern = re.compile("^[0-9a-z]([0-9a-z-]{0,61})[0-9a-z]$")
    if not pattern.match(bucket_name):
        return False

    return True
1d759408d097143b93b0af172bf8e73fe02e283a
3,634,965
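# Usage sketch for validate_bucket_name above (added for illustration).
assert validate_bucket_name("my-bucket-01") is True
assert validate_bucket_name("ab") is False            # shorter than 6 characters
assert validate_bucket_name("My_Bucket_01") is False  # uppercase and "_" rejected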
def format_gro_box(box): """ Print a line corresponding to the box vector in accordance with .gro file format @param[in] box Box NamedTuple """ if box.alpha == 90.0 and box.beta == 90.0 and box.gamma == 90.0: return ' '.join(["% 13.9f" % (i/10) for i in [box.a, box.b, box.c]]) else: return ' '.join(["% 13.9f" % (i/10) for i in [box.A[0], box.B[1], box.C[2], box.A[1], box.A[2], box.B[0], box.B[2], box.C[0], box.C[1]]])
61fd32e7bc9eb9a81b8276afd3e35eb1b32150a5
3,634,966
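# Usage sketch for format_gro_box above. The Box namedtuple here is a
# hypothetical stand-in for the project's real box type; for a rectangular
# box only a, b, c and the 90-degree angles are used.
from collections import namedtuple

Box = namedtuple("Box", ["a", "b", "c", "alpha", "beta", "gamma", "A", "B", "C"])
box = Box(a=30.0, b=30.0, c=30.0, alpha=90.0, beta=90.0, gamma=90.0,
          A=None, B=None, C=None)
print(format_gro_box(box))  # prints the three box lengths divided by 10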
def bot_has_permissions(**perms: bool) -> AC: """Similar to :func:`.has_permissions` except checks if the bot itself has the permissions listed. This check raises a special exception, :exc:`.ApplicationBotMissingPermissions` that is inherited from :exc:`.ApplicationCheckFailure`. If this check is called in a DM context, it will raise an exception, :exc:`.ApplicationNoPrivateMessage`. """ invalid = set(perms) - set(nextcord.Permissions.VALID_FLAGS) if invalid: raise TypeError(f"Invalid permission(s): {', '.join(invalid)}") def predicate(interaction: Interaction) -> bool: guild = interaction.guild me = guild.me if guild is not None else interaction.client.user ch = interaction.channel try: permissions = ch.permissions_for(me) # type: ignore except AttributeError: raise ApplicationNoPrivateMessage() missing = [perm for perm, value in perms.items() if getattr(permissions, perm) != value] if not missing: return True raise ApplicationBotMissingPermissions(missing) return check(predicate)
8cc393bc5599f6234ea2cad3d316f09b1f725cb9
3,634,967
def get_ilsvrc_xception_trainner(config_file_path='./config/ilsvrc_2012_xception.yaml'):
    """
    Build a classification trainer for the ILSVRC-2012 Xception model.

    :param config_file_path: path to the YAML configuration file
    :return: a ``BaseClsTrainner`` instance configured from the file
    """
    cfg = config_utils.get_config(config_file_path=config_file_path)

    return base_trainner.BaseClsTrainner(cfg=cfg)
f12e83c5bf8e9a82d126ab01c0c4a15f95d6f295
3,634,968
from statsmodels.stats.diagnostic import het_breuschpagan


def get_bpag(model) -> tuple:
    """Calculate test statistics for heteroscedasticity

    Parameters
    ----------
    model : statsmodels regression results object
        Fitted model exposing the residuals and the exogenous design matrix.

    Returns
    -------
    Test results from the Breusch-Pagan Test: the Lagrange multiplier
    statistic, its p-value, the F-statistic, and the F-statistic's p-value.
    """
    lm_stat, p_value, f_stat, fp_value = het_breuschpagan(model.resid, model.model.exog)
    return lm_stat, p_value, f_stat, fp_value
c371d663365e9b18b383e86bd4502a81a063438b
3,634,969
def name2link(name: str): """Used for hyperlink anchors""" if not isinstance(name, str): name = str(name) return "-".join([s.lower() for s in name.split(" ")])
357496a291dcb16a86f830551350ff77ca9de81c
3,634,970
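# Usage sketch for name2link above (added for illustration).
assert name2link("Getting Started") == "getting-started"
assert name2link(42) == "42"  # non-string input is stringified first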
import numpy as np


def window_rolling(origin_data, window_size):
    """Build rolling windows along the first (0th) dimension.

    :param origin_data: ndarray of [n_records, ...]
    :param window_size: window_size
    :return: [n_records - window_size + 1, window_size, ...]
    """
    n_records = len(origin_data)
    if n_records < window_size:
        return None

    data = origin_data[:, None]
    all_data = []
    for i in range(window_size):
        all_data.append(data[i: (n_records - window_size + i + 1)])

    # shape -> [n_records - window_size + 1, window_size, ...]
    rolling_data = np.hstack(all_data)
    return rolling_data
e5a8e30272098ea01ce939d21b245e7fc4a21018
3,634,971
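# Usage sketch for window_rolling above (added for illustration).
import numpy as np

x = np.arange(5)
print(window_rolling(x, 3))
# [[0 1 2]
#  [1 2 3]
#  [2 3 4]]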
def get_neighbors(grid, structure_num, proximity):
    """
    Given a grid of structures, returns the closest proximity neighbors to the given structure

    Params:
        grid: 2D array of structure numbers
        structure_num: int
        proximity: int

    Returns:
        A list of neighboring structures to the current structure_num
    """
    # Grid dimensions (rows and columns) for ease of access
    width = len(grid)
    height = len(grid[0])

    # We'll make it a set initially to avoid duplicate neighbors
    neighbors = set()
    for i in range(-proximity, proximity + 1):
        for j in range(-proximity, proximity + 1):
            if not (i == 0 and j == 0):
                # Clamp the row/column indices to the grid boundaries
                x = min(max((structure_num // height) - i, 0), width - 1)
                y = min(max((structure_num % height) - j, 0), height - 1)
                if grid[x][y] != structure_num:
                    neighbors.add(grid[x][y])
    return list(neighbors)
4f62fb8f01beaeea32b8ae0b496e4e972e4cc74b
3,634,972
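# Usage sketch for get_neighbors above (added for illustration). In a 3x3
# grid of structure numbers 0..8, the center structure 4 sees every other
# structure within proximity 1.
grid = [[0, 1, 2],
        [3, 4, 5],
        [6, 7, 8]]
print(sorted(get_neighbors(grid, 4, 1)))  # [0, 1, 2, 3, 5, 6, 7, 8]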
from torch.utils import model_zoo


def vgg19_bn(pretrained=False, **kwargs):
    """VGG 19-layer model (configuration 'E') with batch normalization

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    # VGG, make_layers, cfg and model_urls are assumed to be defined at
    # module level (torchvision-style)
    if pretrained:
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg['E'], batch_norm=True), **kwargs)
    if pretrained:
        pretrained_dict = model_zoo.load_url(model_urls['vgg19_bn'])
        model_dict = model.state_dict()
        # Keep only matching keys, skipping the classifier weights that are
        # re-initialized for the new task
        excluded = {'classifier.0.weight', 'classifier.6.weight', 'classifier.6.bias'}
        pretrained_dict = {k: v for k, v in pretrained_dict.items()
                           if k in model_dict and k not in excluded}
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
    return model
17427df13b43f456bfb7cce1d4e2be94c2673b85
3,634,973
from sentence_transformers import SentenceTransformer


def init_ss_model() -> SentenceTransformer:
    """Load the SBERT RoBERTa-large model fine-tuned on ANLI/MNLI/SNLI."""
    return SentenceTransformer(
        "usc-isi/sbert-roberta-large-anli-mnli-snli", cache_folder=str(PRETRAINED_MODEL_DIR)
    )
1cbc11b0def3a81edbd06d8297964f9360713243
3,634,974
def flatten_dic(dic):
    """
    Flatten a dictionary with nested keys into a single level, usable in a dataframe

    Args:
        dic -- a dictionary

    Returns:
        out -- the flattened dictionary (suitable for building a flat Series)
    """
    out = {}

    def flatten(x, name=""):
        """
        Recursive auxiliary function

        Args:
            x -- an element (a dict, a list, or a value)
            name -- The inherited parent naming
        """
        if type(x) is dict:
            for a in x:
                flatten(x[a], name + a + "_")
        elif type(x) is list:
            i = 0
            for a in x:
                flatten(a, name + str(i) + "_")
                i += 1
        else:
            out[name[:-1]] = x

    flatten(dic)
    return out
60f7caa27a2cf909ad426336bda06fcd2da127f6
3,634,975
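# Usage sketch for flatten_dic above (added for illustration). Nested keys
# are joined with "_" and list items get their index as a key segment.
nested = {"a": {"b": 1}, "c": [2, 3]}
print(flatten_dic(nested))  # {'a_b': 1, 'c_0': 2, 'c_1': 3}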
import numpy as np


def policy_eval(policy, env, discount_factor=1.0, epsilon=0.00001):
    """
    Evaluate a policy given an environment and a full description of the
    environment's dynamics.

    Args:
        policy: [S, A] shaped matrix representing the policy.
        env: OpenAI env. env.P represents the transition probabilities of the environment.
            env.P[s][a] is a list of transition tuples (prob, next_state, reward, done).
            env.nS is a number of states in the environment.
            env.nA is a number of actions in the environment.
        discount_factor: Gamma discount factor.
        epsilon: We stop evaluation once our value function change is less
            than epsilon for all states.

    Returns:
        Vector of length env.nS representing the value function.
    """
    # Start with an all-zero value function
    V = np.zeros(env.nS)

    while True:
        # new value function
        V_new = np.zeros(env.nS)
        # stopping condition
        delta = 0

        # loop over state space
        for s in range(env.nS):

            # To accumulate the Bellman expectation equation
            Q = 0
            # get probability distribution over actions
            action_probs = policy[s]

            # loop over possible actions
            for a in range(env.nA):

                # sum over all transitions for this (state, action) pair
                for prob, next_state, reward, done in env.P[s][a]:
                    # apply the Bellman expectation equation
                    Q += action_probs[a] * prob * (reward + discount_factor * V[next_state])

            # get the biggest difference over state space
            delta = max(delta, abs(Q - V[s]))

            # update state-value
            V_new[s] = Q

        # the new value function
        V = V_new

        # stop once the value function has converged
        if delta < epsilon:
            break

    return np.array(V)
ce36895abdb0e176f8f3af9b3a72501479cbec3a
3,634,976
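# Usage sketch for policy_eval above. This assumes one of the classic gym
# toy-text environments that exposes nS, nA and P on the unwrapped env
# (older gym versions); the evaluated policy is uniformly random.
import gym
import numpy as np

env = gym.make("FrozenLake-v0").unwrapped
random_policy = np.ones([env.nS, env.nA]) / env.nA
V = policy_eval(random_policy, env)
print(V.reshape(4, 4))  # state values on the 4x4 lake grid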
def split_data_target(element, device, logger=None): """Split elements in dataloader according to pre-defined rules.""" if not (isinstance(element, list) or isinstance(element, tuple)): msg = ( "Invalid dataloader, please check if the input dataloder is valid." ) if logger: logger.error(msg) raise ValueError(msg) if len(element) == 2: # Dataloader with one input and one target data, target = element[0], element[1] return [data.to(device)], target.to(device) # tensor -> list elif len(element) > 2: # Dataloader with multiple inputs and one target data, target = element[:-1], element[-1] data_device = [tensor.to(device) for tensor in data] return data_device, target.to(device) else: # Dataloader with invalid input msg = ( "The input dataloader should at least contain two tensors - data" " and target." ) if logger: logger.error(msg) raise ValueError(msg)
2aa0a5c4d80aae2dc237ba9f87c11a7fc7e206fd
3,634,977
from typing import List def get_dataset_access_list(dataset: str, access_type: str) -> List[str]: """Get the comma-separated list of members of a dataset's {access_type} group.""" deploy_config = get_deploy_config() membership_key = f"{dataset}-{access_type}-members-cache" group_membership = deploy_config.read_dataset_config(dataset, membership_key) return group_membership.split(",")
3081952eedff20393dca3e56290cbb65cdd8230a
3,634,978
from typing import List def deploy_whitelist_to_constraints( deploy_whitelist: DeployWhitelist, ) -> List[Constraint]: """Converts a whitelist of locations into marathon appropriate constraints https://mesosphere.github.io/marathon/docs/constraints.html#like-operator :param deploy_whitelist: List of lists of locations to whitelist :returns: List of lists of constraints """ if deploy_whitelist is not None: (region_type, regions) = deploy_whitelist regionstr = "|".join(regions) return [[region_type, "LIKE", regionstr]] return []
f20bd167b938ada0e4cee0c613c07f3d59c72063
3,634,979
def coerce_types(T1, T2): """Coerce types T1 and T2 to a common type. Coercion is performed according to this table, where "N/A" means that a TypeError exception is raised. +----------+-----------+-----------+-----------+----------+ | | int | Fraction | Decimal | float | +----------+-----------+-----------+-----------+----------+ | int | int | Fraction | Decimal | float | | Fraction | Fraction | Fraction | N/A | float | | Decimal | Decimal | N/A | Decimal | float | | float | float | float | float | float | +----------+-----------+-----------+-----------+----------+ Subclasses trump their parent class; two subclasses of the same base class will be coerced to the second of the two. """ # Get the common/fast cases out of the way first. if T1 is T2: return T1 if T1 is int: return T2 if T2 is int: return T1 # Subclasses trump their parent class. if issubclass(T2, T1): return T2 if issubclass(T1, T2): return T1 # Floats trump everything else. if issubclass(T2, float): return T2 if issubclass(T1, float): return T1 # Subclasses of the same base class give priority to the second. if T1.__base__ is T2.__base__: return T2 # Otherwise, just give up. raise TypeError('cannot coerce types %r and %r' % (T1, T2))
7d412df0182ca6e1f43bfc6ce8e7c6ce1a738bed
3,634,980
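# Usage sketch for coerce_types above (added for illustration), following
# the coercion table in the docstring.
from decimal import Decimal
from fractions import Fraction

assert coerce_types(int, Fraction) is Fraction
assert coerce_types(Decimal, float) is float
try:
    coerce_types(Fraction, Decimal)  # N/A per the table
except TypeError as exc:
    print(exc)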
def read_vecstim_protocol(protocol_name, protocol_definition, recordings, syn_locs): """Read Vecstim protocol from definitions. Args: protocol_name (str): name of the protocol protocol_definition (dict): dict containing the protocol data recordings (bluepyopt.ephys.recordings.CompRecording): recordings to use with this protocol syn_locs (list of ephys.locations.NrnPointProcessLocation): locations of the synapses Returns: emodelrunner.protocols.SweepProtocolCustom: a protocol containing Vecstim stimulus activating synapses """ stim_definition = protocol_definition["stimuli"] if stim_definition["vecstim_random"] not in [ "python", "neuron", ]: logger.warning( "vecstim random not set to 'python' nor to 'neuron' in config file." "vecstim random will be re-set to 'python'." ) stim_definition["vecstim_random"] = "python" stim = NrnVecStimStimulusCustom( syn_locs, stim_definition["syn_start"], stim_definition["syn_stop"], stim_definition["syn_stim_seed"], stim_definition["vecstim_random"], ) return SweepProtocolCustom(protocol_name, [stim], recordings)
05f23ac1e3c903796799cb088f83b9f0194d30ed
3,634,981
from typing import Tuple def get_pair_elements(pair: str) -> Tuple[str, str, str]: """ Get a currency pair's base, quote, and trade base pair. Eg. If the global trade base is 'USDT' and the pair is 'BTC-ETH', returns ('BTC', 'ETH', 'USDT-BTC'). Arguments: pair: The currency pair eg. 'BTC-ETH' Returns: A tuple containing: (str): The pair's base currency. (str): The pair's quote currency. (str): The pair's trade base pair. """ pair_split = pair.split('-') base = pair_split[0] quote = pair_split[1] trade_base_pair = '{}-{}'.format(config['trade_base'], base) return (base, quote, trade_base_pair)
abc838c50aaadd93d56b5bd3717aafaca4158ef2
3,634,982
from flask import Flask

from . import extensions
from . import modules


def create_app(**kwargs):
    """ Entry point to the Flask RESTful server application. """

    # Initialize the Flask app
    app: Flask = Flask(__name__, **kwargs)

    # Load the config file
    app.config.from_object('config.DevelopmentConfig')

    # Initialize the API extensions
    extensions.init_app(app)

    # Initialize the actual API routes
    modules.init_app(app)

    return app
790c82eed799ebe8347e4cb6d9732604a22cd817
3,634,983
import os def GetCoastalDpaZones(kml_path=None): """Gets Coastal DPA zones. Coastal DPA zones are Dynamic Protection Area monitored through the use of ESC sensors. Args: kml_path: Optional path to the Coastal DPA KML. If unspecified, use the default one from the `data/ntia/` folder. Returns: A dict of DPA struct keyed by their names, each one holding following attributes: geometry: A |shapely.Polygon or Point| defining the DPA. protectionCritDbmPer10MHz: The protection threshold (dBm/10MHz). refHeightMeters: The radar antenna height (meters). antennaBeamwidthDeg: The antenna beamwidth (degrees). minAzimuthDeg: The radar min azimuth (degrees). maxAzimuthDeg: The radar max azimuth (degrees). catBNeighborDist: The CatB neighboring distance (km). """ global _coastal_dpa_zones global _coastal_dpa_path if _coastal_dpa_zones is None or kml_path != _coastal_dpa_path: _coastal_dpa_path = kml_path if kml_path is None: kml_path = os.path.join(CONFIG.GetNtiaDir(), COASTAL_DPA_ZONE_FILE) _coastal_dpa_zones = _LoadDpaZones(kml_path, COASTAL_DPA_PROPERTIES, fix_invalid=False) # fix_invalid to False to auto-detect issues with provided KML. return _coastal_dpa_zones
de27b33fae6f5ae259896023b41e43022606995c
3,634,984
def dy4(vector, g, m1, m2, L1, L2):
    """
    Abbreviations

    M = m1 + m2
    S = sin(y1 - y2)
    C = cos(y1 - y2)
    s1 = sin(y1)
    s2 = sin(y2)

    Equation

    y4' = g*M*[s2 - s1*C] - S*[M * L1 * y3^2 + C * m2 * L2 * y4^2]
          -------------------------------------------------------
                           L2*[m2*C^2 - M]
    """
    y1, y2, y3, y4 = vector
    M, S, C, s1, s2 = abbreviate(m1, m2, y1, y2)

    # Split up these awful equations for readability
    num_a = g*M*(s2 - s1*C)
    num_b = S*(M*L1*y3**2 + C*m2*L2*y4**2)  # velocity-coupling term
    den = L2*(m2*C**2 - M)

    return (num_a - num_b)/den
680b253ced9c1faafb357eef83450d262391c885
3,634,985
from typing import OrderedDict def sort_dict(od, d): """Sort parameters (same order as xsd:sequence)""" if isinstance(od, dict): ret = OrderedDict() for k in od.keys(): v = d.get(k) # don't append null tags! if v is not None: if isinstance(v, dict): v = sort_dict(od[k], v) elif isinstance(v, list): v = [sort_dict(od[k][0], v1) for v1 in v] ret[k] = v return ret else: return d
6211a98d30e29ac9b5d0dcaeeec3ef76e9c95713
3,634,986
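# Usage sketch for sort_dict above (added for illustration). The OrderedDict
# acts as the xsd:sequence template; values come from the plain dict, and
# missing keys are dropped as null tags.
from collections import OrderedDict

template = OrderedDict([("name", None), ("age", None), ("email", None)])
data = {"age": 30, "name": "Ada"}
print(sort_dict(template, data))  # OrderedDict([('name', 'Ada'), ('age', 30)])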
import numpy as np
from matplotlib.colors import LinearSegmentedColormap


def _center_cmap(cmap, vmin, vmax, name="cmap_centered"):
    """
    Center given colormap (ranging from vmin to vmax) at value 0.

    Taken from MNE-Python v0.24, as it will be removed in MNE-Python v1.0.

    Parameters
    ----------
    cmap : matplotlib.colors.Colormap
        The colormap to center around 0.
    vmin : float
        Minimum value in the data to map to the lower end of the colormap.
    vmax : float
        Maximum value in the data to map to the upper end of the colormap.
    name : str
        Name of the new colormap. Defaults to 'cmap_centered'.

    Returns
    -------
    cmap_centered : matplotlib.colors.Colormap
        The new colormap centered around 0.

    Notes
    -----
    This function can be used in situations where vmin and vmax are not
    symmetric around zero. Normally, this results in the value zero not being
    mapped to white anymore in many colormaps. Using this function, the value
    zero will be mapped to white even for asymmetric positive and negative
    value ranges. Note that this could also be achieved by re-normalizing a
    given colormap by subclassing matplotlib.colors.Normalize as described
    here:
    https://matplotlib.org/users/colormapnorms.html#custom-normalization-two-linear-ranges
    """
    vzero = abs(vmin) / float(vmax - vmin)
    index_old = np.linspace(0, 1, cmap.N)
    index_new = np.hstack([
        np.linspace(0, vzero, cmap.N // 2, endpoint=False),
        np.linspace(vzero, 1, cmap.N // 2),
    ])

    # Use dedicated loop variables so the `name` parameter is not shadowed
    channels = ("red", "green", "blue", "alpha")
    cdict = {channel: [] for channel in channels}
    for old, new in zip(index_old, index_new):
        for value, channel in zip(cmap(old), channels):
            cdict[channel].append((new, value, value))
    return LinearSegmentedColormap(name, cdict)
de13fb933e16d3179f6b02be9c02862f353464b1
3,634,987
def port_number(worker_id): """A fixture that returns a different port for each parallel worker.""" i = 0 if worker_id != "master": i = int("".join([c for c in worker_id if c.isdigit()])) return PORTS[i]
8bf4e5936d5e2f83a0ca2bf973dfd24d0af1f950
3,634,988
def get_loss_f(**kwargs_parse): """Return the loss function given the argparse arguments.""" return Loss(lamlSum=kwargs_parse["lamlSum"], lamhSum=kwargs_parse["lamhSum"], lamL2norm=kwargs_parse["lamL2norm"], lamCMF=kwargs_parse["lamCMF"], lamConv=kwargs_parse["lamConv"], lamL1wave=kwargs_parse["lamL1wave"], lamL1attr=kwargs_parse["lamL1attr"])
03db5b8934ae9263bf3f0668f97d77c124bf58fb
3,634,989
def assert_greater_equal_v2(x, y, message=None, summarize=None, name=None): """Assert the condition `x >= y` holds element-wise. This Op checks that `x[i] >= y[i]` holds for every pair of (possibly broadcast) elements of `x` and `y`. If both `x` and `y` are empty, this is trivially satisfied. If `x` is not greater or equal to `y` element-wise, `message`, as well as the first `summarize` entries of `x` and `y` are printed, and `InvalidArgumentError` is raised. Args: x: Numeric `Tensor`. y: Numeric `Tensor`, same dtype as and broadcastable to `x`. message: A string to prefix to the default message. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Defaults to "assert_greater_equal". Returns: Op that raises `InvalidArgumentError` if `x >= y` is False. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and `x >= y` is False. The check can be performed immediately during eager execution or if `x` and `y` are statically known. """ return assert_greater_equal(x=x, y=y, summarize=summarize, message=message, name=name)
bc0ef67602cd0be4e6868971f821eacd48f5fb72
3,634,990
import itertools


def pad_ends(
    sequence, pad_left=True, left_pad_symbol="<s>", right_pad_symbol="</s>"
):
    """
    Pad sentence ends with start- and end-of-sentence tokens

    In speech recognition, it is important to predict the end of sentence
    and use the start of sentence to condition predictions. Typically this
    is done by adding special tokens (usually <s> and </s>) at the ends of
    each sentence. The <s> token should not be predicted, so some special
    care needs to be taken for unigrams.

    Arguments
    ---------
    sequence : iterator
        The sequence (any iterable type) to pad.
    pad_left : bool
        Whether to pad on the left side as well. True by default.
    left_pad_symbol : any
        The token to use for left side padding. "<s>" by default.
    right_pad_symbol : any
        The token to use for right side padding. "</s>" by default.

    Returns
    -------
    generator
        A generator that yields the padded sequence.

    Example
    -------
    >>> for token in pad_ends(["Speech", "Brain"]):
    ...     print(token)
    <s>
    Speech
    Brain
    </s>

    """
    if pad_left:
        return itertools.chain(
            (left_pad_symbol,), tuple(sequence), (right_pad_symbol,)
        )
    else:
        return itertools.chain(tuple(sequence), (right_pad_symbol,))
e4a341d1e777adab36ec0c0e7996e23203c53478
3,634,991
def client(): """ Create a client with authentication settings. """ cfg = get_config() url = f"ldap://{cfg['SERVER']['hostname']}:{cfg['SERVER']['port']}" client = LDAPClient(url) client.set_credentials( "SIMPLE", user=cfg["SIMPLEAUTH"]["user"], password=cfg["SIMPLEAUTH"]["password"] ) return client
911f56339f1b995d9addf0466f01dd6f6bf6ff4e
3,634,992
def _check_and_fire_deploy(job):
    """
    Validates pre-conditions for deploy (hook status returned successfully)
    and triggers deploy for enabled deployers.

    :param job: Dictionary containing job parameters
    :return: job or AsyncResult
    """
    # Check pre-conditions and fire the deploy
    job_id = job['meta-info']['job-id']
    search_params = create_search_parameters(job)
    store = get_store()
    check = check_ready(job)
    if check.get('failed'):
        store.update_state(job_id, JOB_STATE_FAILED)
        raise HooksFailed(check['failed'])
    elif check['pending']:
        store.add_event(EVENT_PENDING_HOOK, details={
            'pending-hooks': check['pending']
        }, search_params=search_params)
        return job
    else:
        job_config = job['config']
        deployers = job_config.get('deployers', {})
        return chord(
            group(
                _deploy.si(job, deployer_name)
                for deployer_name, deployer in deployers.items()
                if deployer.get('enabled') and deployer.get('url')
            ),
            _job_complete.si(job),
        ).apply_async(interval=TASK_SETTINGS['DEPLOY_WAIT_RETRY_DELAY'])
cc38e88c8e2e43878eb9ce4bfe722c0018931320
3,634,993
import numpy as np


def mark_errors_flipping(events):
    """ Marks single- and double-spaced error fractions in a sequence of
    measurement outcomes. """
    single_errors = np.zeros(len(events) - 1)
    double_errors = np.zeros(len(events) - 2)
    for i in range(len(events) - 1):
        # A single error is flagged when two consecutive outcomes are
        # identical, which is associated with a qubit error
        if events[i] == events[i + 1]:
            single_errors[i] = 1
        if i < (len(events) - 2):
            # A double error is flagged when two outcomes two steps apart
            # are identical
            if events[i] == events[i + 2]:
                double_errors[i] = 1
    return single_errors, double_errors
14e71e1e6947bca4382fd22c0ae714e359174476
3,634,994
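# Usage sketch for mark_errors_flipping above (added for illustration).
# For outcomes [0, 0, 1, 1], positions 0 and 2 repeat their successor.
import numpy as np

single, double = mark_errors_flipping(np.array([0, 0, 1, 1]))
print(single)  # [1. 0. 1.]
print(double)  # [0. 0.]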
import numpy


def split_with_minimum_rt_distance(rts, min_rt_delta=0, random_state=None):
    """
    Sample from a set of retention times, so that the sampled rts have a
    minimum rt difference.

    :param rts: array-like, retention times
    :param min_rt_delta: minimum distance between two sampled retention times
    :param random_state: seed for the random number generator
    :return: list of indices of the sampled retention times
    """
    # Store old random state and set random state
    rs_old = numpy.random.get_state()
    numpy.random.seed(random_state)

    last_rt = -numpy.inf
    idc = []
    for rt in numpy.unique(rts):
        if last_rt + min_rt_delta <= rt:
            # Pick one representative index at random among duplicates
            sel = numpy.where(rts == rt)[0]
            idc.append(sel[numpy.random.randint(0, len(sel))])
            last_rt = rt

    # Restore old random state
    numpy.random.set_state(rs_old)

    return idc
026adc9b8dc7f3be513a93275fb0ef0d4b7de615
3,634,995
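# Usage sketch for split_with_minimum_rt_distance above (added for
# illustration). Retention times closer than min_rt_delta to the previously
# kept one are skipped; duplicates yield one randomly chosen index.
import numpy

rts = numpy.array([1.0, 1.0, 2.0, 2.5, 5.0])
idc = split_with_minimum_rt_distance(rts, min_rt_delta=1.0, random_state=0)
print([rts[i] for i in idc])  # [1.0, 2.0, 5.0]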
from django.apps import apps def create_proxy_model(name, model_mixins, base_model, attrs=None, module=None): """ Create a Django Proxy Model on the fly, to be used by any Cascade Plugin. """ class Meta: proxy = True app_label = 'cmsplugin_cascade' name = str(name + 'Model') try: Model = apps.get_registered_model(Meta.app_label, name) except LookupError: bases = model_mixins + (base_model,) attrs = dict(attrs or {}, Meta=Meta, __module__=module) Model = type(name, bases, attrs) fake_proxy_models[name] = bases return Model
ff0b8216ff83ced0cd46da1adc237614fc9e6d85
3,634,996
from django.apps import apps def get_embed_video_model(): """ Get the embed video model from the ``WAGTAILEMBEDVIDEOS_EMBEDVIDEO_MODEL`` setting. Useful for developers making Wagtail plugins that need the embed video model. Defaults to the standard :class:`~wagtail_embed_videos.models.EmbedVideo` model if no custom model is defined. """ model_string = get_embed_video_model_string() try: return apps.get_model(model_string, require_ready=False) except ValueError: raise ImproperlyConfigured("WAGTAILEMBEDVIDEOS_EMBEDVIDEO_MODEL must be of the form 'app_label.model_name'") except LookupError: raise ImproperlyConfigured( "WAGTAILEMBEDVIDEOS_EMBEDVIDEO_MODEL refers to model '%s' that has not been installed" % model_string )
d691a0d14f209297338c2eb3e9542c5d5c5a9d61
3,634,997
def get_filetypes(key='type'): """Gets the list of possible filetypes from the filetype table Parameters ---------- key : {'type', 'filetype_id'}, optional Defaults to "type". Determines the format of the returned dict. Returns ------- dict If `key` is "type", dict is of the form {type: filetype_id} If `key` is "filetype_id", dict is of the form {filetype_id: type} """ con = SQLConnectionHandler() if key == 'type': cols = 'type, filetype_id' elif key == 'filetype_id': cols = 'filetype_id, type' else: raise QiitaDBColumnError("Unknown key. Pass either 'type' or " "'filetype_id'.") sql = 'select {} from qiita.filetype'.format(cols) return dict(con.execute_fetchall(sql))
0915680135b9460be44bbf3d127ca248637ff96b
3,634,998
from torch.utils.data import DataLoader
from torchvision import transforms


def make_dataloader(folder_names, data_path, batch_size, task, isTrain=False):
    """This function takes in a list of folders with images in them, the root directory of these
    images, and a batchsize and turns them into a dataloader"""
    # added flag isTrain - only augment/transform training set, not validation/test set
    # build_image_label_pairs and CarDataset are assumed to be module-level helpers

    # Declare the transforms
    preprocessing_transforms = transforms.Compose([transforms.ToTensor()])

    # Create the datasets
    pairs = build_image_label_pairs(folder_names, data_path, task)
    dataset = CarDataset(pairs, preprocessing_transforms, isTrain)

    # Create the dataloaders
    return DataLoader(
        dataset,
        batch_size=batch_size,
        num_workers=batch_size // 2,
        shuffle=True
    )
1e73d30481aeca43f81a656799377fe5852e9e67
3,634,999