content
stringlengths
22
815k
id
int64
0
4.91M
def skip(line):
    """Return True when *line* is blank/whitespace-only or a shebang line."""
    body = line.lstrip()
    return not body or body.startswith('#!')
5,329,300
def get_dss_client(deployment_stage: str):
    """
    Return a DSSClient configured for the given deployment stage.

    The stage is mapped to a DSS environment via MATRIX_ENV_TO_DSS_ENV;
    "prod" has a dedicated hostname, all other environments are addressed
    as a subdomain.
    """
    dss_env = MATRIX_ENV_TO_DSS_ENV[deployment_stage]
    if dss_env == "prod":
        swagger_url = "https://dss.data.humancellatlas.org/v1/swagger.json"
    else:
        swagger_url = f"https://dss.{dss_env}.data.humancellatlas.org/v1/swagger.json"
    logger.info(f"ETL: Hitting DSS with Swagger URL: {swagger_url}")
    # Build a fresh config so the swagger URL is not inherited from defaults.
    dss_config = hca.HCAConfig()
    dss_config['DSSClient'] = {}
    dss_config['DSSClient']['swagger_url'] = swagger_url
    client = hca.dss.DSSClient(config=dss_config)
    return client
5,329,301
def bounce_off(bounce_obj_rect: Rect, bounce_obj_speed, hit_obj_rect: Rect, hit_obj_speed):
    """
    Pure counterpart of `bounce_off_ip`.

    Instead of mutating `bounce_obj_rect` and `bounce_obj_speed` in place,
    work on copies and hand the results back.

    @return A tuple (`new_bounce_obj_rect`, `new_bounce_obj_speed`)
    """
    rect_copy = bounce_obj_rect.copy()
    speed_copy = bounce_obj_speed.copy()
    bounce_off_ip(rect_copy, speed_copy, hit_obj_rect, hit_obj_speed)
    return rect_copy, speed_copy
5,329,302
async def test_storage_is_removed_idempotent(hass):
    """Test entity map storage removal is idempotent."""
    await setup_platform(hass)
    entity_map = hass.data[ENTITY_MAP]
    hkid = '00:00:00:00:00:01'
    # Precondition: the id was never stored.
    assert hkid not in entity_map.storage_data
    # Deleting an entry that does not exist must not raise or add state.
    entity_map.async_delete_map(hkid)
    assert hkid not in entity_map.storage_data
5,329,303
def Landsat_Reflect(Bands, input_folder, Name_Landsat_Image, output_folder, shape_lsc, ClipLandsat, Lmax, Lmin, ESUN_L5, ESUN_L7, ESUN_L8, cos_zn, dr, Landsat_nr, proyDEM_fileName):
    """
    Calculate and return the per-band reflectance and spectral radiance
    from a Landsat 5/7/8 image.

    Returns:
        (Reflect, Spec_Rad): reflectance clipped to [0, 1] and spectral
        radiance, each shaped (rows, cols, 7).

    Raises:
        ValueError: if Landsat_nr is not 5, 7 or 8.
    """
    # Solar exo-atmospheric irradiance table per supported Landsat generation.
    esun_table = {5: ESUN_L5, 7: ESUN_L7, 8: ESUN_L8}
    if Landsat_nr not in esun_table:
        # Previously this only printed a warning and then crashed with a
        # NameError on the undefined L_lambda; fail fast instead.
        raise ValueError('Landsat image not supported, use Landsat 5, 7 or 8')
    ESUN = esun_table[Landsat_nr]
    Spec_Rad = np.zeros((shape_lsc[1], shape_lsc[0], 7))
    Reflect = np.zeros((shape_lsc[1], shape_lsc[0], 7))
    # Only the first six entries of Bands are reflective bands.
    for band in Bands[:-(len(Bands) - 6)]:
        # Open original Landsat image for the band number
        src_FileName = os.path.join(input_folder, '%s_B%1d.TIF' % (Name_Landsat_Image, band))
        ls_data = Open_landsat(src_FileName, proyDEM_fileName)
        ls_data = ls_data * ClipLandsat
        index = np.where(Bands[:-(len(Bands) - 6)] == band)[0][0]
        # Spectral radiance, then reflectivity, for this band.
        L_lambda = Landsat_L_lambda(Lmin, Lmax, ls_data, index, Landsat_nr)
        rho_lambda = Landsat_rho_lambda(L_lambda, ESUN, index, cos_zn, dr)
        Spec_Rad[:, :, index] = L_lambda
        Reflect[:, :, index] = rho_lambda
    Reflect = Reflect.clip(0.0, 1.0)
    return (Reflect, Spec_Rad)
5,329,304
def plot_footprint(img_file_name, camera_file, reference_dem, output_directory=None, basemap='ctx', cam_on=True, verbose=False):
    # TODO
    # - Add tsai camera plotting.
    """
    Plot a camera footprint polygon (and optionally camera positions) for an
    image, derived from its camera file and a reference DEM. Saves a PNG to
    output_directory when given, otherwise shows the figure interactively.
    """
    out_dir_abs = bare.io.create_dir(output_directory)
    img_base_name = os.path.splitext(os.path.split(img_file_name)[-1])[0]
    cam_extension = os.path.splitext(camera_file)[-1]
    footprint_polygon = prepare_footprint(img_file_name, camera_file, reference_dem)
    # prepare_footprint presumably returns a non-GeoDataFrame on failure —
    # in that case the function silently does nothing (confirm intended).
    if type(footprint_polygon) == gpd.geodataframe.GeoDataFrame:
        print('Plotting camera footprint.')
        if basemap == 'ctx':
            # Reproject to web mercator to match contextily tiles.
            footprint_polygon = footprint_polygon.to_crs(epsg=3857)
        footprint_polygon = bare.geospatial.extract_polygon_centers(footprint_polygon)
        fig, ax = plt.subplots(1, figsize=(10, 10))
        footprint_polygon.plot(ax=ax, facecolor="none", edgecolor='b')
        if cam_on == True:
            if cam_extension == '.xml':
                ax.set_title('camera footprint and scanner positions')
                camera_positions = bare.core.wv_xml_to_gdf(camera_file)
                if basemap == 'ctx':
                    camera_positions = camera_positions.to_crs(epsg=3857)
                # add coordinates as seperate columns to gdf
                bare.geospatial.extract_gpd_geometry(camera_positions)
                # annotate start and end of aquisition
                plt.annotate(s='start', xy=(camera_positions.iloc[0].x, camera_positions.iloc[0].y), horizontalalignment='center')
                plt.annotate(s='end', xy=(camera_positions.iloc[-1].x, camera_positions.iloc[-1].y), horizontalalignment='center')
            elif cam_extension == '.tsai':
                ax.set_title('camera footprint and position')
                camera_positions = bare.core.tsai_to_gdf(camera_file)
                if basemap == 'ctx':
                    camera_positions = camera_positions.to_crs(epsg=3857)
                # # Not sure if this is useful to be labeled for tsai.
                # # add coordinates as seperate columns to gdf
                # bare.geospatial.extract_gpd_geometry(camera_positions)
                # # annotate camera position
                # plt.annotate(s='camera position',
                #              xy=(camera_positions.iloc[-1].x, camera_positions.iloc[-1].y),
                #              horizontalalignment='center')
            if basemap == 'ctx':
                camera_positions = camera_positions.to_crs(epsg=3857)
            camera_positions.plot(ax=ax, marker='.', color='b')
            # NOTE(review): camera_type is hard-coded to '.xml' even when the
            # camera file is a .tsai — confirm this is intentional.
            line0, line1, line2, line3 = plot_cam(footprint_polygon, camera_positions, basemap=basemap, camera_type='.xml')
            line0.plot(ax=ax, color='b')
            line1.plot(ax=ax, color='b')
            line2.plot(ax=ax, color='b')
            line3.plot(ax=ax, color='b')
        else:
            ax.set_title('camera footprint')
        if basemap == 'ctx':
            ctx.add_basemap(ax)
        for idx, row in footprint_polygon.iterrows():
            plt.annotate(s=row['file_name'], xy=row['polygon_center'], horizontalalignment='center')
        if out_dir_abs is not None:
            out = os.path.join(out_dir_abs, img_base_name + '_footprint.png')
            fig.savefig(out, bbox_inches="tight")
            plt.close()
        else:
            plt.show()
    else:
        pass
5,329,305
def second(lst):
    """Return the second element of *lst*; shorthand for first(nxt(lst))."""
    rest = nxt(lst)
    return first(rest)
5,329,306
def gaussian_noise(height, width): """ Create a background with Gaussian noise (to mimic paper) """ # We create an all white image image = np.ones((height, width)) * 255 # We add gaussian noise cv2.randn(image, 235, 10) return Image.fromarray(image).convert("RGBA")
5,329,307
def draw_box(image, box, color):
    """Draw 1-pixel-wide bounding box edges on the given image array.

    box: (y1, x1, y2, x2) pixel coordinates.
    color: list of 3 int values for RGB.

    NOTE(review): the edges are sliced inconsistently — the top edge spans
    x1:x2 while the bottom spans x1:x2+1, and verticals stop at y2-1;
    corners are therefore not uniformly covered. Confirm whether that is
    intentional before relying on exact pixel coverage.
    """
    y1, x1, y2, x2 = box
    image[y1:y1 + 1, x1:x2] = color
    image[y2:y2 + 1, x1:(x2+1)] = color
    image[y1:y2, x1:x1 + 1] = color
    image[y1:y2, x2:x2 + 1] = color
    return image
5,329,308
def get_statuses_one_page(weibo_client, max_id=None):
    """Fetch one page of the user's Weibo timeline.

    When *max_id* is truthy it is forwarded to the API so the page starts
    from that status; otherwise the first page is fetched.
    """
    timeline = weibo_client.statuses.user_timeline
    if max_id:
        return timeline.get(max_id=max_id)
    return timeline.get()
5,329,309
def do_nothing(ax):
    """No-op watermark hook: hand the axes back untouched."""
    return ax
5,329,310
def exec_lm_pipe(taskstr):
    """
    Execute a pipe of Load Module calls.

    Input: taskstr contains LM calls separated by ;
    Used for execute config callback parameters (IRQs and BootHook).

    Returns True when all commands were scheduled (individual command
    failures are only logged); False when an exception aborted the pipe.
    """
    try:
        # Handle config default empty value (do nothing)
        if taskstr.startswith('n/a'):
            return True
        # Execute individual commands - msgobj->"/dev/null"
        for cmd in (cmd.strip().split() for cmd in taskstr.split(';')):
            if not exec_lm_core_schedule(cmd):
                console_write("|-[LM-PIPE] task error: {}".format(cmd))
    except Exception as e:
        console_write("[IRQ-PIPE] error: {}\n{}".format(taskstr, e))
        errlog_add('exec_lm_pipe error: {}'.format(e))
        return False
    return True
5,329,311
def term_size(): """Print out a sequence of ANSI escape code which will report back the size of the window. """ # ESC 7 - Save cursor position # ESC 8 - Restore cursor position # ESC [r - Enable scrolling for entire display # ESC [row;colH - Move to cursor position # ESC [6n - Device Status Report - send ESC [row;colR repl= None if 'repl_source' in dir(pyb): repl = pyb.repl_source() if repl is None: repl = pyb.USB_VCP() repl.send(b'\x1b7\x1b[r\x1b[999;999H\x1b[6n') pos = b'' while True: char = repl.recv(1) if char == b'R': break if char != b'\x1b' and char != b'[': pos += char repl.send(b'\x1b8') (height, width) = [int(i, 10) for i in pos.split(b';')] return height, width
5,329,312
def fix_whitespace(fname):
    """Fix whitespace in a file.

    Strips trailing whitespace from every line and normalizes line endings
    to Unix (\\n), rewriting the file in place. Prints a summary of what
    was changed, if anything.
    """
    with open(fname, "rb") as fo:
        original_contents = fo.read()
    # Universal-newline translation done by hand: the old "rU" open mode
    # was removed in Python 3.11.
    contents = original_contents.decode("utf-8").replace("\r\n", "\n").replace("\r", "\n")
    lines = contents.split("\n")
    fixed = 0
    for k, line in enumerate(lines):
        new_line = line.rstrip()
        if len(line) != len(new_line):
            lines[k] = new_line
            fixed += 1
    new_contents = "\n".join(lines)
    with open(fname, "wb") as fo:
        # Encode explicitly: writing str to a binary file raises TypeError
        # in Python 3 (the original code did exactly that).
        fo.write(new_contents.encode("utf-8"))
    if fixed or contents.encode("utf-8") != original_contents:
        print("************* %s" % os.path.basename(fname))
        if fixed:
            slines = "lines" if fixed > 1 else "line"
            print("Fixed trailing whitespace on %d %s" % (fixed, slines))
        if contents.encode("utf-8") != original_contents:
            print("Fixed line endings to Unix (\\n)")
5,329,313
def group_sharing(msg):
    """
    Handle a sharing message posted in a group chat.

    Forwards the share to the file helper with a prefix identifying the
    group and the sender, but only when both group listening and sharing
    listening are enabled and the group is on the listen list.

    :param msg: incoming group message
    :return: None
    """
    if not (config.IS_LISTEN_GROUP and config.IS_LISTEN_SHARING):
        return
    group = msg.chat.name
    # Only groups registered in the database are monitored.
    if group not in DataBase.all_listen_group():
        return
    sender = msg.member.name
    msg.forward(bot.file_helper, prefix='【{0}】:【{1}】群成员【{2}】分享了:'.format(
        msg.receive_time.strftime("%Y-%m-%d %H:%M:%S"), group, sender))
    return
5,329,314
def _sum_change_costs(project_id, status):
    """Total change_cost of a project's changes in *status* (0 when none)."""
    total = Change.objects.filter(
        project_id=project_id, change_status=status).aggregate(
        Sum('change_cost'))['change_cost__sum']
    return 0 if total is None else total


def get_dashboard(request, project_id):
    """
    Load Project Dashboard to display Latest Cost Estimate and List of Changes.

    Aggregates change costs per workflow status (accepted, pending, WiP,
    rejected) and renders the project dashboard template.
    """
    project = get_object_or_404(Project, id=project_id)
    # required to determine permission of user,
    # if not a project user then project owner
    try:
        project_user = ProjectUser.objects.get(
            project=project, project_user=request.user)
    except ProjectUser.DoesNotExist:
        project_user = None
    form = ChangeForm()
    attachmentsForm = ChangeAttachmentsForm()
    changes = Change.objects.filter(project_id=project_id)
    # Calculations to display on dashboard — one aggregate per status
    # (previously four copy-pasted blocks).
    original_estimate = project.original_estimate
    accepted_changes = _sum_change_costs(project_id, "A")
    pending_changes = _sum_change_costs(project_id, "P")
    wip_changes = _sum_change_costs(project_id, "WiP")
    rejected_changes = _sum_change_costs(project_id, "R")
    subtotal = original_estimate + accepted_changes
    total = subtotal + pending_changes + wip_changes
    context = {
        'project': project,
        'project_user': project_user,
        'form': form,
        'attachmentsForm': attachmentsForm,
        'changes': changes,
        'original_estimate': original_estimate,
        'accepted_changes': accepted_changes,
        'pending_changes': pending_changes,
        'wip_changes': wip_changes,
        'rejected_changes': rejected_changes,
        'subtotal': subtotal,
        'total': total,
    }
    return render(request, 'dashboard/project.html', context)
5,329,315
def _scan_real_end_loop(bytecode, setuploop_inst): """Find the end of loop. Return the instruction offset. """ start = setuploop_inst.next end = start + setuploop_inst.arg offset = start depth = 0 while offset < end: inst = bytecode[offset] depth += inst.block_effect if depth < 0: return inst.next offset = inst.next
5,329,316
def six_node_range_5_to_0_bst():
    """Build a six-node BST from the values five down to zero."""
    return BST([5, 4, 3, 2, 1, 0])
5,329,317
def hello_datamine(name: str = 'student') -> None:
    """Prints a hello message to a Data Mine student.

    Args:
        name (str, optional): The name of a student. Defaults to 'student'.
    """
    msg = f'Hello {name}! Welcome to The Data Mine!'
    print(msg)
5,329,318
def IndividualsInAlphabeticOrder(filename):
    """Check that the author names listed under '# Individuals:' are in
    case-insensitive alphabetic order; print a diagnostic and return a bool."""
    with open(filename, 'r') as f:
        lines = f.readlines()
    individual_header = '# Individuals:\n'
    if individual_header not in lines:
        print("Cannot find line '# Individuals:' in file.")
        return False
    individual_authors = lines[lines.index(individual_header) + 1:]
    sorted_authors = sorted(individual_authors, key=str.casefold)
    if sorted_authors != individual_authors:
        print("Individual authors are not sorted alphabetically."
              " The expected order is:")
        print(''.join(sorted_authors))
        return False
    print("Individual authors are sorted alphabetically.")
    return True
5,329,319
def preprocess_img_imagenet(img_path):
    """Preprocessing required for ImageNet classification.

    Loads an image, applies the standard 224x224 resize/center-crop and
    ImageNet mean/std normalization, and returns a batched NCHW numpy array.

    Reference: https://github.com/onnx/models/tree/master/vision/classification/vgg
    """
    # Imported lazily so the heavy deps are only needed when preprocessing.
    import mxnet
    from mxnet.gluon.data.vision import transforms
    from PIL import Image
    img = Image.open(img_path)
    img = mxnet.ndarray.array(img)
    transform_fn = transforms.Compose(
        [
            transforms.Resize(224),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            # Standard ImageNet channel means / standard deviations.
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]
    )
    img = transform_fn(img)
    img = img.expand_dims(axis=0)  # Batchify.
    return img.asnumpy()
5,329,320
def create_lambertian(color):
    """Create a Lambertian (pure diffuse) Blender material of *color*.

    Replaces the default Principled BSDF with a Diffuse BSDF node wired
    straight into the material output.
    """
    material = bpy.data.materials.new(name="Lambertian")
    material.use_nodes = True
    nodes = material.node_tree.nodes
    # remove principled
    material.node_tree.nodes.remove(
        material.node_tree.nodes.get('Principled BSDF'))
    # get material output
    material_output = material.node_tree.nodes.get('Material Output')
    # Add a diffuse shader and set its location:
    diffuse_node = nodes.new('ShaderNodeBsdfDiffuse')
    diffuse_node.inputs['Color'].default_value = color
    # link diffuse shader to material
    material.node_tree.links.new(
        material_output.inputs[0], diffuse_node.outputs[0])
    return material
5,329,321
def submission_view(request, locker_id, submission_id):
    """Displays an individual submission.

    Builds prev/next navigation (older/newer/oldest/newest), permission
    flags for discussion/workflow sidebars, and a deletion warning.
    """
    submission = get_object_or_404(Submission, pk=submission_id)
    # NOTE(review): newer()/older() may presumably return None at the ends
    # of the sequence, in which case `newer.id`/`older.id` below would
    # raise — confirm against the Submission model.
    newer = submission.newer()
    newest = Submission.objects.newest(submission.locker)
    if not newest:
        newest = submission
    oldest = Submission.objects.oldest(submission.locker)
    if not oldest:
        oldest = submission
    older = submission.older()
    discussion_enabled = submission.locker.discussion_enabled()
    is_owner = submission.locker.owner == request.user
    users_discussion = submission.locker.discussion_users_have_access()
    users_workflow = submission.locker.workflow_users_can_edit()
    workflow_enabled = submission.locker.workflow_enabled()
    # generate a message to the user if the submission is deleted
    if submission.deleted:
        messages.warning(request,
                         u'<strong>Heads up!</strong> This submission has '
                         u'been deleted and <strong>will be permanently '
                         u'removed</strong> from the locker '
                         u'<strong>{}</strong>.'
                         u''.format(naturaltime(submission.purge_date)))
    return render(request, 'datalocker/submission_view.html', {
        'data': submission.data_dict(with_types=True),
        'discussion_enabled': discussion_enabled,
        'discussion_users_have_access': users_discussion or is_owner,
        'newer': newer,
        'newer_disabled': True if submission.id == newer.id else False,
        'newest': newest,
        'newest_disabled': True if submission.id == newest.id else False,
        'older': older,
        'older_disabled': True if submission.id == older.id else False,
        'oldest': oldest,
        'oldest_disabled': True if submission.id == oldest.id else False,
        'sidebar_enabled': workflow_enabled or discussion_enabled,
        'submission': submission,
        'workflow_enabled': workflow_enabled,
        'workflow_states': submission.locker.workflow_states(),
        'workflow_state': submission.workflow_state,
        'workflow_users_can_edit': users_workflow or is_owner,
    })
5,329,322
def add_filename_suffix(file_path: str, suffix: str) -> str:
    """
    Append a suffix to the filename, before the extension.

    Args:
        file_path: Path to the file, as a string.
        suffix: Text to insert between the stem and the extension.

    Returns:
        The path with *suffix* appended at the end of the filename and
        before the extension (e.g. ``a/b.txt`` + ``_v2`` -> ``a/b_v2.txt``).
    """
    path = Path(file_path)
    return str(path.parent.joinpath(path.stem + suffix).with_suffix(path.suffix))
5,329,323
def list_dropdownTS(dic_df):
    """Build dropdown choices from the continuous-variable specs in *dic_df*.

    Returns a list starting with the '-' placeholder followed by each
    variable's display name, used to populate a dropdown menu in the
    eventHandler function.
    """
    names = [spec['name'] for spec in dic_df['var_continuous'].values()]
    return ['-'] + names
5,329,324
def _check_declared_tests_exist(config, tests_root_dir):
    """Check that all the tests declared in the config file correspond to
    actual valid test functions.

    Raises AssertionError (after logging) when a configured suite or test
    is missing; also logs which discovered test functions are unused.
    """
    logging.info("Checking that configured tests exist")
    test_functions = discover_all_test_functions(tests_root_dir)
    # Start from all discovered functions and subtract the configured ones.
    unused_test_functions = deepcopy(test_functions)
    try:
        # soft_assertions collects all failures before raising one error.
        with soft_assertions():
            for dirname, configured_tests in config["test-suites"].items():
                assert_that(test_functions).contains_key(dirname)
                assert_that(test_functions.get(dirname, [])).contains(*[test for test in configured_tests.keys()])
                unused_test_functions[dirname] = list(unused_test_functions[dirname] - configured_tests.keys())
    except AssertionError as e:
        logging.error("Some of the configured tests do not exist: %s", e)
        raise
    logging.info("Found following unused test functions: %s", json.dumps(unused_test_functions, indent=2))
5,329,325
def get_voice_combinations(**kwargs): """ Gets k possible combinations of voices from a list of voice indexes. If k is None, it will return all possible combinations. The combinations are of a minimum size min_n_voices_to_remove and a max size max_n_voices_to_remove. When choosing a k number a combinations from all possible combinations, the probability of choosing a combination of a number of voices above another can be passed with the prob list, where for a range of voices to remove from 1 to 3, [1, 1, 1] indicates equal probability, [1,1,2] indicates that combinations with 3 voices have double probability of getting chosen, etc. @param kwargs: see below @return voice_idx_comb: combinations of voice indexes """ # list of voices to remove voice_idx = kwargs.get("voice_idx", [0, 1, 2, 3, 4]) min_n_voices_to_remove = kwargs.get( "min_n_voices_to_remove", 1) # min size of the combination max_n_voices_to_remove = kwargs.get( "max_n_voices_to_remove", 3) # max size of the combination # prob of each n_voices_to_remove set in ascending order prob = kwargs.get("prob", [1, 1, 1]) k = kwargs.get("k", 5) # max number of combinations to return if len(voice_idx) < max_n_voices_to_remove: max_n_voices_to_remove = len(voice_idx) range_items = range(min_n_voices_to_remove, max_n_voices_to_remove + 1) assert (len(prob) == len( range_items)), "The prob list must be the same length as the range(min_n_voices_to_remove, max_n_voices_to_remove)" voice_idx_comb = [] weights = [] for i, n_voices_to_remove in enumerate(range_items): _voice_idx_comb = list(itertools.combinations( voice_idx, n_voices_to_remove)) voice_idx_comb.extend(_voice_idx_comb) _weights = list(np.repeat(prob[i], len(_voice_idx_comb))) weights.extend(_weights) if k is not None: # if there is no k, return all possible combinations voice_idx_comb = random.choices(voice_idx_comb, weights=weights, k=k) return list(voice_idx_comb)
5,329,326
def del_contact(credential):
    """Delete the given credential record (delegates to the object's own
    delete_credential method)."""
    credential.delete_credential()
5,329,327
def data(request):
    """Main entry point to the Data tab.

    Serves a cached template context when available; otherwise rebuilds it
    and caches it for 29 seconds.
    """
    context = cache.get("data_tab_context")
    if context is None:
        context = data_context(request)
        cache.set("data_tab_context", context, 29)
    return render(request, "rundb/data/data.html", context)
5,329,328
def setup_root(name: str) -> DLogger:
    """Create the root logger and install colored console output."""
    logger = get_logger(name)
    msg_format = "%(message)s"
    # Per-level color styling for coloredlogs output.
    level_style = {
        "critical": {"color": "magenta", "bright": True, "bold": True},
        "debug": {"color": "green", "bright": True, "bold": True},
        "error": {"color": "red", "bright": True, "bold": True},
        "info": {"color": 4, "bright": True, "bold": True},
        "notice": {"color": "magenta", "bright": True, "bold": True},
        "spam": {"color": "green", "faint": True},
        "success": {"color": "green", "bright": True, "bold": True},
        "verbose": {"color": "blue", "bright": True, "bold": True},
        "warning": {"color": "yellow", "bright": True, "bold": True},
    }
    coloredlogs.install(fmt=msg_format, level_styles=level_style, level="INFO")
    return logger
5,329,329
def from_pickle(
    filepath: typing.Union[str, pathlib.Path, typing.IO[bytes]]
) -> typing.Union[Categorization, HierarchicalCategorization]:
    """De-serialize Categorization or HierarchicalCategorization from a file
    written by to_pickle.

    Note that this uses the pickle module, which executes arbitrary code in
    the provided file. Only load from pickle files that you trust."""
    # EAFP: pickle.load raises TypeError when handed a path instead of a
    # binary file object, so fall back to opening the path ourselves.
    try:
        spec = pickle.load(filepath)
    except TypeError:
        with open(filepath, "rb") as fd:
            spec = pickle.load(fd)
    return from_spec(spec)
5,329,330
def recostruct(encoded, weights, bias):
    """
    Reconstructor : Encoded -> Original

    Applies the decoder layers (the encoder weights in reverse order) to
    *encoded*: encoded @ W.eval() + bias[i].eval() per layer.

    Fix: iterate over a reversed view instead of calling weights.reverse(),
    which mutated the caller's list in place as a side effect.
    """
    for i, item in enumerate(reversed(weights)):
        encoded = encoded @ item.eval() + bias[i].eval()
    return encoded
5,329,331
def add_parser_arguments(parser):
    """
    Add the arguments required by all types of criterion here. Please add
    task-specific criterion arguments into the function of the same name
    in 'task/xxx/criterion.py'.

    Intentionally a no-op: there are currently no shared criterion
    arguments, but the hook is kept so every criterion module has a
    uniform interface.
    """
    pass
5,329,332
def adjust_learning_rate(optimizer, epoch):
    """Decay every parameter group's learning rate by a factor of 0.1
    once every 10 epochs (i.e. when the 1-based epoch is a multiple of 10)."""
    if (epoch + 1) % 10 == 0:
        for group in optimizer.param_groups:
            group['lr'] = group['lr'] * 0.1
5,329,333
def get_file_dataset_from_trixel_id(CatName, index, NfilesinHDF, Verbose=True):  # get_file_var_from_htmid in Eran's library
    """Build the HDF5 file name and dataset name for a trixel.

    Given a catalog basename, a trixel index and the number of datasets
    stored per HDF5 file, derive which file holds the trixel and the name
    of its dataset within that file.

    Input  :- CatName: catalog basename
            - index: trixel index
            - NfilesinHDF: number of datasets per HDF5 file (typically 100)
    Output :- Filename: HDF5 file holding the trixel dataset
            - DatasetName: name of the trixel dataset
    By : Maayane Soumagnac (original Matlab function by Eran Ofek) August 2018
    """
    if Verbose == True:
        print('index is', index)
    # Integer floor-division replaces math.floor(index / N) * N.
    file_index = (index // NfilesinHDF) * NfilesinHDF
    return '%s_htm_%06d.hdf5' % (CatName, file_index), 'htm_%06d' % index
5,329,334
def calc_element_column(NH, fmineral, atom, mineral, d2g=0.009):
    """
    Calculate the column density of an element for a particular NH value,
    assuming a dust-to-gas ratio (d2g) and the fraction of dust in that
    particular mineral species (fmineral).
    """
    # mp is presumably the proton mass defined at module level — confirm
    # units are consistent (result annotated as g cm^-2).
    dust_mass = NH * mp * d2g * fmineral  # g cm^{-2}
    print('Dust mass = %.3e g cm^-2' % (dust_mass))
    return calc_mass_conversion(atom, mineral) * dust_mass
5,329,335
def test_retrieve_notification_as_owner(notification):
    """Tests if a logged in user can retrieve it's own notification."""
    # Authenticate the API client as the notification's owner.
    client = get_api_client(user=notification.user)
    url = _get_notification_url(notification)
    response = client.get(url)
    assert response.status_code == status.HTTP_200_OK
5,329,336
def output_dot(sieve, column_labels=None, max_edges=None, filename='structure.dot'):
    """
    A network representation of the structure in Graphviz format.

    Units in the produced file are in bits. Weight is the mutual information
    and tc is the total correlation.

    Ported to Python 3: print() function, str instead of unicode, and
    text-mode writes instead of per-line .encode() calls.
    """
    print("""Compile by installing graphviz and running a command like:
sfdp %s -Tpdf -Earrowhead=none -Nfontsize=12 \\
-GK=2 -Gmaxiter=1000 -Goverlap=False -Gpack=True \\
-Gpackmode=clust -Gsep=0.02 -Gratio=0.7 -Gsplines=True -o structure.pdf""" % filename)
    if column_labels is None:
        column_labels = [str(i) for i in range(sieve.n_variables)]
    else:
        column_labels = [str(label) for label in column_labels]
    with open(filename, 'w', encoding='utf-8') as f:
        f.write('strict digraph {\n')
        # One node per input variable.
        for i, column_label in enumerate(column_labels):
            f.write('%s [label="%s", shape=none]\n' % ('X_' + column_label, column_label))
        # One node per learned layer, sized by its total correlation.
        for j, layer in enumerate(sieve.layers):
            this_tc = 0.6 * sieve.tcs[j] / np.max(sieve.tcs)
            f.write('Y_%d [shape=circle,margin="0,0",style=filled,fillcolor=black,'
                    'fontcolor=white,height=%0.3f,label=Y%d,tc=%0.3f]\n'
                    % (j, this_tc, j + 1, sieve.tcs[j] / np.log(2)))
        mis = sieve.mis
        print('mis', mis)
        # Keep only the max_edges strongest mutual-information edges.
        if max_edges is None or max_edges > mis.size:
            w_threshold = 0.
        else:
            w_threshold = -np.sort(-np.ravel(mis))[max_edges]
        for j, layer in enumerate(sieve.layers):
            for i in range(sieve.n_variables):
                w = mis[j, i] / np.log(2)
                if w > w_threshold:
                    f.write('%s -> %s [penwidth=%0.3f, weight=%0.3f];\n'
                            % ('X_' + str(i), 'Y_' + str(j), 2 * w, w))
            for j2 in range(0, j):
                w = mis[j, sieve.n_variables + j2] / np.log(2)
                if w > w_threshold:
                    f.write('%s -> %s [penwidth=%0.3f, weight=%0.3f];\n'
                            % ('Y_' + str(j2), 'Y_' + str(j), 2 * w, w))
        f.write('}')
    return True
5,329,337
def projectSimplex_vec(v):
    """Project vector *v* onto the probability simplex.

    Parameter
    ---------
    v: shape(nVars,) input vector

    Returns
    -------
    w: shape(nVars,) Euclidean projection of v onto the simplex
       {w : w >= 0, sum(w) == 1}.
    """
    n = v.shape[0]
    # Sort descending and form running sums of the sorted values.
    mu = np.sort(v, kind='quicksort')[::-1]
    csum = np.cumsum(mu)
    # Largest index k with mu_k - (sum_{i<=k} mu_i - 1)/k > 0.
    positive = (mu - 1. / np.arange(1, n + 1) * (csum - 1)) > 0
    last_idx = len(positive) - 1 - positive[::-1].argmax()
    theta = 1. / (last_idx + 1) * (csum[last_idx] - 1)
    return np.maximum(v - theta, 0.)
5,329,338
def display_c(c, font, screen, lcd, size=5, x=0, y=0):
    """
    Displays a character in the given `font` with top-left corner at the
    specified `x` and `y` coordinates.

    `c`: A character
    `font`: A font dictionary mapping characters to images
    `size`: An integer from 1-10, 10 being max size that can fit the display
    """
    char = font[str(c)]
    width, height = char.size
    # Dead code kept for reference: earlier resizing experiment.
    """
    if not(size == 10): size /= 10.0 width = int(round(size*width)) height = int(round(size*height)) char.resize((width,height))
    """
    # Scale the 1-10 size to the units display_img expects.
    size = int(round(size * 10))
    images.display_img(char, screen, lcd, size, x, y)
5,329,339
def render_list(something: Collection, threshold: int, tab: str) -> List[str]:
    """Render a list-like collection into zero-padded, numbered repr lines.

    Elements whose length exceeds *threshold* are delegated to ``render``
    with an extra level of indentation; everything else becomes a single
    '<tab><number>| <repr>' line.
    """
    rendered: List[str] = []
    width_fmt = '{:0' + str(len(str(len(something)))) + 'd}'
    for position, element in enumerate(something, start=1):
        if isinstance(element, Sized) and len(element) > threshold:
            nested: List[str] = []
            render(element, threshold, nested, tab + '\t')
            rendered.extend(nested)
        else:
            rendered.append(f'{tab}{width_fmt.format(position)}| {element!r}')
    return rendered
5,329,340
def combine_result(
    intent_metrics: IntentMetrics,
    entity_metrics: EntityMetrics,
    response_selection_metrics: ResponseSelectionMetrics,
    interpreter: Interpreter,
    data: TrainingData,
    intent_results: Optional[List[IntentEvaluationResult]] = None,
    entity_results: Optional[List[EntityEvaluationResult]] = None,
    response_selection_results: Optional[
        List[ResponseSelectionEvaluationResult]
    ] = None,
) -> Tuple[IntentMetrics, EntityMetrics, ResponseSelectionMetrics]:
    """Collects intent, response selection and entity metrics for cross
    validation folds.

    If `intent_results`, `response_selection_results` or `entity_results`
    is provided as a list, prediction results are also collected (the lists
    are extended in place).

    Args:
        intent_metrics: intent metrics
        entity_metrics: entity metrics
        response_selection_metrics: response selection metrics
        interpreter: the interpreter
        data: training data
        intent_results: intent evaluation results
        entity_results: entity evaluation results
        response_selection_results: response selection evaluation results

    Returns:
        intent, entity, and response selection metrics
    """
    (
        intent_current_metrics,
        entity_current_metrics,
        response_selection_current_metrics,
        current_intent_results,
        current_entity_results,
        current_response_selection_results,
    ) = compute_metrics(interpreter, data)
    # Accumulate fold-level prediction results when caller passed lists.
    if intent_results is not None:
        intent_results += current_intent_results
    if entity_results is not None:
        entity_results += current_entity_results
    if response_selection_results is not None:
        response_selection_results += current_response_selection_results
    # Merge per-metric value lists into the running totals.
    for k, v in intent_current_metrics.items():
        intent_metrics[k] = v + intent_metrics[k]
    for k, v in response_selection_current_metrics.items():
        response_selection_metrics[k] = v + response_selection_metrics[k]
    for extractor, extractor_metric in entity_current_metrics.items():
        entity_metrics[extractor] = {
            k: v + entity_metrics[extractor][k] for k, v in extractor_metric.items()
        }
    return intent_metrics, entity_metrics, response_selection_metrics
5,329,341
def get_fns_for_jobid(jobid):
    """Given a job ID number, return a list of that job's data files.

    Input:
        jobid: The ID number from the job-tracker DB to get files for.
    Output:
        fns: A list of data files associated with the job ID.
    """
    import jobtracker
    # NOTE(review): %d interpolation is only safe because jobid must be an
    # int; prefer a parameterized query if jobtracker.query supports it.
    query = "SELECT filename " \
            "FROM files, job_files " \
            "WHERE job_files.file_id=files.id " \
            "AND job_files.job_id=%d" % jobid
    rows = jobtracker.query(query)
    fns = [str(row['filename']) for row in rows]
    return fns
5,329,342
def linear_schedule(initial_value: float):
    """
    Linear learning rate schedule.

    :param initial_value: Initial learning rate.
    :return: schedule that computes current learning rate depending on
        remaining progress
    """
    def schedule(progress_remaining: float) -> float:
        """Scale the initial rate by the remaining progress (1 -> 0)."""
        return initial_value * progress_remaining
    return schedule
5,329,343
def download(os_list, software_list, dst):
    """Download the remaining components according to the software list.

    MindSpore entries are skipped; every other entry is fetched for the
    architecture derived from *os_list* into *dst*.

    Returns a dict with 'ok' and 'failed' lists of software names.
    """
    if os_list is None:
        os_list = []
    arch = get_arch(os_list)
    LOG.info('software arch is {0}'.format(arch))
    results = {'ok': [], 'failed': []}
    # MindSpore is handled elsewhere, so filter it out here.
    no_mindspore_list = [software for software in software_list if "MindSpore" not in software]
    for software in no_mindspore_list:
        res = download_software(software, dst, arch)
        if res:
            results['ok'].append(software)
            continue
        results['failed'].append(software)
    return results
5,329,344
def get_hash_bin(shard, salt=b"", size=0, offset=0):
    """Get the hash of the shard.

    Args:
        shard: A file like object representing the shard.
        salt: Optional salt to add as a prefix before hashing.
        size: Number of bytes to hash (0 presumably meaning the whole
            shard — confirm against partialhash.compute).
        offset: Byte offset at which hashing starts.

    Returns:
        Binary digest of ripemd160(sha256(salt + shard)).
    """
    # Rewind before and after so the caller's file position is untouched.
    shard.seek(0)
    digest = partialhash.compute(shard, offset=offset, length=size,
                                 seed=salt, hash_algorithm=hashlib.sha256)
    shard.seek(0)
    return ripemd160(digest).digest()
5,329,345
def upgrade() -> None:
    """Upgrade: create the OAuth2 tables (client, bearer token,
    authorization code) in the static schema."""
    staticschema = config["schema_static"]
    # Instructions
    # Registered OAuth2 client applications.
    op.create_table(
        "oauth2_client",
        Column("id", Integer, primary_key=True),
        Column("client_id", Unicode, unique=True),
        Column("secret", Unicode),
        Column("redirect_uri", Unicode),
        schema=staticschema,
    )
    # Issued access/refresh token pairs; one active row per (client, user).
    op.create_table(
        "oauth2_bearertoken",
        Column("id", Integer, primary_key=True),
        Column(
            "client_id",
            Integer,
            ForeignKey(staticschema + ".oauth2_client.id", ondelete="CASCADE"),
            nullable=False,
        ),
        Column(
            "user_id",
            Integer,
            ForeignKey(staticschema + ".user.id", ondelete="CASCADE"),
            nullable=False,
        ),
        Column("access_token", Unicode(100), unique=True),
        Column("refresh_token", Unicode(100), unique=True),
        Column("expire_at", DateTime(timezone=True)),
        sqlalchemy.schema.UniqueConstraint("client_id", "user_id"),
        schema=staticschema,
    )
    # Short-lived authorization codes for the auth-code grant flow.
    op.create_table(
        "oauth2_authorizationcode",
        Column("id", Integer, primary_key=True),
        Column(
            "client_id",
            Integer,
            ForeignKey(staticschema + ".oauth2_client.id", ondelete="CASCADE"),
            nullable=False,
        ),
        Column(
            "user_id",
            Integer,
            ForeignKey(staticschema + ".user.id", ondelete="CASCADE"),
            nullable=False,
        ),
        Column("redirect_uri", Unicode),
        Column("code", Unicode(100), unique=True),
        Column("expire_at", DateTime(timezone=True)),
        sqlalchemy.schema.UniqueConstraint("client_id", "user_id"),
        schema=staticschema,
    )
5,329,346
def boltzmann_statistic(
    properties: ArrayLike1D,
    energies: ArrayLike1D,
    temperature: float = 298.15,
    statistic: str = "avg",
) -> float:
    """Compute a Boltzmann-weighted statistic over conformer properties.

    Args:
        properties: Conformer properties
        energies: Conformer energies (a.u.)
        temperature: Temperature (K)
        statistic: Statistic to compute: 'avg', 'var' or 'std'

    Returns:
        result: Boltzmann statistic

    Raises:
        ValueError: If ``statistic`` is not 'avg', 'var' or 'std'.
    """
    properties = np.array(properties)

    # Boltzmann population weight of each conformer.
    weights = boltzmann_weights(energies, temperature)

    result: float
    if statistic == "avg":
        result = np.average(properties, weights=weights)
    elif statistic in ("var", "std"):
        # Weighted variance about the weighted mean; std is its square root.
        avg = np.average(properties, weights=weights)
        var = np.sum(weights * (properties - avg) ** 2)
        result = var if statistic == "var" else np.sqrt(var)
    else:
        # Previously an unknown statistic fell through and raised an opaque
        # UnboundLocalError at the return; fail fast with a clear message.
        raise ValueError(f"Unknown statistic: {statistic!r}")

    return result
5,329,347
def test_mypy_unmatched_stdout(testdir):
    """Verify that unexpected output on stdout from mypy is printed."""
    stdout = 'This is unexpected output on stdout from mypy.'
    # Patch mypy.api.run (via a generated conftest) so it reports our
    # sentinel line on stdout with a failing exit status.
    conftest_source = '''
    import mypy.api

    def _patched_run(*args, **kwargs):
        return '{stdout}', '', 1

    mypy.api.run = _patched_run
    '''.format(stdout=stdout)
    testdir.makepyfile(conftest=conftest_source)
    outcome = testdir.runpytest_subprocess('--mypy')
    # The unexpected stdout line must surface in the pytest report.
    outcome.stdout.fnmatch_lines([stdout])
5,329,348
def _check_n_pca_components(ica, _n_pca_comp, verbose=None): """Aux function""" if isinstance(_n_pca_comp, float): _n_pca_comp = ((ica.pca_explained_variance_ / ica.pca_explained_variance_.sum()).cumsum() <= _n_pca_comp).sum() logger.info('Selected %i PCA components by explained ' 'variance' % _n_pca_comp) elif _n_pca_comp is None or _n_pca_comp < ica.n_components_: _n_pca_comp = ica.n_components_ return _n_pca_comp
5,329,349
def main():
    """Driver: un-comment the test calls below one at a time as you work."""
    banner = "Un-comment and re-comment calls in MAIN one by one as you work."
    print()
    print(banner)

    # run_test_multiply_numbers()
    # run_test_print_characters()
    # run_test_print_characters_slanted()
5,329,350
def _append_token(result, indent, s):
    """Flush the buffered token *s* into the list at nesting depth *indent*.

    The token is converted to int or float when it matches the module-level
    ``pattern_int`` / ``pattern_float`` regexes, otherwise appended as a
    plain string (e.g. an attribute name).
    """
    cur = result
    for _ in range(indent):
        cur = cur[-1]
    val = ''.join(s)
    if pattern_int.match(val):
        cur.append(int(val))
    elif pattern_float.match(val):
        cur.append(float(val))
    else:
        cur.append(val)


def parse(text):
    """
    This is what amounts to a simple lisp parser for turning the server's
    returned messages into an intermediate format that's easier to deal with
    than the raw (often poorly formatted) text.

    This parses generally, taking any lisp-like string and turning it into a
    list of nested lists, where each nesting indicates a parenthesized
    expression. Ex: "(baz 0 (foo 1.5))" becomes ['baz', 0, ['foo', 1.5]].

    Raises ValueError when the parentheses in *text* are unbalanced.
    """
    # make sure all of our parenthesis match
    if text.count(b"(") != text.count(b")"):
        raise ValueError("Message text has unmatching parenthesis!")

    # result acts as a stack holding the strings grouped by nested parens;
    # it only ever contains the single top-level message the server sends.
    result = []
    # current nesting depth, used to append tokens at the correct level
    indent = 0
    # buffered characters of the token currently being read
    s = []
    # whether we're currently inside a quoted string
    in_string = False
    # previously seen character (None at the start), used to spot escapes
    prev_c = None

    for c in text.decode():
        # toggle string mode on unescaped quotes; the quotes themselves are
        # not buffered so string values need no later unquoting
        if c == '"' and prev_c != "\\":
            in_string = not in_string

        elif c == "(" and not in_string:
            # any pending token belongs to the enclosing level
            if s:
                _append_token(result, indent, s)
                s = []
            # open a new nesting level
            cur = result
            for _ in range(indent):
                cur = cur[-1]
            cur.append([])
            indent += 1

        elif c == ")" and not in_string:
            # flush the final token of this level before dedenting
            if s:
                _append_token(result, indent, s)
                s = []
            indent -= 1

        # non-space characters extend the current token; spaces delimit
        elif c != " ":
            s.append(c)

        elif c == " " and s:
            _append_token(result, indent, s)
            s = []

        # remember the previous character so escapes can be detected
        prev_c = c

    # the first (and only) top-level expression is the parsed message
    return result[0]
5,329,351
def plot_all(graph, plot_id=''):
    """Plot the routing topology graph: one colored trace per node (lane),
    then one trace per edge connecting lane middle points.

    Args:
        graph: protobuf-like object with ``node`` and ``edge`` collections.
        plot_id: identifier forwarded to ``plot_node`` for labelling.
    """
    plt.close()
    fig = plt.figure()
    # Allow interactive inspection: clicks are handled by util.onclick.
    fig.canvas.mpl_connect('button_press_event', util.onclick)
    # Maps lane_id -> middle point of the plotted lane, so edges can be
    # drawn between the lanes they connect.
    lane_middle_point_map = {}
    for i, (nd, color) in enumerate(zip(graph.node, color_iter)):
        nd_mid_pt = plot_node(nd, plot_id, color)
        lane_middle_point_map[nd.lane_id] = nd_mid_pt
    for i, eg in enumerate(graph.edge):
        plot_edge(eg, lane_middle_point_map)
    # Equal aspect so geometry is not distorted.
    plt.gca().set_aspect(1)
    plt.title('Routing topology graph')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.legend()
    plt.draw()
5,329,352
def model_fn():
    """Build an InceptionV3 feature extractor.

    The classification head is removed (``include_top=False``), global max
    pooling is applied, and the broadcast ImageNet-pretrained weights are
    loaded into the model before it is returned.
    """
    network = InceptionV3(
        include_top=False,   # drop the softmax classification layer
        weights='imagenet',  # start from ImageNet-pretrained weights
        pooling='max',       # global max pooling on the last conv block
    )
    # Load the weights broadcast to the workers (avoids re-downloading).
    network.set_weights(bc_model_weights.value)
    return network
5,329,353
def err(msg):
    """Write *msg* to stderr between loud marker lines, then exit(-1).

    The markers exist so the message can be picked out of a long scroll
    of LaTeX and make output.
    """
    for chunk in ("BEGIN PIPELINE ERROR MSG\n", msg, "\n", "END PIPELINE ERROR MSG\n"):
        sys.stderr.write(chunk)
    sys.exit(-1)
5,329,354
def find_correspondance_date(index, csv_file):
    """Return the exam date recorded for one subject.

    :param index: row index of the subject analysed
    :param csv_file: dataframe-like object exposing an ``EXAMDATE`` column
    :return: the exam date stored for that subject
    """
    exam_dates = csv_file.EXAMDATE
    return exam_dates[index]
5,329,355
def get_config(object_config_id):
    """Return the current and previous config of an ObjectConfig row.

    :param object_config_id: primary key of the ObjectConfig record
    :type object_config_id: int
    :return: dict with 'current' and 'previous' entries (each a serialized
        ConfigLog or None), or None when the ObjectConfig row is missing
    :rtype: dict
    """
    fields = ('config', 'attr', 'date', 'description')
    try:
        object_config = ObjectConfig.objects.get(id=object_config_id)
    except ObjectConfig.DoesNotExist:
        return None

    config = {}
    for name in ('current', 'previous'):
        log_id = getattr(object_config, name)
        # A zero/None id means that revision does not exist yet.
        config[name] = get_object(ConfigLog, log_id, fields, ['date']) if log_id else None
    return config
5,329,356
def find_similarity(long_sequence, short_sequence, match, maximum, homology):
    """Slide *short_sequence* along *long_sequence*, find the window with
    the most position-wise matches, and print that best-matching strand
    together with its similarity percentage.

    ``match``, ``maximum`` and ``homology`` are running counters supplied
    by the caller (typically all 0); the incoming ``match`` value seeds
    the count of the very first window only.
    """
    span = len(short_sequence)
    for start in range(len(long_sequence) - span + 1):
        # Count position-wise agreements for this window.
        for offset in range(span):
            if long_sequence[start + offset] == short_sequence[offset]:
                match += 1
        if match > maximum:
            maximum = match
            homology = start
        match = 0
    best = long_sequence[homology:homology + span]
    print('The best match is ' + str(best))
    print('The similarity is: ' + str(100 * (maximum / span)) + '%')
5,329,357
def normalize_to_ascii(char):
    """Strip accents from *char* and return its lowercase ASCII bytes.

    Characters with no ASCII equivalent are dropped, so the result may
    be empty.
    """
    decomposed = unicodedata.normalize("NFKD", char)
    ascii_bytes = decomposed.encode("ascii", "ignore")
    return ascii_bytes.lower()
5,329,358
def verify_certificate_chain(certificate, intermediates, trusted_certs, logger):
    """Verify that *certificate* chains up to *trusted_certs* through
    *intermediates*.

    :param certificate: cryptography.x509.Certificate to validate
    :param intermediates: list of cryptography.x509.Certificate
    :param trusted_certs: list of cryptography.x509.Certificate (roots)
    :param logger: logger used for progress / failure messages

    Uses legacy crypto.X509 functions as no current equivalent in
    https://cryptography.io/en/latest/
    See: https://gist.github.com/uilianries/0459f59287bd63e49b1b8ef03b30d421#file-cert-check-py

    :return: bool — True when the whole chain validates, False otherwise
    """
    try:
        # Create a certificate store and add your trusted certs
        store = crypto.X509Store()
        for tc in trusted_certs:
            store.add_cert(crypto.X509.from_cryptography(tc))

        # Create a certificate context using the store, to check any intermediate certificates.
        # NOTE(review): intermediates are verified in list order, so each one must be
        # verifiable against the certs added before it — confirm callers order them
        # from the root-signed certificate downward.
        for i in intermediates:
            logger.info('| verifying intermediate certificates')
            i_X509 = crypto.X509.from_cryptography(i)
            store_ctx = crypto.X509StoreContext(store, i_X509)
            store_ctx.verify_certificate()
            # no exception, so Intermediate verified - add the intermediate to the store
            store.add_cert(i_X509)

        # Validate certificate against (trusted + intermediate)
        logger.info('| intermediates passed, verifying user certificate')
        store_ctx = crypto.X509StoreContext(store, crypto.X509.from_cryptography(certificate))
        # Verify the certificate, returns None if it can validate the certificate
        store_ctx.verify_certificate()
        logger.info('| user certificate passed')
        return True
    except crypto.X509StoreContextError as e:
        # Any link in the chain failing to verify lands here.
        logger.warning(e)
        return False
5,329,359
def create_forcingfile(meteo_fp, output_file, dir_save, lat, lon, P_unit,
                       timezone=+2.0, fpar=0.45, CO2_constant=False):
    """
    Create forcing file from meteo.
    Args:
        meteo_fp (str): file path to meteofile
        output_file (str): name of output file (.csv not included)
        dir_save (str): output directory
        lat (float): latitude
        lon (float): longitude
        P_unit (float): unit conversion needed to get to [Pa]
        timezone (float): hours offset from UTC used for solar angles
        fpar (float): PAR fraction of global radiation
        CO2_constant (bool): force a constant 400 ppm CO2 column
    """
    from canopy.radiation import solar_angles, compute_clouds_rad
    from canopy.micromet import e_sat

    dat = pd.read_csv(meteo_fp, sep=';', header='infer', encoding = 'ISO-8859-1')

    # set to dataframe index
    dat.index = pd.to_datetime({'year': dat['year'],
                                'month': dat['month'],
                                'day': dat['day'],
                                'hour': dat['hour'],
                                'minute': dat['minute']})

    # `readme` accumulates one description line per output column; `cols`
    # lists the columns written to the forcing csv, in order.
    readme = ''
    cols = []

    # day of year
    dat['doy'] = dat.index.dayofyear
    cols.append('doy')
    readme += "\ndoy: Day of year [days]"

    # precipitaion unit from [mm/dt] to [m/s]
    # timestep length [s], derived from the first two timestamps
    dt = (dat.index[1] - dat.index[0]).total_seconds()
    # dat['Prec'] = dat['Prec'] * 1e-3 / dt
    cols.append('Prec')
    readme += "\nPrec: Precipitation [mm/30min]"

    # atm. pressure unit from [XPa] to [Pa]
    dat['P'] = dat['P'] * P_unit
    cols.append('P')
    readme += "\nP: Ambient pressure [Pa]"

    # air temperature: instant and daily [degC]
    cols.append('Tair')
    readme += "\nTair: Air temperature [degC]"
    # dat['Tdaily'] = dat['Tair'].rolling(int((24*3600)/dt), 1).mean()
    dat['Tdaily'] = dat['Tair'].resample('D').mean()
    dat['Tdaily'] = dat['Tdaily'].fillna(method='ffill')
    cols.append('Tdaily')
    readme += "\nTdaily: Daily air temperature [degC]"

    # wind speend and friction velocity
    cols.append('U')
    readme += "\nU: Wind speed [m/s]"
    cols.append('Ustar')
    readme += "\nUstar: Friction velocity [m/s]"

    # ambient H2O [mmol/mol] from RH
    esat, _ = e_sat(dat['Tair'])
    dat['H2O'] = 1e3 * (dat['RH'] / 100.0) * esat / dat['P']
    cols.append('H2O')
    readme += "\nH2O: Ambient H2O [mmol/mol]"

    # ambient CO2 [ppm]
    readme += "\nCO2: Ambient CO2 [ppm]"
    if 'CO2' not in dat or CO2_constant:
        dat['CO2'] = 400.0
        readme += " - set constant!"
    cols.append('CO2')

    # zenith angle, evaluated at the middle of each averaging period
    # jday = dat.index.dayofyear + dat.index.hour / 24.0 + dat.index.minute / 1440.0  # TEST (PERIOD START)
    # jday = dat.index.dayofyear + dat.index.hour / 24.0 + dat.index.minute / 1440.0 + dt / 2.0 / 86400.0  # TEST (PERIOD END)
    jday = dat.index.dayofyear + dat.index.hour / 24.0 + dat.index.minute / 1440.0 - dt / 2.0 / 86400.0

    dat['Zen'], _, _, _, _, _ = solar_angles(lat, lon, jday, timezone=timezone)
    cols.append('Zen')
    readme += "\nZen: Zenith angle [rad], (lat = %.2f, lon = %.2f)" % (lat, lon)

    # radiation components: estimate cloudiness and diffuse fraction only
    # when some radiation columns are missing from the input
    if {'LWin','diffPar', 'dirPar', 'diffNir', 'dirNir'}.issubset(dat.columns) == False:
        f_cloud, f_diff, emi_sky = compute_clouds_rad(dat['doy'].values,
                                                      dat['Zen'].values,
                                                      dat['Rg'].values,
                                                      dat['H2O'].values * dat['P'].values,
                                                      dat['Tair'].values)

    if 'LWin' not in dat or dat['LWin'].isnull().any():
        if 'LWin' not in dat:
            dat['LWin'] = np.nan
            print('Longwave radiation estimated')
        else:
            print('Longwave radiation partly estimated')
        # Downwelling longwve radiation
        # solar constant at top of atm.
        So = 1367
        # clear sky Global radiation at surface
        dat['Qclear'] = np.maximum(0.0,
                (So * (1.0 + 0.033 * np.cos(2.0 * np.pi * (np.minimum(dat['doy'].values, 365) - 10) / 365)) * np.cos(dat['Zen'].values)))
        # NOTE(review): duplicated assignment kept as in the original source
        tau_atm = tau_atm = dat['Rg'].rolling(4,1).sum() / (dat['Qclear'].rolling(4,1).sum() + EPS)
        # cloud cover fraction
        dat['f_cloud'] = 1.0 - (tau_atm - 0.2) / (0.7 - 0.2)
        # low clear-sky radiation (night/dusk) gives meaningless tau; mask it
        dat['f_cloud'][dat['Qclear'] < 10] = np.nan

        dat['Qclear_12h'] = dat['Qclear'].resample('12H').sum()
        dat['Qclear_12h'] = dat['Qclear_12h'].fillna(method='ffill')
        dat['Rg_12h'] = dat['Rg'].resample('12H').sum()
        dat['Rg_12h'] = dat['Rg_12h'].fillna(method='ffill')

        tau_atm = dat['Rg_12h'] / (dat['Qclear_12h'] + EPS)
        dat['f_cloud_12h'] = 1.0 - (tau_atm -0.2) / (0.7 - 0.2)

        # clear afternoons: trust the 12h aggregate over the masked values
        dat['f_cloud'] = np.where((dat.index.hour > 12) & (dat['f_cloud_12h'] < 0.2), 0.0, dat['f_cloud'])
        dat['f_cloud'] = dat['f_cloud'].fillna(method='ffill')
        dat['f_cloud'] = dat['f_cloud'].fillna(method='bfill')
        # clamp to the physical range [0, 1]
        dat['f_cloud'][dat['f_cloud'] < 0.0] = 0.0
        dat['f_cloud'][dat['f_cloud'] > 1.0] = 1.0

        # clear-sky emissivity (Brutsaert-type) blended with cloud fraction
        emi0 = 1.24 * (dat['H2O'].values * dat['P'].values / 100 /(dat['Tair'].values + 273.15))**(1./7.)
        emi_sky = (1 - 0.84 * dat['f_cloud']) * emi0 + 0.84 * dat['f_cloud']

        # estimated long wave budget
        b = 5.6697e-8  # Stefan-Boltzman constant (W m-2 K-4)
        dat['LWin_estimated'] = emi_sky * b *(dat['Tair'] + 273.15)**4 # Wm-2 downwelling LW

        dat[['LWin','LWin_estimated']].plot(kind='line')

        # keep measured LWin where available, fill gaps with the estimate
        dat['LWin'] = np.where(np.isfinite(dat['LWin']),dat['LWin'],dat['LWin_estimated'])

    cols.append('LWin')
    readme += "\nLWin: Downwelling long wave radiation [W/m2]"

    # Short wave radiation; separate direct and diffuse PAR & NIR
    if {'diffPar', 'dirPar', 'diffNir', 'dirNir'}.issubset(dat.columns) == False:
        print('Shortwave radiation components estimated')
        dat['diffPar'] = f_diff * fpar * dat['Rg']
        dat['dirPar'] = (1 - f_diff) * fpar * dat['Rg']
        dat['diffNir'] = f_diff * (1 - fpar) * dat['Rg']
        dat['dirNir'] = (1 - f_diff) * (1 - fpar) * dat['Rg']
    cols.extend(('diffPar', 'dirPar', 'diffNir', 'dirNir'))
    readme += "\ndiffPar: Diffuse PAR [W/m2] \ndirPar: Direct PAR [W/m2]"
    readme += "\ndiffNir: Diffuse NIR [W/m2] \ndirNir: Direct NIR [W/m2]"

    # optional soil observations, passed through when present
    if {'Tsoil', 'Wliq'}.issubset(dat.columns):
        cols.extend(('Tsoil', 'Wliq'))
        dat['Wliq'] = dat['Wliq'] / 100.0
        readme += "\nTsoil: Soil surface layer temperature [degC]]"
        readme += "\nWliq: Soil surface layer moisture content [m3 m-3]"

    # Phenology drivers: X is a lagged daily temperature (time constant
    # 8.33 days), DDsum the running degree-day sum (base 5 degC), both
    # updated once per calendar day.
    X = np.zeros(len(dat))
    DDsum = np.zeros(len(dat))
    for k in range(1,len(dat)):
        if dat['doy'][k] != dat['doy'][k-1]:
            X[k] = X[k - 1] + 1.0 / 8.33 * (dat['Tdaily'][k-1] - X[k - 1])
            if dat['doy'][k] == 1:  # reset in the beginning of the year
                DDsum[k] = 0.
            else:
                DDsum[k] = DDsum[k - 1] + max(0.0, dat['Tdaily'][k-1] - 5.0)
        else:
            X[k] = X[k - 1]
            DDsum[k] = DDsum[k - 1]
    dat['X'] = X
    cols.append('X')
    readme += "\nX: phenomodel delayed temperature [degC]"
    dat['DDsum'] = DDsum
    cols.append('DDsum')
    readme += "\nDDsum: degreedays [days]"

    # Checking timestamp validity
    # clear sky Global radiation at surface
    So = 1367
    dat['Qclear'] = np.maximum(0.0,
            (So * (1.0 + 0.033 * np.cos(2.0 * np.pi * (np.minimum(dat['doy'].values, 365) - 10) / 365)) * np.cos(dat['Zen'].values)))
    dat[['Qclear','Rg']].plot(kind='line')

    # keep only the documented columns, in documented order
    dat = dat[cols]
    dat.plot(subplots=True, kind='line')

    print("NaN values in forcing data:")
    print(dat.isnull().any())

    save_df_to_csv(dat, output_file, readme=readme, fp=dir_save, timezone=timezone, sep=';')
5,329,360
def distribute_py_test(
        name,
        srcs = [],
        deps = [],
        tags = [],
        data = [],
        main = None,
        args = [],
        shard_count = 1,
        **kwargs):
    """Generates py_test targets for CPU and GPU.

    Args:
        name: test target name to generate suffixed with `test`.
        srcs: source files for the tests.
        deps: additional dependencies for the test targets.
        tags: tags to be assigned to the different test targets.
        data: data files that need to be associated with the target files.
        main: optional main script.
        args: arguments to the tests.
        shard_count: number of shards to split the tests across.
        **kwargs: extra keyword arguments to the test.
    """
    # Thin wrapper: cuda_py_test emits both the CPU and the GPU variants,
    # so all arguments are simply forwarded (deps map to additional_deps).
    cuda_py_test(
        name = name,
        srcs = srcs,
        data = data,
        main = main,
        additional_deps = deps,
        shard_count = shard_count,
        tags = tags,
        args = args,
        **kwargs
    )
5,329,361
def test_csv_file_validation(test_import_dir):
    """Create and populate a *.csv file for testing Validation import mapping."""
    target = TMP_DIR + "/test_inputs_validation.csv"
    # Write the Validation header row plus a single data row.
    with open(target, "w") as handle:
        writer = csv.writer(
            handle, delimiter=";", quotechar="|", quoting=csv.QUOTE_MINIMAL
        )
        writer.writerow(HEADERS["Validation"])
        writer.writerow(ROW_DATA[4])
    yield target
5,329,362
def update_strip_chart_data(_n_intervals, acq_state, chart_data_json_str,
                            samples_to_display_val, active_channels):
    """
    A callback function to update the chart data stored in the chartData HTML
    div element. The chartData element is used to store the existing data
    values, which allows sharing of data between callback functions. Global
    variables cannot be used to share data between callbacks (see
    https://dash.plot.ly/sharing-data-between-callbacks).

    Args:
        _n_intervals (int): Number of timer intervals - triggers the callback.
        acq_state (str): The application state of "idle", "configured",
            "running" or "error" - triggers the callback.
        chart_data_json_str (str): A string representation of a JSON object
            containing the current chart data.
        samples_to_display_val (float): The number of samples to be displayed.
        active_channels ([int]): A list of integers corresponding to the user
            selected active channel checkboxes.

    Returns:
        str: A string representation of a JSON object containing the updated
        chart data.
    """
    # Default: echo the current data back unchanged (e.g. in 'idle'/'error').
    updated_chart_data = chart_data_json_str
    samples_to_display = int(samples_to_display_val)
    num_channels = len(active_channels)
    if acq_state == 'running':
        # The DAQ HAT handle is stashed in the module globals by the
        # configuration callback; None means no board is open yet.
        hat = globals()['_HAT']
        if hat is not None:
            chart_data = json.loads(chart_data_json_str)
            # By specifying -1 for the samples_per_channel parameter, the
            # timeout is ignored and all available data is read.
            read_result = hat.a_in_scan_read(ALL_AVAILABLE, RETURN_IMMEDIATELY)
            # Latch overrun flags: once set, they stay set for the session.
            if ('hardware_overrun' not in chart_data.keys()
                    or not chart_data['hardware_overrun']):
                chart_data['hardware_overrun'] = read_result.hardware_overrun
            if ('buffer_overrun' not in chart_data.keys()
                    or not chart_data['buffer_overrun']):
                chart_data['buffer_overrun'] = read_result.buffer_overrun
            # Add the samples read to the chart_data object.
            sample_count = add_samples_to_data(samples_to_display, num_channels,
                                               chart_data, read_result)
            # Update the total sample count.
            chart_data['sample_count'] = sample_count
            updated_chart_data = json.dumps(chart_data)
    elif acq_state == 'configured':
        # Clear the data in the strip chart when Configure is clicked.
        updated_chart_data = init_chart_data(num_channels, samples_to_display)
    return updated_chart_data
5,329,363
def etl(fp_source, fp_export, split=None, suffix=None):
    """Read a dataset, split it per label column, and save each split.

    Args:
        fp_source: path of the source CSV to read (delimiter auto-sniffed).
        fp_export: directory to write the per-split CSV files into
            (created if missing).
        split: list of column names to split the dataset on (required).
        suffix: optional list of file-name suffixes parallel to ``split``;
            defaults to the split column names themselves.

    Raises:
        ValueError: if ``split`` is missing or not a list, or ``suffix``
            is a list of a different length than ``split``.
    """
    # Derive the dataset name from the source path. The original code read
    # the global CLI namespace (`Path(args.s).stem`), which broke any call
    # where `fp_source` did not match the command-line arguments.
    dataset_name = Path(fp_source).stem

    # read file (sep=None lets the python engine sniff the delimiter)
    df = pd.read_csv(fp_source, sep=None, engine="python")

    # errors
    if not split or not isinstance(split, list):
        raise ValueError("Specify at least one column to split on.")
    if isinstance(suffix, list) and len(split) != len(suffix):
        raise ValueError("Suffix values should be of same length as split.")
    if suffix is None:
        suffix = split

    # ensure the export directory exists (hoisted out of the loop)
    Path(fp_export).mkdir(parents=True, exist_ok=True)

    for i, split_var in enumerate(split):
        # split dataset on this label column
        df_split = rename_label(df, split_var)
        df_split.to_csv(
            Path(fp_export, f"{dataset_name}_{suffix[i]}.csv"),
            index=False,
        )
5,329,364
def genuuid():
    """Generate a fresh random (version 4) UUID rendered as a string."""
    fresh = uuid.uuid4()
    return format(fresh)
5,329,365
def watsons_f(DI1, DI2):
    """
    calculates Watson's F statistic (equation 11.16 in Essentials text book).

    Parameters
    _________
    DI1 : nested array of [Dec,Inc] pairs
    DI2 : nested array of [Dec,Inc] pairs

    Returns
    _______
    F : Watson's F
    Fcrit : critical value from F table
    """
    # R for the combined set, then R1/R2 for each set individually
    combined = np.concatenate((DI1, DI2), axis=0)
    stats_all = fisher_mean(combined)
    stats_one = fisher_mean(DI1)
    stats_two = fisher_mean(DI2)

    N = stats_all['n']
    R = stats_all['r']
    R1 = stats_one['r']
    R2 = stats_two['r']

    F = (N - 2.) * ((R1 + R2 - R) / (N - R1 - R2))
    Fcrit = fcalc(2, 2 * (N - 2))
    return F, Fcrit
5,329,366
def encode(integer_symbol, bit_count):
    """Encode *integer_symbol* as a fixed-width, big-endian list of bits.

    - `integer_symbol` - the non-negative integer value to encode.
    - `bit_count` - the width of the result; the value's bits are
      left-padded with zeros up to this count.

    Returns a list of ``bit_count`` ints (each 0 or 1), most significant
    bit first.
    """
    assert type(integer_symbol) == int and integer_symbol >= 0, "The given symbol must be an integer greater than or equal to zero."

    # format(..., 'b') yields the binary digits with no '0b' prefix; this
    # also removes the Python-2-only xrange call (a NameError on Python 3).
    bits = [int(bit) for bit in format(integer_symbol, "b")]

    # Check that the number of bits is not bigger than the given bit count.
    bits_length = len(bits)
    assert bit_count >= bits_length, \
        "The given %d bits to encode with is not enough to encode %d bits." % \
        (bit_count, bits_length)

    # Left-pad with zeros so the result is exactly bit_count wide.
    return [0] * (bit_count - bits_length) + bits
5,329,367
def superkick(update, context):
    """Superkick a member from all rooms by replying to one of their messages
    with the /superkick command.

    Admin-only. Kicks the replied-to user from every configured group,
    lifts their message restrictions (so a future re-join starts clean),
    removes them from the member store, announces the kick in the boot
    channel and deletes the triggering command message.
    """
    bot = context.bot
    user_id = update.message.from_user.id
    boot_id = update.message.reply_to_message.from_user.id
    username = update.message.reply_to_message.from_user.name

    if not _admin(user_id):
        return _for_admin_only_message(bot, user_id, username)

    # NOTE(review): membership is checked for the invoking admin (user_id),
    # not the kicked user (boot_id), as in the original — confirm intended.
    group_keys = ("crab_wiv_a_plan", "tutorial", "video_stars")
    # All membership checks run before any kick, preserving the original
    # check-then-kick API call ordering.
    membership = {
        key: _in_group(context, user_id, config["GROUPS"][key])
        for key in group_keys
    }

    for key in group_keys:
        if not membership[key]:
            continue
        chat_id = config["GROUPS"][key]
        bot.kick_chat_member(chat_id=chat_id, user_id=boot_id)
        bot.restrict_chat_member(chat_id=chat_id, user_id=boot_id,
                                 can_send_messages=True,
                                 can_send_media_messages=True,
                                 can_add_web_page_previews=True,
                                 can_send_other_messages=True)

    remove_member(boot_id)

    the_message = '{} has been *SUPER KICKED* from Crab Wiv A Plan, Tutorial Group, and VideoStars.' \
        .format(escape_markdown(username))
    bot.send_message(chat_id=config["GROUPS"]["boot_channel"], text=the_message,
                     parse_mode='MARKDOWN')
    bot.delete_message(chat_id=update.message.chat_id,
                       message_id=update.message.message_id)
5,329,368
def encrypt_session(
    signer: "Fernet",
    session_id: str,
    current_time: typing.Optional[typing.Union[int, datetime]] = None,
) -> str:
    """An utility for generating a token from the passed session id.

    :param signer: an instance of a fernet object (the old
        ``typing.Type[Fernet]`` annotation wrongly described the class;
        an instance is what is actually used)
    :param session_id: a user session id
    :param current_time: a datetime object or timestamp indicating the time
        of the session id encryption. By default, it is now
    """
    if current_time is None:
        # Local "now"; .timestamp() of this naive datetime equals the
        # timestamp the previous pendulum.now() call produced, so the
        # third-party dependency is dropped without a behavior change.
        current_time = datetime.now()
    if isinstance(current_time, datetime):
        current_time = current_time.timestamp()

    token = signer.encrypt_at_time(session_id.encode("utf-8"), int(current_time))
    return token.decode("utf-8")
5,329,369
def log_params_to_mlflow(
    config: Dict[str, Any],
    prefix: Optional[str] = None,
) -> None:
    """Log a (possibly nested) parameter dictionary to MLflow.

    The dict is flattened to dotted keys and submitted in sorted-key
    batches of 100, the maximum number of parameters MLflow accepts in
    a single call.
    """
    flat = to_dot(config, prefix=prefix)

    ordered_keys = sorted(flat.keys())
    batch_size = 100
    for offset in range(0, len(ordered_keys), batch_size):
        batch = {key: flat[key] for key in ordered_keys[offset:offset + batch_size]}
        mlflow.log_params(batch)
5,329,370
def delete_variants_task(req):
    """Remove variants from the database after a delete request is received.

    Accepts:
        req(flask.request): POST request received by server
    """
    db = current_app.db
    payload = req.json
    dataset_id = payload.get("dataset_id")
    samples = payload.get("samples")

    updated, removed = variant_deleter(db, dataset_id, samples)
    # Only touch the dataset record when something actually changed.
    if updated + removed > 0:
        update_dataset(database=db, dataset_id=dataset_id, samples=samples, add=False)
    LOG.info(f"Number of updated variants:{updated}. Number of deleted variants:{removed}")
5,329,371
def construct_chargelst(nsingle):
    """
    Makes list of lists containing Lin indices of the states for given charge.

    Parameters
    ----------
    nsingle : int
        Number of single particle states.

    Returns
    -------
    chargelst : list of lists
        chargelst[charge] gives a list of state indices for given charge,
        chargelst[charge][ind] gives state index.
    """
    chargelst = [[] for _ in range(nsingle + 1)]
    # Every many-body state is a bit pattern over the single-particle
    # states; its charge is the number of occupied (set) bits.
    for state_index in range(2 ** nsingle):
        occupation = integer_to_binarylist(state_index, nsingle)
        chargelst[sum(occupation)].append(state_index)
    return chargelst
5,329,372
def parse(json_string):
    """Construct the Protocol from the JSON text.

    Raises:
        ProtocolParseException: if *json_string* is not valid JSON.
    """
    try:
        json_data = json.loads(json_string)
    except ValueError as exc:
        # json.JSONDecodeError subclasses ValueError; the previous bare
        # `except:` also swallowed SystemExit/KeyboardInterrupt. Chaining
        # preserves the original decode error for debugging.
        raise ProtocolParseException('Error parsing JSON: %s' % json_string) from exc

    # construct the Avro Protocol object
    return make_avpr_object(json_data)
5,329,373
def create_script_dict(allpacks, path, file, skip_lines):
    """Fill *allpacks* with placeholder metadata and the file's resources.

    Returns the updated dict, or None (the file is skipped with a message)
    when its resources cannot be created.
    """
    allpacks["name"] = "FILL"
    allpacks["title"] = "FILL"
    allpacks["description"] = "FILL"
    allpacks["citation"] = "FILL"
    allpacks["licenses"] = [{"name": "FILL"}]
    allpacks["keywords"] = []
    allpacks["homepage"] = "FILL"
    allpacks["version"] = "1.0.0"
    try:
        resources = create_resources(os.path.join(path, file), skip_lines)
    except Exception:
        # Deliberate best-effort: a malformed file is reported and skipped.
        # (The previous bare `except:` also trapped SystemExit and
        # KeyboardInterrupt, which should propagate.)
        print("Skipped file: " + file)
        return
    allpacks.setdefault("resources", []).append(resources)
    allpacks["retriever"] = "True"
    allpacks["retriever_minimum_version"] = "2.1.0"
    return allpacks
5,329,374
def get_architecture(model_config: dict, feature_config: FeatureConfig, file_io):
    """
    Return the architecture operation based on the model_config YAML specified
    """
    architecture_key = model_config.get("architecture_key")

    if architecture_key == ArchitectureKey.DNN:
        return DNN(model_config, feature_config, file_io).get_architecture_op()

    if architecture_key == ArchitectureKey.LINEAR:
        # A linear model is a DNN restricted to exactly one dense layer.
        dense_count = sum(1 for layer in model_config["layers"] if layer["type"] == "dense")
        if dense_count == 0:
            raise ValueError("No dense layers were specified in the ModelConfig")
        if dense_count > 1:
            raise ValueError("Linear model used with more than 1 dense layer")
        return DNN(model_config, feature_config, file_io).get_architecture_op()

    if architecture_key == ArchitectureKey.RNN:
        raise NotImplementedError
    raise NotImplementedError
5,329,375
def get_properties_dict(serialized_file: str, sparql_file: str, repository: str, endpoint: str,
                        endpoint_type: str, limit: int = 1000) -> ResourceDictionary:
    """
    Return a ResourceDictionary with the list of properties in the ontology
    :param serialized_file: The file where the properties ResourceDictionary is serialized
    :param sparql_file: The file containing the SPARQL query
    :param repository: The repository containing the ontology
    :param endpoint: The SPARQL endpoint
    :param endpoint_type: GRAPHDB or VIRTUOSO (to change the way the endpoint is called)
    :param limit: The sparql query limit
    :return: A ResourceDictionary with the list of properties in the ontology
    """
    # Serve from the serialized cache when one exists.
    global_properties_dict = deserialize(serialized_file)
    if global_properties_dict:
        return global_properties_dict

    global_properties_dict = ResourceDictionary()
    # rdf:type is always present even if the query does not return it.
    global_properties_dict.add(RDF.type)

    # `with` guarantees the query file is closed (the previous
    # open(...).read() leaked the file handle).
    with open(sparql_file) as query_file:
        properties_sparql_query = query_file.read()
    properties_sparql_query_template = Template(properties_sparql_query + " limit $limit offset $offset ")

    for rdf_property in get_sparql_results(properties_sparql_query_template, ["property"],
                                           endpoint, repository, endpoint_type, limit):
        global_properties_dict.add(rdf_property[0])

    # Persist for the next call before returning.
    serialize(global_properties_dict, serialized_file)
    return global_properties_dict
5,329,376
def get_duplicate_sample_ids(taxonomy_ids):
    """Get duplicate sample IDs from the taxonomy table.

    It happens that some sample IDs are associated with more than one
    taxon, which means the same sample is two different species — a data
    entry error that should be removed. Conversely, more than one sample
    per taxon is fine (oversampling) and handled later.

    Returns a dataframe indexed by the offending ``sample_id`` with one
    ``sci_name_<i>`` column per conflicting name.

    Unlike the previous version, the caller's dataframe is no longer
    mutated (it used to gain a throwaway ``times`` column).
    """
    # Named aggregation: group size plus the joined names per sample_id.
    grouped = taxonomy_ids.groupby('sample_id').agg(
        times=('sci_name', 'size'), sci_name=('sci_name', ', '.join))
    errors = grouped.loc[grouped.times > 1, :].drop(['times'], axis='columns')

    # Spread the conflicting names into sci_name_1, sci_name_2, ... columns.
    sci_names = errors.sci_name.str.split(r'\s*[;,]\s*', expand=True)
    id_cols = {i: f'sci_name_{i + 1}' for i in sci_names.columns}
    sci_names = sci_names.rename(columns=id_cols)

    return pd.concat([errors, sci_names], axis='columns').drop(
        ['sci_name'], axis=1)
5,329,377
def get_settings_text(poll):
    """Compile the settings overview text for this poll.

    Builds a newline-joined, localized summary of the poll's configuration:
    type, language, anonymity, due date, result visibility, user-option and
    percentage settings, date format, and the user/option sorting modes.

    :param poll: poll instance whose settings are rendered
    :return: multi-line settings string localized to the owner's locale
    """
    text = []
    locale = poll.user.locale
    text.append(i18n.t('settings.poll_type', locale=locale,
                       poll_type=translate_poll_type(poll.poll_type, locale)))

    text.append(i18n.t('settings.language', locale=locale, language=poll.locale))

    if poll.anonymous:
        text.append(i18n.t('settings.anonymous', locale=locale))
    else:
        text.append(i18n.t('settings.not_anonymous', locale=locale))

    if poll.due_date:
        text.append(i18n.t('settings.due_date', locale=locale,
                           date=poll.get_formatted_due_date()))
    else:
        text.append(i18n.t('settings.no_due_date', locale=locale))

    if poll.results_visible:
        text.append(i18n.t('settings.results_visible', locale=locale))
    else:
        text.append(i18n.t('settings.results_not_visible', locale=locale))

    text.append('')

    if poll.allow_new_options:
        text.append(i18n.t('settings.user_options', locale=locale))
    else:
        text.append(i18n.t('settings.no_user_options', locale=locale))

    # Percentage display only matters when results are visible at all.
    if poll.results_visible:
        if poll.show_percentage:
            text.append(i18n.t('settings.percentage', locale=locale))
        else:
            text.append(i18n.t('settings.no_percentage', locale=locale))

    if poll.has_date_option():
        if poll.european_date_format:
            text.append(i18n.t('settings.euro_date_format', locale=locale))
        else:
            text.append(i18n.t('settings.us_date_format', locale=locale))

    text.append('')

    # Sorting of user names
    if poll.poll_type == PollType.doodle.name:
        # Doodle polls use a fixed sorting; plain literal here (the original
        # used an f-string with no placeholders).
        sorting_name = i18n.t('sorting.doodle_sorting', locale=locale)
        text.append(i18n.t('settings.user_sorting', locale=locale, name=sorting_name))
    elif not poll.anonymous:
        sorting_name = i18n.t(f'sorting.{poll.user_sorting}', locale=locale)
        text.append(i18n.t('settings.user_sorting', locale=locale, name=sorting_name))

    sorting_name = i18n.t(f'sorting.{poll.option_sorting}', locale=locale)
    text.append(i18n.t('settings.option_sorting', locale=locale, name=sorting_name))

    return '\n'.join(text)
5,329,378
def resize_pic(pic_path, new_width, new_height):
    """Resize an image and save a copy next to the original.

    The copy is written as PNG with '_resize' appended to the stem
    (e.g. 'photo.jpg' -> 'photo_resize.png').

    :param pic_path: path of the source image
    :param new_width: target width in pixels
    :param new_height: target height in pixels
    :return: path of the saved copy (new; previously returned None)
    """
    # Scale the image. Image.LANCZOS is the modern name of the deprecated
    # Image.ANTIALIAS alias (removed in Pillow 10) and selects the same filter.
    img = Image.open(pic_path)
    img = img.resize((new_width, new_height), Image.LANCZOS)

    # Build the copy path: strip the original extension, append '_resize.png'.
    new_pic_path = os.path.splitext(pic_path)[0] + '_resize.png'

    # Save the copy and report where it went.
    img.save(new_pic_path)
    return new_pic_path
5,329,379
def download_model(model_path, region):
    """
    Downloads a model to a local file.

    MODEL_PATH
        The path to download the model to, ending with the name of the model.
    """
    # Resolve the S3 bucket for the region, then delegate the transfer.
    bucket = ensure_s3_bucket(region)
    impl_download_model(model_path, bucket, region)
5,329,380
def acd(strymobj= None, window_size=30, plot_iteration = False, every_iteration = 200, plot_timespace = True, save_timespace = False, wave_threshold = 50.0, animation = False, title = 'Average Centroid Distance', **kwargs):
    """
    Average Centroid Distance Algorithm for calculating stop-and-go wavestrength from

    `acd` implements average centroid distance algorithm to find out the stop-and-go distance traveled based on the given threshold.

    Parameters
    -------------
    strymobj: `strymread`
        A valid stymread object

    window_size: `int`
        Window size over which to form the cluster of data points on speed-acceleration phasespace

    plot_iteration: `bool`
        If `True` plots the intermediate phase-space plots of speed-acceleration phasespace for the `window_size` and distribution of centroid distances

    every_iteration: `int`
        If `plot_iteration` is true, then plot the intermediate figures every `every_iteration` iteration

    plot_timespace: `bool`
        If `True` plots and save timespace diagram of wavestrength for the given drive.

    save_timespace: `bool`
        If `True` save the timespace diagram to the disk

    wave_threshold: `double`
        The value of threshold of wavestrength above which classify the driving mode as stop-and-go. It defaults to the value of 50.

    animation: `bool`
        If `True` produces animation of phasespace evolving with the time

    title: `str`
        Desire plot title for phasespace animation

    image_path: `str`
        Path on the disk where to store phasespace animation

    Returns
    ----------
    `pandas.DataFrame`
        Returns Pandas Data frame consisting of WaveStrength column as a timeseries

    `double`
        Returns stop-and-go distance measured based on the `wave_threshold` in meters

    """
    # Check strymread object was able to successfully read the
    if strymobj is not None:
        if not strymobj.success:
            print("Invalid/Errored strymread object supplied. \nCheck if supplied datafile to strymread is valid.")
            return None

        file_name = strymobj.csvfile
        file_name = file_name.split('/')[-1][0:-4]

        ## Get the speed
        speed = strymobj.speed()

        if speed.shape[0] == 0:
            print("No speed data found\n")
            return None
        elif speed.shape[0] < 10:
            print("Speed data too low. Skipping ...\n")
            return None

        ### Convert speed to m/s
        speed['Message'] = speed['Message']*0.277778
        position = strymread.integrate(speed)  # Get the position

        ## Get the longitudinal acceleration
        accelx = strymobj.accelx()

        if accelx.shape[0] == 0:
            print("No Acceleration data found\n")
            return None
        elif accelx.shape[0] < 10:
            print("Acceleration data too low. Skipping ...\n")
            return None
    else:
        # No strymread object: speed/accel (and optionally position) must be
        # supplied through kwargs instead.
        file_name = ''
        speed = kwargs.get("speed", None)
        if speed is None:
            print("No speed data provided. Skipping ...\n")
            return None

        accelx = kwargs.get("accelx", None)
        if accelx is None:
            print("No longitudinal data provided. Skipping ...\n")
            return None

        speed_unit = kwargs.get("speed_unit", "km/h")

        if speed_unit.lower() not in ["km/h", "m/s"]:
            print("Unrecognized speed unit '{}'. \nProvide speed unit in km/h or m/s\n".format(speed_unit))
            return None

        if speed_unit.lower() == "km/h":
            ### Convert speed to m/s
            speed['Message'] = speed['Message']*0.277778
        elif speed_unit.lower() == "m/s":
            print("INFO: Speed unit is m/s")

        position = kwargs.get("position", None)
        if position is None:
            position = strymread.integrate(speed)

    # strymread.plt_ts(speed, title="Original Speed (m/s)")
    # strymread.plt_ts(position, title="Original Position (m)")
    # strymread.plt_ts(accelx, title="Original Accel (m/s^2)")

    # Synchronize speed and acceleration for common time points with a rate of 20 Hz
    rate = kwargs.get("rate", 20)
    speed_resampled, accel_resampled = strymread.ts_sync(speed, accelx, rate=rate, method = "nearest")
    position_resampled, _ = strymread.ts_sync(position, accelx, rate=rate, method = "nearest")

    # strymread.plt_ts(speed_resampled, title="Resampled Speed (m/s)")
    # strymread.plt_ts(position_resampled, title="Resampled position (m)")
    # strymread.plt_ts(accel_resampled, title="Resampled Accel (m/s^2)")

    assert ((speed_resampled.shape[0] == accel_resampled.shape[0]) and (position_resampled.shape[0]==accel_resampled.shape[0])), "Synchronization Error"

    # Consolidate the three synced series into one frame keyed by 'Time'.
    df = speed_resampled.copy(deep=True)
    df["Speed"] = speed_resampled["Message"]
    df["Accelx"] = accel_resampled["Message"]
    df["Position"] = position_resampled["Message"]
    df.drop(columns=["Message"], inplace=True)

    if df.shape[0] < 3:
        print("Extremely low sample points in synced-resampled data to obtain any meaningful measure. \nSkipping ...")
        return None

    DeltaT = np.mean(df['Time'].diff())
    #print(1./DeltaT)
    n_Sample_WS = int((1/DeltaT)*window_size) # Number of samples for window_size
    print("Number of samples for {} seconds: {}".format(window_size, n_Sample_WS))
    df.index = np.arange(0, df.shape[0])
    #print(n_Sample_WS)
    df['wavestrength'] = 0
    df['EllipseFit_semimajor_axis_len'] = 0
    df['EllipseFit_semiminor_axis_len'] = 0
    df['Goodness_of_Ellipse_Fit'] = 0
    count = 0

    # Save images in /tmp folder dy default
    dt_object = datetime.datetime.fromtimestamp(time.time())
    dt = dt_object.strftime('%Y-%m-%d-%H-%M-%S-%f')
    image_path = kwargs.get("image_path", "/tmp")
    image_path = image_path + '/WaveStrength_' + dt
    if animation:
        if not os.path.exists(image_path):
            try:
                os.mkdir(image_path)
            except OSError:
                print("[ERROR] Failed to create the image folder {0}.".format(image_path))

    figure_count = 1
    # Slide a window of n_Sample_WS samples over the drive; each window gets a
    # phase-space cluster, an ellipse fit, and an average-centroid-distance score.
    for r, row in df.iterrows():
        if r <=n_Sample_WS:
            continue
        df_tempWS = df[r-n_Sample_WS-1:r-1]
        velocity_tempWS = pd.DataFrame()
        velocity_tempWS['Time'] = df_tempWS['Time']
        velocity_tempWS['Message'] = df_tempWS['Speed']
        accel_tempWS = pd.DataFrame()
        accel_tempWS['Time'] = df_tempWS['Time']
        accel_tempWS['Message'] = df_tempWS['Accelx']
        ps = phasespace(dfx=velocity_tempWS, dfy=accel_tempWS, resample_type="first", verbose=False)

        # Degenerate window (all-zero speed or accel): skip the ellipse fit.
        if np.all(velocity_tempWS['Message'] == 0) or np.all(accel_tempWS['Message'] == 0):
            z1 = 0
            z2 = 0
            r1 = 0
            r2 = 0
            phi = 0
            residual = 0
        else:
            z1, z2, r1, r2, phi, residual = ellipse_fit(x = velocity_tempWS['Message'].to_numpy(), y = accel_tempWS['Message'].to_numpy())

        count = count + 1
        if plot_iteration or animation:
            if count % every_iteration == 0:
                count = 0
                print("--------------------------------------------------------------")
                print('Time Range: {} to {}'.format(accel_tempWS['Time'].iloc[0], accel_tempWS['Time'].iloc[-1]))
                #fig, ax = strymread.create_fig()
                fig, ax = _acdplots()
                strymread.plt_ts(speed_resampled, ax = ax[0], show = False, title = "Speed")
                strymread.plt_ts(accel_resampled, ax = ax[1], show = False, title="Acceleration")

                # Create a Rectangle patch that represents window of the iteration
                rect = patches.Rectangle((velocity_tempWS['Time'].iloc[0], np.min(speed_resampled['Message'])),\
                    np.abs(velocity_tempWS['Time'].iloc[-1] - velocity_tempWS['Time'].iloc[0]),\
                    np.max(speed_resampled['Message']) - np.min(speed_resampled['Message']),\
                    linewidth=4,edgecolor='g',facecolor='none')
                ax[0].add_patch(rect)
                rect = patches.Rectangle((accel_tempWS['Time'].iloc[0], np.min(accel_resampled['Message'])),\
                    np.abs(accel_tempWS['Time'].iloc[-1] - accel_tempWS['Time'].iloc[0]),\
                    np.max(accel_resampled['Message']) - np.min(accel_resampled['Message']),\
                    linewidth=4,edgecolor='g',facecolor='none')
                ax[1].add_patch(rect)
                ax1 = ps.phaseplot(title='Phase-space plot',\
                    xlabel='Speed', ylabel='Acceleration', plot_each = True, ax = [ax[2], ax[3], ax[4]], show = False, fig = fig)
                subtext = 'Time Window: ['+\
                    str(accel_tempWS['Time'].iloc[0]) + ', ' + str(accel_tempWS['Time'].iloc[-1])+']\n' + file_name +'\n'
                ax[2].xaxis.label.set_size(35)
                ax[3].xaxis.label.set_size(35)
                ax[4].xaxis.label.set_size(35)
                ax[2].yaxis.label.set_size(35)
                ax[3].yaxis.label.set_size(35)
                ax[4].yaxis.label.set_size(35)
                ax[2].title.set_fontsize(40)
                ax[3].title.set_fontsize(40)
                ax[4].title.set_fontsize(40)
                ax[4].set_xlim(np.min(speed_resampled['Message'])-2.0, np.max(speed_resampled['Message'])+ 2.0)
                ax[4].set_ylim(np.min(accel_resampled['Message'])-2.0, np.max(accel_resampled['Message'])+ 2.0)
                ax[4].set_aspect('equal', adjustable='box')
                c1= patches.Ellipse((z1, z2), r1*2,r2*2, angle = math.degrees(phi), color='g', fill=False, linewidth = 5)
                ax[4].add_artist(c1)
                ax2 = ps.centroidplot( xlabel='Centroid Distance', ylabel='Counts', ax = ax[5], show = False)
                plt.subplots_adjust(wspace=0, hspace=0)
                plt.tight_layout()
                my_suptitle = fig.suptitle(title + '\n' + subtext, y = 1.06)
                if animation:
                    figure_file_name = image_path + '/' + "wave_strength_{:06d}.png".format(figure_count)
                    fig.savefig(figure_file_name, dpi = 100,bbox_inches='tight',bbox_extra_artists=[my_suptitle])
                    figure_count = figure_count + 1
                if plot_iteration:
                    plt.show()
                else:
                    fig.clear()
                    plt.close(fig)
                print("Average Centroid Distane of cluster is {}".format(ps.acd))

        # NOTE(review): the chained-assignment writes below
        # (df['col'].iloc[i] = value) can silently write to a copy under newer
        # pandas; the commented .iloc/get_loc forms are the safe variant —
        # confirm against the pandas version in use.
        #df.iloc[df_tempWS.index[-1], df.columns.get_loc('wavestrength') ] = ps.acd
        df['wavestrength'].iloc[df_tempWS.index[-1]] = ps.acd
        #df.iloc[df_tempWS.index[-1], df.columns.get_loc('EllipseFit_semimajor_axis_len') ] = r1
        #df.iloc[df_tempWS.index[-1], df.columns.get_loc('EllipseFit_semiminor_axis_len') ] = r2
        #df.iloc[df_tempWS.index[-1], df.columns.get_loc('Goodness_of_Ellipse_Fit') ] = residual
        df['EllipseFit_semimajor_axis_len'].iloc[df_tempWS.index[-1]] = r1
        df['EllipseFit_semiminor_axis_len'].iloc[df_tempWS.index[-1]] = r2
        df['Goodness_of_Ellipse_Fit'].iloc[df_tempWS.index[-1]] = residual

    if animation:
        figdirs = os.listdir(image_path)
        figdirs.sort()
        video_name = 'wave_strength' + dt + '.mp4'
        import ffmpeg
        (
            ffmpeg
            .input(image_path + '/*.png', pattern_type='glob', framerate=5)
            .output(video_name)
            .run()
        )

    # Filter out data for which strong wave was detected
    high_wave = df[df['wavestrength'] > wave_threshold]
    # high_wave now is discontinuous in Time column, use this information to create separate
    # continuous chunks and over which we calculate the total distance
    high_wave_chunk = strymread.create_chunks(high_wave, continuous_threshold=0.1, \
        column_of_interest = 'Time', plot = False)

    # stop_ang_go_distance = 0.0
    # for c in high_wave_chunk:
    #     d = c['Position'][-1] - c['Position'][0]
    #     stop_ang_go_distance = stop_ang_go_distance + d

    # Distance in stop-and-go mode: integrate speed over each continuous chunk
    # and take the final accumulated value.
    stop_ang_go_distance = 0.0
    for c in high_wave_chunk:
        pos_temp = strymread.integrate(c, msg_axis="Speed")
        stop_ang_go_distance = stop_ang_go_distance + pos_temp['Message'][-1]

    if plot_timespace or save_timespace:
        fig, ax = strymread.create_fig(nrows = 4, ncols=1)

        im = ax[0].scatter(df['Time'], df['Position'], c=np.log(df['wavestrength']+1), cmap=strymread.sunset, s=3)
        im2 = ax[1].scatter(df['Time'], df['Position'], c=df['Speed'], cmap=strymread.sunset, s=3)
        im3 = ax[2].scatter(df['Time'], df['Speed'], c=df['Time'], cmap=strymread.sunset, s=3)
        im4 = ax[3].scatter(df['Time'], df['wavestrength'], c=df['Time'], cmap=strymread.sunset, s=3)

        cbr= strymread.set_colorbar(fig = fig, ax = ax[0], im = im, label = "log(wavestrength+1)")
        ax[0].set_xlabel('Time')
        ax[0].set_ylabel('Position')
        ax[0].set_title('Time-Space Diagram with log(wavestrength+1) as color map')

        cbr= strymread.set_colorbar(fig = fig, ax = ax[1], im = im2, label = "speed")
        ax[1].set_xlabel('Time')
        ax[1].set_ylabel('Position')
        ax[1].set_title('Time-Space Diagram with speed as color map')

        cbr= strymread.set_colorbar(fig = fig, ax = ax[2], im = im3, label = "Time")
        ax[2].set_xlabel('Time')
        ax[2].set_ylabel('Speed')
        ax[2].set_title('Time-Speed Diagram with Time as color map')

        cbr= strymread.set_colorbar(fig = fig, ax = ax[3], im = im4, label = "Time")
        ax[3].set_xlabel('Time')
        ax[3].set_ylabel('wavestrength')
        ax[3].set_title('Time-WaveStrength Diagram with Time as color map')

        dt_object = datetime.datetime.fromtimestamp(time.time())
        dt = dt_object.strftime('%Y-%m-%d-%H-%M-%S-%f')

        if save_timespace:
            file_to_save = "ACD_"+ file_name + "_time_space_diagram_" + dt
            fig.savefig(file_to_save + ".png", dpi = 100)
            fig.savefig(file_to_save + ".pdf", dpi = 100)

        if plot_timespace:
            plt.show()
        else:
            plt.close(fig)

    return df, stop_ang_go_distance
5,329,381
def pass_none(func):
    """
    Decorate *func* so that it is skipped when its first argument is None.

    The wrapper returns None (without calling *func*) when the first
    positional argument is None; otherwise it forwards the call unchanged.

    >>> print_text = pass_none(print)
    >>> print_text('text')
    text
    >>> print_text(None)
    """
    @functools.wraps(func)
    def wrapper(param, *args, **kwargs):
        return None if param is None else func(param, *args, **kwargs)
    return wrapper
5,329,382
def _clean_dataframe(dataframe, required_columns=None, lower_case_col_names=True): """ inputs: dataframe: pandas dataframe required_columns: list of column names that must have a non-NA values lower_case_col_names: should column names be modified to lower case Modifies dataframe in place. The following rows removed: fully empty (all fields have NA values) containing required_columns with 1 or more NA values """ dataframe.dropna(how="all", inplace=True) if required_columns is not None and len(required_columns) > 0: dataframe.dropna(how="any", subset=required_columns, inplace=True) if lower_case_col_names: dataframe.columns = [x.lower() for x in dataframe.columns]
5,329,383
def test_no_update_available_with_no_update_string_and_color_no_updates(
    fake_qtile, fake_window
):
    """ test output with no update (with dedicated string and color) """
    widget = CheckUpdates(
        distro=good_distro,
        custom_command=cmd_0_line,
        no_update_string=nus,
        colour_no_updates="#654321",
    )
    bar = FakeBar([widget], window=fake_window)
    widget._configure(fake_qtile, bar)

    polled = widget.poll()
    assert polled == nus
    assert widget.layout.colour == widget.colour_no_updates
5,329,384
def _p_qrs_tconst(pattern, pwave):
    """
    Temporal constraints of the P Wave wrt the corresponding QRS complex
    """
    BASIC_TCONST(pattern, pwave)
    obseq = pattern.obs_seq
    step = pattern.get_step(pwave)
    # A P wave is only constrained when it directly follows a QRS observation.
    if step == 0:
        return
    qrs = obseq[step - 1]
    if not isinstance(qrs, o.QRS):
        return
    net = pattern.last_tnet
    net.add_constraint(pwave.start, pwave.end, PW_DURATION)
    # PR interval
    net.add_constraint(pwave.start, qrs.start, N_PR_INTERVAL)
    net.set_before(pwave.end, qrs.start)
5,329,385
def test_trunc():
    """ Exercise trunc on empty, short, exact-fit and truncated inputs. """
    cases = (
        ('', 3, ''),
        ('foo', 3, 'foo'),
        ('foobar', 6, 'foobar'),
        ('foobar', 5, 'fo...'),
    )
    for value, limit, expected in cases:
        assert trunc(value, limit) == expected
5,329,386
def create_values_key(key):
    """Creates secondary key representing sparse values associated with key."""
    return key + '_' + VALUES_SUFFIX
5,329,387
def make_mask(variable, **flags):
    """
    Build a boolean mask for *variable* from named flag values.

    Example usage::

        make_mask(pqa, cloud_acca=False, cloud_fmask=False, land_obs=True)
        make_mask(pqa, **GOOD_PIXEL_FLAGS)

    where GOOD_PIXEL_FLAGS maps flag names to True/False.

    :param variable: xarray.Dataset or xarray.DataArray carrying flag definitions
    :param flags: boolean flag values to match
    :return: boolean mask, True where all requested flags match
    """
    flag_definitions = get_flags_def(variable)
    bit_mask, expected_value = create_mask_value(flag_definitions, **flags)
    # Explicit parentheses: & already binds tighter than == in Python, this
    # just makes the masking-then-compare order obvious.
    return (variable & bit_mask) == expected_value
5,329,388
def _normalize_block_comments(content: str) -> str:
    """Add // to the beginning of all lines inside a /* */ block"""
    pieces = []
    for part in _partition_block_comments(content):
        if not isinstance(part, Comment):
            pieces.append(part)
            continue
        # First line keeps its original form; every following line gets a
        # leading "// " unless it already starts with one.
        lines = part.splitlines(keepends=True)
        body = [lines[0]]
        for line in lines[1:]:
            body.append(line if line.lstrip().startswith("//") else f"// {line}")
        pieces.append(f'/*{"".join(body)}*/')
    return "".join(pieces)
5,329,389
def check_holidays(date_start, modified_end_date, holidays):
    """Extend a vacation's end date by the number of holidays it covers.

    Every holiday falling inside the (inclusive) vacation window pushes the
    end date out by one day. The window itself grows while counting, so a
    holiday landing on a newly added day is counted as well.

    :param date_start: vacation start date as a 'dd.mm.YYYY' string
    :param modified_end_date: current end date as a 'dd.mm.YYYY' string
    :param holidays: iterable of holiday dates as 'dd.mm.YYYY' strings
    :return: final end date as a 'dd.mm.YYYY' string
    """
    start = datetime.strptime(date_start, '%d.%m.%Y')
    # Moving upper bound of the window: grows by one day per matched holiday.
    window_end = datetime.strptime(modified_end_date, '%d.%m.%Y')
    # Fixed base the final result is computed from.
    base_end = datetime.strptime(modified_end_date, '%d.%m.%Y')

    matched = 0
    # Sort chronologically so a holiday that only enters the window after an
    # earlier holiday extended it is never missed (the previous version
    # depended on the caller's list order). Debug prints removed.
    for holiday in sorted(datetime.strptime(h, '%d.%m.%Y') for h in holidays):
        if start <= holiday <= window_end:
            matched += 1
            window_end += timedelta(days=1)

    return datetime.strftime(base_end + timedelta(days=matched), '%d.%m.%Y')
5,329,390
def getHouseholdProfiles(
    n_persons,
    weather_data,
    weatherID,
    seeds=[0],
    ignore_weather=True,
    mean_load=True,
    cores=mp.cpu_count() - 1,
):
    """
    Gets or creates the relevant occupancy profiles for a building
    simulation or optimization.

    Parameters
    ----------
    n_persons: integer, required
        Number of persons living in a single appartment.
    weather_data: pd.DataFrame(), required
        A time indexed pandas dataframe containing weather data with
        the GHI as a column.
    weatherID: str, required
        Giving an ID to the weather data to identify the resulting profile.
    seeds: list, optional (default: [0])
        List of integer seeds to create a number of profiles which have
        similar input parameters, but a varying output. Default, no seed
        is chosen.
    ignore_weather: bool, optional (default: True)
        Since atm only the GHI is required for the electricity load profile,
        the weather plays a minor role and can be ignored by the
        identificaiton of profiles.
    mean_load: bool, optional (default: True)
        Decides if the created load profiles on 1-minute basis shall be
        downsampled by taking the mean of 60 minutes or the first value
        in every 60 minutes.
    cores: int, optiona(default: mp.cpu_count() - 1)
        Number of cores used for profile generation.
    """
    # NOTE(review): `seeds=[0]` is a mutable default argument — harmless here
    # because the list is only read, but consider a tuple. `cores` is also
    # evaluated once at import time, not per call.
    # get the potential profile names
    filenames = {}
    for seed in seeds:
        profile_ID = "Profile" + "_occ" + str(int(n_persons)) + "_seed" + str(seed)
        if not ignore_weather:
            profile_ID = profile_ID + "_wea" + str(weatherID)
        if mean_load:
            profile_ID = profile_ID + "_mean"
        filenames[seed] = os.path.join(
            PATH, "results", "occupantprofiles", profile_ID + ".csv"
        )

    # check how many profiles do not exist#
    not_existing_profiles = {}
    for seed in seeds:
        if not os.path.isfile(filenames[seed]):
            not_existing_profiles[seed] = filenames[seed]

    # info about runtime
    if cores < 1:
        warnings.warn('Recognized cores are less than one. \nThe code will behave as the number is one.')
        cores = 1
    _runtime = np.floor(float(len(not_existing_profiles))/cores)
    _log_str = str(len(not_existing_profiles)) + " household profiles need to get calculated. \n"
    _log_str += "With " + str(cores) + " threads, the estimated runtime is " + str(_runtime) + " minutes."
    logging.info(_log_str)

    # run in parallel all profiles
    if len(not_existing_profiles) > 1:
        new_profiles = simHouseholdsParallel(
            int(n_persons),
            weather_data.index[0].year,
            len(not_existing_profiles),
            singleProfiles=True,
            weather_data=weather_data,
            get_hot_water=True,
            resample_mean=mean_load,
            cores=cores,
        )
    # if single profile just create one profile and avoid multiprocessing
    elif len(not_existing_profiles) > 0:
        one_profile = simSingleHousehold(
            int(n_persons),
            weather_data.index[0].year,
            weather_data=weather_data,
            get_hot_water=True,
            resample_mean=mean_load,
        )
        new_profiles = [one_profile]

    # write results to csv files
    for i, seed in enumerate(not_existing_profiles):
        new_profiles[i].to_csv(not_existing_profiles[seed])

    # load all profiles
    profiles = []
    for seed in seeds:
        profile = pd.read_csv(filenames[seed], index_col=0)
        # TODO get a proper indexing in tsorb based on the weather data
        profile.index = weather_data.index
        profiles.append(profile)
    return profiles
5,329,391
def find_centroid(restaurants): """Return the centroid of the locations of RESTAURANTS.""" "*** YOUR CODE HERE ***"
5,329,392
def get_local_ffmpeg() -> Optional[Path]:
    """
    Get local ffmpeg binary path.

    ### Returns
    - Path to ffmpeg binary or None if not found.
    """

    # Bug fix: the conditional must apply only to the ".exe" suffix. The
    # original expression parsed as `("ffmpeg" + ".exe") if windows else ""`,
    # so on non-Windows systems the looked-up file name was empty.
    exe_suffix = ".exe" if platform.system() == "Windows" else ""
    ffmpeg_path = Path(get_spotdl_path(), "ffmpeg" + exe_suffix)

    if ffmpeg_path.is_file():
        return ffmpeg_path

    return None
5,329,393
def remaining_time(trace, event):
    """Calculate remaining time by event in trace

    :param trace:
    :param event:
    :return:
    """
    # FIXME using no timezone info for calculation
    fmt = "%Y-%m-%dT%H:%M:%S"
    event_stamp = event['time:timestamp'].strftime(fmt)
    final_stamp = trace[-1]['time:timestamp'].strftime(fmt)
    try:
        remaining = dt.strptime(final_stamp, TIME_FORMAT) - dt.strptime(event_stamp, TIME_FORMAT)
    except ValueError:
        # Log has no timestamps
        return 0
    return remaining.total_seconds()
5,329,394
def _water_vapor_pressure_difference(temp, wet_bulb_temp, vap_press, psych_const):
    """
    Evaluate the psychrometric formula e_l - (e_w - gamma * (T_a - T_w)).

    Parameters
    ----------
    temp : numeric
        Air temperature (K).
    wet_bulb_temp : numeric
        Wet-bulb temperature (K).
    vap_press : numeric
        Vapor pressure (Pa).
    psych_const : numeric
        Psychrometric constant (Pa K-1).

    Returns
    -------
    wat_vap_press_diff : numeric
        Water vapor pressure difference (Pa).
    """
    # Saturation vapor pressure at the wet-bulb temperature (e_w).
    wet_bulb_saturation = saturation_vapor_pressure(wet_bulb_temp)
    # gamma * (T_a - T_w): same grouping as the formula above to keep
    # floating-point results bit-identical.
    psychrometric_term = psych_const * (temp - wet_bulb_temp)
    return vap_press - (wet_bulb_saturation - psychrometric_term)
5,329,395
def bank_left(midiout):
    """ Will send the note 46, that shifts the bank left """
    # Note 46 is the controller's bank-left button.
    bank_shift = mido.Message('note_on', note=46, velocity=127)
    midiout.send(bank_shift)
5,329,396
def _service_description_required(func):
    """
    Decorator for checking whether the service description is available on
    a device's service.
    """
    @wraps(func)
    def wrapper(service, *args, **kwargs):
        description = service.description
        if description is None:
            raise exceptions.NotRetrievedError('No service description retrieved for this service.')
        # Sentinel meaning the description was fetched but is unavailable:
        # silently skip the wrapped call.
        if description == exceptions.NotAvailableError:
            return None
        return func(service, *args, **kwargs)
    return wrapper
5,329,397
def _copy_cudd_license(args): """Include CUDD's license in wheels.""" path = args.cudd if args.cudd else CUDD_PATH license = os.path.join(path, 'LICENSE') included = os.path.join('dd', 'CUDD_LICENSE') yes = ( args.bdist_wheel and getattr(args, 'cudd') is not None) if yes: shutil.copyfile(license, included) elif os.path.isfile(included): os.remove(included)
5,329,398
def put_all_macros(config, data):
    """
    Update macros in Zendesk.

    :param config: context config
    :param data: the macro data to PUT
    """
    # Only these fields are accepted by the macros PUT endpoint.
    allowed_fields = (
        'title', 'active', 'actions', 'restriction', 'description', 'attachments'
    )
    updated = []
    errors = []
    with click.progressbar(length=len(data), label='Updating macros...') as bar:
        for record in data:
            payload = {'macro': {field: record[field] for field in record if field in allowed_fields}}
            url = f"https://{config['subdomain']}.zendesk.com/api/v2/macros/{record['id']}.json"
            try:
                put(config, url, payload)
                updated.append(record['id'])
            except click.ClickException as err:
                # Remember the failure; keep going with the remaining macros.
                errors.append((record['id'], err.message))
            bar.update(1)

    click.secho('\n\nUpdate complete!')
    if updated:
        click.secho('\nThe following macros were updated: ', fg='green', bold=True)
        for macro_id in updated:
            click.secho(macro_id, fg='green')
    if errors:
        click.secho('\nThe following macros could not be updated: ', fg='red', bold=True)
        for macro_id, message in errors:
            click.secho(f"{macro_id} ({message})", fg='red')
5,329,399