content: string, lengths 22 to 815k
id: int64, 0 to 4.91M
def rytz_axis_construction(d1: Vector, d2: Vector) -> Tuple[Vector, Vector, float]:
    """
    The Rytz’s axis construction is a basic method of descriptive Geometry
    to find the axes, the semi-major axis and semi-minor axis, starting from two
    conjugated half-diameters.

    Source: `Wikipedia <https://en.m.wikipedia.org/wiki/Rytz%27s_construction>`_

    Given conjugated diameter `d1` is the vector from center C to point P and the given conjugated
    diameter `d2` is the vector from center C to point Q. Center of ellipse is always ``(0, 0, 0)``.
    This algorithm works for 2D/3D vectors.

    Args:
        d1: conjugated semi-major axis as :class:`Vector`
        d2: conjugated semi-minor axis as :class:`Vector`

    Returns:
        Tuple of (major axis, minor axis, ratio)

    """
    Q = Vector(d1)  # vector CQ
    P1 = Vector(d2).orthogonal(ccw=False)  # vector CP', location P'
    D = P1.lerp(Q)  # vector CD, location D, midpoint of P'Q
    radius = D.magnitude
    radius_vector = (Q - P1).normalize(radius)  # direction vector P'Q
    A = D - radius_vector  # vector CA, location A
    B = D + radius_vector  # vector CB, location B
    if A.isclose(NULLVEC) or B.isclose(NULLVEC):
        raise ArithmeticError('Conjugated axis required, invalid source data.')
    major_axis_length = (A - Q).magnitude
    minor_axis_length = (B - Q).magnitude
    if math.isclose(major_axis_length, 0.) or math.isclose(minor_axis_length, 0.):
        raise ArithmeticError('Conjugated axis required, invalid source data.')
    ratio = minor_axis_length / major_axis_length
    major_axis = B.normalize(major_axis_length)
    minor_axis = A.normalize(minor_axis_length)
    return major_axis, minor_axis, ratio
5,341,300
def _join_ljust(words, width=9):
    """join list of str to fixed width, left just"""
    return ' '.join(map(lambda s: s.ljust(width), words)).strip()
5,341,301
def construct_dictionaries(color,marker,size, scatter_ecolor='k', alpha=1.0, fill_scatter=False, elinewidth=1,capsize=0): """ Example usage: halo_kws = construct_dictionaries('k','o', 20, alpha=.3) pltabund.plot_XFe_XFe(ax, 'K', 'Mg', roed, plot_xlimit=True, plot_ylimit=True, label="Halo", **halo_kws) """ e_kws = {'ecolor':color,'elinewidth':elinewidth,'capsize':capsize} if fill_scatter: ulkws = {'arrow_length':0.25, 'scatter_kws':{'marker':marker,'s':size,'facecolor':color,'alpha':alpha}, 'arrow_kws':{'color':color,'head_length':0.15,'head_width':0.05} } else: ulkws = {'arrow_length':0.25, 'scatter_kws':{'marker':marker,'s':size,'facecolor':'none', 'linewidths':1,'edgecolors':color,'alpha':alpha}, 'arrow_kws':{'color':color,'head_length':0.15,'head_width':0.05} } kws = {'color':color,'edgecolors':scatter_ecolor,'marker':marker,'s':size,'alpha':alpha, 'e_kws':e_kws,'ulkws':ulkws} return kws
5,341,302
def to_file(exp_dir, chain): """Write results to file :param exp_dir: directory to export :param chain: Chain object :raise RoutineErr: on file I/O error :return True: on success """ now = datetime.datetime.today().strftime('%Y%m%d-%H%M%S-%f') file_name = os.path.join(exp_dir, 'chains-{}.txt'.format(now)) try: out = open(file_name, 'wt') except OSError: raise RoutineErr('Could not open file: {}'.format(file_name)) if chain.dna1: out.write('>{}-DNA1\n'.format(chain.info)) out.write('{}\n'.format(chain.dna1)) out.write('\n') if chain.dna2: out.write('>{}-DNA2\n'.format(chain.info)) out.write('{}\n'.format(chain.dna2)) out.write('\n') if chain.rna: out.write('>{}-RNA\n'.format(chain.info)) out.write('{}\n'.format(chain.rna)) out.write('\n') if chain.protein: out.write('>{}-protein\n'.format(chain.info)) out.write('{}\n'.format(chain.protein)) out.write('\n') out.close() return True
5,341,303
def override_config(config):
    """Overrides the above global parameters used by the Agent."""
    global BUFFER_SIZE, BATCH_SIZE
    global GAMMA, TAU, TAU_INCREASE
    global LR_ACTOR, LR_CRITIC, WEIGHT_DECAY
    global NOISE_THETA, NOISE_SIGMA
    global ALPHA, EPSILON_ERROR, MAXIMUM_ERROR
    global RANDOM_ACTION_PERIOD, MINIMUM_RANDOM_ACTION_PROB
    global ACTOR_LAYER_SIZES, CRITIC_LAYER_SIZES

    BUFFER_SIZE = config["buffer_size"]
    BATCH_SIZE = config["batch_size"]
    GAMMA = config["gamma"]
    TAU = config["tau"]
    TAU_INCREASE = config["tau_increase"]
    LR_ACTOR = config["lr_actor"]
    LR_CRITIC = config["lr_critic"]
    WEIGHT_DECAY = config["weight_decay"]
    NOISE_THETA = config["noise_theta"]
    NOISE_SIGMA = config["noise_sigma"]
    ALPHA = config["alpha"]
    EPSILON_ERROR = config["epsilon_error"]
    MAXIMUM_ERROR = config["maximum_error"]
    RANDOM_ACTION_PERIOD = config["random_action_period"]
    MINIMUM_RANDOM_ACTION_PROB = config["minimum_random_action_prob"]
    ACTOR_LAYER_SIZES = config["actor_layer_sizes"]
    CRITIC_LAYER_SIZES = config["critic_layer_sizes"]
5,341,304
def test_slurm(region, pcluster_config_reader, clusters_factory):
    """
    Test all AWS Slurm related features.

    Grouped all tests in a single function so that cluster can be reused for all of them.
    """
    scaledown_idletime = 3
    max_queue_size = 5
    cluster_config = pcluster_config_reader(scaledown_idletime=scaledown_idletime, max_queue_size=max_queue_size)
    cluster = clusters_factory(cluster_config)
    remote_command_executor = RemoteCommandExecutor(cluster)

    _test_slurm_version(remote_command_executor)
    _test_dynamic_max_cluster_size(remote_command_executor, region, cluster.asg)
    _test_cluster_limits(remote_command_executor, max_queue_size, region, cluster.asg)
    _test_job_dependencies(remote_command_executor, region, cluster.cfn_name, scaledown_idletime, max_queue_size)
    _test_job_arrays_and_parallel_jobs(remote_command_executor, region, cluster.cfn_name, scaledown_idletime)
    _test_dynamic_dummy_nodes(remote_command_executor, max_queue_size)

    assert_no_errors_in_logs(remote_command_executor, ["/var/log/sqswatcher", "/var/log/jobwatcher"])
5,341,305
def rename_lab(lab_list: ui.layers.ListLayer, lab):
    """Rename a lab"""
    window = ui.get_window()
    labs = data.get_labs()
    text_input = ui.layers.TextInputLayer("Rename Lab")
    text_input.set_prompt(["Enter Lab's new name"])
    text_input.set_text(lab.name)
    window.run_layer(text_input)
    if not text_input.canceled:
        lab.name = text_input.get_text()
        data.write_labs(labs)
    fill_lab_list(lab_list, labs)
5,341,306
def arrows(m) -> str:
    """One or more arrows separated by a space"""
    return m.arrow_list
5,341,307
def hex2int(hex_str):
    """
    Convert 2 hex characters (e.g. "23") to int (35)
    :param hex_str: hex character string
    :return: int integer
    """
    return int(hex_str, 16)
5,341,308
def lat_2_R(lat):
    """Takes a geodetic latitude and puts out the distance from the center of an ellipsoid Earth to the surface

    Arguments:
        lat (float): Geodetic Latitude angle [degrees].

    Returns:
        (float): Geocentric distance [km]
    """
    R_polar = 6356.752314245
    R_eq = 6378.137

    lat = lat / 180 * pi
    R = sqrt(
        ((R_eq ** 2 * cos(lat)) ** 2 + (R_polar ** 2 * sin(lat)) ** 2)
        / ((R_eq * cos(lat)) ** 2 + (R_polar * sin(lat)) ** 2)
    )

    # e = sqrt(1-R_polar**2/R_eq**2)
    # R = R_eq/sqrt(1-e**2*sin(lat/180*pi)**2)
    # R = sqrt( ( (R_eq**2*cos(lat))**2 + (R_polar**2*sin(lat))**2 ) / ( (R_eq*cos(lat))**2 + (R_polar*sin(lat))**2 ) )

    return R
5,341,309
def compare_records(old_list, new_list):
    """Compare two lists of SeqRecord objects."""
    assert isinstance(old_list, list)
    assert isinstance(new_list, list)
    assert len(old_list) == len(new_list)
    for old_r, new_r in zip(old_list, new_list):
        if not compare_record(old_r, new_r):
            return False
    return True
5,341,310
def embed_owners_wallet(address, asset_name) -> Embed:
    """Return discord embed of wallet owner"""
    title = f"{asset_name} is owned by"
    description = f"`{address}`"
    color = Colour.blurple()
    embed = Embed(title=title, description=description, color=color)
    name = "This address belongs to wallet..."
    value = f"{POOL_PM_URL}/{address}/0e14267a"
    embed.add_field(name=name, value=value, inline=False)
    embed.set_footer(text=f"Data comes from {POOL_PM_URL}")
    return embed
5,341,311
def Qdist_H_lm_jk_FGH(a, b, c , d, N_x, N_y, h, par, model): """ Parameters ---------- a : TYPE left end point of the interval in x coordinate chosen for solving the time independent schrodinger equation. b : TYPE right end point of the interval in x coordinate chosen for solving the time independent schrodinger equation. c : TYPE left end point of the interval in y coordinate chosen for solving the time independent schrodinger equation. d : TYPE right end point of the interval in y coordinate chosen for solving the time independent schrodinger equation. N_x : TYPE number of grid points in x coordinate, must be an odd integer value. N_y : TYPE number of grid points in y coordinate, must be an odd integer value. h : TYPE reduced planck's constant of the system. par : TYPE parameters of the potential energy function. Returns ------- H_lm_jk : TYPE Hamiltonian matrix which is a discretisation of the Hamiltonian operator, computed using the FGH method. """ dx = (b-a)/(N_x-1) dpx = 2*np.pi / (N_x-1) / dx dy = (d-c)/(N_y-1) dpy = 2*np.pi / (N_y-1) / dy H_lm_jk = np.zeros((N_x*N_y,N_x*N_y)) for i1 in range(N_x*N_y): #print(i1) for i2 in range(i1, N_x*N_y): k = (i2 // N_x) j = (i2 % N_x) m = (i1 // N_x) l = (i1 % N_x) sum_rhalf = sum(np.cos(2*np.pi*np.arange(1, int((N_x+1)/2))*(j-l)/(N_x-1))) sum_r = 2 * sum_rhalf + 1 sum_shalf = sum(np.cos(2*np.pi*np.arange(1, int((N_y+1)/2))*(k-m)/(N_y-1))) sum_s = 2 * sum_shalf + 1 if j == l and k == m: H_lm_jk[m*N_x+l,k*N_x+j] = H_lm_jk[m*N_x+l,k*N_x+j] + 2 * 1/(N_x-1) /(N_y-1)* h**2 * sum((np.arange(1, int((N_x+1)/2))*dpx)**2 / 2 * np.cos(2*np.pi*np.arange(1, int((N_x+1)/2))*(j-l)/(N_x-1))) * sum_s \ + 2 * 1/(N_x-1) /(N_y-1)* h**2 * sum((np.arange(1, int((N_y+1)/2))*dpy)**2 / 2 * np.cos(2*np.pi*np.arange(1, int((N_y+1)/2))*(k-m)/(N_y-1))) * sum_r H_lm_jk[m*N_x+l,k*N_x+j] = H_lm_jk[m*N_x+l,k*N_x+j] + potential_energy_2dof((a+j*dx),(c+k*dy),par, model) H_lm_jk[k*N_x+j,m*N_x+l] = H_lm_jk[m*N_x+l,k*N_x+j] else: H_lm_jk[m*N_x+l,k*N_x+j] = H_lm_jk[m*N_x+l,k*N_x+j] + 2 * 1/(N_x-1) /(N_y-1)* h**2 * sum((np.arange(1, int((N_x+1)/2))*dpx)**2 / 2 * np.cos(2*np.pi*np.arange(1, int((N_x+1)/2))*(j-l)/(N_x-1))) * sum_s \ + 2 * 1/(N_x-1) /(N_y-1)* h**2 * sum((np.arange(1, int((N_y+1)/2))*dpy)**2 / 2 * np.cos(2*np.pi*np.arange(1, int((N_y+1)/2))*(k-m)/(N_y-1))) * sum_r H_lm_jk[k*N_x+j,m*N_x+l] = H_lm_jk[m*N_x+l,k*N_x+j] return H_lm_jk
5,341,312
def test_gateway_bad_protocol():
    """Test initializing gateway with a bad protocol_version."""
    gateway = Gateway(protocol_version=None)
    assert gateway.protocol_version == '1.4'
5,341,313
def get_log_configuration_retention_backup_log_days(host_id):
    """Return LOG_CONFIGURATION_RETENTION_BACKUP_LOG_DAYS"""
    from django.core.exceptions import ObjectDoesNotExist
    from physical.models import Host
    from backup.models import LogConfiguration

    try:
        host = Host.objects.get(id=host_id)
        databaseinfra = host.instances.all()[0].databaseinfra
    except Exception as e:
        LOG.warn("Error on get_log_configuration_retention_backup_log_days. Host id: {} - error: {}".format(host_id, e))
        return None

    try:
        log_configuration = LogConfiguration.objects.get(
            environment=databaseinfra.environment,
            engine_type=databaseinfra.engine.engine_type)
    except ObjectDoesNotExist:
        return None

    return log_configuration.retention_days
5,341,314
def createLabelsAndWeightsFromRois(image, roiimage): """Create class labels and instance labels. Args: image: input image roiimage: input mask image Returns: classlabelsdata, instancelabelsdata, total_instances """ logger.info("Creating class and instance labels ...") W = image.shape[1] H = image.shape[0] logger.info("H, W = {},{}".format(H, W)) classlabelsdata = np.ones([H*W]) instancelabelsdata = np.zeros([H*W]) roi_val = np.unique(roiimage) total_instances = 1 roiimage = roiimage.reshape(-1) for j in range(roi_val.shape[0]): if roi_val[j]>0: indices = np.where(roiimage == roi_val[j]) for ind in indices: classlabelsdata[ind] = 2 instancelabelsdata[ind] = total_instances total_instances+=1 return classlabelsdata, instancelabelsdata, total_instances
5,341,315
def write_poscar(structure, filepath='POSCAR', newformat=True, direct=True, comment=None, heterostructure=False): """ Takes an structure from pychemia and save the file POSCAR for VASP. :param comment: Optional comment to the first line of the POSCAR :param structure: (pychemia.Structure) Structure to write POSCAR :param filepath: (str) Filename of POSCAR file to create :param newformat: (bool) If the new VASP format is used to create the POSCAR :param direct: (bool) If True, use reduced coordinates. If False, use cartesian coordinates (default: True) """ comp = structure.get_composition() # If heterostructure is true it will keep the repeating order found # in the POSCAR. # Added by Uthpala on Apr 20th, 2020. if heterostructure: species = [i[0] for i in groupby(structure.symbols)] else: species = get_species_list(structure) species_count = [len(list(group)) for key, group in groupby(structure.symbols)] ret = '' if comment is None: for i in species: ret += ' ' + i else: ret += comment.strip() ret += '\n' ret += '1.0\n' for i in range(3): ret += ' %20.16f %20.16f %20.16f\n' % tuple(structure.cell[i]) if newformat: for i in species: ret += ' ' + i ret += '\n' for icount, i in enumerate(species): if heterostructure: ret += ' ' + str(species_count[icount]) else: ret += ' ' + str(comp.composition[i]) ret += '\n' if direct: ret += 'Direct\n' for i in range(structure.natom): ret += ' %20.16f %20.16f %20.16f\n' % tuple(structure.reduced[i]) else: ret += 'Cartesian\n' for i in range(structure.natom): ret += ' %20.16f %20.16f %20.16f\n' % tuple(structure.positions[i]) wf = open(filepath, 'w') wf.write(ret) wf.close()
5,341,316
def dup_inner_refine_complex_root(f, x, y, dx, dy, F, K): """One bisection step of complex root refinement algorithm. """ hx, hy = dx/2, dy/2 cx, cy = x + hx, y + hy F1, F2, F3, F4 = F Fx = _dup_inner_sturm(f, K.one, K.zero, cx, cy, K) Fy = _dup_inner_sturm(f, K.zero, K.one, cx, cy, K) # Quadrant #1: ++ F11 = Fx F12 = _dup_sturm_shift(F2, hx, K) F13 = F3 F14 = _dup_sturm_mirror(_dup_sturm_shift(Fy, hy, K), K) k1 = _dup_inner_zeros(F11, F12, F13, F14, hx, hy, K) if k1 == 1: return (cx, cy, hx, hy, (F11, F12, F13, F14)) # Quadrant #2: -+ F21 = _dup_sturm_shift(Fx,-hx, K) F22 = Fy F23 = _dup_sturm_shift(F3, hx, K) F24 = F4 k2 = _dup_inner_zeros(F21, F22, F23, F24, hx, hy, K) if k2 == 1: return (x, cy, hx, hy, (F21, F22, F23, F24)) # Quadrant #3: -- F31 = F1 F32 = _dup_sturm_shift(Fy,-hy, K) F33 = _dup_sturm_mirror(Fx, K) F34 = _dup_sturm_shift(F4, hy, K) k3 = _dup_inner_zeros(F31, F32, F33, F34, hx, hy, K) if k3 == 1: return (x, y, hx, hy, (F31, F32, F33, F34)) # Quadrant #4: +- F41 = _dup_sturm_shift(F1, hx, K) F42 = F2 F43 = _dup_sturm_mirror(_dup_sturm_shift(Fx, hx, K), K) F44 = _dup_sturm_mirror(Fy, K) k4 = _dup_inner_zeros(F41, F42, F43, F44, hx, hy, K) if k4 == 1: return (cx, y, hx, hy, (F41, F42, F43, F44)) raise RefinementFailed("no roots in (%s, %s) x (%s, %s) rectangle" % (x, y, x+dx, y+dy))
5,341,317
def print_dataset_info(superclasses, subclass_splits, label_map, label_map_sub): """ Obtain a dataframe with information about the superclasses/subclasses included in the dataset. Args: superclasses (list): WordNet IDs of superclasses subclass_splits (tuple): Tuple entries correspond to the source and target domains respectively. A tuple entry is a list, where each element is a list of subclasses to be included in a given superclass in that domain. If split is None, the second tuple element is empty. label_map (dict): Map from (super)class number to superclass name label_map_sub (dict): Map from subclass number to subclass name (equivalent to label map for original dataset) Returns: dataDf (pandas DataFrame): Columns contain relevant information about the datast """ def print_names(class_idx): return [f'{label_map_sub[r].split(",")[0]} ({r})' for r in class_idx] data = {'superclass': []} contains_split = len(subclass_splits[1]) if contains_split: data.update({'subclasses (source)': [], 'subclasses (target)': []}) else: data.update({'subclasses': []}) for i, (k, v) in enumerate(label_map.items()): data['superclass'].append(f'{v}') if contains_split: data['subclasses (source)'].append(print_names(subclass_splits[0][i])) data['subclasses (target)'].append(print_names(subclass_splits[1][i])) else: data['subclasses'].append(print_names(subclass_splits[0][i])) dataDf = pd.DataFrame(data) return dataDf
5,341,318
def mock_sync_cavatica_account(mocker):
    """ Mocks out sync Cavatica account functions """
    sync_cavatica_account = mocker.patch(
        "creator.projects.cavatica.sync_cavatica_account"
    )
    sync_cavatica_account.return_value = [], [], []
    return sync_cavatica_account
5,341,319
def test_name():
    """Test get a random name"""
    name1 = helper.role.name()
    name2 = helper.role.name()
    assert isinstance(name1, str)
    assert isinstance(name2, str)
    assert len(name1) > 4
    assert len(name2) > 4
    assert name1 != name2
5,341,320
def check_illegal(s):
    """
    :param s: (String) user input
    :return: (Bool) check user input is illegal or not
    """
    check = 0
    for ch in s:
        if len(ch) > 1:
            check = 1
    if check == 0:
        return True
    else:
        print("Illegal input")
        return False
5,341,321
def test_directory_update(runcutty: RunCutty, template: Path, project: Path) -> None:
    """It uses the template directory specified when updating."""
    directory = "a"
    move_repository_files_to_subdirectory(template, directory)

    runcutty("update", f"--cwd={project}", f"--template-directory={directory}")

    config = readprojectconfigfile(project)
    assert directory == str(config.directory)
5,341,322
def phasefold(time, rv, err, period): """Phasefold an rv timeseries with a given period. Parameters ---------- time : array_like An array containing the times of measurements. rv : array_like An array containing the radial-velocities. err : array_like An array containing the radial-velocity uncertainties. period : float The period with which to phase fold. Returns ------- time_phased : array_like The phased timestamps. rv_phased : array_like The phased RVs. err_phased : array_like The phased RV uncertainties. """ phases = (time / period) % 1 sortIndi = sp.argsort(phases) # sorts the points # gets the indices so we sort the RVs correspondingly(?) time_phased = phases[sortIndi] rv_phased = rv[sortIndi] err_phased = err[sortIndi] return time_phased, rv_phased, err_phased
5,341,323
def get_att_mats(translate_model):
    """
    Gets the tensors representing the attentions from a built model.

    The attentions are stored in a dict on the Transformer object while building the graph.

    :param translate_model: Transformer object to fetch the attention weights from.
    :return:
    """
    encdec_atts = []

    prefix = 'transformer/body/'
    postfix = '/multihead_attention/dot_product_attention'

    for i in range(1, translate_model.hparams.num_hidden_layers):
        encdec_att = translate_model.attention_weights[
            '%sdecoder/layer_%i/encdec_attention%s' % (prefix, i, postfix)]
        encdec_atts.append(encdec_att)

    encdec_att_mats = [tf.squeeze(tf.reduce_sum(mat, axis=1)) for mat in encdec_atts]

    return encdec_att_mats
5,341,324
def get_resource_string(package, resource):
    """Return a string containing the contents of the specified resource.

    If the pathname is absolute it is retrieved starting at the path of
    the importer for 'fullname'.  Otherwise, it is retrieved relative
    to the module within the loader.
    """
    provider, resource = NormalizeResource(package, resource)
    return provider.get_resource_string(_resource_manager, resource)
5,341,325
def get_initialize_cams(number_of_camera = 7,use_camera=True): """ initialize all camera Args: number_of_camera (int, optional): [description]. Defaults to 7. Returns: list : in list cap object """ cap_list=[] if use_camera: for _ in range(1,number_of_camera+1): cap= cv2.VideoCapture() cap_list.append(cap) else: cap1=cv2.VideoCapture(r"D:\Lakshit\Duke\dukeplasto\full_video\00000004226000000.mp4") cap2=cv2.VideoCapture(r"D:\Lakshit\Duke\dukeplasto\full_video\00000004888000100.mp4") cap3=cv2.VideoCapture(r"D:\Lakshit\Duke\dukeplasto\full_video\00000004987000000.mp4") cap4=cv2.VideoCapture(r"D:\Lakshit\Duke\dukeplasto\full_video\00000005012000000.mp4") cap5=cv2.VideoCapture(r"D:\Lakshit\Duke\dukeplasto\full_video\00000005066000100.mp4") cap6=cv2.VideoCapture(r"D:\Lakshit\Duke\dukeplasto\full_video\00000005142000000.mp4") cap7=cv2.VideoCapture(r"D:\Lakshit\Duke\dukeplasto\full_video\00000005143000100.mp4") cap_list.append(cap1) cap_list.append(cap2) cap_list.append(cap3) cap_list.append(cap4) cap_list.append(cap5) cap_list.append(cap6) cap_list.append(cap7) return cap_list
5,341,326
def vpc_security_group_list(rds_instance):
    """
    If VPC security group rule is open to public add to List and return.

    Args:
        rds_instance (dict): All the running rds instance on the region

    Returns:
        list: List of VPC Security Group Id's
    """
    vpc_list = []
    if rds_instance.get('VpcSecurityGroups'):
        for sec_group in rds_instance['VpcSecurityGroups']:
            if sec_group['Status'] == 'active':
                sec_group_rule = ec2.describe_security_group_rules(Filters=[
                    {
                        'Name': 'group-id',
                        'Values': [
                            sec_group['VpcSecurityGroupId']
                        ]
                    },
                ], MaxResults=512)
                if rds_sec_group_allowed(sec_group_rule, rds_instance['DbInstancePort']):
                    vpc_list.append(sec_group['VpcSecurityGroupId'])
    return vpc_list
5,341,327
def parse_commands(docstring): # type: (str) -> Generator[Tuple[List[str], List[str]], None, None] """Parse a docopt-style string for commands and subcommands. Args: docstring: A docopt-style string to parse. If the string is not a valid docopt-style string, it will not yield and values. Yields: All tuples of commands and subcommands found in the docopt docstring. """ try: docopt.docopt(docstring, argv=()) except (TypeError, docopt.DocoptLanguageError): return except docopt.DocoptExit: pass for command in _parse_section("usage", docstring): args = command.split() commands = [] i = 0 for i, arg in enumerate(args): if arg[0].isalpha() and not arg[0].isupper(): commands.append(arg) else: break yield commands, args[i:]
5,341,328
def check_host(host):
    """Check Server IP."""
    error_msg = "Check server IP and port! Wrong format of server name or no connection."
    if not host:
        print(error_msg)
        exit(1)
    try:
        socket.gethostbyname(host)
    except:
        print(error_msg)
        exit(1)
5,341,329
def tests(session):
    """Run tests."""
    session.install('-r', 'requirements.txt')
    session.run('pytest')
5,341,330
def register_root_api(app: Flask):
    """Register the API with the flask app."""
    ROOT_API.init_app(app)
    # register API blueprints (only do this after the API is registered with flask!)
    ROOT_API.register_blueprint(ROOT_ENDPOINT)
    ROOT_API.register_blueprint(API_V1)
5,341,331
def expectation_l(u_values, params_list): """ compute proba for each copula mix to describe the data :param u_values: :param params_list: :return: """ l_state = np.zeros((u_values.shape[0], len(COPULA_DENSITY))) dcopula = np.zeros((u_values.shape[0], len(COPULA_DENSITY))) for copula in COPULA_DENSITY.keys(): dcopula[:, params_list['order'][copula]] = ( params_list['alpha'][params_list['order'][copula]] * ( params_list[copula]['pi'] + (1 - params_list[copula]['pi']) * COPULA_DENSITY[copula]( u_values, params_list[copula]['theta'], ) ) ) for copula in COPULA_DENSITY.keys(): l_state[:, params_list['order'][copula]] = \ dcopula[:, params_list['order'][copula]] / np.sum(dcopula, axis=1) return l_state
5,341,332
def test_load_dataset_generator():
    """Testing if the function load_dataset_generator() works."""
    generator = dataset.load_dataset_generator(dataset_path)
    data = next(generator)
    assert data["steering"] == -0.6 and data["throttle"] == 0.7
    data = next(generator)
    assert data["steering"] == 0.33 and data["throttle"] == 0.5
5,341,333
def lanczosSubPixShift( imageIn, subPixShift, kernelShape=3, lobes=None ):
    """ lanczosSubPixShift( imageIn, subPixShift, kernelShape=3, lobes=None )
    imageIn = input 2D numpy array
    subPixShift = [y,x] shift, recommended not to exceed 1.0, should be float

    Random values of kernelShape and lobes give poor performance. Generally the
    lobes have to increase with the kernelShape or you'll get a lowpass filter.
    Generally lobes = (kernelShape+1)/2

    kernelShape=3 and lobes=2 is a lanczos2 kernel, it has almost no low-pass character
    kernelShape=5 and lobes=3 is a lanczos3 kernel, it's the typical choice
    Anything with lobes=1 is a low-pass filter, but next to no ringing artifacts
    """
    lanczos_filt = lanczosSubPixKernel( subPixShift, kernelShape=kernelShape, lobes=lobes )
    # Accelerate this with a threadPool
    imageOut = scipy.ndimage.convolve( imageIn, lanczos_filt, mode='reflect' )
    return imageOut
5,341,334
def test_retry_timeout_retries_timeouts():
    """Test retrying timeout."""
    calls = []

    def api(*args, **kwargs):
        """Mock api"""
        calls.append((args, kwargs))
        if len(calls) == 1:
            raise RequestTimeout()

    retry_api = retry_timeout(api, retries=2)
    retry_api(1, 2, hello='world')
    assert len(calls) == 2
5,341,335
def open_pdb(file_location): """ Opens PDB File. Parameters __________ file_location : str The Location for the PDB File. Returns _______ symbols : list Gives Atomic Symbols for Atoms from PDB File. coordinates: np.ndarray Gives Atomic Coordinates for the PDB File. """ # Reads PDB File and Returns Coordinates + Atom Names. with open(file_location) as pdb_file: pdb_data = pdb_file.readlines() pdb_file.close() # Generate Coordinates and Symbols Lists coordinates = [] symbols = [] # Cycle PDB_DATA for line in pdb_data: if 'ATOM' in line[0:6] or 'HETATM' in line[0:6]: symbols.append(line[76:79].strip()) atom_coordinates = [float(x) for x in line[30:55].split()] coordinates.append(atom_coordinates) coordinates = np.array(coordinates) # End of Script return symbols, coordinates
5,341,336
def _remove_statements(evaluator, stmt, name): """ This is the part where statements are being stripped. Due to lazy evaluation, statements like a = func; b = a; b() have to be evaluated. """ types = [] # Remove the statement docstr stuff for now, that has to be # implemented with the evaluator class. #if stmt.docstr: #res_new.append(stmt) check_instance = None if isinstance(stmt, er.InstanceElement) and stmt.is_class_var: check_instance = stmt.instance stmt = stmt.var types += evaluator.eval_statement(stmt, seek_name=name) if check_instance is not None: # class renames types = [er.get_instance_el(evaluator, check_instance, a, True) if isinstance(a, (er.Function, pr.Function)) else a for a in types] return types
5,341,337
def transform_vector_global_to_local_frame(
    vector: Tuple[float, float, float], theta: float
) -> Tuple[float, float, float]:
    """
    Transform a vector from global frame to local frame.

    :param vector: the vector to be rotated
    :param theta: the amount to rotate by
    :return the transformed vector.
    """
    return rotate_vector(vector, theta)
5,341,338
def given_source_ids_get_tic8_data(source_ids, queryname, n_max=10000, overwrite=True, enforce_all_sourceids_viable=True): """ Args: source_ids (np.ndarray) of np.int64 Gaia DR2 source_ids queryname (str): used for files overwrite: if True, and finds that this crossmatch has already run, deletes previous cached output and reruns anyway. enforce_all_sourceids_viable: if True, will raise an assertion error if every source id does not return a result. (Unless the query returns n_max entries, in which case only a warning will be raised). Returns: dataframe with Gaia DR2 crossmatch info. """ if type(source_ids) != np.ndarray: raise TypeError( 'source_ids must be np.ndarray of np.int64 Gaia DR2 source_ids' ) if type(source_ids[0]) != np.int64: raise TypeError( 'source_ids must be np.ndarray of np.int64 Gaia DR2 source_ids' ) xmltouploadpath = os.path.join( CACHEDIR, f'toupload_{queryname}_tic8.xml' ) dlpath = os.path.join( CACHEDIR, f'{queryname}_matches_tic8.xml.gz' ) if overwrite: if os.path.exists(xmltouploadpath): os.remove(xmltouploadpath) if not os.path.exists(xmltouploadpath): make_votable_given_source_ids(source_ids, outpath=xmltouploadpath) if os.path.exists(dlpath) and overwrite: os.remove(dlpath) if not os.path.exists(dlpath): tap = TapPlus(url="http://TAPVizieR.u-strasbg.fr/TAPVizieR/tap") jobstr = ( ''' SELECT top {n_max:d} * FROM TAP_UPLOAD.foobar as u, "IV/38/tic" as t WHERE u.source_id=t.GAIA ''' ).format( n_max=n_max ) query = jobstr # might do async if this times out. but it doesn't. j = tap.launch_job(query=query, upload_resource=xmltouploadpath, upload_table_name="foobar", verbose=True, dump_to_file=True, output_file=dlpath) df = given_votable_get_df(dlpath, assert_equal=None) import IPython; IPython.embed() if len(df) != len(source_ids) and enforce_all_sourceids_viable: if len(df) == n_max: wrnmsg = ( 'WRN! got {} matches vs {} source id queries'. format(len(df), len(source_ids)) ) print(wrnmsg) else: errmsg = ( 'ERROR! got {} matches vs {} source id queries'. format(len(df), len(source_ids)) ) print(errmsg) import IPython; IPython.embed() raise AssertionError(errmsg) if len(df) != len(source_ids) and not enforce_all_sourceids_viable: wrnmsg = ( 'WRN! got {} matches vs {} source id queries'. format(len(df), len(source_ids)) ) print(wrnmsg) return df
5,341,339
def kadane_algorithm(sequence: List[int]):
    """Greedy algorithm to track max sum so far - O(n) time and O(1) space"""
    if len(sequence) < 1:
        return 0

    max_sum = sequence[0]
    curr_sum = sequence[0]
    for curr_index in range(1, len(sequence)):
        curr_sum = max(sequence[curr_index], curr_sum + sequence[curr_index])
        max_sum = max(curr_sum, max_sum)

    return max_sum
5,341,340
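A minimal usage sketch for kadane_algorithm above; the sample inputs are illustrative and not part of the source row.

# Classic maximum-subarray case: the best contiguous run is [4, -1, 2, 1], summing to 6.
assert kadane_algorithm([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6
assert kadane_algorithm([-3, -1, -2]) == -1  # all-negative input returns the largest single element
assert kadane_algorithm([]) == 0             # empty input hits the early return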
def allow_domains(request: HttpRequest, domains: Iterable[str]) -> HttpResponse: """ Serves a cross-domain access policy allowing a list of domains. Note that if this is returned from the URL ``/crossdomain.xml`` on a domain, it will act as a master policy and will not permit other policies to exist on that domain. If you need to set meta-policy information and allow other policies, use the view :view:`flashpolicies.views.metapolicy` for the master policy instead. **Required arguments:** ``domains`` A list of domains from which to allow access. Each value may be either a domain name (e.g., ``example.com``) or a wildcard (e.g., ``*.example.com``). Due to serious potential security issues, it is strongly recommended that you not use wildcard domain values. **Optional arguments:** None. """ return serve(request, policies.Policy(*domains))
5,341,341
def get_Cs_OR():
    """Correction factor for the occupancy sensor of the lighting equipment installed in lighting zone i of other rooms

    Args:

    Returns:
        float: Cs_OR, correction factor for the occupancy sensor of the lighting equipment installed in lighting zone i of other rooms
    """
    return 1.0
5,341,342
def test_get_by_call_once_behavior() -> None:
    """It should consume any behavior marked with the `once` flag."""
    subject = StubStore()
    rehearsal = WhenRehearsal(spy_id=42, spy_name="my_spy", args=(1, 2, 3), kwargs={})
    behavior = StubBehavior(return_value="fizzbuzz", once=True)

    subject.add(rehearsal=rehearsal, behavior=behavior)

    result = subject.get_by_call(
        call=SpyCall(spy_id=42, spy_name="my_spy", args=(1, 2, 3), kwargs={})
    )
    assert result == behavior

    result = subject.get_by_call(
        call=SpyCall(spy_id=42, spy_name="my_spy", args=(1, 2, 3), kwargs={})
    )
    assert result == StubBehavior()
5,341,343
def build_url(station, d1, d2):
    """
    Return the URL to fetch the response record for USArray MT station
    identifier *station* for the time range *d1* to *d2*.
    """
    return 'http://service.iris.edu/irisws/resp/1/query?net=EM&sta={}&loc=--&cha=*&starttime={:%Y-%m-%dT%H:%M:%S}&endtime={:%Y-%m-%dT%H:%M:%S}'.format(station, d1, d2)
5,341,344
def FindGitSubmoduleCheckoutRoot(path, remote, url):
    """Get the root of your git submodule checkout, looking up from |path|.

    This function goes up the tree starting from |path| and looks for a .git/ dir
    configured with a |remote| pointing at |url|.

    Arguments:
        path: The path to start searching from.
        remote: The remote to compare the |url| with.
        url: The exact URL the |remote| needs to be pointed at.
    """
    def test_config(path):
        if os.path.isdir(path):
            remote_url = cros_build_lib.RunCommand(
                ['git', '--git-dir', path, 'config', 'remote.%s.url' % remote],
                redirect_stdout=True, debug_level=logging.DEBUG).output.strip()
            if remote_url == url:
                return True
        return False

    root_dir = osutils.FindInPathParents('.git', path, test_func=test_config)
    if root_dir:
        return os.path.dirname(root_dir)
    return None
5,341,345
def get_tlinks(timeml_doc):
    """ get tlinks from annotated document """
    root = xml_utilities.get_root(timeml_doc)
    tlinks = []
    for e in root:
        if e.tag == "TLINK":
            tlinks.append(e)
    return tlinks
5,341,346
def stack(xs: Sequence[Any], axis: int = 0) -> Any:
    """
    Stack the (leaf) arrays from xs

    :param xs: list of trees with the same shape, where the leaf values are numpy arrays
    :param axis: axis to stack along
    """
    return multimap(lambda *xs: np.stack(xs, axis=axis), *xs)
5,341,347
def load_LAC_geocodes_info(path_to_csv): """Import local area unit district codes Read csv file and create dictionary with 'geo_code' PROVIDED IN UNIT?? (KWH I guess) Note ----- - no LAD without population must be included """ with open(path_to_csv, 'r') as csvfile: read_lines = csv.reader(csvfile, delimiter=',') # Read line _headings = next(read_lines) # Skip first row data = {} for row in read_lines: values_line = {} for nr, value in enumerate(row[1:], 1): try: values_line[_headings[nr]] = float(value) except: values_line[_headings[nr]] = str(value) # Add entry with geo_code data[row[0]] = values_line return data
5,341,348
def singleton(class_):
    """Decorator for singleton class."""
    instances = {}

    def get_instance(*args, **kwargs):
        if class_ not in instances:
            instances[class_] = class_(*args, **kwargs)
        return instances[class_]

    return get_instance
5,341,349
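A brief usage sketch for the singleton decorator above; the AppConfig class is a hypothetical example, not part of the source row.

@singleton
class AppConfig:
    """Toy configuration object; constructed at most once."""
    def __init__(self, env="prod"):
        self.env = env

a = AppConfig(env="dev")
b = AppConfig()                    # later arguments are ignored; the cached instance is returned
assert a is b and b.env == "dev"   # both names refer to the same object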
def convert_model_to_half(model):
    """
    Converts model to half but keeps the batch norm layers in 32 bit for precision purposes
    """
    old_model = model
    new_model = BN_convert_float(model.half())
    del old_model  # Delete previous non-half model
    return new_model
5,341,350
def tokenize_sentences(sentences):
    """
    Tokenize sentences into tokens (words)

    Args:
        sentences: List of strings

    Returns:
        List of lists of tokens
    """
    # Initialize the list of lists of tokenized sentences
    tokenized_sentences = []
    ### START CODE HERE (Replace instances of 'None' with your code) ###

    # Go through each sentence
    for sentence in sentences:
        # Convert to lowercase letters
        sentence = sentence.lower()

        # Convert into a list of words
        tokenized = nltk.word_tokenize(sentence)

        # append the list of words to the list of lists
        tokenized_sentences.append(tokenized)

    ### END CODE HERE ###

    return tokenized_sentences
5,341,351
def optimize_syscalls(declares): """Disables filesystem if only a limited subset of syscalls is used. Our syscalls are static, and so if we see a very limited set of them - in particular, no open() syscall and just simple writing - then we don't need full filesystem support. If FORCE_FILESYSTEM is set, we can't do this. We also don't do it if INCLUDE_FULL_LIBRARY, since not including the filesystem would mean not including the full JS libraries, and the same for MAIN_MODULE since a side module might need the filesystem. """ relevant_settings = ['FORCE_FILESYSTEM', 'INCLUDE_FULL_LIBRARY', 'MAIN_MODULE'] if any(settings[s] for s in relevant_settings): return if settings.FILESYSTEM == 0: # without filesystem support, it doesn't matter what syscalls need settings.SYSCALLS_REQUIRE_FILESYSTEM = 0 else: syscall_prefixes = ('__syscall_', 'fd_') syscalls = [d for d in declares if d.startswith(syscall_prefixes)] # check if the only filesystem syscalls are in: close, ioctl, llseek, write # (without open, etc.. nothing substantial can be done, so we can disable # extra filesystem support in that case) if set(syscalls).issubset(set([ '__syscall_ioctl', 'fd_seek', 'fd_write', 'fd_close', ])): if DEBUG: logger.debug('very limited syscalls (%s) so disabling full filesystem support', ', '.join(str(s) for s in syscalls)) settings.SYSCALLS_REQUIRE_FILESYSTEM = 0
5,341,352
def verify_credential_info(): """ This url is called to verify and register the token """ challenge = session["challenge"] username = session["register_username"] display_name = session["register_display_name"] ukey = session["register_ukey"] user_exists = database.user_exists(username) if ( not user_exists or not current_user.is_authenticated or not username == current_user.id ): return make_response(jsonify({"fail": "User not logged in."}), 401) registration_response = request.form trust_anchor_dir = os.path.join( os.path.dirname(os.path.abspath(__file__)), TRUST_ANCHOR_DIR ) trusted_attestation_cert_required = TRUSTED_ATTESTATION_CERT_REQUIRED self_attestation_permitted = SELF_ATTESTATION_PERMITTED none_attestation_permitted = ATTESTATION_DATA == "none" webauthn_registration_response = webauthn.WebAuthnRegistrationResponse( RP_ID, ORIGIN, registration_response, challenge, trust_anchor_dir, trusted_attestation_cert_required, self_attestation_permitted, none_attestation_permitted, uv_required=False, ) # User Verification try: webauthn_credential = webauthn_registration_response.verify() except Exception as e: return jsonify({"fail": "Registration failed. Error: {}".format(e)}) credential_id_exists = database.credential_exists( webauthn_credential.credential_id, username ) if credential_id_exists: return make_response(jsonify({"fail": "Credential ID already exists."}), 401) existing_user = database.user_exists(username) if isinstance(existing_user, Response): return existing_user credential = Credential() if not existing_user or True: webauthn_credential.credential_id = str( webauthn_credential.credential_id, "utf-8" ) webauthn_credential.public_key = str(webauthn_credential.public_key, "utf-8") credential.ukey = ukey credential.username = username credential.display_name = display_name credential.pub_key = webauthn_credential.public_key credential.credential_id = webauthn_credential.credential_id credential.sign_count = webauthn_credential.sign_count credential.rp_id = RP_ID if not database.save_credential(credential): return make_response(jsonify({"fail": "Credential was not saved"}), 401) database.turn_on(credential.username) else: return make_response(jsonify({"fail": "User already exists."}), 401) satosa_request = Request() satosa_request.userId = credential.username database.make_success(satosa_request) user = database.get_user(credential.username) login_user(user) return jsonify({"success": "User successfully registered."})
5,341,353
def makeId(timestamp = 0, machine = 0, flow = 0):
    """ using unix style timestamp, not python timestamp """
    timestamp -= _base
    return (timestamp<<13) | (machine << 8) | flow
5,341,354
def chi2Significant(tuple, unigrams, bigrams):
    """Returns true, if token1 and token2 are significantly coocurring, false otherwise.
    The used test is the Chi2-test.

    Parameters:
        tuple: tuple of tokens
        unigrams: unigrams dictionary data structure
        bigrams: bigrams dictionary data structure
    """
    yes_yes = bigrams.get(tuple, 0)
    yes_not = unigrams.get(tuple[0], 0) - yes_yes
    not_yes = unigrams.get(tuple[1], 0) - bigrams.get(tuple, 0)
    not_not = sum(bigrams.values()) - 1 - yes_not - not_yes + yes_yes
    chi2score = chi2Score((yes_yes, yes_not, not_yes, not_not),
                          expectationFromObservationDF1((yes_yes, yes_not, not_yes, not_not)))
    if chi2score and chi2score > df1chi2sigscore:
        return True
    return False
5,341,355
def excel2schema(schema_excel_filename, schema_urlprefix, options, schema_dir=None): """ given an excel filename, convert it into memory object, and output JSON representation based on options. params: schema_excel_filename -- string, excel filename schema_urlprefix -- string, urlprefix for downloading schema's jsonld ie. the jsonld version of schema can be obtained from URL <code><schema_urlprefix><schema_release_identifier>.jsonld</code>, e.g.  http://localhost:8080/getschema/cns_top_v2.0.jsonld { schema_urlprefix = http://localhost:8080/getschema/ schema_release_identifier = cns_top_v2.0 schema_name = cns_top schema_vesrion = v2.0 } options -- string, comma seperated strings, each define expected output component, see <code>mem4export</code> return json dict see mem4export """ schema_excel_json = excel2json2018(schema_excel_filename) return table2schema(schema_excel_json, schema_urlprefix, options, schema_dir)
5,341,356
def assign_id_priority(handle):
    """
    Assign priority according to agent id (lower id means higher priority).
    :param handle:
    :return:
    """
    return handle
5,341,357
def setup_logging( filename, print_level="INFO", file_level="DEBUG", multiprocessing_aware=True, ): """ Sets up (possibly multiprocessing aware) logging. :param filename: Where to save the logs to :param print_level: What level of logging to print to console. Default: 'INFO' :param file_level: What level of logging to print to file. Default: 'DEBUG' :param multiprocessing_aware: Default: True """ logger = logging.getLogger() logger.setLevel(getattr(logging, file_level)) formatter = logging.Formatter( "%(asctime)s - %(levelname)s" " - %(processName)s %(filename)s:%(lineno)s" " - %(message)s" ) formatter.datefmt = "%Y-%m-%d %H:%M:%S %p" if filename is not None: fh = logging.FileHandler(filename) fh.setLevel(getattr(logging, file_level)) fh.setFormatter(formatter) logger.addHandler(fh) ch = RichHandler() ch.setLevel(getattr(logging, print_level)) ch.setFormatter(formatter) logger.addHandler(ch) if multiprocessing_aware: try: import multiprocessing_logging multiprocessing_logging.install_mp_handler() logging.info("Starting logging") logging.info( "Multiprocessing-logging module found. Logging from all" " processes" ) except ModuleNotFoundError: logging.info("Starting logging") logging.info( "Multiprocessing-logging module not found, not logging " "multiple processes." ) else: logging.info("Starting logging") logging.info("Not logging multiple processes")
5,341,358
def learning_rate_with_decay( batch_size, batch_denom, num_images, boundary_epochs, decay_rates): """Get a learning rate that decays step-wise as training progresses. Args: batch_size: the number of examples processed in each training batch. batch_denom: this value will be used to scale the base learning rate. `0.1 * batch size` is divided by this number, such that when batch_denom == batch_size, the initial learning rate will be 0.1. num_images: total number of images that will be used for training. boundary_epochs: list of ints representing the epochs at which we decay the learning rate. decay_rates: list of floats representing the decay rates to be used for scaling the learning rate. It should have one more element than `boundary_epochs`, and all elements should have the same type. Returns: Returns a function that takes a single argument - the number of batches trained so far (global_step)- and returns the learning rate to be used for training the next batch. """ initial_learning_rate = 0.1 * batch_size / batch_denom batches_per_epoch = num_images / batch_size # Multiply the learning rate by 0.1 at 100, 150, and 200 epochs. boundaries = [int(batches_per_epoch * epoch) for epoch in boundary_epochs] vals = [initial_learning_rate * decay for decay in decay_rates] def learning_rate_fn(global_step): global_step = tf.cast(global_step, tf.int32) return tf.train.piecewise_constant(global_step, boundaries, vals) return learning_rate_fn
5,341,359
def test_custom_class( content_view: Tuple[str, ContentView], serialization_format: Tuple[str, SerializationFormat], ): """Ensure an error is provided when something can't be serialized. A typing error does not exist here because the content is Dict[str, Any]. :param content_view: The content view :param serialization_format: The serialization format """ class CustomClass: """An empty custom class.""" content = {"foo": CustomClass()} serialized = serialize( content=content, content_view=content_view[1], serialization_format=serialization_format[1], ) assert ( f"The requested content could not be converted to {serialization_format[0]!s}." in serialized )
5,341,360
def run_parent(): """ Running in the parent process. """ print("启动行情记录守护父进程") # Chinese futures market trading period (day/night) DAY_START = time(8, 45) DAY_END = time(15, 30) NIGHT_START = time(20, 45) NIGHT_END = time(2, 45) child_process = None while True: current_time = datetime.now().time() trading = False # Check whether in trading period if ( (current_time >= DAY_START and current_time <= DAY_END) or (current_time >= NIGHT_START) or (current_time <= NIGHT_END) ): trading = True # Start child process in trading period if trading and child_process is None: print("启动子进程") child_process = multiprocessing.Process(target=run_child) child_process.start() print("子进程启动成功") # 非记录时间则退出子进程 if not trading and child_process is not None: print("关闭子进程") child_process.terminate() child_process.join() child_process = None print("子进程关闭成功") # 数据清洗 sleep(5)
5,341,361
def change_lock_status(title, new_lock_status, default=False, forced=False): """ This is called when a user starts or stops editing a page """ title = title.replace(" ", "_") if default: return # If this function is called from # is_locked due to the page being expired... if forced: update_lock_query(title, new_lock_status) return # This is mainly because there were pages already created that weren't in # the database. uid = auth_utils.get_user_id(flask.session['username']) query = """SELECT last_edit_uid FROM webpage_files WHERE title = %s""" with flask.g.pymysql_db.cursor() as cursor: cursor.execute(query, title) res = cursor.fetchone() # If the page isn't locked before OR if the last user who edited this # Is the same person if not is_locked(title) or res['last_edit_uid'] == uid: update_lock_query(title, new_lock_status)
5,341,362
def stepedit_SignType(*args):
    """
    * Returns a SignType fit for STEP (creates the first time)

    :rtype: Handle_IFSelect_Signature
    """
    return _STEPEdit.stepedit_SignType(*args)
5,341,363
def create_heatmap(out, data, row_labels, col_labels, title, colormap, vmax, ax=None, cbar_kw={}, cbarlabel="", **kwargs): """ Create a heatmap from a numpy array and two lists of labels. Arguments: data : A 2D numpy array of shape (N,M) row_labels : A list or array of length N with the labels for the rows col_labels : A list or array of length M with the labels for the columns Optional arguments: ax : A matplotlib.axes.Axes instance to which the heatmap is plotted. If not provided, use current axes or create a new one. cbar_kw : A dictionary with arguments to :meth:`matplotlib.Figure.colorbar`. cbarlabel : The label for the colorbar All other arguments are directly passed on to the imshow call. """ if not ax: ax = plt.gca() # Plot the heatmap im = ax.imshow(data, cmap=colormap, vmin=0, vmax=vmax, **kwargs) # Create colorbar cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw) cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom") plt.gcf().subplots_adjust(bottom=0.25) # We want to show all ticks... ax.set_xticks(np.arange(data.shape[1])) ax.set_yticks(np.arange(data.shape[0])) # ... and label them with the respective list entries. ax.set_xticklabels(col_labels) ax.set_yticklabels(row_labels) ax.tick_params(axis='both', which='major', labelsize=6) ax.tick_params(axis='both', which='minor', labelsize=6) ax.tick_params(top=False, bottom=True, labeltop=False, labelbottom=True) # Rotate the tick labels and set their alignment. plt.setp(ax.get_xticklabels(), rotation=90, ha="right") plt.title(title) # Turn spines off and create white grid. #for edge, spine in ax.spines.items(): # spine.set_visible(False) ax.set_xticks(np.arange(data.shape[1]+1)-.6, minor=True) ax.set_yticks(np.arange(data.shape[0]+1)-.6, minor=True) ax.grid(which="minor", color="k", linestyle='-', linewidth=0.5) ax.tick_params(which="minor", bottom=False, left=False) f = plt.savefig(out) plt.clf() return im, cbar
5,341,364
def flip(c1, c2):
    """last 8 secs in total"""
    for i in range(0, 10):  # 10*0.2 = 4secs
        setColor(c1)
        time.sleep(0.2)
        setColor(c2)
        time.sleep(0.2)
    for i in range(0, 20):  # 20*0.1 = 4secs
        setColor(c1)
        time.sleep(0.1)
        setColor(c2)
        time.sleep(0.1)
    setColor(c2)
5,341,365
def ccw(a: complex, b: complex, c: complex) -> int:
    """The sign of counter-clockwise angle of points abc.

    Args:
        a (complex): First point.
        b (complex): Second point.
        c (complex): Third point.

    Returns:
        int: If the three points are not colinear, then returns the sign of
            counter-clockwise angle of abc. That is, if the points abc make
            counter-clockwise turn, it returns +1. If clockwise turn, returns -1.
            If they are colinear, returns one of +2, -2, or 0. This depends on
            the order of points on the line.
    """
    b -= a
    c -= a
    if cross(b, c) > 0:    # counter-clockwise
        return +1
    elif cross(b, c) < 0:  # clockwise
        return -1
    elif dot(b, c) < 0:    # c--a--b on line
        return +2
    elif abs(b) < abs(c):  # a--b--c on line
        return -2
    else:                  # b--c--a on line
        return 0
5,341,366
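A usage sketch for ccw above. The cross and dot helpers it calls are not shown in this row, so plausible complex-number implementations are assumed here; the originals may differ.

# Hypothetical helpers matching what ccw() expects for complex 2-D points.
def cross(b: complex, c: complex) -> float:
    return (b.conjugate() * c).imag   # z-component of the 2-D cross product

def dot(b: complex, c: complex) -> float:
    return (b.conjugate() * c).real   # ordinary 2-D dot product

assert ccw(0 + 0j, 1 + 0j, 0 + 1j) == +1   # counter-clockwise turn
assert ccw(0 + 0j, 0 + 1j, 1 + 0j) == -1   # clockwise turn
assert ccw(0 + 0j, 1 + 0j, 2 + 0j) == -2   # colinear with a--b--c ordering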
def uninitializePlugin(obj):
    """
    """
    pluginFn = om.MFnPlugin(obj)
    try:
        omr.MDrawRegistry.deregisterDrawOverrideCreator(DRAW_DB_CLASSIFICATION, DRAW_REGISTRANT_ID)
    except:
        om.MGlobal.displayError("Failed to deregister draw override: {0}".format(PLUGIN_NAME))

    try:
        pluginFn.deregisterNode(PLUGIN_TYPE_ID)
    except:
        om.MGlobal.displayError("Failed to unregister node: {0}".format(PLUGIN_NAME))
5,341,367
def _tree_selector(X, leaf_size=40, metric='minkowski'):
    """ Selects the better tree approach for given data

    Parameters
    ----------
    X : {array-like, pandas dataframe} of shape (n_samples, n_features)
        The input data.

    leaf_size : int, default=40
        Number of points to switch to brute-force search of neighbors

    metric : str or DistanceMetric object, default='minkowski'
        The distance metric to use for the neighborhood tree. Refer to the
        DistanceMetric class documentation from sklearn for a list of
        available metrics

    Returns
    -------
    tree : {KDTree or BallTree}
        The best tree to be used to find neighbors given data
    """
    # Low dimensional spaces are fit to KD-Tree
    if X.shape[1] < 30:
        return KDTree(X, leaf_size=leaf_size, metric=metric)

    # High dimensional spaces are fit to Ball Tree
    if X.shape[1] >= 30:
        return BallTree(X, leaf_size=leaf_size, metric=metric)
5,341,368
def test_rxcui_output():
    """
    Test that get_rxcui() outputs a string with the correct format.
    """
    output = get_rxcui(drug_id='lipitor', id_type='name')
    assert(isinstance(output, str))
    assert(output=="153165")
5,341,369
def get_diff_objects(diff_object_mappings, orig_datamodel_object_list): """获取diff_objects :param diff_object_mappings: 变更对象内容mapping :param orig_datamodel_object_list: 操作前/上一次发布内容列表 :return: diff_objects: diff_objects列表 """ # 1)从diff_object_mappings中获取diff_objects diff_objects = [] field_diff_objects = [] model_relation_diff_objects = [] master_table_diff_object = None for key, value in list(diff_object_mappings.items()): if value['object_type'] == DataModelObjectType.FIELD.value: if value not in field_diff_objects: field_diff_objects.append(value) elif value['object_type'] == DataModelObjectType.MODEL_RELATION.value: if value not in model_relation_diff_objects: model_relation_diff_objects.append(value) elif value['object_type'] == DataModelObjectType.MASTER_TABLE.value: master_table_diff_object = value elif value not in diff_objects: diff_objects.append(value) # 2)将field_diff_objects放入主表的diff_objects中 # 如果字段有diff if field_diff_objects or model_relation_diff_objects: # 如果字段有diff,master_table_diff_object还是None if master_table_diff_object is None: for datamodel_object_dict in orig_datamodel_object_list: if datamodel_object_dict['object_type'] == DataModelObjectType.MASTER_TABLE.value: master_table_diff_object = { 'diff_type': DataModelObjectOperationType.UPDATE.value, 'object_id': datamodel_object_dict['object_id'], 'object_type': datamodel_object_dict['object_type'], } break # 将field_diff_objects的内容放在主表对应的object中 master_table_diff_object['diff_objects'] = field_diff_objects + model_relation_diff_objects diff_objects.append(master_table_diff_object) # 字段没有diff,但为主表整体非修改 elif ( master_table_diff_object is not None and master_table_diff_object['diff_type'] != DataModelObjectOperationType.UPDATE.value ): diff_objects.append(master_table_diff_object) return diff_objects
5,341,370
def find_lcs(s1, s2):
    """find the longest common subsequence between s1 and s2"""
    m = [[0 for i in range(len(s2) + 1)] for j in range(len(s1) + 1)]
    max_len = 0
    p = 0
    for i in range(len(s1)):
        for j in range(len(s2)):
            if s1[i] == s2[j]:
                m[i + 1][j + 1] = m[i][j] + 1
                if m[i + 1][j + 1] > max_len:
                    max_len = m[i + 1][j + 1]
                    p = i + 1
    return s1[p - max_len:p], max_len
5,341,371
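A quick usage sketch for find_lcs above; the example strings are illustrative. Note the DP table extends matches of adjacent characters, so what comes back is the longest shared contiguous run.

sub, length = find_lcs("academy", "abracadabra")
assert (sub, length) == ("acad", 4)        # longest shared contiguous run of characters
assert find_lcs("abc", "xyz") == ("", 0)   # no overlap at all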
def calculate_vertical_vorticity_cost(u, v, w, dx, dy, dz, Ut, Vt, coeff=1e-5): """ Calculates the cost function due to deviance from vertical vorticity equation. For more information of the vertical vorticity cost function, see Potvin et al. (2012) and Shapiro et al. (2009). Parameters ---------- u: 3D array Float array with u component of wind field v: 3D array Float array with v component of wind field w: 3D array Float array with w component of wind field dx: float array Spacing in x grid dy: float array Spacing in y grid dz: float array Spacing in z grid coeff: float Weighting coefficient Ut: float U component of storm motion Vt: float V component of storm motion Returns ------- Jv: float Value of vertical vorticity cost function. References ---------- Potvin, C.K., A. Shapiro, and M. Xue, 2012: Impact of a Vertical Vorticity Constraint in Variational Dual-Doppler Wind Analysis: Tests with Real and Simulated Supercell Data. J. Atmos. Oceanic Technol., 29, 32–49, https://doi.org/10.1175/JTECH-D-11-00019.1 Shapiro, A., C.K. Potvin, and J. Gao, 2009: Use of a Vertical Vorticity Equation in Variational Dual-Doppler Wind Analysis. J. Atmos. Oceanic Technol., 26, 2089–2106, https://doi.org/10.1175/2009JTECHA1256.1 """ dvdz = np.gradient(v, dz, axis=0) dudz = np.gradient(u, dz, axis=0) dwdz = np.gradient(w, dx, axis=2) dvdx = np.gradient(v, dx, axis=2) dwdy = np.gradient(w, dy, axis=1) dwdx = np.gradient(w, dx, axis=2) dudx = np.gradient(u, dx, axis=2) dvdy = np.gradient(v, dy, axis=2) dudy = np.gradient(u, dy, axis=1) zeta = dvdx - dudy dzeta_dx = np.gradient(zeta, dx, axis=2) dzeta_dy = np.gradient(zeta, dy, axis=1) dzeta_dz = np.gradient(zeta, dz, axis=0) jv_array = ((u - Ut) * dzeta_dx + (v - Vt) * dzeta_dy + w * dzeta_dz + (dvdz * dwdx - dudz * dwdy) + zeta * (dudx + dvdy)) return np.sum(coeff*jv_array**2)
5,341,372
def fetch_hero_stats() -> list:
    """Retrieves hero win/loss statistics from OpenDotaAPI."""
    r = httpx.get("https://api.opendota.com/api/heroStats")
    heroes = r.json()

    # Rename pro_<stat> to 8_<stat>, so it's easier to work with our enum
    for hero in heroes:
        for stat in ["win", "pick", "ban"]:
            hero[f"{Bracket.PRO.value}_{stat}"] = hero.pop(f"pro_{stat}")

    return heroes
5,341,373
def test_source_locations_are_within_correct_range(tokamak_source): """Tests that each source has RZ locations within the expected range. As the function converting (a,alpha) coordinates to (R,Z) is not bijective, we cannot convert back to validate each individual point. However, we can determine whether the generated points are contained within the shell of the last closed magnetic surface. See "Tokamak D-T neutron source models for different plasma physics confinement modes", C. Fausser et al., Fusion Engineering and Design, 2012 for more info. """ R_0 = tokamak_source.major_radius A = tokamak_source.minor_radius El = tokamak_source.elongation delta = tokamak_source.triangularity def get_R_on_LCMS(alpha): """Gets R on the last closed magnetic surface for a given alpha""" return R_0 + A * np.cos(alpha + delta * np.sin(alpha)) approx_lt = lambda x, y: x < y or np.isclose(x, y) approx_gt = lambda x, y: x > y or np.isclose(x, y) for source in tokamak_source.sources: R, Z = source.space.r.x[0], source.space.z.x[0] # First test that the point is contained with a simple box with # lower left (r_min,-z_max) and upper right (r_max,z_max) assert approx_gt(R, R_0 - A) assert approx_lt(R, R_0 + A) assert approx_lt(abs(Z), A * El) # For a given Z, we can determine the two values of alpha where # where a = minor_radius, and from there determine the upper and # lower bounds for R. alpha_1 = np.arcsin(abs(Z) / (El * A)) alpha_2 = np.pi - alpha_1 R_max, R_min = get_R_on_LCMS(alpha_1), get_R_on_LCMS(alpha_2) assert approx_lt(R_max, R_0 + A) assert approx_gt(R_min, R_0 - A) assert approx_lt(R, R_max) assert approx_gt(R, R_min)
5,341,374
def print_blue(txt, with_bgc=None, no_end=False):
    """
    Plain blue text.
    :param txt:
    :param with_bgc: background color to add; none is shown by default
    :return:
    """
    if plat == "Linux":
        print(color(txt, "default", with_bgc or "ordinary", "blue"), end='' if no_end else '\n')
    elif plat == "Windows":
        print(Fore.BLUE + win_back_color_map.get(with_bgc) or Back.RESET + txt, end='' if no_end else '\n')
    else:
        print(txt)
5,341,375
def GetTDryBulbFromEnthalpyAndHumRatio(MoistAirEnthalpy: float, HumRatio: float) -> float:
    """
    Return dry bulb temperature from enthalpy and humidity ratio.

    Args:
        MoistAirEnthalpy : Moist air enthalpy in Btu lb⁻¹ [IP] or J kg⁻¹
        HumRatio : Humidity ratio in lb_H₂O lb_Air⁻¹ [IP] or kg_H₂O kg_Air⁻¹ [SI]

    Returns:
        Dry-bulb temperature in °F [IP] or °C [SI]

    Reference:
        ASHRAE Handbook - Fundamentals (2017) ch. 1 eqn 30

    Notes:
        Based on the `GetMoistAirEnthalpy` function, rearranged for temperature.
    """
    if HumRatio < 0:
        raise ValueError("Humidity ratio is negative")
    BoundedHumRatio = max(HumRatio, MIN_HUM_RATIO)

    if isIP():
        TDryBulb = (MoistAirEnthalpy - 1061.0 * BoundedHumRatio) / (0.240 + 0.444 * BoundedHumRatio)
    else:
        TDryBulb = (MoistAirEnthalpy / 1000.0 - 2501.0 * BoundedHumRatio) / (1.006 + 1.86 * BoundedHumRatio)
    return TDryBulb
5,341,376
def test_vector_interpolation_projection(): """Test projection interpolation.""" vec = plonk.visualize.interpolation.vector_interpolation( x_data=X_DATA, y_data=Y_DATA, x_position=XX, y_position=YY, z_position=ZZ, extent=EXTENT, smoothing_length=HH, particle_mass=MM, number_of_pixels=PIX, ) np.testing.assert_allclose(vec, vector_projection, rtol=1e-5)
5,341,377
def calibration_runs(instr, exper, runnum=None):
    """
    Return the information about calibrations associated with the specified run
    (or all runs of the experiment if no specific run number is provided).
    The result will be packaged into a dictionary of the following type:

      <runnum> : { 'calibrations' : [<calibtype1>, <calibtype2>, ... ] ,
                   'comment'      : <text> }

    Where:

      <runnum>     : the run number
      <calibtype*> : the name of the calibration ('dark', 'flat', 'geometry', etc.)
      <text>       : an optional comment for the run

    PARAMETERS:

      @param instr: the name of the instrument
      @param exper: the name of the experiment
      @param runnum: the run number (optional)
    """
    run_numbers = []
    if runnum is None:
        run_numbers = [run['num'] for run in experiment_runs(instr, exper)]
    else:
        run_numbers = [runnum]

    result = {}

    for runnum in run_numbers:
        run_info = {'calibrations': [], 'comment': ''}
        for attr in run_attributes(instr, exper, runnum, 'Calibrations'):
            if attr['name'] == 'comment':
                run_info['comment'] = attr['val']
            elif attr['val']:
                run_info['calibrations'].append(attr['name'])
        result[runnum] = run_info

    return result
5,341,378
def fetch_eia(api_key, plant_id, file_path):
    """
    Read in EIA data of wind farm of interest
    - from EIA API for monthly productions, return monthly net energy generation time series
    - from local Excel files for wind farm metadata, return dictionary of metadata

    Args:
        api_key(:obj:`string`): 32-character user-specific API key, obtained from EIA
        plant_id(:obj:`string`): 5-character EIA power plant code
        file_path(:obj:`string`): directory with 2017 EIA metadata .xlsx files

    Returns:
        :obj:`pandas.Series`: monthly net energy generation in MWh
        :obj:`dictionary`: metadata of the wind farm with 'plant_id'
    """
    # EIA metadata
    plant_var_list = [
        "City",
        "Latitude",
        "Longitude",
        "Balancing Authority Name",
        "Transmission or Distribution System Owner",
    ]

    wind_var_list = [
        "Utility Name",
        "Plant Name",
        "State",
        "County",
        "Nameplate Capacity (MW)",
        "Operating Month",
        "Operating Year",
        "Number of Turbines",
        "Predominant Turbine Manufacturer",
        "Predominant Turbine Model Number",
        "Turbine Hub Height (Feet)",
    ]

    def meta_dic_fn(metafile, sheet, var_list):
        all_plant = pd.read_excel(file_path + metafile, sheet_name=sheet, skiprows=1)
        # np.int was removed in recent NumPy releases; the builtin int is equivalent here
        eia_plant = all_plant.loc[all_plant["Plant Code"] == int(plant_id)]  # specific wind farm

        if eia_plant.shape[0] == 0:  # Couldn't locate EIA ID in database
            raise Exception("Plant ID not found in EIA database")

        eia_info = eia_plant[var_list]  # select columns
        eia_info = eia_info.reset_index(drop=True)  # reset index to 0
        eia_dic = eia_info.T.to_dict()  # convert to dictionary
        out_dic = eia_dic[0]  # remove extra level of dictionary, "0" in this case

        return out_dic

    # file path with 2017 EIA metadata files
    plant_dic = meta_dic_fn("2___Plant_Y2017.xlsx", "Plant", plant_var_list)
    wind_dic = meta_dic_fn("3_2_Wind_Y2017.xlsx", "Operable", wind_var_list)

    # convert feet to meter
    hubheight_meter = np.round(
        unit_conversion.convert_feet_to_meter(wind_dic["Turbine Hub Height (Feet)"])
    )
    wind_dic.update({"Turbine Hub Height (m)": hubheight_meter})
    wind_dic.pop("Turbine Hub Height (Feet)", None)  # delete hub height in feet

    out_dic = plant_dic.copy()
    out_dic.update(wind_dic)  # append dictionary

    # EIA monthly energy production data
    api = eia.API(api_key)  # get data from EIA
    series_search_m = api.data_by_series(series="ELEC.PLANT.GEN.%s-ALL-ALL.M" % plant_id)
    eia_monthly = pd.DataFrame(series_search_m)  # net monthly energy generation of wind farm in MWh
    eia_monthly.columns = ["eia_monthly_mwh"]  # rename column
    eia_monthly = eia_monthly.set_index(
        pd.DatetimeIndex(eia_monthly.index)
    )  # convert to DatetimeIndex

    return eia_monthly, out_dic
5,341,379
def print_version(): """ Prints the version of the package and the license. """ version_text = """ This is mk-deps, version {version} Copyright (C) 2017 Livio Brunner This program comes with ABSOLUTELY NO WARRANTY. You are free to redistribute this code under the terms of the GNU General Public License, version 2, or (at your option) any later version. """.format(version=__VERSION__) print(version_text)
5,341,380
def gen_fathers(): """ :return: """ return
5,341,381
def calc_dist_mat(e: Extractor, indices: list) -> np.array: """ Calculates distance matrix among threads with indices specified Arguments: e : Extractor extractor object indices : list of ints list of indices corresponding to which threads are present for the distance matrix calculation """ # initialize distance matrix dmat = np.zeros((len(indices), len(indices))) # calculate dmat, non-diagonals only for i in range(len(indices)): for j in range(i+1, len(indices)): pos1 = e.spool.threads[indices[i]].positions pos2 = e.spool.threads[indices[j]].positions dmat[i,j] = np.linalg.norm(pos1 - pos2, axis = 1).mean() dmat = dmat + dmat.T return dmat
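# Standalone sketch of the same pairwise metric (mean Euclidean distance over
# time) on toy position arrays, without the Extractor/spool types assumed above.
import numpy as np

rng = np.random.default_rng(1)
positions = [rng.random((10, 3)) for _ in range(4)]  # 4 threads, 10 timepoints, 3D

n = len(positions)
dmat = np.zeros((n, n))
for i in range(n):
    for j in range(i + 1, n):
        dmat[i, j] = np.linalg.norm(positions[i] - positions[j], axis=1).mean()
dmat = dmat + dmat.T
print(dmat.shape)  # (4, 4): symmetric with a zero diagonal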
5,341,382
def spitzer_conductivity2(nele, tele, znuc, zbar): """ Compute the Spitzer conductivity Parameters: ----------- - nele [g/cm³] - tele [eV] - znuc: nuclear charge - zbar: mean ionization Returns: -------- - Spitzer conductivity [cm².s⁻¹] """ lnLam = coulomb_logarithm(nele, znuc, tele) return 2e21*tele**(5./2)/(lnLam*nele*(zbar+1))
5,341,383
def assert_tree_all_finite(tree_like: ArrayTree): """Assert all tensor leaves in a tree are finite. Args: tree_like: pytree with array leaves Raises: AssertionError: if any leaf in the tree is non-finite. """ all_finite = jax.tree_util.tree_all( jax.tree_map(lambda x: jnp.all(jnp.isfinite(x)), tree_like)) if not all_finite: is_finite = lambda x: "Finite" if jnp.all(jnp.isfinite(x)) else "Nonfinite" error_msg = jax.tree_map(is_finite, tree_like) raise AssertionError(f"Tree contains non-finite value: {error_msg}.")
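# Hypothetical usage: the assertion passes on a finite pytree and raises on NaN/inf.
import jax.numpy as jnp

good = {"w": jnp.ones((2, 2)), "b": jnp.zeros(2)}
assert_tree_all_finite(good)  # no error

bad = {"w": jnp.array([1.0, jnp.nan])}
try:
    assert_tree_all_finite(bad)
except AssertionError as err:
    print(err)  # reports which leaves are non-finite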
5,341,384
def parse_benchmark_results(benchmark_output, min_elements=None, max_elements=None): """ :type benchmark_output list[str] :type min_elements int|None :type max_elements int|None :rtype BenchmarkResults :return The parsed benchmark results file. The data member dict looks like this: { benchmark_function_str: { data_size_int: { container_type_str: { num_elements_int: cpu_time_nanoseconds } } } } While the sizes_in_bytes and cardinalities members are sorted lists. """ def data_type_to_size(data_type): if data_type == "int": return 4 elif data_type == "size_16": return 16 elif data_type == "size_64": return 64 raise Exception("Unknown type " + data_type) # Regex for individual iterations of the benchmark # Group 1: benchmark function name, e.g., BM_vector_sequential_read # Group 2: container type, e.g., FixedArray<size_16> # Group 3: data type, e.g., int or size_16 # Group 4: number of elements, between 4 and 16384 # Group 5: clock time in ns # Group 6: CPU time in ns # Group 7: iteration count benchmark_re = re.compile(r"^(\w+)<([\w<>:, ]+), (\w+)>\/(\d+)\s+(\d+) ns\s+(\d+) ns\s+(\d+)$") data = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(int)))) data_sizes = set() cardinalities = set() for line in benchmark_output: match = benchmark_re.match(line) if match: benchmark_fn = match.group(1) container_type = match.group(2) if container_type.startswith('std_'): container_type = container_type.replace('std_', 'std::') data_size = data_type_to_size(match.group(3)) num_elements = int(match.group(4)) cpu_time = int(match.group(6)) meets_min_requirements = not min_elements or num_elements >= min_elements meets_max_requirements = not max_elements or num_elements <= max_elements if meets_min_requirements and meets_max_requirements: data[benchmark_fn][data_size][container_type][num_elements] = cpu_time data_sizes.add(data_size) cardinalities.add(num_elements) return BenchmarkResults(data=data, sizes_in_bytes=sorted(data_sizes), cardinalities=sorted(cardinalities))
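# Illustrative input (invented timings) in the exact format the regex above
# expects, assuming the surrounding module defines the BenchmarkResults container.
sample = [
    "BM_vector_sequential_read<FixedArray<size_16>, size_16>/64        123 ns        120 ns      5600000",
]
results = parse_benchmark_results(sample)
print(results.sizes_in_bytes)  # [16]
print(results.cardinalities)   # [64]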
5,341,385
def test_find_by_id_as(session):
    """Assert that finding an amendment registration by ID contains all expected elements."""
    registration = Registration.find_by_id(200000008)
    assert registration
    assert registration.registration_id == 200000008
    assert registration.registration_num
    assert registration.registration_type_cd == 'CO'
    assert registration.financing_id
    json_data = registration.json
    assert json_data['changeType'] == 'CO'
    assert json_data['courtOrderInformation']
    assert json_data['addDebtors']
    assert len(json_data['addDebtors']) == 1
    assert json_data['addSecuredParties']
    assert len(json_data['addSecuredParties']) == 1
    assert json_data['addGeneralCollateral']
    assert len(json_data['addGeneralCollateral']) == 1
    assert json_data['addVehicleCollateral']
    assert len(json_data['addVehicleCollateral']) == 1
    assert json_data['deleteDebtors']
    assert len(json_data['deleteDebtors']) == 1
    assert json_data['deleteSecuredParties']
    assert len(json_data['deleteSecuredParties']) == 1
    assert json_data['deleteGeneralCollateral']
    assert len(json_data['deleteGeneralCollateral']) == 1
    assert json_data['deleteVehicleCollateral']
    assert len(json_data['deleteVehicleCollateral']) == 1
    assert 'documentId' not in json_data
5,341,386
def test_load_config_rules(neo4j_session, *args):
    """
    Ensure that expected config rules get loaded with their key fields.
    """
    data = tests.data.aws.config.LIST_CONFIG_RULES
    cartography.intel.aws.config.load_config_rules(
        neo4j_session,
        data,
        TEST_REGION,
        TEST_ACCOUNT_ID,
        TEST_UPDATE_TAG,
    )

    expected_nodes = {
        (
            "arn:aws:config:us-east-1:000000000000:config-rule/aws-service-rule/securityhub.amazonaws.com/config-rule-magmce",  # noqa:E501
            "arn:aws:config:us-east-1:000000000000:config-rule/aws-service-rule/securityhub.amazonaws.com/config-rule-magmce",  # noqa:E501
            "securityhub-alb-http-drop-invalid-header-enabled-9d3e1985",
            "Test description",
            "AWS",
            "ALB_HTTP_DROP_INVALID_HEADER_ENABLED",
            tuple(["{'EventSource': 'aws.config', 'MessageType': 'ConfigurationItemChangeNotification'}"]),
            "securityhub.amazonaws.com",
            "us-east-1",
        ),
    }

    nodes = neo4j_session.run(
        """
        MATCH (n:AWSConfigRule) RETURN n.id, n.arn, n.name, n.description, n.source_owner,
        n.source_identifier, n.source_details, n.created_by, n.region
        """,
    )
    actual_nodes = {
        (
            n['n.id'],
            n['n.arn'],
            n['n.name'],
            n['n.description'],
            n['n.source_owner'],
            n['n.source_identifier'],
            tuple(n['n.source_details']),
            n['n.created_by'],
            n['n.region'],
        )
        for n in nodes
    }
    assert actual_nodes == expected_nodes
5,341,387
def is_doctest_running() -> bool: """ >>> if not is_setup_test_running(): assert is_doctest_running() == True """ # this is used in our tests when we test cli-commands if os.getenv("PYTEST_IS_RUNNING"): return True for argv in sys.argv: if is_doctest_in_arg_string(argv): return True return False
5,341,388
def read_float_with_comma(num): """Helper method to parse a float string representation that has a comma as decimal separator. Can't use locale as the page being parsed could not be in the same locale as the python running environment Args: num (str): the float string to parse Returns: float: the parsed float """ return float(num.replace(",", "."))
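# Example: parse comma-decimal strings as scraped from a non-English locale page.
print(read_float_with_comma("3,14"))    # 3.14
print(read_float_with_comma("1234,5"))  # 1234.5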
5,341,389
def rds_print_instance(dbinstanceid, full=False): """ Print RDS instance info. :param dbinstanceid: The id/name of the RDS instance. :param full: Print all attributes, or just the most useful ones? Defaults to ``False``. """ dbinstancewrapper = RdsInstanceWrapper.get_dbinstancewrapper(dbinstanceid) print_rds_instance(dbinstancewrapper.dbinstance, full=bool(full), indentspaces=0)
5,341,390
def remote_repr(arg): """Return the `repr()` rendering of the supplied `arg`.""" return arg
5,341,391
def log(pathOrURL, limit=None, verbose=False, searchPattern=None, revision=None, userpass=None):
    """
    :param pathOrURL: working copy path or remote url
    :param limit: when the revision is a range, limit the record count
    :param verbose:
    :param searchPattern:
        - search in the limited records (limited by param limit)
        - matches any of the author, date, or log message text; if verbose is True,
          a changed path also matches
        - the search pattern uses "glob syntax" wildcards
            ?      matches any single character
            *      matches a sequence of arbitrary characters
            [abc]  matches any of the characters listed inside the brackets
    :param revision: single revision number or revision range tuple/list
        - if a range is specified, formats such as (5, 10) or (10, 50) are both supported
        - for (5, 10): the returned list is ordered 5 -> 10
        - for (10, 5): the returned list is ordered 10 -> 5
        - the bound revisions (e.g. 5 and 10) are also included

        example:
            revision=(5, 10) limit=2 output: 5, 6
            revision=(10, 5) limit=2 output: 10, 9
    """
    cmd = 'log'
    cmd += ' ' + pathOrURL
    cmd += ' --xml'
    if limit is not None:
        cmd += ' -l %s' % limit
    if verbose:
        cmd += ' -v'
    if searchPattern is not None:
        cmd += ' --search %s' % searchPattern
    cmd += ' ' + makeRevisionOptionStr(revision)
    cmd += ' ' + makeUserPassOptionStr(userpass)

    result = execOutputSubCommand(cmd)

    root = ET.fromstring(result)

    ret = []
    for logentryNode in root.iterfind('logentry'):
        logentry = {}
        ret.append(logentry)

        logentry['#revision'] = logentryNode.attrib['revision']
        logentry['author'] = logentryNode.find('author').text
        logentry['date'] = logentryNode.find('date').text
        logentry['msg'] = logentryNode.find('msg').text

        pathsNode = logentryNode.find('paths')
        if pathsNode is not None:
            paths = []
            logentry['paths'] = paths
            for path_node in pathsNode.iterfind('path'):
                path = {}
                paths.append(path)
                path['#'] = path_node.text
                path['#prop-mods'] = True if path_node.attrib['prop-mods'] == 'true' else False
                path['#text-mods'] = True if path_node.attrib['text-mods'] == 'true' else False
                path['#kind'] = path_node.attrib['kind']
                path['#action'] = path_node.attrib['action']

    return ret
5,341,392
def value_cards(cards: [Card], trump: Suite, lead_suite: Suite) -> [(Card, int)]:
    """Returns a list of (card, point value) tuples that ranks each card in a hand;
    the absolute point values do not matter, only their relative order."""
    card_values = []
    for card in cards:
        if vm.is_trump(card, trump):
            card_values.append((card, vm.trump_value(card, trump) + 20))
        elif card.suite == lead_suite:
            card_values.append((card, card.face_card.value + 10))
        else:
            card_values.append((card, card.face_card.value))
    return card_values
5,341,393
def com_google_fonts_check_ftxvalidator_is_available(ftxvalidator_cmd): """Is the command `ftxvalidator` (Apple Font Tool Suite) available?""" if ftxvalidator_cmd: yield PASS, f"ftxvalidator is available at {ftxvalidator_cmd}" else: yield WARN, \ Message("ftxvalidator-available", "Could not find ftxvalidator.")
5,341,394
def submit_scraper_job(job_type, tag, params):
    """
    Submits a scraper job to Mozart.
    :param job_type: job type name (combined with tag to form the job specification)
    :param tag: job type release tag (combined with job_type to form the job specification)
    :param params: job parameters forwarded to the HySDS IO wiring
    :return:
    """
    rule = {
        "rule_name": "bos_sarcat_scraper",
        "queue": CRAWLER_QUEUE,
        "priority": '8',
        "kwargs": '{}'
    }

    print('submitting job with params:')

    job_spec = '%s:%s' % (job_type, tag)
    job_name = '%s-%s' % (job_type, tag)

    hysds_ios = {
        "id": "internal-temporary-wiring",
        "params": params,
        "job-specification": job_spec
    }
    print(json.dumps(params, sort_keys=True, indent=4, separators=(',', ': ')))
    mozart_job_id = submit_mozart_job({}, rule, hysdsio=hysds_ios, job_name=job_name, enable_dedup=False)
    LOGGER.info("Job ID: " + mozart_job_id)
    print("Job ID: " + mozart_job_id)
    return
5,341,395
def mount_raw_image(path): """Mount raw image using OS specific methods, returns pathlib.Path.""" loopback_path = None if PLATFORM == 'Darwin': loopback_path = mount_raw_image_macos(path) elif PLATFORM == 'Linux': loopback_path = mount_raw_image_linux(path) # Check if not loopback_path: std.print_error(f'Failed to mount image: {path}') # Register unmount atexit atexit.register(unmount_loopback_device, loopback_path) # Done return loopback_path
5,341,396
def timer(start, end, description=''): """https://stackoverflow.com/questions/27779677/how-to-format-elapsed-time-from-seconds-to-hours-minutes-seconds-and-milliseco """ hours, rem = divmod(end-start, 3600) minutes, seconds = divmod(rem, 60) print("{} {:0>2}:{:0>2}:{:05.2f}".format(description, int(hours), int(minutes), seconds))
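# Usage sketch with time.time() stamps.
import time

start = time.time()
time.sleep(0.25)
timer(start, time.time(), description="elapsed:")  # e.g. "elapsed: 00:00:00.25"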
5,341,397
def calculate_weights_indices(in_length, out_length, scale, kernel_width, antialiasing):
    """ Get weights and indices """
    if (scale < 1) and (antialiasing):
        # Use a modified kernel to simultaneously interpolate and antialias - larger kernel width
        kernel_width = kernel_width / scale

    # Output-space coordinates
    x = np.linspace(1, out_length, out_length)

    # Input-space coordinates. Calculate the inverse mapping such that 0.5
    # in output space maps to 0.5 in input space, and 0.5+scale in output
    # space maps to 1.5 in input space.
    u = x / scale + 0.5 * (1 - 1 / scale)

    # What is the left-most pixel that can be involved in the computation?
    left = np.floor(u - kernel_width / 2)

    # What is the maximum number of pixels that can be involved in the
    # computation?  Note: it's OK to use an extra pixel here; if the
    # corresponding weights are all zero, it will be eliminated at the end
    # of this function.
    P = math.ceil(kernel_width) + 2

    # The indices of the input pixels involved in computing the k-th output
    # pixel are in row k of the indices matrix.
    indices = np.repeat(left.reshape(out_length, 1), P).reshape(out_length, P) + \
        np.broadcast_to(np.linspace(
            0, P - 1, P).reshape(1, P), (out_length, P))

    # The weights used to compute the k-th output pixel are in row k of the
    # weights matrix.
    distance_to_center = np.repeat(
        u.reshape(out_length, 1), P).reshape(out_length, P) - indices

    # apply cubic kernel
    if (scale < 1) and (antialiasing):
        weights = scale * cubic(distance_to_center * scale)
    else:
        weights = cubic(distance_to_center)

    # Normalize the weights matrix so that each row sums to 1.
    weights_sum = np.sum(weights, 1).reshape(out_length, 1)
    weights = weights / np.repeat(weights_sum, P).reshape(out_length, P)

    # If a column in weights is all zero, get rid of it. only consider the first and last column.
    weights_zero_tmp = np.sum((weights == 0), 0)
    if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
        # equivalent to torch's narrow(1, 1, P - 2): keep columns 1..P-2
        indices = indices[:, 1:P - 1]
        weights = weights[:, 1:P - 1]
    if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
        # equivalent to torch's narrow(1, 0, P - 2): keep columns 0..P-3
        indices = indices[:, 0:P - 2]
        weights = weights[:, 0:P - 2]
    weights = np.ascontiguousarray(weights)
    indices = np.ascontiguousarray(indices)
    sym_len_s = -indices.min() + 1
    sym_len_e = indices.max() - in_length
    indices = indices + sym_len_s - 1
    return weights, indices, int(sym_len_s), int(sym_len_e)
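# Hypothetical call for a 2x bicubic downscale of a length-12 axis; assumes the
# module-level `cubic` kernel used above is available in the same scope.
weights, indices, sym_len_s, sym_len_e = calculate_weights_indices(
    in_length=12, out_length=6, scale=0.5, kernel_width=4, antialiasing=True)
print(weights.shape, indices.shape)  # each row of weights sums to 1
print(sym_len_s, sym_len_e)          # symmetric padding the caller must add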
5,341,398
def inject_config_into_engines(engine_prefix, config_path): """ Copy the current config into all engines that are managed by `sgr engine`. This is so that the engine has the right credentials and settings for when we do layered querying (a Postgres client queries the engine directly and it has to download objects etc). :param engine_prefix: Prefix for Docker containers that are considered to be engines :param config_path: Path to the config file. """ engine_containers = list_engines(engine_prefix, include_all=True) if engine_containers: logging.info("Copying the config file at %s into all current engines", config_path) for container in engine_containers: copy_to_container(container, config_path, "/.sgconfig") logging.info("Config updated for container %s", container.name)
5,341,399