content: string (lengths 22 – 815k)
id: int64 (0 – 4.91M)
def corpus_loader(folder: str) -> List[str]:
    """
    A corpus loader function which takes in a path to a folder and returns a list of strings.
    """
5,336,700
def socket_port(ip, port):
    """Scan the given IP and port and report whether the port is in use."""
    # `socket` and `lock` are expected to be module-level objects in the original script.
    try:
        if port:
            print('Port scan finished')
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        result = s.connect_ex((ip, port))
        if result == 0:
            lock.acquire()
            print(ip, ':', port, 'port is already in use')
            lock.release()
    except Exception:
        print('Port scan error')
5,336,701
def parse_debug_node_name(node_name):
    """Parse the name of a debug node.

    Args:
        node_name: Name of the debug node.

    Returns:
        1. Name of the watched node, as a str.
        2. Output slot index of the watched tensor, as an int.
        3. Index of the debug node, as an int.
        4. Name of the debug op, as a str, e.g, "DebugIdentity".

    Raises:
        ValueError: If the input node name is not a valid debug node name.
    """
    prefix = "__dbg_"
    name = node_name
    if not name.startswith(prefix):
        raise ValueError("Invalid prefix in debug node name: '%s'" % node_name)

    name = name[len(prefix):]
    if name.count("_") < 2:
        raise ValueError("Invalid debug node name: '%s'" % node_name)

    debug_op = name[name.rindex("_") + 1:]
    name = name[:name.rindex("_")]

    debug_op_index = int(name[name.rindex("_") + 1:])
    name = name[:name.rindex("_")]

    if name.count(":") != 1:
        raise ValueError("Invalid tensor name in debug node name: '%s'" % node_name)

    watched_node_name = name[:name.index(":")]
    watched_output_slot = int(name[name.index(":") + 1:])

    return watched_node_name, watched_output_slot, debug_op_index, debug_op
5,336,702
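A quick usage sketch for the parser above; the node name is a hypothetical example built to follow the `__dbg_<tensor>_<debug_index>_<debug_op>` convention that the function expects.

# Hypothetical debug node name, shaped to match what parse_debug_node_name expects.
name = "__dbg_hidden/weights:0_0_DebugIdentity"
watched_node, slot, debug_index, debug_op = parse_debug_node_name(name)
# watched_node == "hidden/weights", slot == 0, debug_index == 0, debug_op == "DebugIdentity"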
def run_basic():
    """Check that the windows all open ok (i.e. is GUI functioning?)."""
    _initialize()
    s = 'Simulation'
    p = 'Plots'
    menu_paths = [
        (s, 'Test Pattern'),
        (s, 'Model Editor'),
        (p, 'Activity'),
        (p, 'Connection Fields'),
        (p, 'Projection'),
        (p, 'Projection Activity'),
        (p, 'Preference Maps', 'Orientation Preference'),
        (p, 'Tuning Curves', 'Orientation Tuning')
    ]
    return ft.run([_menu_item_fn(*x) for x in menu_paths],
                  "Running basic GUI tests...")
5,336,703
def node_to_evenly_discretized(node):
    """
    Parses the evenly discretized mfd node to an instance of the :class:
    openquake.hazardlib.mfd.evenly_discretized.EvenlyDiscretizedMFD, or to
    None if not all parameters are available
    """
    if not all([node.attrib["minMag"], node.attrib["binWidth"],
                node.nodes[0].text]):
        return None
    # Text to float
    rates = [float(x) for x in node.nodes[0].text.split()]
    return mfd.evenly_discretized.EvenlyDiscretizedMFD(
        float(node.attrib["minMag"]),
        float(node.attrib["binWidth"]),
        rates)
5,336,704
def delete_comment(request, collection_id, comment_id):
    """Delete a comment if staff or the comment owner requests it."""
    collection = get_object_or_404(Collection, id=collection_id)
    comment = get_object_or_404(Comment, id=comment_id, collection=collection)
    if not request.user.is_authenticated:
        messages.error(request, "Stop there! How dare you delete a comment without logging in?")
        return redirect('collection', collection_id=collection.id)
    if not request.user.is_staff and not request.user.is_superuser and request.user != comment.user:
        messages.error(request, "Wait! This is not yours! You can't delete this comment!")
        return redirect('collection', collection_id=collection.id)
    # After this point, everything is valid and it is safe to delete the comment.
    comment.delete()
    messages.success(request, "Comment deleted successfully!")
    return redirect('collection', collection_id=collection.id)
5,336,705
def final_messages(systems_created_counter, systems_updated_counter, systems_skipped_counter, systems_multiple_counter, systems_multiple_list, request): """ final messages if function was called from 'system_instant' and 'system_upload' """ # call final messages if systems_created_counter > 0: if systems_created_counter == 1: messages.success(request, f'{systems_created_counter} system was created.') else: messages.success(request, f'{systems_created_counter} systems were created.') if systems_updated_counter > 0: if systems_updated_counter == 1: messages.success(request, f'{systems_updated_counter} system was updated.') else: messages.success(request, f'{systems_updated_counter} systems were updated.') if systems_skipped_counter > 0: if systems_skipped_counter == 1: messages.success(request, f'{systems_skipped_counter} system was skipped.') else: messages.success(request, f'{systems_skipped_counter} systems were skipped.') if systems_multiple_counter > 0: if systems_multiple_counter == 1: messages.warning(request, f'{systems_multiple_counter} system was skipped because it existed several times. {systems_multiple_list}') else: messages.warning(request, f'{systems_multiple_counter} systems were skipped because they existed several times. {systems_multiple_list}') # return to 'csv_main.system_handler' return
5,336,706
def insert_row(key, translation, idx_language):
    """Create a database AgentXlate.agent row.

    Args:
        key: AgentXlate key
        translation: AgentXlate translation
        idx_language: Language table index

    Returns:
        None
    """
    # Insert and get the new agent value
    with db.db_modify(20130, die=True) as session:
        session.add(
            AgentXlate(
                agent_program=str(key).strip().encode(),
                translation=str(translation).strip().encode(),
                idx_language=idx_language
            )
        )
5,336,707
def layers_weights_as_vector(model, initial=True): """ Creates a list holding the weights of each layer (Conv and Dense) in the CNN as a vector. model: A reference to the instance from the cnn.Model class. initial: When True, the function returns the initial weights of the CNN. When False, the trained weights of the CNN layers are returned. The initial weights are only needed before network training starts. The trained weights are needed to predict the network outputs. Returns a list (network_weights) holding the weights of the CNN layers as a vector. """ network_weights = [] layer = model.last_layer while "previous_layer" in layer.__init__.__code__.co_varnames: if type(layer) in [Conv2D, Dense]: # If the 'initial' parameter is True, append the initial weights. Otherwise, append the trained weights. if initial == True: vector = numpy.reshape(layer.initial_weights, newshape=(layer.initial_weights.size)) # vector = pygad.nn.DenseLayer.to_vector(matrix=layer.initial_weights) network_weights.extend(vector) elif initial == False: vector = numpy.reshape(layer.trained_weights, newshape=(layer.trained_weights.size)) # vector = pygad.nn.DenseLayer.to_vector(array=layer.trained_weights) network_weights.extend(vector) else: raise ValueError("Unexpected value to the 'initial' parameter: {initial}.".format(initial=initial)) # Go to the previous layer. layer = layer.previous_layer # If the first layer in the network is not an input layer (i.e. an instance of the Input2D class), raise an error. if not (type(layer) is Input2D): raise TypeError("The first layer in the network architecture must be an input layer.") # Currently, the weights of the layers are in the reverse order. In other words, the weights of the first layer are at the last index of the 'network_weights' list while the weights of the last layer are at the first index. # Reversing the 'network_weights' list to order the layers' weights according to their location in the network architecture (i.e. the weights of the first layer appears at index 0 of the list). network_weights.reverse() return numpy.array(network_weights)
5,336,708
def calculate_moist_adiabatic_lapse_rate(t, p):
    """calculate moist adiabatic lapse rate from pressure, temperature

    p: pressure in hPa
    t: temperature in Kelvin

    returns: moist adiabatic lapse rate in Kelvin/m
    """
    es = 611.2*np.exp(17.67*(t-273.15)/(t-29.65))  # Bolton formula, es in Pa
    qs = 0.622*es/(p*100-0.378*es)
    num = 1 + lv*qs/(Rdry*t)
    denom = 1 + lv**2*qs/(cp*Rvap*t**2)
    gamma = g/cp*(1-num/denom)
    return gamma
5,336,709
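An illustrative call to the lapse-rate function above. The thermodynamic constants (lv, Rdry, Rvap, cp, g) are module-level globals in the original snippet, so approximate textbook values are assumed here purely for the sketch.

import numpy as np

# Assumed values for the module-level constants used by the function above.
lv = 2.5e6     # latent heat of vaporization, J/kg
Rdry = 287.0   # gas constant of dry air, J/(kg K)
Rvap = 461.5   # gas constant of water vapor, J/(kg K)
cp = 1004.0    # specific heat of dry air, J/(kg K)
g = 9.81       # gravitational acceleration, m/s^2

gamma = calculate_moist_adiabatic_lapse_rate(t=280.0, p=850.0)
print(gamma)   # on the order of a few 1e-3 K/m with these assumed constants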
def main():
    """
    Primary entrypoint to dynamic inventory script
    """
    parser = create_parser()
    args = parser.parse_args()
    getSplunkInventory(inventory)
    if args.write_to_file:
        with open(os.path.join("/opt/container_artifact", "ansible_inventory.json"), "w") as outfile:
            json.dump(obfuscate_vars(inventory), outfile, sort_keys=True, indent=4, ensure_ascii=False)
    elif args.write_to_stdout:
        # remove keys we don't want to print
        inventory_to_dump = prep_for_yaml_out(inventory)
        print("---")
        print(yaml.dump(inventory_to_dump, default_flow_style=False))
    else:
        print(json.dumps(inventory))
5,336,710
def test_setext_headings_extra_59():
    """
    Test case extra 59: SetExt heading with inline image with newline between image chars, invalidating it.
    """

    # Arrange
    source_markdown = """a!
[Foo](/uri "testing")a
---"""
    expected_tokens = [
        "[setext(3,1):-:3::(1,1)]",
        "[text(1,1):a!\n::\n]",
        '[link(2,1):inline:/uri:testing::::Foo:False:":: :]',
        "[text(2,2):Foo:]",
        "[end-link::]",
        "[text(2,22):a:]",
        "[end-setext::]",
    ]
    expected_gfm = """<h2>a!\n<a href="/uri" title="testing">Foo</a>a</h2>"""

    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
5,336,711
def read_cat_file(genomeCatFile):
    """Read in genome categories and create dictionary of category name and
    genomes in that category"""
    inFile = open(genomeCatFile, 'r')
    catDict = {}
    for line in inFile:
        line = line.strip()
        entries = line.split()
        genome = entries[0]
        cat = entries[1]
        if cat in catDict:
            catDict[cat].add(genome)
        else:
            catDict[cat] = {genome}
    inFile.close()
    return catDict
5,336,712
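A minimal illustrative run of read_cat_file, using a throwaway file name; the two-column "genome category" layout is the one the parser assumes.

# Write a tiny category file and read it back.
with open("genome_categories.txt", "w") as fh:
    fh.write("genomeA cat1\ngenomeB cat1\ngenomeC cat2\n")

cats = read_cat_file("genome_categories.txt")
# cats == {"cat1": {"genomeA", "genomeB"}, "cat2": {"genomeC"}}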
def logprint(log):
    """Wrapper for printing data inside userSetup.py.

    :param log: The string to print
    :return:
    """
    print('userSetup.py: %s' % log)
5,336,713
def receive_message():
    """
    Receive message from server
    :return: None
    """
    while True:
        try:
            msg = client.recv(1024).decode('utf-8')
            print(msg)
        except Exception:
            print("[EXCEPTION WHILE RECEIVING]")
            client.close()
            break
5,336,714
def _check_axes(axes):
    """Return ``axes`` if it is a matplotlib Axes instance; fall back to `gca` when None."""
    if axes is None:
        import matplotlib.pyplot as plt
        axes = plt.gca()
    elif not isinstance(axes, Axes):
        raise ValueError(
            "`axes` must be an instance of matplotlib.axes.Axes. "
            "Found type(axes)={}".format(type(axes))
        )
    return axes
5,336,715
def StronglyEntanglingCircuitBlock(weights, periodic=True, r=1, imprimitive=CNOT, wires=None):
    """pennylane.template.StronglyEntanglingCircuitBlock(weights, periodic=True, r=1, imprimitive=qml.CNOT, wires)

    An individual block of a strongly entangling circuit.

    Args:
        weights (array[float]): shape ``(len(wires), 3)`` array of weights
        periodic (bool): whether to use periodic boundary conditions when applying imprimitive gates
        r (Sequence[int]): range of the imprimitive gates of this block
        imprimitive (pennylane.ops.Operation): Imprimitive gate to use, defaults to :class:`~.CNOT`

    Keyword Args:
        wires (Sequence[int]): Wires the block should act on
    """
    for i, wire in enumerate(wires):
        Rot(weights[i, 0], weights[i, 1], weights[i, 2], wires=wire)

    num_wires = len(wires)
    for i in range(num_wires) if periodic else range(num_wires-1):
        imprimitive(wires=[wires[i], wires[(i+r) % num_wires]])
5,336,716
def gauss_method_mpc(filename, bodyname, obs_arr=None, r2_root_ind_vec=None,
                     refiters=0, plot=True):
    """Gauss method high-level function for minor planets (asteroids, comets,
    etc.) orbit determination from MPC-formatted ra/dec tracking data. Roots of
    the 8th-order Gauss polynomial are computed using the np.roots function.
    Note that if `r2_root_ind_vec` is not specified by the user, then the first
    positive root returned by np.roots is used by default.

    Args:
        filename (string): path to MPC-formatted observation data file
        bodyname (string): user-defined name of minor planet
        obs_arr (int vector): line numbers in data file to be processed
        refiters (int): number of refinement iterations to be performed
        r2_root_ind_vec (1xlen(obs_arr) int array): indices of Gauss polynomial roots.
        plot (bool): if True, plots data.

    Returns:
        x (tuple): set of Keplerian orbital elements
            {(a, e, taup, omega, I, Omega, T), t_vec[-1]}
    """
    # load MPC data for a given NEA
    mpc_object_data = load_mpc_data(filename)
    # load MPC data of listed observatories (longitude, parallax constants C, S)
    mpc_observatories_data = load_mpc_observatories_data('../station_observatory_data/mpc_observatories.txt')
    # definition of the astronomical unit in km
    # au = cts.au.to(uts.Unit('km')).value
    # Sun's G*m value
    # mu_Sun = 0.295912208285591100E-03 # au^3/day^2
    mu = mu_Sun  # cts.GM_sun.to(uts.Unit("au3 / day2")).value
    # handle default behavior for obs_arr
    # load JPL DE432s ephemeris SPK kernel
    # 'de432s.bsp' is automatically loaded by astropy, via jplephem
    # 'de432s.bsp' is about 10MB in size and will be automatically downloaded if not present yet in astropy's cache
    # for more information, see astropy.coordinates.solar_system_ephemeris documentation
    print("")
    questions = [
        inquirer.List('Ephemerides',
                      message="Select ephemerides [de432s (default, small in size, faster), de430 (more precise)]:",
                      choices=['de432s', 'de430'],
                      ),
    ]
    answers = inquirer.prompt(questions)
    global x_ephem
    x_ephem = answers["Ephemerides"]
    solar_system_ephemeris.set(answers["Ephemerides"])

    if obs_arr is None:
        obs_arr = list(range(1, len(mpc_object_data)+1))

    # the total number of observations used
    nobs = len(obs_arr)

    # if r2_root_ind_vec was not specified, then always use the first positive root by default
    if r2_root_ind_vec is None:
        r2_root_ind_vec = np.zeros((nobs-2,), dtype=int)

    # auxiliary arrays
    x_vec = np.zeros((nobs,))
    y_vec = np.zeros((nobs,))
    z_vec = np.zeros((nobs,))
    a_vec = np.zeros((nobs-2,))
    e_vec = np.zeros((nobs-2,))
    taup_vec = np.zeros((nobs-2,))
    I_vec = np.zeros((nobs-2,))
    W_vec = np.zeros((nobs-2,))
    w_vec = np.zeros((nobs-2,))
    n_vec = np.zeros((nobs-2,))
    x_Ea_vec = np.zeros((nobs,))
    y_Ea_vec = np.zeros((nobs,))
    z_Ea_vec = np.zeros((nobs,))
    t_vec = np.zeros((nobs,))

    # Speed of light constant
    c = 299792.458
    print("Consider light propagation time? [y/n]")
    check = input()
    if check != 'y' and check != 'n':
        print("Invalid input. Exiting...\n")
        sys.exit()

    for j in range(0, nobs-2):
        # Apply Gauss method to three elements of data
        inds = [obs_arr[j]-1, obs_arr[j+1]-1, obs_arr[j+2]-1]
        print('Processing observation #', j)
        r1, r2, r3, v2, R, rho1, rho2, rho3, rho_1_sr, rho_2_sr, rho_3_sr, Ea_hc_pos, obs_t = \
            gauss_iterator_mpc(mpc_object_data, mpc_observatories_data, inds,
                               refiters=refiters, r2_root_ind=r2_root_ind_vec[j])

        # Consider light propagation time
        if check == 'y':
            # print(obs_t[0])
            # print(obs_t[1])
            obs_t[0] = obs_t[0] - (rho_1_sr/c)
            obs_t[1] = obs_t[1] - (rho_2_sr/c)
            obs_t[2] = obs_t[2] - (rho_3_sr/c)
            # print(rho_1_sr)

        if j == 0:
            t_vec[0] = obs_t[0]
            x_vec[0], y_vec[0], z_vec[0] = np.matmul(rot_equat_to_eclip, r1)
            x_Ea_vec[0], y_Ea_vec[0], z_Ea_vec[0] = np.matmul(rot_equat_to_eclip, earth_ephemeris(obs_t[0])/au)
        if j == nobs-3:
            t_vec[nobs-1] = obs_t[2]
            x_vec[nobs-1], y_vec[nobs-1], z_vec[nobs-1] = np.matmul(rot_equat_to_eclip, r3)
            x_Ea_vec[nobs-1], y_Ea_vec[nobs-1], z_Ea_vec[nobs-1] = np.matmul(rot_equat_to_eclip, earth_ephemeris(obs_t[2])/au)

        r2_eclip = np.matmul(rot_equat_to_eclip, r2)
        v2_eclip = np.matmul(rot_equat_to_eclip, v2)

        a_num = semimajoraxis(r2_eclip[0], r2_eclip[1], r2_eclip[2], v2_eclip[0], v2_eclip[1], v2_eclip[2], mu)
        e_num = eccentricity(r2_eclip[0], r2_eclip[1], r2_eclip[2], v2_eclip[0], v2_eclip[1], v2_eclip[2], mu)
        f_num = trueanomaly5(r2_eclip[0], r2_eclip[1], r2_eclip[2], v2_eclip[0], v2_eclip[1], v2_eclip[2], mu)
        n_num = meanmotion(mu, a_num)

        a_vec[j] = a_num
        e_vec[j] = e_num
        taup_vec[j] = taupericenter(obs_t[1], e_num, f_num, n_num)
        w_vec[j] = np.rad2deg(argperi(r2_eclip[0], r2_eclip[1], r2_eclip[2], v2_eclip[0], v2_eclip[1], v2_eclip[2], mu))
        I_vec[j] = np.rad2deg(inclination(r2_eclip[0], r2_eclip[1], r2_eclip[2], v2_eclip[0], v2_eclip[1], v2_eclip[2]))
        W_vec[j] = np.rad2deg(longascnode(r2_eclip[0], r2_eclip[1], r2_eclip[2], v2_eclip[0], v2_eclip[1], v2_eclip[2]))
        n_vec[j] = n_num

        t_vec[j+1] = obs_t[1]
        x_vec[j+1] = r2_eclip[0]
        y_vec[j+1] = r2_eclip[1]
        z_vec[j+1] = r2_eclip[2]
        Ea_hc_pos_eclip = np.matmul(rot_equat_to_eclip, Ea_hc_pos[1])
        x_Ea_vec[j+1] = Ea_hc_pos_eclip[0]
        y_Ea_vec[j+1] = Ea_hc_pos_eclip[1]
        z_Ea_vec[j+1] = Ea_hc_pos_eclip[2]

    a_mean = np.mean(a_vec)        # au
    e_mean = np.mean(e_vec)        # dimensionless
    taup_mean = np.mean(taup_vec)  # deg
    w_mean = np.mean(w_vec)        # deg
    I_mean = np.mean(I_vec)        # deg
    W_mean = np.mean(W_vec)        # deg
    n_mean = np.mean(n_vec)        # sec

    print('\n*** ORBIT DETERMINATION: GAUSS METHOD ***')
    print('Observational arc:')
    print('Number of observations: ', len(obs_arr))
    print('First observation (UTC) : ', Time(t_vec[0], format='jd').iso)
    print('Last observation (UTC) : ', Time(t_vec[-1], format='jd').iso)

    print('\nAVERAGE ORBITAL ELEMENTS (ECLIPTIC, MEAN J2000.0): a, e, taup, omega, I, Omega, T')
    print('Semi-major axis (a): ', a_mean, 'au')
    print('Eccentricity (e): ', e_mean)
    # print('Time of pericenter passage (tau): ', Time(taup_mean, format='jd').iso, 'JDTDB')
    print('Pericenter distance (q): ', a_mean*(1.0-e_mean), 'au')
    print('Apocenter distance (Q): ', a_mean*(1.0+e_mean), 'au')
    print('Argument of pericenter (omega): ', w_mean, 'deg')
    print('Inclination (I): ', I_mean, 'deg')
    print('Longitude of Ascending Node (Omega): ', W_mean, 'deg')
    print('Orbital period (T): ', 2.0*np.pi/n_mean, 'days')

    # PLOT
    if plot:
        npoints = 500  # number of points in orbit
        theta_vec = np.linspace(0.0, 2.0*np.pi, npoints)
        t_Ea_vec = np.linspace(t_vec[0], t_vec[-1], npoints)
        x_orb_vec = np.zeros((npoints,))
        y_orb_vec = np.zeros((npoints,))
        z_orb_vec = np.zeros((npoints,))
        x_Ea_orb_vec = np.zeros((npoints,))
        y_Ea_orb_vec = np.zeros((npoints,))
        z_Ea_orb_vec = np.zeros((npoints,))

        for i in range(0, npoints):
            x_orb_vec[i], y_orb_vec[i], z_orb_vec[i] = xyz_frame2(a_mean, e_mean, theta_vec[i],
                                                                  np.deg2rad(w_mean), np.deg2rad(I_mean), np.deg2rad(W_mean))
            xyz_Ea_orb_vec_equat = earth_ephemeris(t_Ea_vec[i])/au
            xyz_Ea_orb_vec_eclip = np.matmul(rot_equat_to_eclip, xyz_Ea_orb_vec_equat)
            x_Ea_orb_vec[i], y_Ea_orb_vec[i], z_Ea_orb_vec[i] = xyz_Ea_orb_vec_eclip

        ax = plt.axes(aspect='auto', projection='3d')

        # Sun-centered orbits: computed orbit and Earth's
        ax.scatter3D(0.0, 0.0, 0.0, color='yellow', label='Sun')
        ax.scatter3D(x_Ea_vec, y_Ea_vec, z_Ea_vec, color='blue', marker='.', label='Earth orbit')
        ax.plot3D(x_Ea_orb_vec, y_Ea_orb_vec, z_Ea_orb_vec, color='blue', linewidth=0.5)
        ax.scatter3D(x_vec, y_vec, z_vec, color='red', marker='+', label=bodyname+' orbit')
        ax.plot3D(x_orb_vec, y_orb_vec, z_orb_vec, 'red', linewidth=0.5)
        plt.legend()
        ax.set_xlabel('x (au)')
        ax.set_ylabel('y (au)')
        ax.set_zlabel('z (au)')
        xy_plot_abs_max = np.max((np.amax(np.abs(ax.get_xlim())), np.amax(np.abs(ax.get_ylim()))))
        ax.set_xlim(-xy_plot_abs_max, xy_plot_abs_max)
        ax.set_ylim(-xy_plot_abs_max, xy_plot_abs_max)
        ax.set_zlim(-xy_plot_abs_max, xy_plot_abs_max)
        ax.legend(loc='center left', bbox_to_anchor=(1.04, 0.5))  # , ncol=3)
        ax.set_title('Angles-only orbit determ. (Gauss): '+bodyname)
        plt.show()

    return a_mean, e_mean, taup_mean, w_mean, I_mean, W_mean, 2.0*np.pi/n_mean, t_vec[-1]
5,336,717
def removeCable(n, edges):
    """
    @param n      roads (number of nodes)
    @param edges  connectivity (list of [x, y, w] edges)
    """
    fa = initFa(n)
    totalW, nodes = 0, []
    for x, y, w in edges:
        node = Node(x, y, w)
        nodes.append(node)
        totalW += w

    def getW(node):
        return node.w

    nodes.sort(key=getW)
    tmpW = 0
    for node in nodes:
        if find(fa, node.x) == find(fa, node.y):
            continue
        fa[find(fa, node.x)] = find(fa, node.y)
        tmpW += node.w
    return totalW - tmpW
5,336,718
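removeCable above is a Kruskal-style computation: total cable weight minus the weight of a minimum spanning tree. It relies on Node, initFa and find helpers that are not part of the snippet; the sketch below stubs them out with assumed, minimal implementations (defined in the same module) just to make the idea runnable.

class Node:
    # Assumed shape of the helper class: an edge (x, y) with weight w.
    def __init__(self, x, y, w):
        self.x, self.y, self.w = x, y, w

def initFa(n):
    # Union-find parent array: every node starts as its own root.
    return list(range(n))

def find(fa, x):
    # Find with path halving.
    while fa[x] != x:
        fa[x] = fa[fa[x]]
        x = fa[x]
    return x

# Square graph 0-1-2-3-0: total weight 10, MST weight 6, so 4 can be removed.
print(removeCable(4, [[0, 1, 1], [1, 2, 2], [2, 3, 3], [0, 3, 4]]))  # 4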
def attach_vnc(caller_id, vm_id):
    """
    Attaches VNC redirection to VM.

    @cmview_user
    @param_post{vm_id,int} id of the VM to have attached VM redirection
    """
    vm = VM.get(caller_id, vm_id)
    vm.attach_vnc()
    try:
        vm.save()
    except:
        raise CMException('vnc_attach')
5,336,719
def test_date_time_max_inclusive003_1133_date_time_max_inclusive003_1133_v(mode, save_output, output_format):
    """
    TEST :Facet Schemas for string : facet=maxInclusive and value=1999-05-12T10:31:00
    and document value=1985-04-12T10:30:00
    """
    assert_bindings(
        schema="msData/datatypes/Facets/dateTime/dateTime_maxInclusive003.xsd",
        instance="msData/datatypes/Facets/dateTime/dateTime_maxInclusive003.xml",
        class_name="Test",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
5,336,720
def upload():
    """POST route through which downloading sequence is triggered

    :param checked: which pins were selected by user
    :returns: log of arrays with pins, files downloaded counts, and notes
    """
    DASHRlut = findSNs(compCrawl())
    checked = request.get_json()
    from read_file import read_selected, narrow
    chosen = narrow(checked, DASHRlut)
    log = read_selected(chosen)
    return jsonify(log), 200
5,336,721
def verify_credentials():
    """Verify credentials to gdrive for the current user"""
    if 'credentials' not in flask.session:
        return flask.redirect(flask.url_for('authorize_app', _external=True))
    credentials = client.OAuth2Credentials.from_json(
        flask.session['credentials'])
    if credentials.access_token_expired:
        return flask.redirect(flask.url_for('authorize_app', _external=True))
    return None
5,336,722
def _gaussian_blur(heatmaps, kernel=11): """Modulate heatmap distribution with Gaussian. sigma = 0.3*((kernel_size-1)*0.5-1)+0.8 sigma~=3 if k=17 sigma=2 if k=11; sigma~=1.5 if k=7; sigma~=1 if k=3; Note: batch_size: N num_keypoints: K heatmap height: H heatmap width: W Args: heatmaps (np.ndarray[N, K, H, W]): model predicted heatmaps. kernel (int): Gaussian kernel size (K) for modulation, which should match the heatmap gaussian sigma when training. K=17 for sigma=3 and k=11 for sigma=2. Returns: np.ndarray[N, K, H, W]: Modulated heatmap distribution. """ assert kernel % 2 == 1 border = (kernel - 1) // 2 batch_size = heatmaps.shape[0] num_joints = heatmaps.shape[1] height = heatmaps.shape[2] width = heatmaps.shape[3] for i in range(batch_size): for j in range(num_joints): origin_max = np.max(heatmaps[i, j]) dr = np.zeros((height + 2 * border, width + 2 * border), dtype=np.float32) dr[border:-border, border:-border] = heatmaps[i, j].copy() dr = cv2.GaussianBlur(dr, (kernel, kernel), 0) heatmaps[i, j] = dr[border:-border, border:-border].copy() heatmaps[i, j] *= origin_max / np.max(heatmaps[i, j]) return heatmaps
5,336,723
def pose_vec2mat(vec):
    """Converts 6DoF parameters to transformation matrix

    Args:
        vec: 6DoF parameters in the order of tx, ty, tz, rx, ry, rz -- [B, 6]
    Returns:
        A transformation matrix -- [B, 4, 4]
    """
    # batch_size, _ = vec.get_shape().as_list()
    batch_size = tf.shape(vec)[0]
    translation = tf.slice(vec, [0, 0], [-1, 3])
    translation = tf.expand_dims(translation, -1)
    rx = tf.slice(vec, [0, 3], [-1, 1])
    ry = tf.slice(vec, [0, 4], [-1, 1])
    rz = tf.slice(vec, [0, 5], [-1, 1])
    rot_mat = euler2mat(rz, ry, rx)
    rot_mat = tf.squeeze(rot_mat, axis=[1])
    filler = tf.constant([0.0, 0.0, 0.0, 1.0], shape=[1, 1, 4])
    filler = tf.tile(filler, [batch_size, 1, 1])
    transform_mat = tf.concat([rot_mat, translation], axis=2)
    transform_mat = tf.concat([transform_mat, filler], axis=1)
    return transform_mat
5,336,724
def get_gradients_through_compute_gradients(optimizer, loss, activations):
    """Compute gradients to send to TPU embedding.

    Args:
        optimizer: a subclass of optimizer.Optimizer, usually CrossShardOptimizer.
            Used to call compute_gradients().
        loss: a Tensor to call optimizer.compute_gradients() on.
        activations: an OrderedDict mapping feature_name to Tensors of activations.

    Returns:
        An OrderedDict mapping from feature name Strings to Tensors of gradients of
        the loss wrt the activations of the features.
    """
    activation_list = activations.values()
    grads_and_vars = optimizer.compute_gradients(loss, activation_list)
    grads = [grad for grad, _ in grads_and_vars]
    feature_to_gradient_dict = collections.OrderedDict(
        zip(activations.keys(), grads))
    return feature_to_gradient_dict
5,336,725
def distance_to_mesh(mesh, pts, engine="auto", bvh=None):
    """Compute the distance from a set of points to a mesh.

    Args:
        mesh (:class:`Mesh`): A input mesh.
        pts (:class:`numpy.ndarray`): A :math:`N \\times dim` array of query points.
        engine (``string``): BVH engine name. Valid choices are "cgal", "geogram",
            "igl" if all dependencies are used. The default is "auto" where an
            available engine is automatically picked.
        bvh (:class:`BVH`): BVH engine instance (optional)

    Returns:
        Three values are returned.

            * ``squared_distances``: squared distances from each point to mesh.
            * ``face_indices``: the closest face to each point.
            * ``closest_points``: the point on mesh that is closest to each query point.
    """
    if not bvh:
        bvh = BVH(engine, mesh.dim)
        bvh.load_mesh(mesh)
    squared_distances, face_indices, closest_points = bvh.lookup(pts)
    return squared_distances, face_indices, closest_points
5,336,726
def parse_config(settings: Any) -> Tuple[Dict[str, Queue], Dict[str, dict]]:
    """
    SAQ configuration parsing.

    Args:
        settings: The settings (can be pydantic.BaseSettings).

    Returns:
        Tuple[Dict[str, Queue], Dict[str, dict]]: The SAQ queues and the queue settings.
    """
    saq_queues: Dict[str, dict] = getattr(settings, "SAQ_QUEUES", {})
    if not isinstance(saq_queues, dict):
        raise RuntimeError("SAQ_QUEUES must be a dict, got {}".format(type(saq_queues)))
    queue_maps = {}
    queue_settings = {}
    for q_name, q_param in saq_queues.items():
        url = q_param.get("url", None)
        if not url:
            raise RuntimeError("No url specified for queue {}".format(q_name))
        queue = Queue.from_url(url, q_name)
        queue_maps[q_name] = queue
        queue_settings[q_name] = q_param
    return queue_maps, queue_settings
5,336,727
async def test_manual_setup_connection_exception(hass: HomeAssistant):
    """Test configuration flow with a connection error."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    assert not result["errors"]

    with _patch_list(raise_error=True):
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], USER_INPUT
        )
        await hass.async_block_till_done()

    assert result["type"] == "form"
    assert result["errors"] == {"base": "cannot_connect"}
5,336,728
def create_zeros_slot(primary, name, dtype=None, colocate_with_primary=True):
    """Create a slot initialized to 0 with same shape as the primary object.

    Args:
        primary: The primary `Variable` or `Output`.
        name: Name to use for the slot variable.
        dtype: Type of the slot variable. Defaults to the type of `primary`.
        colocate_with_primary: Boolean. If True the slot is located on the same
            device as `primary`.

    Returns:
        A `Variable` object.
    """
    if dtype is None:
        dtype = primary.dtype
    val = array_ops.zeros(primary.get_shape().as_list(), dtype=dtype)
    return create_slot(primary, val, name,
                       colocate_with_primary=colocate_with_primary)
5,336,729
def AdditionalMedicareTax(e00200, MARS, AMEDT_ec, sey, AMEDT_rt,
                          FICA_mc_trt, FICA_ss_trt,
                          ptax_amc, payrolltax):
    """
    Computes Additional Medicare Tax (Form 8959) included in payroll taxes.

    Notes
    -----
    Tax Law Parameters:
        AMEDT_ec : Additional Medicare Tax earnings exclusion
        AMEDT_rt : Additional Medicare Tax rate
        FICA_ss_trt : FICA Social Security tax rate
        FICA_mc_trt : FICA Medicare tax rate

    Taxpayer Characteristics:
        e00200 : Wages and salaries
        sey : Self-employment income

    Returns
    -------
    ptax_amc : Additional Medicare Tax
    payrolltax : payroll tax augmented by Additional Medicare Tax
    """
    line8 = max(0., sey) * (1. - 0.5 * (FICA_mc_trt + FICA_ss_trt))
    line11 = max(0., AMEDT_ec[MARS - 1] - e00200)
    ptax_amc = AMEDT_rt * (max(0., e00200 - AMEDT_ec[MARS - 1]) +
                           max(0., line8 - line11))
    payrolltax += ptax_amc
    return (ptax_amc, payrolltax)
5,336,730
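A worked example of the function above using keyword arguments. The rates and exclusion amount are illustrative stand-ins, not authoritative policy parameters.

ptax_amc, payrolltax = AdditionalMedicareTax(
    e00200=250000.0,        # wages and salaries
    MARS=1,                 # filing-status index (1-based into AMEDT_ec)
    AMEDT_ec=[200000.0],    # earnings exclusion, illustrative
    sey=0.0,                # no self-employment income
    AMEDT_rt=0.009,         # additional Medicare tax rate, illustrative
    FICA_mc_trt=0.029,      # Medicare rate, illustrative
    FICA_ss_trt=0.124,      # Social Security rate, illustrative
    ptax_amc=0.0,
    payrolltax=0.0,
)
# ptax_amc == 0.009 * (250000 - 200000) == 450.0, and payrolltax is increased by the same amount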
def append_step_list(step_list, step, value, go_next, mode, tag):
    """from step_list, append the number of times a step needs to be repeated
    if runmode or retry is present

    :Arguments:
        step_list = Ordered list of steps to be executed
        step = Current step
        value = attempts in runmode/retry
        go_next = value of the real next step
        mode = runmode or retry
        tag = In runmode it is value, in retry it is count

    :Return:
        step_list = New step list formed by appending the replicated steps
    """
    for i in range(0, value):
        copy_step = copy.deepcopy(step)
        copy_step.find(mode).set(tag, go_next)
        copy_step.find(mode).set("attempt", i + 1)
        copy_step.find(mode).set(mode + "_val", value)
        step_list.append(copy_step)
    return step_list
5,336,731
def test_sorted_h5_keys_many():
    """
    Test to check a key is returned with many entries
    """
    with TempDirectory() as tempdir:
        # Creating some dummy data
        d1 = np.random.random(size=(10, 20))
        hf = h5py.File(tempdir.path + "data.h5", "w")
        # Adding entries in different order
        hf.create_dataset("dataset_1", data=d1)
        hf.create_dataset("dataset_3", data=d1)
        hf.create_dataset("dataset_2", data=d1)
        hf.close()
        # Checking func returns the same thing
        expected = ["dataset_1", "dataset_2", "dataset_3"]
        actual = util.get_h5_sorted_keys(tempdir.path + "data.h5")
        assert expected == actual
5,336,732
def load_license(request, project_slug):
    """
    Reload the license input queryset with the right options for the access
    form's current access policy choice. Called via ajax.
    """
    user = request.user
    project = ActiveProject.objects.filter(slug=project_slug)
    if project:
        project = project.get()
    else:
        raise Http404()

    form = forms.AccessMetadataForm(instance=project)
    form.set_license_queryset(access_policy=int(request.GET['access_policy']))
    return render(request, 'project/license_input.html', {'form': form})
5,336,733
def populate_db() -> None: """Populate the database using sample data""" users = [ { "username": "user1", "password": "pass_user1", "name": "User 1", "email": "user1@usermail.com", "role": "USER", "daily_calories": 2500, "meals": [ { "date": date(2020, 2, 11), "time": time(15, 0, 3), "name": "meal 1", "grams": 100, "description": "Meal 1 User 1", "calories": 500, "under_daily_total": False, }, { "date": date(2020, 2, 11), "time": time(15, 10, 3), "name": "meal 2", "grams": 100, "description": "Meal 2 User 1", "calories": 2100, "under_daily_total": False, }, ], }, { "username": "user2", "password": "pass_user2", "name": "User 2", "email": "user2@usemail.com", "role": "USER", "daily_calories": 3000, "meals": [ { "date": date(2020, 2, 11), "time": time(15, 0, 3), "name": "cheese", "grams": 100, "description": "Meal 3 User 2", "calories": 500, "under_daily_total": True, } ], }, { "username": "manager1", "password": "pass_manager1", "name": "Manager 1", "email": "manager1@managermail.com", "role": "MANAGER", "daily_calories": 2000, "meals": [], }, { "username": "manager2", "password": "pass_manager2", "name": "Manager 2", "email": "manager2@managermail.com", "role": "MANAGER", "daily_calories": 4000, "meals": [], }, ] for user in users: u = User( username=user.get("username"), password=user.get("password"), name=user.get("name"), email=user.get("email"), role=user.get("role"), daily_calories=user.get("daily_calories"), ) for meal in user.get("meals"): u.meals.append( Meal( user_id=meal.get(""), date=meal.get("date"), time=meal.get("time"), name=meal.get("name"), grams=meal.get("grams"), description=meal.get("description"), calories=meal.get("calories"), ) ) db.session.add(u) logger.info(f"User: '{user['username']}' added successfully to users") db.session.commit()
5,336,734
def wt():
    """Return default word tokenizer."""
    return WordTokenizer()
5,336,735
def test_template_params():
    """ Should print out text with passed params """
    runner = CliRunner()
    result = runner.invoke(template.template, ['--name', 'Caesar was here', '--choose'])
    print(f'result: {result.output}')
    assert result.output == ('value: Caesar was here\n'
                             'Hello Caesar was here. Your input value is True\n')
5,336,736
def test_roller_value_changed(hass, mock_openzwave): """Test position changed.""" hass.data[zwave.zwave.DATA_NETWORK] = MagicMock() node = MockNode() value = MockValue(data=None, node=node, command_class=const.COMMAND_CLASS_SWITCH_MULTILEVEL) values = MockEntityValues(primary=value, open=None, close=None, node=node) device = zwave.get_device(hass=hass, node=node, values=values, node_config={}) assert device.current_cover_position is None assert device.is_closed is None value.data = 2 value_changed(value) assert device.current_cover_position == 0 assert device.is_closed value.data = 35 value_changed(value) assert device.current_cover_position == 35 assert not device.is_closed value.data = 97 value_changed(value) assert device.current_cover_position == 100 assert not device.is_closed
5,336,737
def InstallAppengineDatabaseBackend():
    """Installs the appengine database backend into Django.

    The appengine database lives in the db/ subdirectory of this package, but
    is known as "appengine" to Django. This function installs the module where
    Django expects to find its database backends.
    """
    from appengine_django import db
    sys.modules['django.db.backends.appengine'] = db
    logging.debug("Installed appengine database backend")
5,336,738
def assert_(val: bool):
    """
    usage.scipy: 916
    usage.skimage: 18
    usage.statsmodels: 351
    """
    ...
5,336,739
def epochplot(epochs, *, ax=None, height=None, fc='0.5', ec='0.5', alpha=0.5, hatch='////', label=None, hc=None,**kwargs): """Docstring goes here. """ if ax is None: ax = plt.gca() ymin, ymax = ax.get_ylim() if height is None: height = ymax - ymin if hc is not None: try: hc_before = mpl.rcParams['hatch.color'] mpl.rcParams['hatch.color']=hc except KeyError: warnings.warn("Hatch color not supported for matplotlib <2.0") for ii, (start, stop) in enumerate(zip(epochs.starts, epochs.stops)): ax.add_patch( patches.Rectangle( (start, ymin), # (x,y) width=stop - start , # width height=height, # height hatch=hatch, facecolor=fc, edgecolor=ec, alpha=alpha, label=label if ii == 0 else "_nolegend_", **kwargs ) ) ax.set_xlim([epochs.start, epochs.stop]) if hc is not None: try: mpl.rcParams['hatch.color'] = hc_before except UnboundLocalError: pass return ax
5,336,740
def training(dataset, database_path, resize, channels, normalization,
             transformations, lr, epochs, batch_size, train_iterations,
             valid_iterations, classes, backbone, trainvalsplit,
             model_SavePath, backbone_type, model_LoadWeights, use_mvcnn):
    """
    Function that begins training.
    Please check the "train_config.py" file for parameter significance.
    """
    # Check available device (GPU|CPU)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Label distributions of MRI exams
    score_freq = [212, 124, 46, 139, 26, 82, 33, 113, 24, 28, 14, 58, 42, 15, 6, 6, 1, 2]
    # Create list with weights to be assigned to each class
    weights = [round(max(score_freq)/item, 2) for item in score_freq]
    class_weights = torch.FloatTensor(weights).to(device)

    # Load or create model if no weights.
    # Why "mvcnn"? Because it's based on the multi-view convolutional network.
    if model_LoadWeights:
        mvcnn = torch.load(model_LoadWeights)
    else:
        # Use a modified VGG as the backbone
        if backbone_type == 'VGGM':
            mvcnn = VGGM(classes).to(device)
        # Use a classical UNet as the backbone
        elif backbone_type == 'UNET':
            mvcnn = UNet(classes).to(device)
        # Use UNet along with attention as backbone
        # more info : https://arxiv.org/pdf/1804.03999.pdf
        elif backbone_type == 'UNetA':
            mvcnn = UNetA(classes).to(device)
        else:
            raise ValueError("Backbone not recognized : {}".format(backbone_type))

    # Define loss and compile model with optimizer
    criterion = nn.CrossEntropyLoss(weight=class_weights)
    print("Loss used \t \t \t : nn.CrossEntropyLoss")
    optimizer = optim.Adam(mvcnn.parameters(), lr=lr)

    # Instantiate train and validation generators for the respective databases
    if dataset == 'SEP':
        # Get train and valid patient information
        train_patient_information, valid_patient_information = get_PatientInfo(database_path)
        sep = SEPGenerator(database_path, channels=channels, resize=resize,
                           normalization=normalization)
        train_generator = sep.generator(train_patient_information,
                                        transformations=transformations, dataset='train')
        valid_generator = sep.generator(valid_patient_information, dataset='valid')
    elif dataset == 'COCO':
        coco = CMGenerator(base_path='/media/data/Coco/', resize=resize, batch_size=batch_size)
        train_generator = coco.generator(dataset='train', transformations=transformations, channels=channels)
        valid_generator = coco.generator(dataset='val', channels=channels)
    elif dataset == "MURA":
        mura = CMGenerator(base_path='/home/allwyn/MURA/', resize=resize,
                           batch_size=batch_size, dataset=dataset)
        train_generator = mura.generator(dataset='train', transformations=transformations, channels=channels)
        valid_generator = mura.generator(dataset='valid', channels=channels)
    elif dataset == "MIMIC":
        mimic = MIMIC_generator(base_path='/media/data/chest_dataset/', resize=resize, batch_size=batch_size)
        train_generator = mimic.generator(dataset='train', transformations=transformations)
        valid_generator = mimic.generator(dataset='valid')
    else:
        raise ValueError("Dataset not recognized : {}".format(dataset))

    # Output some info concerning the training run
    print("---------- Training has begun ----------")
    print("Learning rate \t \t \t : {}".format(lr))
    print("Batch size \t \t \t : {}".format(batch_size))
    print("Number of train iterations \t : {}".format(train_iterations))
    print("Number of valid iterations \t : {}".format(valid_iterations))
    print("Train validation split \t \t : {}".format(trainvalsplit))
    print("Number of epochs \t \t : {}".format(epochs))
    print("Training dataset \t \t : {}".format(dataset))
    print("Backbone used \t \t \t : {}".format(backbone_type))
    print("Loading models \t \t \t : {}".format(model_LoadWeights))
    print("Path to save models \t \t : {}".format(model_SavePath))

    create_dir(model_SavePath)
    print("Training has begun ........")

    for epoch in range(epochs):
        total_TrainLoss = 0
        for t_m, t_item in enumerate(train_generator):
            # Get image and label and pass them through the available device
            image_3D, label = torch.tensor(t_item[0], device=device).float(), \
                torch.tensor(t_item[1], device=device)
            # Sometimes .dcm files don't contain images; continue if this is the case
            if image_3D.shape[0] == 0:
                continue
            output = mvcnn(image_3D, batch_size, use_mvcnn)  # Get output from network
            loss = criterion(output, label)                  # Get loss
            loss.backward()                                  # Back-propagate
            optimizer.step()                                 # Update
            total_TrainLoss += loss

            if not (t_m+1) % 100:
                print("On_Going_Epoch : {} \t | Iteration : {} \t | Training Loss : {}".format(
                    epoch+1, t_m+1, total_TrainLoss/(t_m+1)))

            if (t_m+1) == train_iterations:
                total_ValidLoss = 0
                with torch.no_grad():
                    for v_m, v_item in enumerate(valid_generator):
                        image_3D, label = torch.tensor(v_item[0], device=device).float(), \
                            torch.tensor(v_item[1], device=device)
                        if image_3D.shape[0] == 0:
                            continue
                        output = mvcnn(image_3D, batch_size, use_mvcnn)
                        total_ValidLoss += criterion(output, label)
                        if (v_m + 1) == valid_iterations:
                            break
                print("Epoch : {} \t | Training Loss : {} \t | Validation Loss : {} ".format(
                    epoch+1, total_TrainLoss/(t_m+1), total_ValidLoss/(v_m+1)))
                torch.save(mvcnn, model_SavePath + '/' + config['backbone'] + '_' + str(epoch+1) + '.pkl')
                break
5,336,741
def do_db_sync():
    """
    Place a database under migration control and upgrade,
    creating first if necessary.
    """
    api.db_sync(api.get_engine(), CONF.command.version)
5,336,742
def run_dag(
    dag_id,
    run_id=None,
    conf=None,
    replace_microseconds=True,
    execution_date=None,
):
    """Runs DAG specified by dag_id

    :param dag_id: DAG ID
    :param run_id: ID of the dag_run
    :param conf: configuration
    :param replace_microseconds: whether microseconds should be zeroed
    :return: first dag run - even if more than one Dag Runs were present or None

    dag_model = DagModel.get_current(dag_id)
    if dag_model is None:
        raise DagNotFound("Dag id {} not found in DagModel".format(dag_id))

    dagbag = DagBag(dag_folder=dag_model.fileloc)
    """
    dagbag = DagBag()
    dag_run = DagRun()
    runs = _run_dag(
        dag_id=dag_id,
        dag_run=dag_run,
        dag_bag=dagbag,
        run_id=run_id,
        conf=conf,
        replace_microseconds=replace_microseconds,
        execution_date=execution_date,
    )

    return runs[0] if runs else None
5,336,743
def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True) -> Any: """Download the given URL and return a binary-mode file object to access the data.""" assert is_url(url) assert num_attempts >= 1 # Lookup from cache. url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest() if cache_dir is not None: cache_files = glob.glob(os.path.join(cache_dir, url_md5 + "_*")) if len(cache_files) == 1: return open(cache_files[0], "rb") # Download. url_name = None url_data = None with requests.Session() as session: if verbose: print("Downloading %s ..." % url, end="", flush=True) for attempts_left in reversed(range(num_attempts)): try: with session.get(url) as res: res.raise_for_status() if len(res.content) == 0: raise IOError("No data received") if len(res.content) < 8192: content_str = res.content.decode("utf-8") if "download_warning" in res.headers.get("Set-Cookie", ""): links = [html.unescape(link) for link in content_str.split('"') if "export=download" in link] if len(links) == 1: url = requests.compat.urljoin(url, links[0]) raise IOError("Google Drive virus checker nag") if "Google Drive - Quota exceeded" in content_str: raise IOError("Google Drive quota exceeded") match = re.search(r'filename="([^"]*)"', res.headers.get("Content-Disposition", "")) url_name = match[1] if match else url url_data = res.content if verbose: print(" done") break except: if not attempts_left: if verbose: print(" failed") raise if verbose: print(".", end="", flush=True) # Save to cache. if cache_dir is not None: safe_name = re.sub(r"[^0-9a-zA-Z-._]", "_", url_name) cache_file = os.path.join(cache_dir, url_md5 + "_" + safe_name) temp_file = os.path.join(cache_dir, "tmp_" + uuid.uuid4().hex + "_" + url_md5 + "_" + safe_name) os.makedirs(cache_dir, exist_ok=True) with open(temp_file, "wb") as f: f.write(url_data) os.replace(temp_file, cache_file) # atomic # Return data as file object. return io.BytesIO(url_data)
5,336,744
def main_ploting( title_: str or None, xlabel_: str or None, ylabel_: str or None, legends_: [str] or None, average_color_: str, average_fill_: str, ): """ if you want to edit the information of your graphic, edit this function """ if title_: plt.title(title_) if xlabel_: plt.xlabel(xlabel_) if ylabel_: plt.ylabel(ylabel_) # xy use is the non-outlier data connected by line if len(PrivateConstant.XUSE[0]) != 0: for i, xs in enumerate(PrivateConstant.XUSE): if i == len(PrivateConstant.XUSE) - 1: plt.plot(xs, list(PrivateConstant.YUSE[i]), color=average_color_, alpha=0.5, label='average') const = np.average(list(PrivateConstant.YUSE[i])) plt.axhline(y=const, color=average_color_, linestyle='-.') plt.fill_between(xs, const, list(PrivateConstant.YUSE[i]), color=average_fill_, alpha=0.25) else: if legends_: plt.plot(xs, list(PrivateConstant.YUSE[i]), color=PrivateConstant.COLORS[i], alpha=0.5, label=legends_[i]) else: plt.plot(xs, list(PrivateConstant.YUSE[i]), color=PrivateConstant.COLORS[i], alpha=0.5) plt.legend(loc='upper left') print('standard deviation: ', PrivateConstant.STDAVG)
5,336,745
def get_function_handle(method, var):
    """
    Return a function handle to a given calculation method.

    Parameters
    ----------
    method : str
        Identifier of the calculation method to return a handle to.
    var : dict
        Local variables needed in the mu update method.

    Returns
    -------
    f_handle : function
        Handle to the calculation method defined in this globals scope.
    """
    return globals()['wrap_calculate_using_' + method](var)
5,336,746
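The function above dispatches by name through globals(), so it only finds wrappers defined in its own module. A tiny self-contained illustration of the pattern, with a made-up wrapper name:

# Hypothetical wrapper following the 'wrap_calculate_using_<method>' naming scheme.
# Note: this only resolves if it lives in the same module as get_function_handle,
# because the lookup uses that module's globals().
def wrap_calculate_using_fixed(var):
    def f_handle():
        return var["mu"] * 2
    return f_handle

update_mu = get_function_handle("fixed", {"mu": 3.0})
print(update_mu())  # 6.0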
def build_receiver_model(params, ds_meta, utt_len: int, vocab_size: int,
                         pre_conv=None) -> ReceiverModel:
    """
    given the size of images from a dataset, and a desired vocab size and
    utterance length, creates a ReceiverModel, which will take in images, and
    utterances, and classify the images as being consistent with the
    utterances or not.
    """
    p = params
    if pre_conv is None:
        pre_conv = pre_conv_lib.build_preconv(params=p, ds_meta=ds_meta)
    multimodal_classifier = multimodal_classifiers.build_multimodal_classifier(
        params=p, pre_conv=pre_conv, ds_meta=ds_meta)
    linguistic_encoder = linguistic_encoders.build_linguistic_encoder(
        params=p, utt_len=utt_len, vocab_size=vocab_size)
    receiver_model = ReceiverModel(
        pre_conv=pre_conv,
        multimodal_classifier=multimodal_classifier,
        linguistic_encoder=linguistic_encoder)
    return receiver_model
5,336,747
def closeSeleniumWebDriver(web_driver):
    """
    This method fetches all child (grandchild and further descendant) process ids
    that were opened during Selenium's WebDriver initialization. We call
    web_driver.quit() and then kill all of those processes. If a process has
    meanwhile terminated, we ignore it. If the process id was reused, we also do
    nothing.

    :param web_driver:
    :return:
    """
    # see closeBmpDaemon()
    # see https://github.com/AutomatedTester/browsermob-proxy-py/issues/8#issuecomment-679150656
    if web_driver is not None:
        if getattr(getattr(web_driver, 'service', {}), 'process', None) is not None:
            childs_process = []
            try:
                cmd_process = psutil.Process(web_driver.service.process.pid)
                childs_process = cmd_process.children(recursive=True)
                childs_process = [*childs_process, cmd_process]
                web_driver.quit()
            finally:
                for child in childs_process:
                    # we can't accidentally kill a newly created process:
                    # we kill only the processes we cached earlier.
                    # if a process has already finished we get NoSuchProcess,
                    # which we simply suppress.
                    with suppress(psutil.NoSuchProcess):
                        child.send_signal(signal.SIGTERM)
        else:
            web_driver.quit()
5,336,748
def test_triangle_number_factors():
    """
    Test for Problem 12 (triangle_number_factors)
    :return:
    """
    from euler_python.easiest import p012
    output = p012.triangle_number_factors(5)
    expected_output = 28
    assert output == expected_output
5,336,749
def fermi_fitness(strategy_pair, N, i, utilities, selection_intensity=1):
    """
    Return the fermi fitness of a strategy pair in a population with N total
    individuals and i individuals of the first type.
    """
    F, G = [math.exp(k) for k in fitness(strategy_pair, N, i, utilities)]
    return F / (F + G), G / (F + G)
5,336,750
def diff_smf(mstar_arr, volume, h1_bool, colour_flag=False): """ Calculates differential stellar mass function in units of h=1.0 Parameters ---------- mstar_arr: numpy array Array of stellar masses volume: float Volume of survey or simulation h1_bool: boolean True if units of masses are h=1, False if units of masses are not h=1 colour_flag (optional): boolean 'R' if galaxy masses correspond to red galaxies & 'B' if galaxy masses correspond to blue galaxies. Defaults to False. Returns --------- maxis: array Array of x-axis mass values phi: array Array of y-axis values err_tot: array Array of error values per bin bins: array Array of bin edge values counts: array Array of number of things in each bin """ if not h1_bool: # changing from h=0.7 to h=1 assuming h^-2 dependence logmstar_arr = np.log10((10**mstar_arr) / 2.041) else: logmstar_arr = np.log10(mstar_arr) if survey == 'eco' or survey == 'resolvea': bin_min = np.round(np.log10((10**8.9) / 2.041), 1) if survey == 'eco' and colour_flag == 'R': bin_max = np.round(np.log10((10**11.5) / 2.041), 1) bin_num = 6 elif survey == 'eco' and colour_flag == 'B': bin_max = np.round(np.log10((10**11) / 2.041), 1) bin_num = 6 elif survey == 'resolvea': # different to avoid nan in inverse corr mat bin_max = np.round(np.log10((10**11.5) / 2.041), 1) bin_num = 7 else: # For eco total bin_max = np.round(np.log10((10**11.5) / 2.041), 1) bin_num = 7 bins = np.linspace(bin_min, bin_max, bin_num) elif survey == 'resolveb': bin_min = np.round(np.log10((10**8.7) / 2.041), 1) bin_max = np.round(np.log10((10**11.8) / 2.041), 1) bins = np.linspace(bin_min, bin_max, 7) # Unnormalized histogram and bin edges counts, edg = np.histogram(logmstar_arr, bins=bins) # paper used 17 bins dm = edg[1] - edg[0] # Bin width maxis = 0.5 * (edg[1:] + edg[:-1]) # Mass axis i.e. bin centers # Normalized to volume and bin width err_poiss = np.sqrt(counts) / (volume * dm) err_tot = err_poiss phi = counts / (volume * dm) # not a log quantity phi = np.log10(phi) return maxis, phi, err_tot, bins, counts
5,336,751
def get_parameter(dbutils, parameter_name: str, default_value='') -> str:
    """Creates a text widget and gets parameter value. If run from ADF, the value is taken from there."""
    dbutils.widgets.text(parameter_name, default_value)
    return dbutils.widgets.get(parameter_name)
5,336,752
def single_init(cfg: GenomeConfig):
    """Random initialized floating GRU value, calculated via a normal distribution."""
    return clip(gauss(cfg.gru_init_mean, cfg.gru_init_stdev),
                a_min=cfg.gru_min_value,
                a_max=cfg.gru_max_value)
5,336,753
def get_recent_articles(request):
    """
    Get the most recently updated articles.
    """
    user = get_login_user(request)
    recommend = request.POST.get('recommend', 'recommend')

    if recommend == 'unrecommend':
        articles = Article.objects.raw(get_other_articles_sql)
    elif recommend == 'recommend':
        articles = Article.objects.raw(get_recommend_articles_sql)
    else:
        logger.warning(f'Unknown type: {recommend}')

    user_sub_feeds = []
    if user:
        user_sub_feeds = get_user_sub_feeds(user.oauth_id)

    context = dict()
    context['articles'] = articles
    context['user'] = user
    context['user_sub_feeds'] = user_sub_feeds

    return render(request, 'explore/recent_articles.html', context=context)
5,336,754
def add_close_export_to_cell(cell):
    """
    Adds an HTML comment to close question export for PDF filtering to the top of ``cell``. ``cell``
    should be a Markdown cell. This adds ``<!-- END QUESTION-->`` as the first line of the cell.

    Args:
        cell (``nbformat.NotebookNode``): the cell to add the close export to

    Returns:
        ``nbformat.NotebookNode``: the cell with the close export comment at the top
    """
    cell = copy.deepcopy(cell)
    source = get_source(cell)
    source = ["<!-- END QUESTION -->\n", "\n"] + source
    cell['source'] = "\n".join(source)
    return cell
5,336,755
def get_ram_list_linux(): """Get RAM list using dmidecode.""" cmd = ['sudo', 'dmidecode', '--type', 'memory'] dimm_list = [] manufacturer = 'Unknown' size = 0 # Get DMI data proc = run_program(cmd) dmi_data = proc.stdout.splitlines() # Parse data for line in dmi_data: line = line.strip() if line == 'Memory Device': # Reset vars manufacturer = 'Unknown' size = 0 elif line.startswith('Size:'): size = line.replace('Size: ', '') try: size = string_to_bytes(size, assume_binary=True) except ValueError: # Assuming empty module size = 0 elif line.startswith('Manufacturer:'): manufacturer = line.replace('Manufacturer: ', '') dimm_list.append([size, manufacturer]) # Save details return dimm_list
5,336,756
def test_random_startup_node():
    """
    Hard to test reliably because the node is picked at random.
    """
    s = [{"1": 1}, {"2": 2}, {"3": 3}]
    n = NodeManager(startup_nodes=s)
    random_node = n.random_startup_node()

    for i in range(0, 5):
        assert random_node in s
5,336,757
def get_capability_list(capability=esdl.Producer):
    """Returns a list of all subtypes of the specified capability. Used to get a list of
    e.g. all producers in ESDL
    The list is automatically generated based on the ESDL meta model"""
    subtype_list = list()
    for eclassifier in esdl.eClass.eClassifiers:
        if isinstance(eclassifier, EClass):
            if capability.eClass in eclassifier.eAllSuperTypes() and not eclassifier.abstract:
                subtype_list.append(eclassifier.name)
    subtype_list.sort()
    return subtype_list
5,336,758
def test_reset_password_bad_token(client: TestClient, session: db.Session):
    """Cannot change password with a bad token"""
    bad_token = uuid.uuid4()
    password = utils.generate_random_chars(8)
    response = client.post(
        f"/v2/reset/{bad_token}",
        json={"password": password, "password_confirm": password},
    )
    assert response.status_code == status.HTTP_404_NOT_FOUND
5,336,759
def _get_exec_binary(binary, kw):
    """
    On win32, the subprocess module can only reliably resolve the
    target binary if it's actually a binary; as for a Node.js script
    it seems to only work iff shell=True was specified, presenting
    a security risk. Resolve the target manually through which will
    account for that.

    The kw argument is the keyword arguments that will be passed into
    whatever respective subprocess.Popen family of methods. The PATH
    environment variable will be used if available.
    """
    binary = which(binary, path=kw.get('env', {}).get('PATH'))
    if binary is None:
        raise_os_error(errno.ENOENT)
    return binary
5,336,760
def init_SSE_square(Lx, Ly):
    """Initialize a starting configuration on a 2D square lattice."""
    n_sites = Lx*Ly
    # initialize spins randomly with numbers +1 or -1, but the average magnetization is 0
    spins = 2*np.mod(np.random.permutation(n_sites), 2) - 1
    op_string = -1 * np.ones(10, np.intp)  # initialize with identities
    bonds = []
    for x0 in range(Lx):
        for y0 in range(Ly):
            s0 = site(x0, y0, Lx, Ly)
            s1 = site(np.mod(x0+1, Lx), y0, Lx, Ly)  # bond to the right
            bonds.append([s0, s1])
            s2 = site(x0, np.mod(y0+1, Ly), Lx, Ly)  # bond to the top
            bonds.append([s0, s2])
    bonds = np.array(bonds, dtype=np.intp)
    return spins, op_string, bonds
5,336,761
def filter_signal(eeg_df, iqrs, dic_filt_opts): """ Filter signal """ all_labels = list(eeg_df.columns) # check the order of labels label_grouped = False if all_labels[0].split('.')[-1] == all_labels[1].split('.')[-1]: label_grouped = True data_labels = all_pow_nodes meta_labels = [lab for lab in all_labels if lab not in data_labels] eeg_pow_filt = [] for phase in eeg_df.phase.unique(): print('\t',phase) sub = eeg_df.loc[ (eeg_df.phase == phase), :].copy() sub = sub.reset_index(drop=True) meta = sub[meta_labels].values # [N, ] data = sub[data_labels].values # always [N,70] if dic_filt_opts['per_phases']: th_up_all = iqrs[(dic_filt_opts['datafiltset'], phase)] # OLDER ORDER else: th_up_all = iqrs[(dic_filt_opts['datafiltset'], dic_filt_opts['setphase'])] # OLDER ORDER if label_grouped: th_up_all = iqr_by_group(th_up_all) # group iqrs print('\tFiltering --> nodes are grouped') m_thresh = np.repeat([np.array(th_up_all)], data.shape[0], axis=0) mask = data > m_thresh data[mask] = m_thresh[mask] / 2. # median filter applying for rr in range(data.shape[1]): # by colums (70 cols = 14 channesl * 5 waves) data[:, rr] = signal.medfilt(data[:, rr], kernel_size=3) df = pd.DataFrame(np.concatenate((data, meta), axis=1), columns=data_labels + meta_labels) eeg_pow_filt.append(df) del df eeg_pow_filt = pd.concat(eeg_pow_filt, axis=0, ignore_index=True) return eeg_pow_filt
5,336,762
def get_sym_inequiv_components( components: List[Component], spg_analyzer: SpacegroupAnalyzer ) -> List[Component]: """Gets and counts the symmetrically inequivalent components. Component data has to have been generated with ``inc_site_ids=True``. Args: components: A list of structure components, generated using :obj:`pymatgen.analysis.dimensionality.get_structure_components`, with ``inc_site_ids=True``. spg_analyzer: A `pymatgen.symmetry.analyzer.SpacegroupAnalyzer` analyzer object for the structure containing the components. Returns: A list of the symmetrically inequivalent components. Any duplicate components will only be returned once. The component objects are in the same format is given by :obj:`pymatgen.analysis.dimensionality.get_structure_components` but the additional property: - ``"count"`` (:obj:`int`): The number of times this component appears in the structure. """ components = deepcopy(components) sym_inequiv_components = {} equivalent_atoms = spg_analyzer.get_symmetry_dataset()["equivalent_atoms"] for component in components: sym_indices = frozenset(equivalent_atoms[x] for x in component["site_ids"]) # if two components are composed of atoms that are symmetrically # equivalent they are the same. if sym_indices in sym_inequiv_components: sym_inequiv_components[sym_indices]["count"] += 1 continue component["count"] = 1 sym_inequiv_components[sym_indices] = component return list(sym_inequiv_components.values())
5,336,763
def aic(llf, nobs, df_modelwc):
    """
    Akaike information criterion

    Parameters
    ----------
    llf : {float, array_like}
        value of the loglikelihood
    nobs : int
        number of observations
    df_modelwc : int
        number of parameters including constant

    Returns
    -------
    aic : float
        information criterion

    References
    ----------
    https://en.wikipedia.org/wiki/Akaike_information_criterion
    """
    return -2.0 * llf + 2.0 * df_modelwc
5,336,764
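A short illustrative comparison using the aic function above (note that nobs does not enter the formula; it is kept for interface symmetry with other criteria).

aic_small = aic(llf=-120.0, nobs=50, df_modelwc=3)  # -2*(-120.0) + 2*3 == 246.0
aic_large = aic(llf=-118.5, nobs=50, df_modelwc=6)  # -2*(-118.5) + 2*6 == 249.0
# The model with the smaller AIC (here the 3-parameter one) is preferred.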
def summarize_logs(df, wells, cat, props, sr=0.5): """ Function to calculate petrophysical summaries based on well and categorical data. All logs averaged with simple arithmetic means (maybe supply log permeability to have a better averaged estimation) Parameters: logs (pd.DataFrame): dataframe containing well logs data, use appropiate filters in advance to provide net logs wells (string): column with well names in the logs dataframe cat (string): column with filtering discrete property in the logs dataframe props (list:string): list of properties (logs) to be summarized sr (float): log sampling rate in project units for net thickness calculations Returns: summ (pd.DataFrame): dataframe with summarized data """ col_list = [] col_list.append(wells) col_list.append(cat) [col_list.append(i) for i in props] df1 = df[col_list].dropna(axis=0, how='any') col_list.append('NetH') summ = pd.DataFrame(columns=col_list) idx = 0 for well in df1[wells].unique(): for cat_ in df1[cat].unique(): summ.loc[idx, [wells, cat]] = [well, cat_] summ.loc[idx, props] = df1[(df1[wells]==well)&(df1[cat]==cat_)][props].mean() summ.loc[idx, 'NetH'] = df1[(df1[wells]==well)&(df1[cat]==cat_)][props[0]].count() * sr idx += 1 for col in summ.columns: if col not in [wells, cat]: summ[col] = pd.to_numeric(summ[col], errors='ignore') return summ
5,336,765
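# Minimal usage sketch for summarize_logs() above; the well-log values below
# are synthetic and only meant to illustrate the expected input layout.
import pandas as pd

logs = pd.DataFrame({
    'WELL':   ['W1', 'W1', 'W1', 'W2', 'W2'],
    'FACIES': ['sand', 'sand', 'shale', 'sand', 'sand'],
    'PHI':    [0.21, 0.19, 0.05, 0.24, 0.22],
    'SW':     [0.35, 0.40, 0.90, 0.30, 0.33],
})
summary = summarize_logs(logs, wells='WELL', cat='FACIES',
                         props=['PHI', 'SW'], sr=0.5)
print(summary)   # one row per (well, facies) pair with mean PHI/SW and NetH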
def run_trap_harvesting(prev_values = [], selected_harvest= 0, radius= default_radius, height= default_height, slope= default_slope, delta= default_delta, constant_population= True): """Runs the model for one harvesting cycle. Where a harvesting cycle is period of time ending in the next low tide in which the trap is closed with fish inside. Args: prev_values is an array of arrays with: [0]: The total number of harvested fish at hour indexed [1]: The total number of fish in the trap at hour at hour indexed [2]: the total number of fish outside the trap at hour indexed [3]: list of the size of all harvests The values in this array are the history of the model. if the model is being run from the start, pass in []. selected_harvest: how many fish will be harvested this cycle. This is to be user selected radius: the radius of the semi-circular trap created height: the height of the trap slope: slope of the beach delta: how far down the y axis the "center" of the semi-circle is from the origin constant_population: if true the population will reset to max_fish after every harvest, else it will decrease by the number of harvested fish Returns: An 2d array containing: [0]: The total number of harvested fish at hour indexed [1]: The total number of fish in the trap at hour at hour indexed [2]: the total number of fish outside the trap at hour indexed [3]: list of the size of all harvests [4]: a boolean showing if the model is completed This returned array is shows one more cycle of harvesting than the inputed one. Throws: ValueError if harvesting is not a positive integer <= the number of the fish in the trap """ movement_rate = 0.025 max_fish = 1000 perimeter_ratio = (np.pi * radius) / (np.pi * 25) tide_values = get_tide_values() perimeter = get_perimeter(radius, height, delta, slope) height_adjustment =1 / min(1, height / 4) #TODO #if allowing users to input arbitrary values check that all the user inputs are within reasonable bounds or throw an error if they are not if(len(prev_values) == 0): #if the model is just starting current_free_fish = max_fish current_caught_fish = 0 total_harvested = [0] in_trap = [0] out_trap = [max_fish] catches = [] else: #update the model with the harvest the user selected total_harvested = prev_values[0] in_trap = prev_values[1] out_trap = prev_values[2] catches = prev_values[3] current_free_fish = out_trap[-1] current_caught_fish = in_trap[-1] try: selected_harvest = int(selected_harvest) except ValueError: raise ValueError("selected_harvest must be a positive integer not larger than the number of fish in the trap") if(selected_harvest > current_caught_fish or selected_harvest < 0): raise ValueError("selected_harvest must be a positive integer not larger than the number of fish in the trap") catches.append(selected_harvest) level = tide_values[len(in_trap) - 1] coverage = get_ratio_of_perimeter_covered(level, perimeter, radius) free_to_caught = current_free_fish * coverage * movement_rate * perimeter_ratio caught_to_free = current_caught_fish * coverage * movement_rate * perimeter_ratio * height_adjustment current_caught_fish = current_caught_fish - caught_to_free + free_to_caught current_free_fish = current_free_fish + caught_to_free - free_to_caught if(constant_population): current_free_fish = max_fish else: current_free_fish = current_free_fish + (current_caught_fish - selected_harvest) total_harvested.append(total_harvested[-1] + selected_harvest) #empty the traps and record the step after the selected harvest current_caught_fish = 0 
in_trap.append(current_caught_fish) out_trap.append(current_free_fish) #drop tide values already ran tide_values = tide_values[len(in_trap) - 1 : len(tide_values)] for level in tide_values: coverage = get_ratio_of_perimeter_covered(level, perimeter, radius) if(math.floor(current_caught_fish) != 0 and coverage == 0): return [total_harvested, in_trap, out_trap, catches, False] free_to_caught = current_free_fish * coverage * movement_rate * perimeter_ratio caught_to_free = current_caught_fish * coverage * movement_rate * perimeter_ratio current_caught_fish = current_caught_fish - caught_to_free + free_to_caught current_free_fish = current_free_fish + caught_to_free - free_to_caught total_harvested.append(total_harvested[-1]) in_trap.append(current_caught_fish) out_trap.append(current_free_fish) return [total_harvested, in_trap, out_trap, catches, True]
5,336,766
def get_tfpn_mean(targets, predictions):
    """
    Given targets (labels) and predictions, return the TP, FN, FP and TN counts
    aggregated over all classes.
    :param targets: ground-truth class labels
    :param predictions: predicted class labels
    :return: TP, FN, FP, TN
    """
    cm = confusion_matrix(targets, predictions)
    total = np.array(cm).sum()
    TP = cm.diagonal().sum()
    FN = total - TP
    FP = FN
    TN = total * len(cm) - TP - FN - FP
    return TP, FN, FP, TN
5,336,767
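# Usage sketch for get_tfpn_mean() above, assuming sklearn's confusion_matrix
# and numpy are imported as in the function body.
y_true = [0, 0, 1, 1, 2, 2]
y_pred = [0, 1, 1, 1, 2, 0]
tp, fn, fp, tn = get_tfpn_mean(y_true, y_pred)
print(tp, fn, fp, tn)   # 4 2 2 10 for this toy example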
def salad(img_dir_path: str, test_size: float = 0.2, seed: Optional[int] = None,
          dir_names: Tuple[str, str] = ('train', 'valid')):
    """
    Physically split and shuffle (copy) the data in the directory, like a salad.
    Default test size is 20% (0.2), seed is optional, default directory names
    are 'train' and 'valid'.
    """
    path = Path(img_dir_path)
    files = list(path.glob('*.*'))
    if seed is not None:
        np.random.seed(seed)
    np.random.shuffle(files)
    test_index = np.round(len(files) * test_size)
    print(f'[::INFO:: {test_size * 100}% FILES : {int(test_index)}]')
    print(
        f'[::INFO:: {(1 - test_size) * 100}% FILES : {int(np.round(len(files))) - int(test_index)}]')
    for dir_name in dir_names:
        if not os.path.isdir(f'{dir_name}'):
            os.mkdir(f'{dir_name}')
    for i, file_path in enumerate(files):
        train_path = os.path.join(dir_names[0], Path(file_path).name)
        valid_path = os.path.join(dir_names[1], Path(file_path).name)
        if int(test_index) > i:
            shutil.copy(file_path, valid_path)
            print(f'[::WORK:: COPY "{file_path}" to "{valid_path}"]')
        else:
            shutil.copy(file_path, train_path)
            print(f'[::WORK:: COPY "{file_path}" to "{train_path}"]')
5,336,768
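# Usage sketch for salad() above; 'images/' is a hypothetical folder of files,
# 20% of which get copied into 'valid' and the rest into 'train'.
salad('images/', test_size=0.2, seed=42, dir_names=('train', 'valid'))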
def cal_deltaE00_from_LCh(LCh_1, Lab_2): """ Calculate the color difference :math:`\Delta E_{00}` between two given colorspace arrays. :param LCh_1: array-like :param Lab_2: array-like :return: numeric or ndarray """ Lab_1 = LCh2Lab(LCh_1) return deltaE00(Lab_1, Lab_2)
5,336,769
def get_var_type_glue(vtype): """Get glue module from variable's type. Parameters ---------- vtype: data type Returns ------- Glue Module if glue exists, otherwise None. """ global DTYPE_TO_GLUE, PKG_NAME_TO_GLUE_ARGS glue_mod = DTYPE_TO_GLUE.get(vtype, None) if glue_mod is not None: return glue_mod pkg_name = vtype.__module__.split('.')[0] if pkg_name not in PKG_NAME_TO_GLUE_ARGS: return None # try to register glue_mod _register_glue_real(*PKG_NAME_TO_GLUE_ARGS[pkg_name]) return DTYPE_TO_GLUE.get(vtype, None)
5,336,770
def sgd(params, lr, batch_size):
    """Minibatch stochastic gradient descent.

    Defined in :numref:`sec_linear_scratch`"""
    with torch.no_grad():
        for param in params:
            param -= lr * param.grad / batch_size
            param.grad.zero_()
5,336,771
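# Minimal sketch of one update step with the sgd() helper above; w and b are
# toy parameters and the "loss" is just a dummy quadratic for illustration.
import torch

w = torch.randn(2, requires_grad=True)
b = torch.zeros(1, requires_grad=True)
loss = ((w.sum() + b) ** 2).sum()
loss.backward()
sgd([w, b], lr=0.1, batch_size=1)   # updates in place and zeroes the grads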
def contract_TRG(state, svd_option_1st=None, svd_option_rem=None): """ Contract the PEPS using Tensor Renormalization Group. Parameters ---------- svd_option_1st: tensorbackends.interface.Option, optional Parameters for the first SVD in TRG. Will default to tensorbackends.interface.ReducedSVD() if not given. svd_option_rem: tensorbackends.interface.Option, optional Parameters for the remaining SVD truncations. Will perform SVD if given. Returns ------- output: state.backend.tensor or scalar The contraction result. References ---------- https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.99.120601 https://journals.aps.org/prb/abstract/10.1103/PhysRevB.78.205116 """ # base case if state.shape <= (2, 2): return contract_BMPS(state, svd_option_rem) # SVD each tensor into two tn = np.empty(state.shape + (2,), dtype=object) for (i, j), tsr in np.ndenumerate(state.grid): str_uv = 'abi,icdpq' if (i+j) % 2 == 0 else 'aidpq,bci' tn[i,j,0], _, tn[i,j,1] = state.backend.einsumsvd( 'abcdpq->' + str_uv, tsr, option=svd_option_1st or ReducedSVD(), absorb_s='even' ) tn[i,j,(i+j)%2] = tn[i,j,(i+j)%2].reshape(*(tn[i,j,(i+j)%2].shape + (1, 1))) return _contract_TRG(state, tn, svd_option_rem)
5,336,772
def riccati_3(nmax,x): """Riccati bessel function of the 3rd kind returns (r3, r3'), n=0,1,...,nmax""" x = np.asarray(x) result = np.zeros((2,nmax) + x.shape, dtype=complex) for n in range(nmax): yn = special.spherical_yn(n+1,x) ynp = special.spherical_yn(n+1,x, derivative=True) result[0,n] = x*yn result[1,n] = yn + x*ynp return result
5,336,773
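# Usage sketch for riccati_3() above; numpy and scipy.special are assumed to
# be imported as `np` and `special`, matching the function body. x avoids 0
# because the spherical Bessel functions of the second kind diverge there.
x = np.linspace(0.5, 5.0, 10)
r3, r3p = riccati_3(4, x)
print(r3.shape, r3p.shape)   # (4, 10) and (4, 10)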
def conv_batch_relu_forward(x, w, b, gamma, beta, conv_param, bn_param):
    """
    Convenience layer that performs a convolution, a batch norm, and a ReLU.

    Inputs:
    - x: Input to the convolutional layer
    - w, b, conv_param: Weights and parameters for the convolutional layer
    - gamma, beta, bn_param : batch norm parameters

    Returns a tuple of:
    - out: Output from the ReLU layer
    - cache: Object to give to the backward pass
    """
    convOut, conv_cache = layers.conv_forward(x, w, b, conv_param)
    normOut, norm_cache = layers.spatial_batchnorm_forward(convOut, gamma, beta, bn_param)
    out, relu_cache = layers.relu_forward(normOut)
    cache = (conv_cache, norm_cache, relu_cache)

    return out, cache
5,336,774
def hex_layout(npos, width, rotate=None): """Compute positions in a hexagon layout. Place the given number of positions in a hexagonal layout projected on the sphere and centered at z axis. The width specifies the angular extent from vertex to vertex along the "X" axis. For example:: Y ^ O O O | O O O O | O O + O O +--> X O O O O O O O Each position is numbered 0..npos-1. The first position is at the center, and then the positions are numbered moving outward in rings. Args: npos (int): The number of positions packed onto wafer. width (float): The angle (in degrees) subtended by the width along the X axis. rotate (array, optional): Optional array of rotation angles in degrees to apply to each position. Returns: (array): Array of quaternions for the positions. """ zaxis = np.array([0, 0, 1], dtype=np.float64) nullquat = np.array([0, 0, 0, 1], dtype=np.float64) sixty = np.pi/3.0 thirty = np.pi/6.0 rtthree = np.sqrt(3.0) rtthreebytwo = 0.5 * rtthree angdiameter = width * np.pi / 180.0 # find the angular packing size of one detector nrings = hex_nring(npos) posdiam = angdiameter / (2 * nrings - 2) result = np.zeros((npos, 4), dtype=np.float64) for pos in range(npos): if pos == 0: # center position has no offset posrot = nullquat else: # Not at the center, find ring for this position test = pos - 1 ring = 1 while (test - 6 * ring) >= 0: test -= 6 * ring ring += 1 sectors = int(test / ring) sectorsteps = np.mod(test, ring) # Convert angular steps around the ring into the angle and distance # in polar coordinates. Each "sector" of 60 degrees is essentially # an equilateral triangle, and each step is equally spaced along # the edge opposite the vertex: # # O # O O (step 2) # O O (step 1) # X O O O (step 0) # # For a given ring, "R" (center is R=0), there are R steps along # the sector edge. The line from the origin to the opposite edge # that bisects this triangle has length R*sqrt(3)/2. For each # equally-spaced step, we use the right triangle formed with this # bisection line to compute the angle and radius within this # sector. # The distance from the origin to the midpoint of the opposite # side. midline = rtthreebytwo * float(ring) # the distance along the opposite edge from the midpoint (positive # or negative) edgedist = float(sectorsteps) - 0.5 * float(ring) # the angle relative to the midpoint line (positive or negative) relang = np.arctan2(edgedist, midline) # total angle is based on number of sectors we have and the angle # within the final sector. posang = sectors * sixty + thirty + relang posdist = rtthreebytwo * posdiam * float(ring) / np.cos(relang) posx = np.sin(posdist) * np.cos(posang) posy = np.sin(posdist) * np.sin(posang) posz = np.cos(posdist) posdir = np.array([posx, posy, posz], dtype=np.float64) norm = np.sqrt(np.dot(posdir, posdir)) posdir /= norm posrot = qa.from_vectors(zaxis, posdir) if rotate is None: result[pos] = posrot else: prerot = qa.rotation(zaxis, rotate[pos] * np.pi / 180.0) result[pos] = qa.mult(posrot, prerot) return result
5,336,775
def copy_sim_files(sim_file_list, sim_dir, param_dict): """ Given a list of file paths 'sim_file_list' and simulation directory 'sim_dir', copies the files to the simulation directory and replaces variables in those files. """ print("Copying {} to {} and replacing vars".format(sim_file_list, sim_dir)) for fname in sim_file_list: with open(fname, 'r') as f_in, \ open(join(sim_dir, fname), 'w') as f_out: text = f_in.read() text = replace_vars(text, param_dict) f_out.write(text)
5,336,776
def dup_zz_hensel_step(m, f, g, h, s, t, K):
    """
    One step in Hensel lifting in `Z[x]`.

    Given positive integer `m` and `Z[x]` polynomials `f`, `g`, `h`, `s`
    and `t` such that::

        f == g*h (mod m)
        s*g + t*h == 1 (mod m)

        lc(f) is not a zero divisor (mod m)
        lc(h) == 1

        deg(f) == deg(g) + deg(h)
        deg(s) < deg(h)
        deg(t) < deg(g)

    returns polynomials `G`, `H`, `S` and `T`, such that::

        f == G*H (mod m**2)
        S*G + T*H == 1 (mod m**2)

    References
    ==========

    1. [Gathen99]_
    """
    M = m**2

    e = dup_sub_mul(f, g, h, K)
    e = dup_trunc(e, M, K)

    q, r = dup_div(dup_mul(s, e, K), h, K)

    q = dup_trunc(q, M, K)
    r = dup_trunc(r, M, K)

    u = dup_add(dup_mul(t, e, K), dup_mul(q, g, K), K)
    G = dup_trunc(dup_add(g, u, K), M, K)
    H = dup_trunc(dup_add(h, r, K), M, K)

    u = dup_add(dup_mul(s, G, K), dup_mul(t, H, K), K)
    b = dup_trunc(dup_sub(u, [K.one], K), M, K)

    c, d = dup_div(dup_mul(s, b, K), H, K)

    c = dup_trunc(c, M, K)
    d = dup_trunc(d, M, K)

    u = dup_add(dup_mul(t, b, K), dup_mul(c, G, K), K)
    S = dup_trunc(dup_sub(s, d, K), M, K)
    T = dup_trunc(dup_sub(t, u, K), M, K)

    return G, H, S, T
5,336,777
def generate_sobol_index_sample_sets(samplesA, samplesB, index):
    """
    Given two sample sets A and B, generate the sets :math:`A_B^{I}`.

    The rows of A_B^I are all taken from A except for the rows with non-zero
    entries in the index I, which are taken from B. When A and B are QMC
    samples it is best to change as few rows as possible.

    See Variance based sensitivity analysis of model output. Design and
    estimator for the total sensitivity index
    """
    nvars = samplesA.shape[0]
    I = np.arange(nvars)
    mask = np.asarray(index, dtype=bool)
    samples = np.vstack([samplesA[~mask], samplesB[mask]])
    J = np.hstack([I[~mask], I[mask]])
    samples = samples[np.argsort(J), :]
    return samples
5,336,778
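# Usage sketch for generate_sobol_index_sample_sets() above: two independent
# (nvars, nsamples) sample sets and an index vector marking which variables
# are taken from B instead of A.
import numpy as np

nvars, nsamples = 3, 5
samplesA = np.random.uniform(size=(nvars, nsamples))
samplesB = np.random.uniform(size=(nvars, nsamples))
samplesAB = generate_sobol_index_sample_sets(samplesA, samplesB, [0, 1, 0])
assert np.allclose(samplesAB[1], samplesB[1])   # row 1 comes from B
assert np.allclose(samplesAB[0], samplesA[0])   # rows 0 and 2 stay from A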
def export_gmt_files(files): """To convert the output .csv files to gmt: 1. Manually remove the index column and column labels. 2. Save file as .txt 3. Then change the suffix as .gmt """ path = "tfac/data/gsea_libraries/" for f in files: translate_gene_sets(pd.read_csv(path + f + ".csv", header=None), path, f)
5,336,779
def setup_logfile_logger(log_path, log_level=None, log_format=None, date_format=None):
    """
    Set up logging to a file.
    """
    # Create the handler
    handler = WatchedFileHandler(log_path, mode='a', encoding='utf-8', delay=0)

    if log_level:
        # Grab and set the level
        level = LOG_LEVELS.get(log_level.lower(), logging.ERROR)
        handler.setLevel(level)

    # Set the default formatter config for the file handler
    if not log_format:
        log_format = '%(asctime)s [%(name)s][%(levelname)s] %(message)s'
    if not date_format:
        date_format = '%Y-%m-%d %H:%M:%S'
    formatter = logging.Formatter(log_format, datefmt=date_format)

    handler.setFormatter(formatter)
    root_logger.addHandler(handler)
    return handler
5,336,780
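# Usage sketch for setup_logfile_logger() above; LOG_LEVELS and root_logger
# are module-level globals assumed to exist, as referenced in the function
# body, and '/tmp/app.log' is just an example path.
handler = setup_logfile_logger('/tmp/app.log', log_level='info')
root_logger.info('file logging configured')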
def pubsub_pub_command():
    """Publish a message to a channel."""
    global r
    r.publish("my-first-channel", "my-first-channel-data")
5,336,781
def test_suma(a, gen_b):
    """Test suma"""
    #flexmock(gtw, suma=9 )
    s = gtw.suma(a, gen_b)
    assert (s == a+gen_b)
5,336,782
def test_tc_train_effectiveness(): """assert that training decreases the loss""" happy_tc = HappyTextClassification( model_type="DISTILBERT", model_name="distilbert-base-uncased" ) before_loss = happy_tc.eval("../data/tc/train-eval.csv").loss happy_tc.train("../data/tc/train-eval.csv") after_loss = happy_tc.eval("../data/tc/train-eval.csv").loss assert after_loss < before_loss
5,336,783
def fix_encoding_and_explain(text): """ Deprecated copy of `ftfy.fix_encoding_and_explain()`. """ warnings.warn( "`fix_encoding_and_explain()` has moved to the main module of ftfy.", DeprecationWarning, ) return ftfy.fix_encoding_and_explain(text)
5,336,784
def test_cspad_xy_at_z() : """ Test cspad geometry table """ ## 'CxiDs1.0:Cspad.0)' or 'DscCsPad' basedir = '/reg/g/psdm/detector/alignment/cspad/calib-cxi-camera1-2014-09-24/' fname_geometry = basedir + '2016-06-03-geometry-cxi06216-r25-camera1-z175mm.txt' fname_data = basedir + '2016-06-03-chun-cxi06216-0025-DscCsPad-max.txt' geometry = GeometryAccess(fname_geometry, 0377) # get pixel coordinate index arrays: xyc = xc, yc = 1000, 1000 #iX, iY = geometry.get_pixel_coord_indexes(xy0_off_pix=xyc) #iX, iY = geometry.get_pixel_coord_indexes(do_tilt=True) #iX, iY = geometry.get_pixel_xy_inds_at_z(zplane=None, xy0_off_pix=xyc) iX, iY = geometry.get_pixel_xy_inds_at_z(zplane=150000) root, ext = os.path.splitext(fname_data) arr = np.load(fname_data) if ext == '.npy' else np.loadtxt(fname_data, dtype=np.float) #print 'arr.shape=', arr.shape arr.shape= (32,185,388) #ave, rms = arr.mean(), arr.std() #amp_range = (ave-rms, ave+3*rms) amp_range = (0, 1000) print 'amp_range', amp_range print 'iX, iY, W shape:', iX.shape, iY.shape, arr.shape img = img_from_pixel_arrays(iX,iY,W=arr) axim = gg.plotImageLarge(img,amp_range=amp_range) gg.move(500,10) gg.show()
5,336,785
def parse_16bit_color(color16):
    """Parse a 16-bit (RGB565) color value into an 8-bit (r, g, b) tuple.

    :param color16: the 16-bit color value
    """
    r = int(gamma5[int((color16 >> 11) & 0x1F)])
    g = int(gamma6[int((color16 >> 5) & 0x3F)])
    b = int(gamma5[int(color16 & 0x1F)])
    return (r, g, b)
5,336,786
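# Usage sketch for parse_16bit_color() above. gamma5 and gamma6 are
# module-level lookup tables in the original code; here they are stubbed
# with a plain linear 5/6-bit to 8-bit expansion purely so the call runs.
gamma5 = [round(i * 255 / 31) for i in range(32)]
gamma6 = [round(i * 255 / 63) for i in range(64)]
print(parse_16bit_color(0xF800))   # pure red in RGB565 -> (255, 0, 0)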
def remove_default_cube(): """ Remove the cube that is in the blender default scene to get an empty scene. """ bpy.data.objects["Cube"].select = True bpy.ops.object.delete()
5,336,787
def test_ramp_up_weights():
    """Test that TPE ramps up the weights of observed points correctly"""
    weights = ramp_up_weights(25, 15, True)
    assert len(weights) == 25
    assert numpy.all(weights == 1.0)

    weights = ramp_up_weights(25, 15, False)
    assert len(weights) == 25
    assert numpy.all(weights[:10] == (numpy.linspace(1.0 / 25, 1.0, num=10)))
    assert numpy.all(weights[10:] == 1.0)

    weights = ramp_up_weights(10, 15, False)
    assert len(weights) == 10
    assert numpy.all(weights == 1.0)

    weights = ramp_up_weights(25, 0, False)
    assert len(weights) == 25
    assert numpy.all(weights == (numpy.linspace(1.0 / 25, 1.0, num=25)))
5,336,788
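# A hedged sketch of what ramp_up_weights() could look like, reconstructed
# only from the assertions in the test above; this is not the library's
# actual implementation.
def ramp_up_weights_sketch(n_points, cap, equal_weight):
    # all points weighted equally when requested or when under the cap
    if equal_weight or n_points <= cap:
        return numpy.ones(n_points)
    # otherwise the oldest n_points - cap observations ramp up linearly
    ramp = numpy.linspace(1.0 / n_points, 1.0, num=n_points - cap)
    return numpy.concatenate([ramp, numpy.ones(cap)])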
def dqc_0008(instance, error_log, suppress_errors, namespaces): """DQC_0008 Reversed Calculation""" dts = instance.dts ns = get_namespace(namespaces, 'us-gaap') us_gaap_calc = dqc_0008_calculations.get(ns) if us_gaap_calc: for linkrole in dts.calculation_link_roles(arcrole_summation_item): nw = dts.calculation_network(linkrole, arcrole_summation_item) for rel in nw.relationships: us_gaap_items = us_gaap_calc.get(rel.target_concept.name, []) if rel.source_concept.name in us_gaap_items: report_error(error_log, suppress_errors, 'DQC.US.0008.6819', extCalcTarget=rel.target_concept, extCalcSource=rel.source_concept)
5,336,789
def orders(): """ List all orders """ orders = Order.query.filter_by(user_id=current_user.id).all() return render_template('customer/orders.html', orders=orders, title="Orders")
5,336,790
def packpeeklist1(n1, n2, n3, n4, n5): """ Packs and returns 5 item list """ listp = [n1, n2, n3, n4, n5] return listp
5,336,791
def valid_commands(commands: List[str]) -> List[str]:
    """
    Get list of valid commands from list of commands.

    :param (list) commands: User-supplied commands.
    :return: The subset of ``commands`` that are available.
    """
    return [command for command in commands if command in available_commands()]
5,336,792
def duck_list(request): """ lists all ducks """ ducks = Duck.objects.all() return render(request, 'duck/list.html', {'duck_list': ducks})
5,336,793
def test_scheme_16():
    """SCHEME 16: Rule 12: Remove Rings First Where the Linker Is Attached
    to a Ring Hetero-atom at Either End of the Linker

    Ring heteroatoms are easier to functionalise and, therefore, are often
    functionalised in the later stages of a chemical library synthesis and
    are thus less characteristic of a chemical scaffold.
    """
    # Deferasirox
    test_smiles = 'O=C1C=CC=C/C1=C1\\N/C(=C2/C=CC=CC2=O)N(c2ccc(C(=O)O)cc2)N1'
    correct_smiles = canon('O=C1C=CC=CC1=C1NNC(=C2C=CC=CC2=O)N1')
    frags = tree_frags_from_mol(Chem.MolFromSmiles(test_smiles))
    frags = [Chem.MolToSmiles(x) for x in frags]
    assert correct_smiles in frags
5,336,794
def get_debian_version(file_path): """ Get the version of a debian file :param file_path: the path of the debian file :return: the version of the debian file """ cmd_args = ["dpkg-deb", "-f", file_path, "Version"] debian_version = run_command(cmd_args) return debian_version
5,336,795
def hash_type( draw, hash_type_strategy: Optional[SearchStrategy[HashType]] = None ) -> HashType: """Composite strategy for fetching a :class:`~modist.package.hasher.HashType`.""" return draw(HashType_strategy if not hash_type_strategy else hash_type_strategy)
5,336,796
def get_initializer(initializer_name):
    """Get the corresponding initializer function based on the initializer string.

    API of an initializer:
      init_fn, hparams = get_initializer(init)
      new_params, final_l = init_fn(loss, init_params, hps, num_outputs, input_shape)

    Args:
      initializer_name: (str) e.g. default.

    Returns:
      initializer
    Raises:
      ValueError if the initializer is unrecognized.
    """
    try:
        return _ALL_INITIALIZERS[initializer_name][0]
    except KeyError:
        raise ValueError('Unrecognized initializer: {}'.format(initializer_name))
5,336,797
def sh(cmd, grid=False, infile=None, outfile=None, errfile=None, background=False): """ simple wrapper for system calls """ if grid: return 0 # A fake retcode else: if infile: cmd += " < {0} ".format(infile) if outfile and outfile != "stdout": cmd += " > {0} ".format(outfile) if errfile: cmd += " 2> {0} ".format(errfile) if background: cmd += " & " logging.debug(cmd) return call(cmd, shell=True)
5,336,798
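# Usage sketch for sh() above: run a command, redirect stdout to a file and
# return the shell's exit code (0 on success). 'listing.txt' is an arbitrary
# example file name.
retcode = sh("ls -l", outfile="listing.txt")
print(retcode)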
def clean_kubeflow_repo( shared_kubeflow_repo: Repository, clean_repo: Repository ) -> Generator[Repository, None, None]: """Creates a clean repo with a provisioned local kubeflow stack. Args: shared_kubeflow_repo: A repository with a provisioned local kubeflow stack clean_repo: An empty repository Yields: An empty repository with a provisioned local kubeflow stack. """ # Copy the stack configuration from the shared kubeflow repo. At this point # the stack resources are already provisioned by the module-scoped fixture. kubeflow_stack = shared_kubeflow_repo.active_stack clean_repo.register_stack(kubeflow_stack) clean_repo.activate_stack(kubeflow_stack.name) # Delete the artifact store of previous tests if os.path.exists(kubeflow_stack.artifact_store.path): shutil.rmtree(kubeflow_stack.artifact_store.path) yield clean_repo
5,336,799