content: string (lengths 22 to 815k)
id: int64 (range 0 to 4.91M)
def deploy_droplet(token):
    """ deploy a new droplet.

    return the droplet infos so that it can be used to further provision.
    """
    droplet_info = {
        'name': 'marian',
        'region': 'sfo2',
        'size': '4gb',
        'image': 'ubuntu-18-04-x64',
        'ssh_keys[]': get_key_fingerprints(token),
        'tags[]': ['marian'],
    }
    print('deploying new droplet...')
    url = 'https://api.digitalocean.com/v2/droplets'
    request = requests.post(url, headers=headers(token), params=droplet_info)
    # see https://github.com/requests/requests/blob/master/requests/status_codes.py
    # pylint: disable=E1101
    if request.status_code != requests.codes.accepted:
        print('Something went wrong. ' + request.json()['message'])
        request.raise_for_status()
    droplet_infos = request.json()['droplet']
    droplet_id = droplet_infos['id']
    print(f'Deployed Marian 👸 (id: {droplet_id})!')
    return droplet_infos
5,344,200
def annotate_repos(repos, roster):
    """Annotate repo['login'] with the login of the student who generated the repo

    Find the longest common prefix of the repository names.
    """
    common_prefix = longest_prefix([r["name"] for r in repos])
    for r in repos:
        login = r["name"][len(common_prefix):]
        r["login"] = login
        r["author"] = roster.get(login, login)
        # Annotate repo['commits'] with commits that Christian didn't author
        r["commits"] = [
            c["node"]
            for c in r["ref"]["target"]["history"]["edges"]
            if c["node"]["author"]["email"] != "christian@nyu.edu"
        ]
5,344,201
def _convert_run_describer_v1_like_dict_to_v0_like_dict(
        new_desc_dict: Dict[str, Any]) -> Dict[str, Any]:
    """
    This function takes the given dict which is expected to be
    representation of `RunDescriber` with `InterDependencies_` (underscore!)
    object and without "version" field, and converts it to a dict that is a
    representation of the `RunDescriber` object with `InterDependencies`
    (no underscore!) object and without "version" field.
    """
    new_desc_dict = new_desc_dict.copy()
    # We intend to use conversion methods from `serialization` module,
    # but those work only with RunDescriber representations that have
    # "version" field. So first, the "version" field with correct value is
    # added.
    new_desc_dict['version'] = 1
    # Out of that dict we create RunDescriber object of the current version
    # (regardless of what the current version is).
    new_desc = serial.from_dict_to_current(new_desc_dict)
    # The RunDescriber of the current version gets converted to a dictionary
    # that represents a RunDescriber object of version 0 - this is the one
    # that has InterDependencies object in it (not the InterDependencies_ one).
    old_desc_dict = serial.to_dict_as_version(new_desc, 0)
    # Lastly, the "version" field is removed.
    old_desc_dict.pop('version')
    return old_desc_dict
5,344,202
def currentGUIusers():
    """Gets a list of GUI users by parsing the output of /usr/bin/who"""
    gui_users = []
    proc = subprocess.Popen('/usr/bin/who', shell=False,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    output = proc.communicate()[0].decode("UTF-8")
    lines = output.splitlines()
    for line in lines:
        if 'console' in line:
            parts = line.split()
            gui_users.append(parts[0])

    # 10.11 sometimes has a phantom '_mbsetupuser' user. Filter it out.
    users_to_ignore = ['_mbsetupuser']
    gui_users = [user for user in gui_users if user not in users_to_ignore]

    return gui_users
5,344,203
def download_sequences(request): """Download the selected and/or user uploaded protein sequences.""" selected_values = request.session.get("list_names", []) list_nterminal = request.session.get("list_nterminal", []) list_middle = request.session.get("list_middle", []) list_cterminal = request.session.get("list_cterminal", []) values = [] if list_nterminal: values += list_nterminal if list_middle: values += list_middle if list_cterminal: values += list_cterminal if selected_values: values += selected_values values = list(set(values)) data = PesticidalProteinDatabase.objects.filter(name__in=values) userdata = UserUploadData.objects.filter( session_key=request.session.session_key) combined_selection = [] if list_nterminal: combined_selection += list_nterminal if list_middle: combined_selection += list_middle if list_cterminal: combined_selection += list_cterminal if selected_values: combined_selection += selected_values accession = {} data = PesticidalProteinDatabase.objects.filter( name__in=combined_selection) if data: for item in data: accession[item.accession] = item protein_detail = ProteinDetail.objects.filter( accession__in=list(accession.keys())) file = StringIO() # buffer = BytesIO() for item in data: output = "" item_name = item.name # print("item_name", item_name) if item.name in list_nterminal: nterminal = [ protein for protein in protein_detail if protein.accession == item.accession] item_name += "_d1" for item1 in nterminal: output += item1.get_endotoxin_n() if item.name in list_middle: middle = [ protein for protein in protein_detail if protein.accession == item.accession] item_name += "_d2" for item1 in middle: output += item1.get_endotoxin_m() if item.name in list_cterminal: cterminal = [ protein for protein in protein_detail if protein.accession == item.accession] # print(cterminal) item_name += "_d3" for item1 in cterminal: output += item1.get_endotoxin_c() # print("download output", output) if item.name in selected_values: fasta = textwrap.fill(item.sequence, 80) output += fasta # print(str_to_write) # file.write(str_to_write) if output: str_to_write = f">{item_name}\n{output}\n" file.write(str_to_write) for item in userdata: fasta = textwrap.fill(item.sequence, 80) if len(item.name) > 10: item.name = item.name[:10] str_to_write = f">{item.name}\n{fasta}\n" file.write(str_to_write) response = HttpResponse(file.getvalue(), content_type="text/plain") download_file = "cart_fasta_sequences.txt" response["Content-Disposition"] = "attachment;filename=" + download_file response["Content-Length"] = file.tell() return response
5,344,204
def resize_to_fill(image, size):
    """
    Resize down and crop image to fill the given dimensions.
    Most suitable for thumbnails.
    (The final image will match the requested size, unless one or the other
    dimension is already smaller than the target size)
    """
    resized_image = resize_to_min(image, size)
    return crop_to_centre(resized_image, size)
5,344,205
def display_warning(warning_msg):
    """
    Displays warning message in Maya
    :param warning_msg: str, warning text to display
    """
    warning_msg = warning_msg.replace('\n', '\ntp:\t\t')
    maya.OpenMaya.MGlobal.displayWarning('tp:\t\t' + warning_msg)
    LOGGER.warning('\n{}'.format(warning_msg))
5,344,206
def _iterate_examples(
    file_in,
    version,
):
    """Reads examples from TSV file."""
    if version == Version.V_02:
        for line in file_in:
            fields = line.rstrip().split('\t')
            qid = fields[0]
            question = fields[1]
            wtq_table_id = fields[2]
            answers = fields[3:]
            yield qid, question, wtq_table_id, answers
    if version == Version.V_10:
        for line in csv.DictReader(file_in, delimiter='\t'):
            # Parse question and answers.
            qid = line['id']
            question = line['utterance']
            wtq_table_id = line['context']
            answers = line['targetValue'].split('|')
            yield qid, question, wtq_table_id, answers
5,344,207
def extract_dual_coef(num_classes, sv_ind_by_clf, sv_coef_by_clf, labels):
    """Construct dual coefficients array in SKLearn's peculiar layout,
    as well as the corresponding support vector indexes."""
    sv_ind_by_class = group_indices_by_class(
        num_classes, sv_ind_by_clf, labels)
    sv_ind_mapping = map_sv_to_columns_in_dual_coef_matrix(sv_ind_by_class)

    num_unique_sv = len(sv_ind_mapping)
    dc_dt = sv_coef_by_clf[0].dtype
    dual_coef = np.zeros((num_classes - 1, num_unique_sv), dtype=dc_dt)
    support_ = np.empty((num_unique_sv,), dtype=np.int32)

    p = 0
    for i in range(0, num_classes):
        for j in range(i + 1, num_classes):
            sv_ind_i_vs_j = sv_ind_by_clf[p]
            sv_coef_i_vs_j = sv_coef_by_clf[p]
            p += 1
            for k, sv_index in enumerate(sv_ind_i_vs_j):
                label = labels[sv_index]
                col_index = sv_ind_mapping[sv_index]
                if j == label:
                    row_index = i
                else:
                    row_index = j - 1
                dual_coef[row_index, col_index] = sv_coef_i_vs_j[k]
                support_[col_index] = sv_index

    return dual_coef, support_
5,344,208
def test_setattr_context():
    """Test of the setattr_context() function"""
    class Foo(object):
        pass

    f = Foo()
    f.attr = "abc"
    with utils.setattr_context(f, attr="123"):
        assert_equal(f.attr, "123")
    assert_equal(f.attr, "abc")

    try:
        with utils.setattr_context(f, attr="123"):
            raise ValueError()
    except:
        pass
    assert_equal(f.attr, "abc")
5,344,209
def extract(func):
    """
    Decorator function. Open and extract data from CSV files.
    Return list of dictionaries.

    :param func: Wrapped function with *args and **kwargs arguments.
    """
    def _wrapper(*args):
        out = []
        instance, prefix = args
        for fname in glob.glob(os.path.join(getattr(instance, 'directory'), *prefix)):
            with open(fname) as g:
                out.extend(func(instance, data=csv.DictReader(g)))
        return out
    return _wrapper
5,344,210
def namespaces_of(name):
    """
    utility to determine namespaces of a name
    @raises ValueError
    @raises TypeError
    """
    if name is None:
        raise ValueError('name')
    try:
        if not isinstance(name, basestring):
            raise TypeError('name')
    except NameError:
        if not isinstance(name, str):
            raise TypeError('name')

    if not name:
        return ['/']

    splits = [x for x in name.split('/') if x]
    return ['/'] + ['/' + '/'.join(splits[:i]) for i in range(1, len(splits))]
5,344,211
def create_c3d_sentiment_model(): """ C3D sentiment Keras model definition :return: """ model = Sequential() input_shape = (16, 112, 112, 3) model.add(Conv3D(64, (3, 3, 3), activation='relu', padding='same', name='conv1', input_shape=input_shape)) model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), padding='valid', name='pool1')) # 2nd layer group model.add(Conv3D(128, (3, 3, 3), activation='relu', padding='same', name='conv2')) model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool2')) # 3rd layer group model.add(Conv3D(256, (3, 3, 3), activation='relu', padding='same', name='conv3a')) model.add(Conv3D(256, (3, 3, 3), activation='relu', padding='same', name='conv3b')) model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool3')) # 4th layer group model.add(Conv3D(512, (3, 3, 3), activation='relu', padding='same', name='conv4a')) model.add(Conv3D(512, (3, 3, 3), activation='relu', padding='same', name='conv4b')) model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool4')) # 5th layer group model.add(Conv3D(512, (3, 3, 3), activation='relu', padding='same', name='conv5a')) model.add(Conv3D(512, (3, 3, 3), activation='relu', padding='same', name='conv5b')) model.add(ZeroPadding3D(padding=((0, 0), (0, 1), (0, 1)), name='zeropad5')) model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool5')) model.add(Flatten()) # FC layers group model.add(Dense(4096, activation='relu', name='fc6')) model.add(Dropout(.5)) model.add(Dense(4096, activation='relu', name='fc7')) model.add(Dropout(.5)) model.add(Dense(2, activation='softmax', name='nfc8')) return model
5,344,212
def list_all_projects(path):
    """lists all projects from a folder"""
    project_names = []
    for file in os.listdir(path):
        if not os.path.isfile(os.path.join(path, file)):
            continue
        project_names.append(file.split('.')[0])
    return project_names
5,344,213
def notch_filter(data: FLOATS_TYPE,
                 sampling_freq_hz: float,
                 notch_freq_hz: float,
                 quality_factor: float) -> FLOATS_TYPE:
    """
    Design and use a notch (band reject) filter to filter the data.

    Args:
        data: time series of the data
        sampling_freq_hz: sampling frequency :math:`f_s`, in Hz
            (or other consistent units)
        notch_freq_hz: notch frequency, in Hz
            (or other consistent units)
        quality_factor: notch filter quality factor, :math:`Q`

    Returns:
        filtered data
    """
    b, a = iirnotch(
        w0=normalized_frequency(notch_freq_hz, sampling_freq_hz),
        Q=quality_factor
    )
    filtered_data = lfilter(b=b, a=a, x=data)
    return filtered_data
5,344,214
def get_trail_max(self, rz_array=None):
    """
    Return the position of the blob maximum. Either in pixel or in (R,Z)
    coordinates if rz_array is passed.
    """
    if rz_array is None:
        return self.xymax

    # Remember xycom[:,1] is the radial (X) index which corresponds to R
    return rz_array[self.xymax[:, 0].astype('int'),
                    self.xymax[:, 1].astype('int'), :]
5,344,215
def create_action_type(request): """ Create a new action type """ # check name uniqueness if ActionType.objects.filter(name=request.data['name']).exists(): raise SuspiciousOperation(_('An action with a similar name already exists')) description = request.data.get("description") label = request.data.get("label") format_data = request.data.get("format", {'type': 'undefined'}) lang = translation.get_language() # create the action type action_type = ActionType() action_type.name = request.data['name'] action_type.set_label(lang, label) action_type.description = description action_type.format = format_data action_controller = ActionController(action_type, request.user) if format_data['type'] != 'undefined': # format validation ActionStepFormatManager.check(action_controller, format_data) action_type.save() result = { 'id': action_type.id, 'name': action_type.name, 'label': action_type.get_label(), 'format': action_type.format, 'description': action_type.description } return HttpResponseRest(request, result)
5,344,216
def ec_chi_sq(params, w, y, weights, model, normalize='deg'):
    """
    Chi squared for equivalent circuit model.

    Parameters:
    -----------
    params: dict of model parameters
    w: frequencies
    y: measured impedance data: nx2 matrix of Zreal, Zimag
    weights: weights for squared residuals (n-vector)
    model: equivalent circuit model
    normalize: normalization method. Options:
        'deg': normalize by degrees of freedom, i.e. len(y) - len(params)
        'n': normalize by number of observations, i.e. len(y)
        False: don't normalize
    """
    Zfit = model(w, **params)
    y_fit = np.array([Zfit.real, Zfit.imag]).T
    x2 = chi_sq(y, y_fit, weights)  # + np.sum((x < 0).astype(int)*1000)
    if normalize == 'deg':
        x2 /= (len(y) - len(params))
    elif normalize == 'n':
        x2 /= len(y)
    elif normalize is not False:
        raise ValueError(
            f"Invalid normalize option {normalize}. Options are 'deg', 'n', False")
    return x2
5,344,217
def delete_demo(collection_id):
    """Deletes a single demo collection.

    Args:
        collection_id: str. ID of the demo collection to be deleted.
    """
    if not collection_domain.Collection.is_demo_collection_id(collection_id):
        raise Exception('Invalid demo collection id %s' % collection_id)

    collection = get_collection_by_id(collection_id, strict=False)

    if not collection:
        logging.info('Collection with id %s was not deleted, because it '
                     'does not exist.' % collection_id)
    else:
        delete_collection(
            feconf.SYSTEM_COMMITTER_ID, collection_id, force_deletion=True)
5,344,218
def plot_regressors_scores(r_list, r_str, x_test, y_true, fig_dir, txt): """Given a list of fitted regressor objects, compare their skill on a variety of tests""" mse = [] r2_u = [] r2_w = [] exp_var_u = [] exp_var_w = [] for reg in r_list: y_pred = reg.predict(x_test) mse.append(metrics.mean_squared_error(y_true, y_pred, multioutput='uniform_average')) r2_u.append(metrics.r2_score(y_true, y_pred, multioutput='uniform_average')) r2_w.append(metrics.r2_score(y_true, y_pred, multioutput='variance_weighted')) expl = metrics.explained_variance_score exp_var_u.append(expl(y_true, y_pred, multioutput='uniform_average')) exp_var_w.append(expl(y_true, y_pred, multioutput='variance_weighted')) fig = plt.figure() plt.subplot(1, 2, 1) tick = range(len(mse)) # Plot mean squared error plt.yticks(tick, r_str) plt.semilogx(mse, tick, marker='o',) plt.title('Mean Squared Error') # Plot R2 plt.subplot(1, 2, 2) plt.plot(r2_u, tick, marker='o', label='uniform') plt.plot(r2_w, tick, marker='o', label='weighted') plt.setp(plt.gca().get_yticklabels(), visible=False) plt.legend(loc="upper left") plt.title('R^2 score') plt.xlim((-1, 1)) fig.savefig(fig_dir + txt + '_scores.png', bbox_inches='tight', dpi=450) plt.close()
5,344,219
def declare_eq_p_balance_dc_approx(model, index_set, bus_p_loads, gens_by_bus, bus_gs_fixed_shunts, inlet_branches_by_bus, outlet_branches_by_bus, approximation_type=ApproximationType.BTHETA, **rhs_kwargs): """ Create the equality constraints for the real power balance at a bus using the variables for real power flows, respectively. NOTE: Equation build orientates constants to the RHS in order to compute the correct dual variable sign """ m = model con_set = decl.declare_set('_con_eq_p_balance', model, index_set) m.eq_p_balance = pe.Constraint(con_set) for bus_name in con_set: if approximation_type == ApproximationType.BTHETA: p_expr = -sum([m.pf[branch_name] for branch_name in outlet_branches_by_bus[bus_name]]) p_expr += sum([m.pf[branch_name] for branch_name in inlet_branches_by_bus[bus_name]]) elif approximation_type == ApproximationType.BTHETA_LOSSES: p_expr = -0.5*sum([m.pfl[branch_name] for branch_name in inlet_branches_by_bus[bus_name]]) p_expr -= 0.5*sum([m.pfl[branch_name] for branch_name in outlet_branches_by_bus[bus_name]]) p_expr -= sum([m.pf[branch_name] for branch_name in outlet_branches_by_bus[bus_name]]) p_expr += sum([m.pf[branch_name] for branch_name in inlet_branches_by_bus[bus_name]]) if bus_gs_fixed_shunts[bus_name] != 0.0: p_expr -= bus_gs_fixed_shunts[bus_name] if bus_p_loads[bus_name] != 0.0: # only applies to fixed loads, otherwise may cause an error p_expr -= m.pl[bus_name] if rhs_kwargs: k = bus_name for idx, val in rhs_kwargs.items(): if isinstance(val, tuple): val,key = val k = (key,bus_name) if not k in eval("m." + val).index_set(): continue if idx == 'include_feasibility_slack_pos': p_expr -= eval("m." + val)[k] if idx == 'include_feasibility_slack_neg': p_expr += eval("m." + val)[k] for gen_name in gens_by_bus[bus_name]: p_expr += m.pg[gen_name] m.eq_p_balance[bus_name] = \ p_expr == 0.0
5,344,220
def synthesize_genre_favs(xn_train_df): """ Making synthetic user-genre favorite interactions We're going to just count the genres watched by each user. Subsample from a random top percentile of genres and consider those the user's favorites. We will then subsample again -- simulating the voluntary aspect of favoriting a genre. """ def sample_fav(df, q_thresh=None, frac=None): q_thresh = q_thresh or np.random.rand() frac = frac or np.random.rand() return df.reset_index().genre_id \ .loc[(df.item_id >= df.item_id.quantile(q_thresh)).values] \ .sample(frac=frac, replace=False) n_users = xn_train_df['user_id'].nunique() genre_counts = xn_train_df.groupby(('user_id', 'genre_id')).count() xn_genre_favs = genre_counts.groupby(level=0) \ .apply(sample_fav) \ .reset_index().drop('level_1', axis=1) # say 0.7 users know of the genre favoriting feature aware_users = set(np.random.permutation(n_users)[:int(0.7 * n_users)]) xn_genre_favs_samp = xn_genre_favs.loc[ xn_genre_favs.user_id.isin(aware_users)] return xn_genre_favs_samp
5,344,221
def mfa_to_challenge(mfa):
    """ Convert MFA from bastion to internal Challenge

    param mfa: MFA from bastion
    :rtype: Challenge
    :return: a converted Challenge
    """
    if not mfa.fields:
        return None
    message_list = []
    echos = [False for x in mfa.fields]
    fields = mfa.fields
    if hasattr(mfa, "auth_type"):
        message_list.append("Authentication type: %s" % mfa.auth_type)
    if mfa.fields[0] == "username":
        fields = fields[1:]
        echos = echos[1:]
        message_list.append("Username: %s" % mfa.username)
    message = "\n".join(message_list)
    recall = (len(fields) == 0)
    return Challenge(
        challenge_type="MFA",
        title="= MultiFactor Authentication =",
        message=message,
        fields=fields,
        echos=echos,
        username=mfa.username,
        token=mfa.token,
        recall=recall
    )
5,344,222
def override_stdouts():
    """Override `sys.stdout` and `sys.stderr` with `StringIO`."""
    prev_out, prev_err = sys.stdout, sys.stderr
    mystdout, mystderr = StringIO(), StringIO()
    sys.stdout = sys.__stdout__ = mystdout
    sys.stderr = sys.__stderr__ = mystderr

    yield mystdout, mystderr

    sys.stdout = sys.__stdout__ = prev_out
    sys.stderr = sys.__stderr__ = prev_err
5,344,223
def put_vns3_controller_api_password( api_client, vns3_controller_id, api_password=None, **kwargs ): # noqa: E501 """Update VNS3 Controller API password # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> response = await api.put_vns3_controller_api_password(id, async_req=True) :param VNS3Client api_client: (required) :param vns3_controller_id int: Controller ID (required) :param api_password str: New api password (required) :param async_req bool: execute request asynchronously :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: APIResponse or awaitable if async """ local_var_params = locals() request_params = ["api_password"] collection_formats = {} path_params = {"vns3_controller_id": vns3_controller_id} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = {} for param in [p for p in request_params if local_var_params.get(p) is not None]: body_params[param] = local_var_params[param] # HTTP header `Accept` header_params["Accept"] = api_client.select_header_accept( ["application/json"] ) # noqa: E501 # HTTP header `Content-Type` header_params["Content-Type"] = api_client.select_header_content_type( # noqa: E501 ["application/json"] ) # noqa: E501 # Authentication setting auth_settings = ["ApiTokenAuth", "basicAuth"] # noqa: E501 return api_client.call_api( "/vns3_controllers/{vns3_controller_id}/update_api_password", "PUT", path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type="object", # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get("async_req"), _return_http_data_only=local_var_params.get( "_return_http_data_only" ), # noqa: E501 _preload_content=local_var_params.get("_preload_content", True), _request_timeout=local_var_params.get("_request_timeout"), collection_formats=collection_formats, )
5,344,224
def create_pod(interface_type=None, pvc_name=None,
               desired_status=constants.STATUS_RUNNING, wait=True):
    """
    Create a pod

    Args:
        interface_type (str): The interface type (CephFS, RBD, etc.)
        pvc_name (str): The PVC that should be attached to the newly created pod
        desired_status (str): The status of the pod to wait for
        wait (bool): True for waiting for the pod to reach the desired
            status, False otherwise

    Returns:
        Pod: A Pod instance

    Raises:
        AssertionError: In case of any failure
    """
    if interface_type == constants.CEPHBLOCKPOOL:
        pod_dict = constants.CSI_RBD_POD_YAML
        interface = constants.RBD_INTERFACE
    else:
        pod_dict = constants.CSI_CEPHFS_POD_YAML
        interface = constants.CEPHFS_INTERFACE
    pod_data = templating.load_yaml_to_dict(pod_dict)
    pod_data['metadata']['name'] = create_unique_resource_name(
        f'test-{interface}', 'pod'
    )
    pod_data['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
    if pvc_name:
        pod_data['spec']['volumes'][0]['persistentVolumeClaim']['claimName'] = pvc_name

    pod_obj = pod.Pod(**pod_data)
    pod_name = pod_data.get('metadata').get('name')
    created_resource = pod_obj.create(do_reload=wait)
    assert created_resource, (
        f"Failed to create resource {pod_name}"
    )
    if wait:
        assert wait_for_resource_state(pod_obj, desired_status)

    return pod_obj
5,344,225
def main(): """TODO: Docstring for main. :returns: TODO """ s_task = select([t_task.c.id, t_task.c.dname, t_task.c.addresses]).where(t_task.c.status == 'new').limit(1000) s_result = select([t_result.c.id, t_result.c.dname]) u_task = t_task.update().where(t_task.c.id == bindparam('_id')).values({'status': 'done', 'url_result_id': bindparam("url_result_id")}) engine = create_engine(MYSQL_URI, pool_recycle=1000, encoding='utf-8') q_in = Queue() q_out = Queue() exc = get_exchange('name') ps = [] # for func in (location_resolve_bulk, icp_resolve_bulk, whois_resolve_bulk): logger.info("开启子进程用于各api查询") for func in (location_resolve_bulk, whois_resolve_bulk): a = ActorWrapper(partial(func, n=100), q_in) a.daemon = True a.start() ps.append(a) b = Process(target=combine_result, args=(q_in, q_out, )) b.daemon = True b.start() signal.signal(signal.SIGTERM, term) Session = sessionmaker(bind=engine) session = Session() with exc.subscribe(*ps): while True: # data = [(1, 'www.baidu.com', '["136.243.10"]'), (2, 'www.qq.com', '["122.226.223.35", "111.241.90.245"]'), (3, 'www.2134wfewqrwqre.com', '["122.226.223.38"]')] try: data = session.execute(s_task).fetchall() n_data = len(data) logger.info(f"从url_task表获取了{n_data}条记录") if not n_data: session.commit() logger.info(f"休眠4分钟") time.sleep(4 * 60) continue dnames_ids = {x[1]: x[0] for x in data} dnames_ips = {x[1]: x[2] for x in data} dnames = dnames_ids.keys() # 过滤出已在t_result表存在的记录,这些不用再查了,直接将url_result_id关联过来 r_select = session.execute(s_result.where(t_result.c.dname.in_(dnames))) exist_records = {t[1]:t[0] for t in r_select} if exist_records: update_data_pre = [{'_id': dnames_ids[k], 'url_result_id': v} for k,v in exist_records.items()] r_update_pre = session.execute(u_task, update_data_pre) logger.info(f"发现已查询过的记录,在url_task表更新了{r_update_pre.rowcount}条记录") not_exist_dnames = list(set(dnames) - set(exist_records.keys())) if not_exist_dnames: # 过滤出查询结果是安全的结果,将这些dname的记录从t_task表删除 logger.info(f"开始进行腾讯安全接口查询") r_tencent = tencent_resolve_bulk(not_exist_dnames, 100)['tencent'] logger.info(f"腾讯安全接口查询完毕") safe = {key:value for key,value in r_tencent.items() if value['category'] == '安全'} if safe: ids_safe = [dnames_ids[k] for k in safe] r_delete = session.execute(t_task.delete().where(t_task.c.id.in_(ids_safe))) logger.info(f"从url_task表删除了{r_delete.rowcount}条已确认为安全的记录") # 过滤后剩下的需要查询的dname,注意这些记录的tencent_info已经查过了 filtered_dnames = list(set(not_exist_dnames) - set(safe.keys())) if filtered_dnames: filtered_dnames_ips = {k:v for k,v in dnames_ips.items() if k in filtered_dnames} # 将腾讯接口查询的结果放到q_in队列中做合并 tencent = {'tencent': {key:r_tencent[key] for key in filtered_dnames}} dns = {'dns': {key:dnames_ips[key] for key in filtered_dnames}} icp = {'icp': {key:{} for key in filtered_dnames}} # icp = icp_resolve_bulk(filtered_dnames_ips) # whois = whois_resolve_bulk(filtered_dnames_ips) # location = location_resolve_bulk(filtered_dnames_ips) q_in.put(tencent) q_in.put(dns) q_in.put(icp) # q_in.put(whois) # q_in.put(location) # 给交换机下发任务 exc.send(filtered_dnames_ips) # 获取各接口查询完之后合并的结果 resolved_data = q_out.get() bulk_result = [UrlResult(**x) for x in resolved_data] session.bulk_save_objects(bulk_result, return_defaults=True) extracted_result = {result.dname:result.id for result in bulk_result} update_data_suf = [{'_id': dnames_ids[k], 'url_result_id': v} for k,v in extracted_result.items()] r_update_suf = session.execute(u_task, update_data_suf) logger.info(f"在url_result表插入了{r_update_suf.rowcount}条记录,在url_task表更新了{r_update_suf.rowcount}条记录") session.commit() except Exception: 
session.rollback() logger.error("数据库操作过程中遇错,退出", exc_info=True) break # 并查看是否哪个查询子程序挂了,挂了的话重启该子进程 for i, p in enumerate(ps): if not p.is_alive(): logger.error('{} occured error, trying to reboot it'.format(p.name)) ps[i] = ActorWrapper(p.func, q_in) ps[i].start() if not b.is_alive(): b = Process(target=combine_result, args=(q_in, q_out, )) b.daemon = True b.start() for p in ps: p.join() b.join()
5,344,226
def before_feature(context, feature):
    """
    feature hook before running
    """
    GlobalContext.process("before_feature_processor", context, feature)
5,344,227
def _hash(item: str, maxsize=sys.maxsize) -> int:
    """An unsalted hash function with a range between 0 and maxsize

    :param item: string or string like object that is accepted by builtin function `str`
    :type item: str
    :param maxsize: maximum value of returned integer
    :type maxsize: int
    :return: hash between 0 and maxsize
    :rtype: int
    """
    if isinstance(item, int):
        return item % maxsize
    else:
        return int(
            hashlib.sha256(str(item).encode('utf-8')).hexdigest(), 16
        ) % maxsize
5,344,228
def _wait_for_query_to_complete(search_id, qradar_client, timeout, polling_interval):
    """ Poll QRadar until search execution finishes """
    start_time = time.time()

    search_status = qradar_client.get_search_status(search_id)
    if not search_status:
        # Sometimes it takes a little while to be able to query a search id
        time.sleep(4)
        search_status = qradar_client.get_search_status(search_id)

    while search_status.get("status", "") in ("WAIT", "EXECUTE", "SORTING"):
        if timeout != 0:
            if time.time() - start_time > timeout:
                raise SearchTimeout(search_id, search_status.get("status", ""))
        time.sleep(polling_interval)
        search_status = qradar_client.get_search_status(search_id)

    if search_status.get("status", "") != "COMPLETED":
        LOG.error(search_status)
        raise SearchFailure(search_id, search_status.get("status", ""))
5,344,229
def create_annotation(annotation_id: int, image_id: int, category_id: int,
                      is_crowd: int, area: int,
                      bounding_box: Tuple[int, int, int, int],
                      segmentation: List[Tuple[int, int]]) -> dict:
    """
    Converts input data to COCO annotation information storing format.

    :param int annotation_id: unique identifier of the annotation
    :param int image_id: identifier of the related image
    :param int category_id: identifier of the related category (annotation class)
    :param int is_crowd:
        "iscrowd": 0 if your segmentation is based on polygon (object instance)
        "iscrowd": 1 if your segmentation is based on uncompressed RLE (crowd)
    :param float area: area occupied by segmentation in pixels
    :param Tuple[float, float, float, float] bounding_box:
        coordinates of bbox in format (x,y,w,h)
    :param list segmentation: polygon coordinates
    :return: dict of the annotation information in COCO format
    """
    return {
        "id": annotation_id,
        "image_id": image_id,
        "category_id": category_id,
        "iscrowd": is_crowd,
        "area": area,  # float
        "bbox": bounding_box,  # [x,y,width,height]
        "segmentation": segmentation  # [polygon]
    }
5,344,230
def generate_heatmap(correlation_frame, p_values, x_axes, y_axes, filename='heatmap'): """Creates an heat map @:param correlation_frame: pandas frame that contains the values to plot @:param p_values: the pandas frame that contains the p-values @:param x_axes: the axes to render to the x axis @:param y_axes: the axes to render to the y axis @:param filename: the file for the image to plot out @:return it saves the image to the specified path with the name as argument """ x_axes = [x[:10] for x in x_axes] y_axes = [x[:10] for x in y_axes] matrix = correlation_frame.as_matrix() matrix = np.around(matrix, decimals=2) ps = p_values.as_matrix() ps = np.around(ps, decimals=2) np.set_printoptions(formatter={'float': '{: 0.2f}'.format}) annotations = [] for n, row in enumerate(matrix): for m, val in enumerate(row): p = ps[n][m] if p >= 0.10 or abs(val) < 0.39: aux = '~' else: if val > 0: aux = str(val)[:4] else: aux = str(val)[:5] if p >= 0.05: aux = aux+'*' if math.isnan(val): aux = '/' annotations.append( dict( text=str(aux), x=x_axes[m], y=y_axes[n], font=dict(color='gray' if aux == '/' else 'white', size=50), showarrow=False) ) data = [ go.Heatmap( z=matrix, x=x_axes, y=y_axes, zmin=-1, zmax=1, showscale=False )] layout = go.Layout( annotations=annotations, xaxis=dict( tickangle=45, tickfont=dict( size=50 ), ), yaxis=dict( tickangle=0, tickfont=dict( size=50 ), ), autosize=False, width=3000, height=2000, margin=go.Margin( l=250, r=50, b=260, t=10, pad=4 ) ) fig = go.Figure(data=data, layout=layout) py.image.save_as(fig, filename=images + filename+'.pdf')
5,344,231
def check_sequence_is_valid(seq, alignment=False):
    """
    Parameters
    --------------
    seq : str
        Amino acid sequence

    alignment : bool
        Flag that defines whether alignment sequence rules should be
        applied or not.

    Returns
    ------------
    Tuple
        Returns a tuple of size 2 where element 0 is a boolean (True or False)
        that flags if the sequence was valid (True) or not (False).
        Element 1 is a value that will return as the invalid amino acid
        (if sequence is invalid) OR if it's a valid sequence will be 0.
    """
    if alignment == True:
        valid_AA_list = STANDARD_AAS_WITH_GAP
    else:
        valid_AA_list = STANDARD_AAS

    s = list(set(seq))
    for i in s:
        if i not in valid_AA_list:
            return (False, i)
    return (True, 0)
5,344,232
def get_device():
    """Pick GPU if available, else CPU"""
    if torch.cuda.is_available():
        return torch.device('cuda')
    else:
        return torch.device('cpu')
5,344,233
def extension_chisquare(x, y=None, lower=True):
    """Calculates a one-way chi square test for file extensions.

    :param x: Paths to compare with y.
    :type x: list, tuple, array of WindowsFilePath or PosixFilePath objects
    :param y: Paths to compare with x.
    :type y: list, tuple, array of WindowsFilePath or PosixFilePath objects
    :param lower: Convert the extensions to lower before counting.
    :type lower: boolean
    :return: The test result.
    :rtype: scipy.stats.Power_divergenceResult
    """
    try:
        from scipy.stats import chisquare
        from sklearn.feature_extraction import DictVectorizer
    except ModuleNotFoundError:
        raise MissingDependencyError(
            "Install the module 'scipy' and 'sklearn' to compute chisquares.")
    counts_x = extension_counts(x, lower=lower)
    counts_y = extension_counts(y, lower=lower)
    dv = DictVectorizer(sparse=False)
    arr = dv.fit_transform([counts_x, counts_y])
    return chisquare(arr[0], arr[1])
5,344,234
def cprint( fmt, fg=None, bg=None, style=None ): """ Colour-printer. cprint( 'Hello!' ) # normal cprint( 'Hello!', fg='g' ) # green cprint( 'Hello!', fg='r', bg='w', style='bx' ) # bold red blinking on white List of colours (for fg and bg): k black r red g green y yellow b blue m magenta c cyan w white List of styles: b bold i italic u underline s strikethrough x blinking r reverse y fast blinking f faint h hide """ COLCODE = { 'k': 0, # black 'r': 1, # red 'g': 2, # green 'y': 3, # yellow 'b': 4, # blue 'm': 5, # magenta 'c': 6, # cyan 'w': 7 # white } FMTCODE = { 'b': 1, # bold 'f': 2, # faint 'i': 3, # italic 'u': 4, # underline 'x': 5, # blinking 'y': 6, # fast blinking 'r': 7, # reverse 'h': 8, # hide 's': 9, # strikethrough } # properties props = [] if isinstance(style,str): props = [ FMTCODE[s] for s in style ] if isinstance(fg,str): props.append( 30 + COLCODE[fg] ) if isinstance(bg,str): props.append( 40 + COLCODE[bg] ) # display props = ';'.join([ str(x) for x in props ]) if props: print( '\x1b[%sm%s\x1b[0m' % (props, fmt) ) else: print( fmt )
5,344,235
def kuster_toksoz_moduli( k1, mu1, k2, mu2, frac2, inclusion_shape="spheres", alpha=None ): """Kuster-Toksoz Moduli for an inclusion to a material. Best used for low-porosity materials. To add multiple inclusions to a model use this function recursively substituting the output for k1 and mu1 after the first pass. Inclusions are added randomly (iso-tropic). Assumes the material is learn and elastic, is limited to dilute concentrations of inclusions and idealised ellipsoidal inclusion shapes. Args: k1 (array-like): Material bulk moduli mu1 (array-like): Material shear moduli k2 (array-like): Inclusion bulk moduli mu2 (array-like): Inclusion shear moduli frac2 (array-like): The volume fraction of the inclusion to be added. inclusion_shape (str, Optional): The shape of the inclusion. Defaults to 'spheres'. One of ['spheres', 'needles', 'disks', 'cracks']. alpha (float, Optional): Required if inclusion_shape='cracks'. The aspect ratio of the cracks. """ if inclusion_shape == "spheres": pmi, qmi = _kuster_toksoz_spheres(k1, mu1, k2, mu2) elif inclusion_shape == "needles": pmi, qmi = _kuster_toksoz_needles(k1, mu1, k2, mu2) elif inclusion_shape == "disks": pmi, qmi = _kuster_toksoz_disks(k1, mu1, k2, mu2) elif inclusion_shape == "cracks" and isinstance(alpha, float): pmi, qmi = _kuster_toksoz_cracks(k1, mu1, k2, mu2, alpha) else: raise ValueError( "Unknown inclusions_shape or alpha must be specified as float for cracks." ) eta1 = _kuster_toksoz_eta(k1, mu1) k_a = frac2 * (k2 - k1) * pmi mu_a = frac2 * (mu2 - mu1) * qmi return ( (4 / 3 * mu1 * (k_a + k1) + power(k1, 2)) / (k1 + 4 * mu1 / 3 - k_a), (eta1 * (mu_a + mu1) + power(mu1, 2)) / (mu1 + eta1 - mu_a), )
5,344,236
def fetchResearchRadius(chatId: str, reachableByFoot: bool) -> tuple:
    """Given a chat id and a distance type, returns the user distance preference.

    Args:
        chatId (str) - the chat_id of which the language is required
        reachableByFoot (bool) - true if the preferred_distance_on_foot param
            has to be fetched, otherwise false if the user wants to fetch
            preferred_distance_by_car

    Returns:
        int - the user preference in terms of distance from the restaurant
    """
    connection = dbConnect()
    if reachableByFoot:
        result = (
            connection.cursor()
            .execute(
                """SELECT preferred_distance_on_foot FROM chat WHERE chat_id = ?""",
                (chatId,),
            )
            .fetchone()
        )
    else:
        result = (
            connection.cursor()
            .execute(
                """SELECT preferred_distance_by_car FROM chat WHERE chat_id = ?""",
                (chatId,),
            )
            .fetchone()
        )
    connection.close()
    return result
5,344,237
def build_train_valid_test_datasets(tokenizer, data_class, data_prefix, data_impl, splits_string, train_valid_test_num_samples, enc_seq_length, dec_seq_length, seed, skip_warmup, prompt_config): """Build train, valid, and test datasets.""" context_data_prefix = data_prefix + "_context" target_data_prefix = data_prefix + "_target" # Indexed dataset. context_indexed_dataset = get_indexed_dataset_(context_data_prefix, data_impl, skip_warmup) target_indexed_dataset = get_indexed_dataset_(target_data_prefix, data_impl, skip_warmup) total_num_of_documents = context_indexed_dataset.sizes.shape[0] splits = get_train_valid_test_split_(splits_string, total_num_of_documents) # Print stats about the splits. print_rank_0(' > dataset split:') def print_split_stats(name, index): print_rank_0(' {}:'.format(name)) print_rank_0(' document indices in [{}, {}) total of {} ' 'documents'.format(splits[index], splits[index + 1], splits[index + 1] - splits[index])) print_split_stats('train', 0) print_split_stats('validation', 1) print_split_stats('test', 2) def build_dataset(index, name): dataset = None if splits[index + 1] > splits[index]: document_ids_in_splits = np.arange(start=splits[index], stop=splits[index + 1], step=1, dtype=np.int32) dataset = data_class(tokenizer, name, data_prefix, document_ids_in_splits, context_indexed_dataset, target_indexed_dataset, train_valid_test_num_samples[index], enc_seq_length, dec_seq_length, prompt_config, seed) return dataset train_dataset = build_dataset(0, 'train') valid_dataset = build_dataset(1, 'valid') test_dataset = build_dataset(2, 'test') return (train_dataset, valid_dataset, test_dataset)
5,344,238
async def uvm_do_on_with(seq_obj, SEQ_OR_ITEM, SEQR, *CONSTRAINTS):
    """
    This is the same as `uvm_do_with` except that it also sets the parent
    sequence to the sequence in which the macro is invoked, and it sets the
    sequencer to the specified ~SEQR~ argument.

    The user must supply the constraints using lambdas. An example call::

        await uvm_do_on_with(self, sys0_seq, None,
            lambda num_blk_seq: num_blk_seq == 10,
            lambda blk_level_delay_ns: blk_level_delay_ns in [10, 20],
        )

    Note that variables used in lambdas must exist, or an exception is
    thrown due to randomization error.
    """
    await uvm_do_on_pri_with(seq_obj, SEQ_OR_ITEM, SEQR, -1, *CONSTRAINTS)
5,344,239
def test_images_default(fake_bar):
    """Test BatteryIcon() with the default theme_path

    Ensure that the default images are successfully loaded.
    """
    batt = BatteryIcon()
    batt.fontsize = 12
    batt.bar = fake_bar
    batt.setup_images()

    assert len(batt.surfaces) == len(BatteryIcon.icon_names)
    for name, surfpat in batt.surfaces.items():
        assert isinstance(surfpat, cairocffi.SurfacePattern)
5,344,240
def _scal_sub_fp(x, scal):
    """Subtract a scalar scal from a vector or matrix x."""
    if _type_of(x) == 'vec':
        return [a - scal for a in x]
    else:
        return [[a - scal for a in x_row] for x_row in x]
5,344,241
def write_source_file_test(name, in_file, out_file):
    """Stamp a write_source_files executable and a test to run against it"""
    _write_source_file(
        name = name + "_updater",
        in_file = in_file,
        out_file = out_file,
        diff_test = False,
    )

    # Note that for testing we update the source files in the sandbox,
    # not the actual source tree.
    _write_source_file_test(
        name = name,
        write_source_file_target = name + "_updater",
        in_file = in_file,
        out_file = out_file,
    )
5,344,242
def asset_name(aoi_model, model, fnf=False):
    """return the standard name of your asset/file"""

    prefix = "kc_fnf" if fnf else "alos_mosaic"

    filename = f"{prefix}_{aoi_model.name}_{model.year}"

    if model.filter != "NONE":
        filename += f"_{model.filter.lower()}"

    if model.rfdi:
        filename += "_rfdi"

    if model.ls_mask:
        filename += "_masked"

    if model.dB:
        filename += "_dB"

    if model.texture:
        filename += "_texture"

    if model.aux:
        filename += "_aux"

    return filename
5,344,243
def create_joint_fusion_workflow( WFname, onlyT1, master_config, runFixFusionLabelMap=True ): """ This function... :param WFname: :param onlyT1: :param master_config: :param runFixFusionLabelMap: :return: """ from nipype.interfaces import ants if onlyT1: n_modality = 1 else: n_modality = 2 CLUSTER_QUEUE = master_config["queue"] CLUSTER_QUEUE_LONG = master_config["long_q"] JointFusionWF = pe.Workflow(name=WFname) inputsSpec = pe.Node( interface=IdentityInterface( fields=[ "subj_t1_image", # Desired image to create label map for "subj_t2_image", # Desired image to create label map for "subj_lmks", # The landmarks corresponding to t1_image "subj_fixed_head_labels", # The fixed head labels from BABC "subj_posteriors", # The BABC posteriors "subj_left_hemisphere", # The warped left hemisphere mask "atlasWeightFilename", # The static weights file name "labelBaseFilename" # Atlas label base name ex) neuro_lbls.nii.gz ] ), run_without_submitting=True, name="inputspec", ) outputsSpec = pe.Node( interface=IdentityInterface( fields=[ "JointFusion_HDAtlas20_2015_label", "JointFusion_HDAtlas20_2015_CSFVBInjected_label", "JointFusion_HDAtlas20_2015_fs_standard_label", "JointFusion_HDAtlas20_2015_lobe_label", "JointFusion_extended_snapshot", "JointFusion_HDAtlas20_2015_dustCleaned_label", "JointFusion_volumes_csv", "JointFusion_volumes_json", "JointFusion_lobe_volumes_csv", "JointFusion_lobe_volumes_json", ] ), run_without_submitting=True, name="outputspec", ) from collections import ( OrderedDict, ) # Need OrderedDict internally to ensure consistent ordering BLICreator = OrderedDict() A2SantsRegistrationPreJointFusion_SyN = OrderedDict() movingROIAuto = OrderedDict() labelMapResample = OrderedDict() NewlabelMapResample = OrderedDict() jointFusion_atlas_mergeindex = 0 merge_input_offset = 1 # Merge nodes are indexed from 1, not zero! 
""" multimodal ants registration if t2 exists """ sessionMakeMultimodalInput = pe.Node( Function( function=make_vector, input_names=["inFN1", "inFN2", "jointFusion"], output_names=["outFNs"], ), run_without_submitting=True, name="sessionMakeMultimodalInput", ) sessionMakeMultimodalInput.inputs.jointFusion = False JointFusionWF.connect( inputsSpec, "subj_t1_image", sessionMakeMultimodalInput, "inFN1" ) """ T2 resample to T1 average image :: BRAINSABC changed its behavior to retain image's original spacing & origin :: Since antsJointFusion only works for the identical origin images for targets, :: Resampling is placed at this stage """ subjectT2Resample = pe.Node( interface=BRAINSResample(), name="BRAINSResample_T2_forAntsJointFusion" ) if not onlyT1: subjectT2Resample.plugin_args = { "qsub_args": modify_qsub_args(master_config["queue"], 1, 1, 1), "overwrite": True, } subjectT2Resample.inputs.pixelType = "short" subjectT2Resample.inputs.interpolationMode = "Linear" subjectT2Resample.inputs.outputVolume = "t2_resampled_in_t1.nii.gz" # subjectT2Resample.inputs.warpTransform= "Identity" # Default is "Identity" JointFusionWF.connect( inputsSpec, "subj_t1_image", subjectT2Resample, "referenceVolume" ) JointFusionWF.connect( inputsSpec, "subj_t2_image", subjectT2Resample, "inputVolume" ) JointFusionWF.connect( subjectT2Resample, "outputVolume", sessionMakeMultimodalInput, "inFN2" ) else: pass # print('jointFusion_atlas_db_base') print("master_config") print(master_config) print("master_config['jointfusion_atlas_db_base']") print((master_config["jointfusion_atlas_db_base"])) jointFusionAtlasDict = read_malf_atlas_db_base( master_config["jointfusion_atlas_db_base"] ) number_of_atlas_sources = len(jointFusionAtlasDict) jointFusionAtlases = OrderedDict() atlasMakeMultimodalInput = OrderedDict() t2Resample = OrderedDict() warpedAtlasLblMergeNode = pe.Node( interface=Merge(number_of_atlas_sources), name="LblMergeAtlas" ) NewwarpedAtlasLblMergeNode = pe.Node( interface=Merge(number_of_atlas_sources), name="fswmLblMergeAtlas" ) # "HACK NOT to use T2 for JointFusion only" # warpedAtlasesMergeNode = pe.Node(interface=Merge(number_of_atlas_sources*n_modality),name="MergeAtlases") warpedAtlasesMergeNode = pe.Node( interface=Merge(number_of_atlas_sources * 1), name="MergeAtlases" ) ## if using Registration masking, then do ROIAuto on fixed and moving images and connect to registraitons UseRegistrationMasking = True if UseRegistrationMasking == True: from nipype.interfaces.semtools.segmentation.specialized import BRAINSROIAuto fixedROIAuto = pe.Node(interface=BRAINSROIAuto(), name="fixedROIAUTOMask") fixedROIAuto.inputs.ROIAutoDilateSize = 10 fixedROIAuto.inputs.outputROIMaskVolume = "fixedImageROIAutoMask.nii.gz" JointFusionWF.connect(inputsSpec, "subj_t1_image", fixedROIAuto, "inputVolume") for jointFusion_atlas_subject in list(jointFusionAtlasDict.keys()): ## Need DataGrabber Here For the Atlas jointFusionAtlases[jointFusion_atlas_subject] = pe.Node( interface=IdentityInterface( fields=["t1", "t2", "label", "lmks", "registration_mask"] ), name="jointFusionAtlasInput" + jointFusion_atlas_subject, ) jointFusionAtlases[jointFusion_atlas_subject].inputs.t1 = jointFusionAtlasDict[ jointFusion_atlas_subject ]["t1"] jointFusionAtlases[jointFusion_atlas_subject].inputs.t2 = jointFusionAtlasDict[ jointFusion_atlas_subject ]["t2"] jointFusionAtlases[ jointFusion_atlas_subject ].inputs.label = jointFusionAtlasDict[jointFusion_atlas_subject]["label"] jointFusionAtlases[ jointFusion_atlas_subject ].inputs.lmks = 
jointFusionAtlasDict[jointFusion_atlas_subject]["lmks"] jointFusionAtlases[ jointFusion_atlas_subject ].inputs.registration_mask = jointFusionAtlasDict[jointFusion_atlas_subject][ "registration_mask" ] ## Create BLI first ######################################################## # Run BLI atlas_to_subject ######################################################## BLICreator[jointFusion_atlas_subject] = pe.Node( interface=BRAINSLandmarkInitializer(), name="BLI_" + jointFusion_atlas_subject, ) BLICreator[ jointFusion_atlas_subject ].inputs.outputTransformFilename = "landmarkInitializer_{0}_to_subject_transform.h5".format( jointFusion_atlas_subject ) JointFusionWF.connect( inputsSpec, "atlasWeightFilename", BLICreator[jointFusion_atlas_subject], "inputWeightFilename", ) JointFusionWF.connect( jointFusionAtlases[jointFusion_atlas_subject], "lmks", BLICreator[jointFusion_atlas_subject], "inputMovingLandmarkFilename", ) JointFusionWF.connect( inputsSpec, "subj_lmks", BLICreator[jointFusion_atlas_subject], "inputFixedLandmarkFilename", ) ##### Initialize with ANTS Transform For SyN currentAtlasToSubjectantsRegistration = ( "SyN_AtlasToSubjectANTsPreJointFusion_" + jointFusion_atlas_subject ) A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject] = pe.Node( interface=ants.Registration(), name=currentAtlasToSubjectantsRegistration ) many_cpu_ANTsSyN_options_dictionary = { "qsub_args": modify_qsub_args(CLUSTER_QUEUE_LONG, 4, 2, 16), "overwrite": True, } A2SantsRegistrationPreJointFusion_SyN[ jointFusion_atlas_subject ].plugin_args = many_cpu_ANTsSyN_options_dictionary if onlyT1: JFregistrationTypeDescription = "FiveStageAntsRegistrationT1Only" else: JFregistrationTypeDescription = "FiveStageAntsRegistrationMultiModal" common_ants_registration_settings( antsRegistrationNode=A2SantsRegistrationPreJointFusion_SyN[ jointFusion_atlas_subject ], registrationTypeDescription=JFregistrationTypeDescription, output_transform_prefix=jointFusion_atlas_subject + "_ToSubjectPreJointFusion_SyN", output_warped_image=jointFusion_atlas_subject + "_2subject.nii.gz", output_inverse_warped_image=None, # NO NEED FOR THIS save_state=None, # NO NEED FOR THIS invert_initial_moving_transform=False, initial_moving_transform=None, ) ## if using Registration masking, then do ROIAuto on fixed and moving images and connect to registraitons if UseRegistrationMasking == True: from nipype.interfaces.semtools.segmentation.specialized import ( BRAINSROIAuto, ) JointFusionWF.connect( fixedROIAuto, "outputROIMaskVolume", A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject], "fixed_image_masks", ) # JointFusionWF.connect(inputsSpec, 'subj_fixed_head_labels', # A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject],'fixed_image_masks') # NOTE: Moving image mask can be taken from Atlas directly so that it does not need to be read in # movingROIAuto[jointFusion_atlas_subject] = pe.Node(interface=BRAINSROIAuto(), name="movingROIAUTOMask_"+jointFusion_atlas_subject) # movingROIAuto.inputs.ROIAutoDilateSize=10 # movingROIAuto[jointFusion_atlas_subject].inputs.outputROIMaskVolume = "movingImageROIAutoMask.nii.gz" # JointFusionWF.connect(jointFusionAtlases[jointFusion_atlas_subject], 't1', movingROIAuto[jointFusion_atlas_subject],'inputVolume') # JointFusionWF.connect(movingROIAuto[jointFusion_atlas_subject], 'outputROIMaskVolume',A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject],'moving_image_masks') JointFusionWF.connect( jointFusionAtlases[jointFusion_atlas_subject], "registration_mask", 
A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject], "moving_image_masks", ) JointFusionWF.connect( BLICreator[jointFusion_atlas_subject], "outputTransformFilename", A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject], "initial_moving_transform", ) """ make multimodal input for atlases """ atlasMakeMultimodalInput[jointFusion_atlas_subject] = pe.Node( Function( function=make_vector, input_names=["inFN1", "inFN2", "jointFusion"], output_names=["outFNs"], ), run_without_submitting=True, name="atlasMakeMultimodalInput" + jointFusion_atlas_subject, ) atlasMakeMultimodalInput[jointFusion_atlas_subject].inputs.jointFusion = False JointFusionWF.connect( jointFusionAtlases[jointFusion_atlas_subject], "t1", atlasMakeMultimodalInput[jointFusion_atlas_subject], "inFN1", ) if not onlyT1: JointFusionWF.connect( jointFusionAtlases[jointFusion_atlas_subject], "t2", atlasMakeMultimodalInput[jointFusion_atlas_subject], "inFN2", ) else: pass JointFusionWF.connect( sessionMakeMultimodalInput, "outFNs", A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject], "fixed_image", ) JointFusionWF.connect( atlasMakeMultimodalInput[jointFusion_atlas_subject], "outFNs", A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject], "moving_image", ) "HACK NOT to use T2 for JointFusion" # JointFusionWF.connect(A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject],'warped_image', # warpedAtlasesMergeNode,'in'+str(merge_input_offset + jointFusion_atlas_mergeindex*n_modality) ) JointFusionWF.connect( A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject], "warped_image", warpedAtlasesMergeNode, "in" + str(merge_input_offset + jointFusion_atlas_mergeindex * 1), ) """ Original t2 resampling """ for modality_index in range(1, n_modality): t2Resample[jointFusion_atlas_subject] = pe.Node( interface=ants.ApplyTransforms(), name="resampledT2" + jointFusion_atlas_subject, ) many_cpu_t2Resample_options_dictionary = { "qsub_args": modify_qsub_args(CLUSTER_QUEUE, 1, 1, 1), "overwrite": True, } t2Resample[ jointFusion_atlas_subject ].plugin_args = many_cpu_t2Resample_options_dictionary t2Resample[jointFusion_atlas_subject].inputs.num_threads = -1 t2Resample[jointFusion_atlas_subject].inputs.dimension = 3 t2Resample[jointFusion_atlas_subject].inputs.output_image = ( jointFusion_atlas_subject + "_t2.nii.gz" ) t2Resample[jointFusion_atlas_subject].inputs.interpolation = "BSpline" t2Resample[jointFusion_atlas_subject].inputs.default_value = 0 t2Resample[jointFusion_atlas_subject].inputs.invert_transform_flags = [ False ] JointFusionWF.connect( A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject], "composite_transform", t2Resample[jointFusion_atlas_subject], "transforms", ) JointFusionWF.connect( inputsSpec, "subj_t1_image", t2Resample[jointFusion_atlas_subject], "reference_image", ) JointFusionWF.connect( jointFusionAtlases[jointFusion_atlas_subject], "t2", t2Resample[jointFusion_atlas_subject], "input_image", ) "HACK NOT to use T2 for JointFusion only" # JointFusionWF.connect(t2Resample[jointFusion_atlas_subject],'output_image', # warpedAtlasesMergeNode,'in'+str(merge_input_offset + jointFusion_atlas_mergeindex*n_modality+modality_index) ) """ Original labelmap resampling """ labelMapResample[jointFusion_atlas_subject] = pe.Node( interface=ants.ApplyTransforms(), name="resampledLabel" + jointFusion_atlas_subject, ) many_cpu_labelMapResample_options_dictionary = { "qsub_args": modify_qsub_args(CLUSTER_QUEUE, 1, 1, 1), "overwrite": True, } labelMapResample[ 
jointFusion_atlas_subject ].plugin_args = many_cpu_labelMapResample_options_dictionary labelMapResample[jointFusion_atlas_subject].inputs.num_threads = -1 labelMapResample[jointFusion_atlas_subject].inputs.dimension = 3 labelMapResample[jointFusion_atlas_subject].inputs.output_image = ( jointFusion_atlas_subject + "_2_subj_lbl.nii.gz" ) labelMapResample[jointFusion_atlas_subject].inputs.interpolation = "MultiLabel" labelMapResample[jointFusion_atlas_subject].inputs.default_value = 0 labelMapResample[jointFusion_atlas_subject].inputs.invert_transform_flags = [ False ] JointFusionWF.connect( A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject], "composite_transform", labelMapResample[jointFusion_atlas_subject], "transforms", ) JointFusionWF.connect( inputsSpec, "subj_t1_image", labelMapResample[jointFusion_atlas_subject], "reference_image", ) JointFusionWF.connect( jointFusionAtlases[jointFusion_atlas_subject], "label", labelMapResample[jointFusion_atlas_subject], "input_image", ) JointFusionWF.connect( labelMapResample[jointFusion_atlas_subject], "output_image", warpedAtlasLblMergeNode, "in" + str(merge_input_offset + jointFusion_atlas_mergeindex), ) ### New labelmap resampling NewlabelMapResample[jointFusion_atlas_subject] = pe.Node( interface=ants.ApplyTransforms(), name="FSWM_WLABEL_" + jointFusion_atlas_subject, ) many_cpu_NewlabelMapResample_options_dictionary = { "qsub_args": modify_qsub_args(CLUSTER_QUEUE, 1, 1, 1), "overwrite": True, } NewlabelMapResample[ jointFusion_atlas_subject ].plugin_args = many_cpu_NewlabelMapResample_options_dictionary NewlabelMapResample[jointFusion_atlas_subject].inputs.num_threads = -1 NewlabelMapResample[jointFusion_atlas_subject].inputs.dimension = 3 NewlabelMapResample[jointFusion_atlas_subject].inputs.output_image = ( jointFusion_atlas_subject + "fswm_2_subj_lbl.nii.gz" ) NewlabelMapResample[ jointFusion_atlas_subject ].inputs.interpolation = "MultiLabel" NewlabelMapResample[jointFusion_atlas_subject].inputs.default_value = 0 NewlabelMapResample[jointFusion_atlas_subject].inputs.invert_transform_flags = [ False ] JointFusionWF.connect( A2SantsRegistrationPreJointFusion_SyN[jointFusion_atlas_subject], "composite_transform", NewlabelMapResample[jointFusion_atlas_subject], "transforms", ) JointFusionWF.connect( inputsSpec, "subj_t1_image", NewlabelMapResample[jointFusion_atlas_subject], "reference_image", ) JointFusionWF.connect( jointFusionAtlases[jointFusion_atlas_subject], "label", NewlabelMapResample[jointFusion_atlas_subject], "input_image", ) JointFusionWF.connect( NewlabelMapResample[jointFusion_atlas_subject], "output_image", NewwarpedAtlasLblMergeNode, "in" + str(merge_input_offset + jointFusion_atlas_mergeindex), ) jointFusion_atlas_mergeindex += 1 ## Now work on cleaning up the label maps from .FixLabelMapsTools import fix_label_map_from_neuromorphemetrics_2012 from .FixLabelMapsTools import recode_label_map ### Original NeuroMorphometrica merged fusion jointFusion = pe.Node(interface=ants.AntsJointFusion(), name="AntsJointFusion") many_cpu_JointFusion_options_dictionary = { "qsub_args": modify_qsub_args(CLUSTER_QUEUE, 10, 8, 16), "overwrite": True, } jointFusion.plugin_args = many_cpu_JointFusion_options_dictionary jointFusion.inputs.num_threads = -1 jointFusion.inputs.dimension = 3 jointFusion.inputs.search_radius = [3] # jointFusion.inputs.method='Joint[0.1,2]' jointFusion.inputs.out_label_fusion = "JointFusion_HDAtlas20_2015_label.nii.gz" # JointFusionWF.connect(inputsSpec, 'subj_fixed_head_labels', jointFusion, 'mask_image') 
JointFusionWF.connect( fixedROIAuto, "outputROIMaskVolume", jointFusion, "mask_image" ) JointFusionWF.connect( warpedAtlasLblMergeNode, "out", jointFusion, "atlas_segmentation_image" ) AdjustMergeListNode = pe.Node( Function( function=adjust_merge_list, input_names=["allList", "n_modality"], output_names=["out"], ), name="AdjustMergeListNode", ) "*** HACK JointFusion only uses T1" # AdjustMergeListNode.inputs.n_modality = n_modality AdjustMergeListNode.inputs.n_modality = 1 JointFusionWF.connect(warpedAtlasesMergeNode, "out", AdjustMergeListNode, "allList") JointFusionWF.connect(AdjustMergeListNode, "out", jointFusion, "atlas_image") AdjustTargetImageListNode = pe.Node( Function( function=adjust_merge_list, input_names=["allList", "n_modality"], output_names=["out"], ), name="AdjustTargetImageListNode", ) AdjustTargetImageListNode.inputs.n_modality = n_modality "*** HACK JointFusion only uses T1" """ Once JointFusion works with T2 properly, delete sessionMakeListSingleModalInput and use sessionMakeMultimodalInput instead """ sessionMakeListSingleModalInput = pe.Node( Function( function=make_vector, input_names=["inFN1", "inFN2", "jointFusion"], output_names=["outFNs"], ), run_without_submitting=True, name="sessionMakeListSingleModalInput", ) sessionMakeListSingleModalInput.inputs.jointFusion = False JointFusionWF.connect( inputsSpec, "subj_t1_image", sessionMakeListSingleModalInput, "inFN1" ) JointFusionWF.connect( sessionMakeListSingleModalInput, "outFNs", jointFusion, "target_image" ) JointFusionWF.connect( jointFusion, "out_label_fusion", outputsSpec, "JointFusion_HDAtlas20_2015_label" ) ## We need to recode values to ensure that the labels match FreeSurer as close as possible by merging ## some labels together to standard FreeSurfer confenventions (i.e. 
for WMQL) RECODE_LABELS_2_Standard_FSWM = [ (15071, 47), (15072, 47), (15073, 47), (15145, 1011), (15157, 1011), (15161, 1011), (15179, 1012), (15141, 1014), (15151, 1017), (15163, 1018), (15165, 1019), (15143, 1027), (15191, 1028), (15193, 1028), (15185, 1030), (15201, 1030), (15175, 1031), (15195, 1031), (15173, 1035), (15144, 2011), (15156, 2011), (15160, 2011), (15178, 2012), (15140, 2014), (15150, 2017), (15162, 2018), (15164, 2019), (15142, 2027), (15190, 2028), (15192, 2028), (15184, 2030), (15174, 2031), (15194, 2031), (15172, 2035), (15200, 2030), ] ## def recode_label_map(InputFileName,OutputFileName,RECODE_TABLE): RecodeToStandardFSWM = pe.Node( Function( function=recode_label_map, input_names=["InputFileName", "OutputFileName", "RECODE_TABLE"], output_names=["OutputFileName"], ), name="RecodeToStandardFSWM", ) RecodeToStandardFSWM.inputs.RECODE_TABLE = RECODE_LABELS_2_Standard_FSWM RecodeToStandardFSWM.inputs.OutputFileName = ( "JointFusion_HDAtlas20_2015_fs_standard_label.nii.gz" ) JointFusionWF.connect( RecodeToStandardFSWM, "OutputFileName", outputsSpec, "JointFusion_HDAtlas20_2015_fs_standard_label", ) ## JointFusion_SNAPSHOT_WRITER for Segmented result checking: # JointFusion_SNAPSHOT_WRITERNodeName = "JointFusion_ExtendedJointFusion_SNAPSHOT_WRITER" # JointFusion_SNAPSHOT_WRITER = pe.Node(interface=BRAINSSnapShotWriter(), name=JointFusion_SNAPSHOT_WRITERNodeName) # JointFusion_SNAPSHOT_WRITER.inputs.outputFilename = 'JointFusion_HDAtlas20_2015_CSFVBInjected_label.png' # output specification # JointFusion_SNAPSHOT_WRITER.inputs.inputPlaneDirection = [2, 1, 1, 1, 1, 0, 0] # JointFusion_SNAPSHOT_WRITER.inputs.inputSliceToExtractInPhysicalPoint = [-3, -7, -3, 5, 7, 22, -22] # JointFusionWF.connect(JointFusion_SNAPSHOT_WRITER,'outputFilename',outputsSpec,'JointFusion_extended_snapshot') myLocalDustCleanup = create_dust_cleanup_workflow( "DUST_CLEANUP", onlyT1, master_config ) JointFusionWF.connect( inputsSpec, "subj_t1_image", myLocalDustCleanup, "inputspec.subj_t1_image" ) if not onlyT1: JointFusionWF.connect( subjectT2Resample, "outputVolume", myLocalDustCleanup, "inputspec.subj_t2_image", ) if runFixFusionLabelMap: ## post processing of jointfusion injectSurfaceCSFandVBIntoLabelMap = pe.Node( Function( function=fix_label_map_from_neuromorphemetrics_2012, input_names=[ "fusionFN", "FixedHeadFN", "posteriorListOfTuples", "LeftHemisphereFN", "outFN", "OUT_DICT", ], output_names=["fixedFusionLabelFN"], ), name="injectSurfaceCSFandVBIntoLabelMap", ) injectSurfaceCSFandVBIntoLabelMap.inputs.outFN = ( "JointFusion_HDAtlas20_2015_CSFVBInjected_label.nii.gz" ) from collections import ( OrderedDict, ) # Need OrderedDict internally to ensure consistent ordering FREESURFER_DICT = OrderedDict( { "BRAINSTEM": 16, "RH_CSF": 24, "LH_CSF": 24, "BLOOD": 15000, "UNKNOWN": 999, "CONNECTED": [11, 12, 13, 9, 17, 26, 50, 51, 52, 48, 53, 58], } ) injectSurfaceCSFandVBIntoLabelMap.inputs.OUT_DICT = FREESURFER_DICT JointFusionWF.connect( jointFusion, "out_label_fusion", injectSurfaceCSFandVBIntoLabelMap, "fusionFN", ) JointFusionWF.connect( inputsSpec, "subj_fixed_head_labels", injectSurfaceCSFandVBIntoLabelMap, "FixedHeadFN", ) JointFusionWF.connect( inputsSpec, "subj_posteriors", injectSurfaceCSFandVBIntoLabelMap, "posteriorListOfTuples", ) JointFusionWF.connect( inputsSpec, "subj_left_hemisphere", injectSurfaceCSFandVBIntoLabelMap, "LeftHemisphereFN", ) JointFusionWF.connect( injectSurfaceCSFandVBIntoLabelMap, "fixedFusionLabelFN", myLocalDustCleanup, "inputspec.subj_label_atlas", ) 
JointFusionWF.connect( injectSurfaceCSFandVBIntoLabelMap, "fixedFusionLabelFN", outputsSpec, "JointFusion_HDAtlas20_2015_CSFVBInjected_label", ) JointFusionWF.connect( myLocalDustCleanup, "outputspec.JointFusion_HDAtlas20_2015_dustCleaned_label", RecodeToStandardFSWM, "InputFileName", ) JointFusionWF.connect( myLocalDustCleanup, "outputspec.JointFusion_HDAtlas20_2015_dustCleaned_label", outputsSpec, "JointFusion_HDAtlas20_2015_dustCleaned_label", ) # JointFusionWF.connect([(inputsSpec, JointFusion_SNAPSHOT_WRITER, [( 'subj_t1_image','inputVolumes')]), # (injectSurfaceCSFandVBIntoLabelMap, JointFusion_SNAPSHOT_WRITER, # [('fixedFusionLabelFN', 'inputBinaryVolumes')]) # ]) else: JointFusionWF.connect( jointFusion, "output_label_image", myLocalDustCleanup, "inputspec.subj_label_atlas", ) JointFusionWF.connect( jointFusion, "output_label_image", outputsSpec, "JointFusion_HDAtlas20_2015_CSFVBInjected_label", ) JointFusionWF.connect( myLocalDustCleanup, "outputspec.JointFusion_HDAtlas20_2015_dustCleaned_label", RecodeToStandardFSWM, "InputFileName", ) JointFusionWF.connect( myLocalDustCleanup, "outputspec.JointFusion_HDAtlas20_2015_dustCleaned_label", outputsSpec, "JointFusion_HDAtlas20_2015_dustCleaned_label", ) # JointFusionWF.connect([(inputsSpec, JointFusion_SNAPSHOT_WRITER, [( 'subj_t1_image','inputVolumes')]), # (jointFusion, JointFusion_SNAPSHOT_WRITER, # [('output_label_image', 'inputBinaryVolumes')]) # ]) """ Compute label volumes """ computeLabelVolumes = create_volume_measure_workflow("LabelVolume", master_config) JointFusionWF.connect( inputsSpec, "subj_t1_image", computeLabelVolumes, "inputspec.subj_t1_image" ) JointFusionWF.connect( myLocalDustCleanup, "outputspec.JointFusion_HDAtlas20_2015_dustCleaned_label", computeLabelVolumes, "inputspec.subj_label_image", ) JointFusionWF.connect( computeLabelVolumes, "outputspec.csvFilename", outputsSpec, "JointFusion_volumes_csv", ) JointFusionWF.connect( computeLabelVolumes, "outputspec.jsonFilename", outputsSpec, "JointFusion_volumes_json", ) ## Lobe Pacellation by recoding if master_config["relabel2lobes_filename"] != None: # print("Generate relabeled version based on {0}".format(master_config['relabel2lobes_filename'])) RECODE_LABELS_2_LobePacellation = read_recoding_list( master_config["relabel2lobes_filename"] ) RecordToFSLobes = pe.Node( Function( function=recode_label_map, input_names=["InputFileName", "OutputFileName", "RECODE_TABLE"], output_names=["OutputFileName"], ), name="RecordToFSLobes", ) RecordToFSLobes.inputs.RECODE_TABLE = RECODE_LABELS_2_LobePacellation RecordToFSLobes.inputs.OutputFileName = ( "JointFusion_HDAtlas20_2015_lobe_label.nii.gz" ) JointFusionWF.connect( RecodeToStandardFSWM, "OutputFileName", RecordToFSLobes, "InputFileName" ) JointFusionWF.connect( RecordToFSLobes, "OutputFileName", outputsSpec, "JointFusion_HDAtlas20_2015_lobe_label", ) """ Compute lobe volumes """ computeLobeVolumes = create_volume_measure_workflow("LobeVolume", master_config) JointFusionWF.connect( inputsSpec, "subj_t1_image", computeLobeVolumes, "inputspec.subj_t1_image" ) JointFusionWF.connect( RecordToFSLobes, "OutputFileName", computeLobeVolumes, "inputspec.subj_label_image", ) JointFusionWF.connect( computeLobeVolumes, "outputspec.csvFilename", outputsSpec, "JointFusion_lobe_volumes_csv", ) JointFusionWF.connect( computeLobeVolumes, "outputspec.jsonFilename", outputsSpec, "JointFusion_lobe_volumes_json", ) return JointFusionWF
5,344,244
def do_ktools_mem_limit(max_process_id, filename):
    """
    Set each Ktools pipeline to trap and terminate on hitting its memory allocation limit.

    Limit: maximum available memory / max_process_id
    """
    cmd_mem_limit = 'ulimit -v $(ktgetmem {})'.format(max_process_id)
    print_command(filename, cmd_mem_limit)
5,344,245
def get_partition_info_logic(cluster_name):
    """
    GET the partition (isolation zone) information of a cluster.

    :return: resp, status
             resp: JSON-formatted response data
             status: response status code
    """
    data = ''
    status = ''
    message = ''
    resp = {"status": status, "data": data, "message": message}
    partition_info = SfoPartitionsInfo.query.filter_by(cluster_name=cluster_name).order_by(
        SfoPartitionsInfo.update_time.desc()).first()
    if partition_info:
        status = 200
        message = 'OK'
        data = partition_info
    else:
        status = 404
        message = 'Not Found Record'
    resp.update({"status": status, "data": data, "message": message})
    return resp, status
5,344,246
def choice_group_name(identifier: Identifier) -> Identifier: """ Generate the XML group name for the interface of the given class ``identifier``. >>> choice_group_name(Identifier("something")) 'something_choice' >>> choice_group_name(Identifier("URL_to_something")) 'urlToSomething_choice' """ parts = identifier.split("_") assert ( len(parts) >= 1 ), f"Expected at least one part for the valid identifier: {identifier}" if len(parts) == 1: return Identifier(f"{parts[0].lower()}_choice") return Identifier( "{}{}_choice".format( parts[0].lower(), "".join(part.capitalize() for part in parts[1:]) ) )
5,344,247
def init_all_sources_wavelets(observation, centers, min_snr=50, bulge_grow=5, disk_grow=5, use_psf=True, bulge_slice=slice(None,2), disk_slice=slice(2, -1), scales=5, wavelets=None): """Initialize all sources using wavelet detection images. This does not initialize the SED and morpholgy parameters, so `parameterize_source` must still be run to select a parameterization (optimizer) that `LiteBlend` requires for fitting. Parameters ---------- observation: `scarlet.lite.LiteObservation` The multiband observation of the blend. centers: `list` of `tuple` Peak locations for all of the sources to attempt to initialize. wavelets: `numpy.ndarray` The array of wavelet coefficients `(scale, y, x)` used for detection. bulge_slice, disk_slice: `slice` The slice used to select the wavelet scales used for the bulge/disk. bulge_grow, disk_grow: `int` The number of pixels to grow the bounding box of the bulge/disk to leave extra room for growth in the first few iterations. use_psf: `bool` Whether or not to use the PSF for single component sources. If `use_psf` is `False` then only sources with low signal at all scales are initialized with the PSF morphology. min_snr: `float` Minimum signal to noise for each component. So if `min_snr=50`, a source must have SNR > 50 to be initialized with one component and SNR > 100 for 2 components. Returns ------- sources: `list` of `scarlet.lite.LiteSource` The sources that have been initialized. """ init = WaveletInitParameters( observation, bulge_slice, disk_slice, bulge_grow, disk_grow, use_psf, scales, wavelets) sources = [] for center in centers: snr = np.floor(calculate_snr(observation.images, observation.variance, observation.psfs, center)) component_snr = snr / min_snr source = init_wavelet_source(center, component_snr, init) sources.append(source) return sources
5,344,248
def init_coreg_conversion_wf(name: str = "coreg_conversion_wf") -> pe.Workflow:
    """
    Initiate a workflow to convert input files to NIfTI format for ease of use.

    Parameters
    ----------
    name : str, optional
        Workflow's name, by default "coreg_conversion_wf"

    Returns
    -------
    pe.Workflow
        A NIfTI conversion workflow
    """
    wf = pe.Workflow(name=name)
    wf.connect(NII_CONVERSION)
    return wf
5,344,249
def rgb2hex(rgb_color): """ 'rgb(180, 251, 184)' => '#B4FBB8' """ rgb = [int(i) for i in rgb_color.strip('rgb()').split(',')] return '#{:02x}{:02x}{:02x}'.format(rgb[0], rgb[1], rgb[2])
5,344,250
def test_process_recipient_list_with_valid_phone_strings(): """Test ability to process recipient list with valid phone numbers.""" data = sms.process_recipient_list("+1-212-555-0001", 2) assert len(data) == 1 data = sms.process_recipient_list("+1-212-555-0001|+1-212-555-0002", 2) assert len(data) == 2 data = sms.process_recipient_list(["+1-212-555-0001", "+1-212-555-0002"], 2) assert len(data) == 2 data = sms.process_recipient_list( "+1-212-555-0001|+1-212-555-0002|+1-212-555-0003", 2 ) assert len(data) == 2 data = sms.process_recipient_list( ["+1-212-555-0001", "+1-212-555-0002", "+1-212-555-0003"], 2 ) assert len(data) == 2
5,344,251
def my_script(arg1: int, arg2: str): """Test script""" for i in range(10): log('Dummy', float(i), global_step=i)
5,344,252
def validate_tax_request(tax_dict): """Return the sales tax that should be collected for a given order.""" client = get_client() if not client: return try: tax_data = client.tax_for_order(tax_dict) except taxjar.exceptions.TaxJarResponseError as err: frappe.throw(_(sanitize_error_response(err))) else: return tax_data
5,344,253
def test_model_design_integration_risky(model): """Tests integration of model and design. Basically conducts Parameter Estimation""" D = DesignSpaceBuilder( DA=[0.0], DB=[0.0], PA=[1.0], PB=list(np.linspace(0.01, 0.99, 91)), RA=list(100 * np.linspace(0.05, 0.95, 19)), RB=[100.0], ).build() design_thing = BayesianAdaptiveDesignGenerator(D, max_trials=max_trials) model = model(n_particles=n_particles) model = model.generate_faux_true_params() simulated_experiment_trial_loop(design_thing, model)
5,344,254
def union_with(array, *others, **kargs): """This method is like :func:`union` except that it accepts comparator which is invoked to compare elements of arrays. Result values are chosen from the first array in which the value occurs. Args: array (list): List to unionize with. others (list): Lists to unionize with `array`. Keyword Args: comparator (callable, optional): Function to compare the elements of the arrays. Defaults to :func:`.is_equal`. Returns: list: Unionized list. Example: >>> comparator = lambda a, b: (a % 2) == (b % 2) >>> union_with([1, 2, 3], [2, 3, 4], comparator=comparator) [1, 2] >>> union_with([1, 2, 3], [2, 3, 4]) [1, 2, 3, 4] .. versionadded:: 4.0.0 """ if not others: return array[:] comparator, others = parse_iteratee('comparator', *others, **kargs) return uniq_with(flatten([array] + list(others)), comparator=comparator)
5,344,255
def CalculateMoranAutoVolume(mol):
    """
    #################################################################
    Calculation of Moran autocorrelation descriptors based on
    carbon-scaled atomic van der Waals volume.

    Usage: res = CalculateMoranAutoVolume(mol)

    Input: mol is a molecule object.

    Output: res is a dict containing eight Moran autocorrelation descriptors.
    #################################################################
    """
    res = {}
    for i in range(8):
        res["MATSv" + str(i + 1)] = _CalculateMoranAutocorrelation(
            mol, lag=i + 1, propertylabel="V"
        )
    return res
5,344,256
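# Illustrative use of CalculateMoranAutoVolume above — a sketch only: it assumes
# the module's usual RDKit-style molecule object and that the helper
# _CalculateMoranAutocorrelation is available in the same namespace.
from rdkit import Chem

mol = Chem.MolFromSmiles("CCO")               # ethanol
descriptors = CalculateMoranAutoVolume(mol)
for name, value in sorted(descriptors.items()):
    print(name, value)                        # MATSv1 ... MATSv8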
def parse_acl(acl_iter): """Parse a string, or list of ACE definitions, into usable ACEs.""" if isinstance(acl_iter, basestring): acl_iter = [acl_iter] for chunk in acl_iter: if isinstance(chunk, basestring): chunk = chunk.splitlines() chunk = [re.sub(r'#.+', '', line).strip() for line in chunk] chunk = filter(None, chunk) else: chunk = [chunk] for ace in chunk: # If this was provided as a string, then parse the permission set. # Otherwise, use it as-is, which will result in an equality test. if isinstance(ace, basestring): ace = ace.split(None, 2) state, predicate, permission_set = ace yield parse_state(state), parse_predicate(predicate), parse_permission_set(permission_set) else: state, predicate, permission_set = ace yield parse_state(state), parse_predicate(predicate), permission_set
5,344,257
def execute(cursor, query):
    """Execute a query on the given cursor, retrying indefinitely on failure
    (intended for slow or flaky database nodes)."""
    while True:
        try:
            cursor.execute(query)
            break
        except Exception as e:
            print("Database query: {} {}".format(cursor, query))
            print("Database retry reason: {}".format(e))
    return cursor
5,344,258
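# A minimal usage sketch for execute() above with a sqlite3 cursor (any DB-API
# cursor exposing .execute() should work); the helper retries forever, so the
# queries here are deliberately valid ones.
import sqlite3

conn = sqlite3.connect(":memory:")
cur = execute(conn.cursor(), "CREATE TABLE t (x INTEGER)")
cur = execute(cur, "SELECT name FROM sqlite_master WHERE type='table'")
print(cur.fetchall())                         # [('t',)]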
def zip_to_gdal_path(filepath):
    """
    Takes in a zip filepath and, if the zip contains ASCII (.asc) files,
    prepends '/vsizip/' to their paths so that they can be opened with GDAL
    without extraction.
    """
    zip_file_list = []
    if zipfile.is_zipfile(filepath):
        try:
            zip_file = zipfile.ZipFile(filepath)
            zip_file_contents = ['/vsizip/{0}/{1}'.format(filepath, zip_info_object.filename)
                                 for zip_info_object in zip_file.filelist
                                 if zip_info_object.filename.endswith('.asc')]
            zip_file_list.extend(zip_file_contents)
            zip_file.close()
        except zipfile.BadZipfile:
            pass
    return zip_file_list
5,344,259
def calculateMACD(prices_data): """Calculate the MACD of EMA15 and EMA30 of an asset Args: prices_data (dataframe): prices data Returns: macd (pandas series object): macd of the asset macd_signal (pandas series object): macd signal of the asset """ ema15 = pd.Series(prices_data['prices'].ewm( span=15, min_periods=15).mean()) ema30 = pd.Series(prices_data['prices'].ewm( span=30, min_periods=30).mean()) macd = pd.Series(ema15 - ema30) macd_signal = pd.Series(macd.ewm(span=9, min_periods=9).mean()) return macd, macd_signal
5,344,260
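# Hypothetical usage of calculateMACD above: the column name 'prices' matches the
# function's expectation; the random-walk data and the crossover rule are only for
# illustration.
import numpy as np
import pandas as pd

prices_data = pd.DataFrame(
    {"prices": 100 + np.random.default_rng(0).normal(0, 1, 200).cumsum()}
)
macd, macd_signal = calculateMACD(prices_data)
bullish = (macd > macd_signal) & (macd.shift(1) <= macd_signal.shift(1))
print(prices_data.index[bullish.fillna(False)])   # indices of bullish crossovers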
async def async_setup(hass: HomeAssistant, config: dict): """Set up the Logitech Squeezebox component.""" return True
5,344,261
def partida_19(): """partida_19""" check50.run("python3 volleyball.py").stdin("A\nA\nA\nB\nB\nB\nA\nB\nA\nA\nB\nA\nA", prompt=False).stdout("EMPIEZA\nSACA A\nGANA A\nA 1 B 0\nSACA A\nGANA A\nA 2 B 0\nSACA A\nGANA A\nA 3 B 0\nSACA A\nGANA B\nA 3 B 0\nSACA B\nGANA B\nA 3 B 1\nSACA B\nGANA B\nA 3 B 2\nSACA B\nGANA A\nA 3 B 2\nSACA A\nGANA B\nA 3 B 2\nSACA B\nGANA A\nA 3 B 2\nSACA A\nGANA A\nA 4 B 2\nSACA A\nGANA B\nA 4 B 2\nSACA B\nGANA A\nA 4 B 2\nSACA A\nGANA A\nA 5 B 2\nFINAL", regex=False).exit(0)
5,344,262
def generateDwcaExportFiles(request): """ Generates DarwinCore-Archive files for the 'Export formats' page. """ error_message = None # if request.method == "GET": form = forms.GenerateDwcaExportFilesForm() contextinstance = {'form' : form, 'error_message' : error_message} contextinstance.update(csrf(request)) return render(request, "generate_dwca_exportfiles.html", contextinstance) elif request.method == "POST": # form = forms.GenerateDwcaExportFilesForm(request.POST) if form.is_valid(): # datatype_list = [] year_from = request.POST['year_from'] year_to = request.POST['year_to'] monitoring_type = request.POST['monitoring_type'] user = request.POST['user'] password = request.POST['password'] # if ('phytobenthos' in request.POST) and (request.POST['phytobenthos'] == 'on'): datatype_list.append('Epibenthos') # datatype_list.append('Phytobenthos') if ('phytoplankton' in request.POST) and (request.POST['phytoplankton'] == 'on'): datatype_list.append('Phytoplankton') if ('zoobenthos' in request.POST) and (request.POST['zoobenthos'] == 'on'): datatype_list.append('Zoobenthos') if ('zooplankton' in request.POST) and (request.POST['zooplankton'] == 'on'): datatype_list.append('Zooplankton') # if password != settings.APPS_VALID_USERS_AND_PASSWORDS.get(user, None): error_message = 'Not a valid user or password. Please try again...' # if error_message == None: sharkdata_core.SharkdataAdminUtils().generateDwcaExportFilesInThread( datatype_list, year_from, year_to, monitoring_type, user) # OK. if error_message == None: return HttpResponseRedirect("/sharkdataadmin") # contextinstance = {'form' : form, 'error_message' : error_message} contextinstance.update(csrf(request)) return render(request, "generate_dwca_exportfiles.html", contextinstance) # Not a valid request method. return HttpResponseRedirect("/sharkdataadmin")
5,344,263
def composer_update(php_bin, composer_bin, memory_limit=False):
    """
    Updates composer for project
    :param php_bin: path to php executable
    :param composer_bin: path to composer executable
    :param memory_limit: memory limit for composer update
    :return:
    """
    if memory_limit:
        command = '%s -d memory_limit=%s %s update' % (php_bin, memory_limit, composer_bin)
    else:
        command = '%s %s update' % (php_bin, composer_bin)
    print(command)
    message = u'Updating composer'
    _run_command(command, message)
5,344,264
def treeFromList(l): """ Builds tree of SNode from provided list Arguments: l: the list with tree representation Return: the tuple with root node of the tree and the sentence index of last leaf node """ root = SNode("S") s_index = 0 for child in l: node = SNode(child["name"]) _, s_index = treeFromDict(child, s_index, node) root.children.append(node) return (root, s_index)
5,344,265
def with_event_loop(func): """ This method decorates functions run on dask workers with an async function call Namely, this allows us to manage the execution of a function a bit better, and especially, to exit job execution if things take too long (1hr) Here, the function func is run in a background thread, and has access to the dask schedular through the 'runner'. Critically, sumbission to this runner/client looks the same regardless of if it occurs in a sub-process/thread Mostly, this is a workaround to impliment some form of timeout when running very long-tasks on dask. While one cannot (or should not) kill the running thread, Dask will cleanup the child tasks eventually once all jobs finish. Usage: @with_dask_event_loop my_job(args, kwargs, runner=None): runner.submit(sleep, 10) """ async def wrapped(*args, **kwargs): loop = asyncio.get_event_loop() # Get our current dask worker, functions wrapped with this method can only be run on dask workers logger.info ("Initializing job... getting parent worker") try: worker = get_worker() except ValueError as exc: logger.error("Could not get dask worker!") raise RuntimeError("Data-processing job called without parent dask worker") except Exception as exc: logger.exception(f"Unknown exception when getting dask worker") logger.info (f"Successfully found worker {worker}") logger.info (f"Running job {func} with args: {args}, kwargs: {kwargs}") # Get our worker client, and pass as a dask client exector with worker_client() as runner: # We'll run our function in a background thread # executor = ProcessPoolExecutor(max_workers=1) # Add our runner to kwargs kwargs['runner'] = runner # Kick off the job job = loop.run_in_executor(worker.executor, partial(func, *args, **kwargs)) # Move on from job if things take more than hour done, pending = await asyncio.wait([job], timeout=3600) # Do some cleanup if len(pending) != 0: logger.warning ("Killing pending tasks!") for task in pending: task.cancel() # executor.shutdown(wait=False) # Get the return value if len(done) == 1: return_value = done.pop().result() else: return_value = None # Logg that we're done! logger.info (f"Done running job, returning {return_value}") return return_value def run_loop(*args, **kwargs): """ Uses async and threading capabilities Use of background thread causes this error on shutdown: ERROR - asyncio - task: <Task pending coro=<HTTP1ServerConnection._server_request_loop() running at /gpfs/mskmindhdp_emc/sw/env/lib64/python3.6/site-packages/tornado/http1connection.py:817> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f52e8259318>()]> cb=[IOLoop.add_future.<locals>.<lambda>() at /gpfs/mskmindhdp_emc/sw/env/lib64/python3.6/site-packages/tornado/ioloop.py:690]> Seems like some async task gets hung up in the child thread... """ loop = asyncio.new_event_loop() result = loop.run_until_complete(wrapped(*args, **kwargs)) loop.close() return result
5,344,266
def test_3d_time(): """reading/writing of 3D NMRPipe time domain data""" dic, data = ng.pipe.read( os.path.join(DATA_DIR, "nmrpipe_3d", "data", "test%03d.fid")) sdic, sdata = ng.pipe.read( os.path.join(DATA_DIR, "nmrpipe_3d", "data", "test001.fid")) assert data.shape == (128, 88, 1250) assert data.dtype == 'complex64' assert round(data[0, 1, 2].real, 2) == -7.98 assert round(data[0, 1, 2].imag, 2) == 33.82 assert round(data[10, 22, 5].real, 2) == 15.71 assert round(data[10, 22, 5].imag, 2) == 15.1 # and the first slice assert sdata.shape == (88, 1250) assert sdata.dtype == 'complex64' assert round(sdata[1, 2].real, 2) == -7.98 assert round(sdata[1, 2].imag, 2) == 33.82 assert round(sdata[22, 5].real, 2) == 22.65 assert round(sdata[22, 5].imag, 2) == 13.65 # slice/data matching assert_array_equal(data[0], sdata) write_readback_3d(dic, data)
5,344,267
def create_table_of_content(steering_file, data_root, load_all_tables, indices, names):
    """
    Create a table of content for all the tables found in the STEERING_FILE that can be used in your submission.

    You can put the output (snippet) directly into your 'steering_file' as a table called 'overview'.
    You can also customise the table of content by changing the provided snippet to suit your requirements.

    Args:
       steering_file: the path to the (hepdata_maker) steering file to create the table of content for.
    """
    console.rule("table of content", characters="=")
    requested_tables = utils.get_requested_table_list(steering_file, load_all_tables, indices, names)
    submission = submission_for_selected_tables(steering_file, data_root, load_all_tables, requested_tables)

    print_which_tables = "all" if load_all_tables else requested_tables
    console.print(f"Creating table of content for {print_which_tables} tables.")
    submission.create_table_of_content()
    toc = [table for table in submission.tables if table.name == 'overview']
    if len(toc) < 1:
        log.error("Issue encountered. Somehow the table of content was not created. This looks like a bug in hepdata_maker itself.")
    if len(toc) > 1:
        log.error("Several 'overview' tables encountered. You have probably submitted faulty data.")
    console.rule("retrieved table of content:")
    console.print(toc[0].title)
    console.rule("steering file snippet")
    table_json = toc[0].steering_file_snippet()
    console.rule("You can add the following table to your json steering file:")
    console.print(json.dumps(table_json, indent=4))
5,344,268
def get_current_time(): """Retrieve a Django compliant pre-formated datetimestamp.""" datetime_tz_naive = datetime.datetime.now() django_timezone = settings.TIME_ZONE datetime_tz = pytz.timezone(django_timezone).localize(datetime_tz_naive) return datetime_tz
5,344,269
def is_eval_epoch(cfg, cur_epoch): """ Determine if the model should be evaluated at the current epoch. Args: cfg (CfgNode): configs. Details can be found in sgs/config/defaults.py cur_epoch (int): current epoch. """ return ( cur_epoch + 1 ) % cfg.TRAIN.EVAL_PERIOD == 0 or cur_epoch + 1 == cfg.SOLVER.MAX_EPOCH
5,344,270
def get_app_run_sleep(): """Returns the entrypoint command that starts the app.""" return get(cs.ODIN_CONF, cs.APP_SECTION, cs.RUN_SLEEP)
5,344,271
def plot_sphere(Radius, Point, part="Part::Feature", name="Sphere", grp="WorkObjects"): """ makeSphere(radius,[pnt, dir, angle1,angle2,angle3]) -- Make a sphere with a given radius By default pnt=Vector(0,0,0), dir=Vector(0,0,1), angle1=0, angle2=90 and angle3=360 """ if not(App.ActiveDocument.getObject(grp)): App.ActiveDocument.addObject("App::DocumentObjectGroup", grp) sphere = App.ActiveDocument.addObject(part, name) Sphere = Part.makeSphere(Radius, Point) sphere.Shape = Sphere App.ActiveDocument.getObject(grp).addObject(sphere) sphere_User_Name = sphere.Label Gui.ActiveDocument.getObject(sphere_User_Name).PointColor = (1.00, 0.67, 0.00) Gui.ActiveDocument.getObject(sphere_User_Name).LineColor = (1.00, 0.67, 0.00) Gui.ActiveDocument.getObject(sphere_User_Name).ShapeColor = (0.00, 0.33, 1.00) Gui.ActiveDocument.getObject(sphere_User_Name).Transparency = 75 return sphere_User_Name, sphere
5,344,272
def test_harvester(mocker): """Test the reddit harvester.""" mocker.patch('praw.Reddit', new_callable=PrawMock) source = Source.objects.get(code='reddit') harvester = Harvester(source) jobs = harvester.harvest() assert jobs is not None assert len(list(jobs)) >= 100
5,344,273
def process_test_set(num_workers, failed_save_file): """ Extract video frames for the test set. :param num_workers: Number of worker processes. :param failed_save_file: Path to a log of failed extractions. :return: None. """ pool = parallel.Pool(None, config.TEST_ROOT, config.TEST_FRAMES_ROOT, num_workers, failed_save_file) pool.start_workers() pool.feed_videos() pool.stop_workers()
5,344,274
def write_json_path(out_json_path, json_data):
    """
    Write JSON data to a file.

    :param out_json_path: output path of the JSON file
    :param json_data: the JSON-serializable data to write
    :return:
    """
    with open(out_json_path, 'w') as f:
        json.dump(json_data, f, indent=4)
5,344,275
async def my_job_async_gen(my_job_manager): """Fixture provides the job definition (async generator). Returns: The object yielded by the fixture `my_job_manager` with one extra attribute: `job` - job function decorated with `@job` and wrapped into `sync_to_async` for convenience (tests are async). """ @my_job_manager.job_manager_class.job() async def my_job_async_gen(yieldsteps, *, mustfail): """Job function which yields the progress.""" for i in range(yieldsteps): progress = { 'message': 'step %s or %s' % (i + 1, yieldsteps), 'payload': dict({'step': i + 1, 'total': yieldsteps}), 'readiness': (i + 1) / yieldsteps, } yield progress if mustfail: raise RuntimeError('Job failed, as requested!') my_job_manager.job_orig = my_job_async_gen my_job_manager.job = channels.db.database_sync_to_async(my_job_async_gen) return my_job_manager
5,344,276
def test(): """Test. """ all_boards = glob.glob("src/boards/*") boards = set(all_boards) - set("cygwin") for board in boards: git_clean_dfx() # Building one application is enough to ensure that all code # in src/ compiles. command = [ "make", "APPS=examples/default-configuration", "BOARD=" + os.path.basename(board), "all" ] print(" ".join(command)) subprocess.check_call(command) # Run linux tests. git_clean_dfx() command = [ "make", "test" ] print(" ".join(command)) subprocess.check_call(command) generate_platformio() # Build a PlatformIO application. command = [ "platformio", "run", "-v" ] subprocess.check_call(command, cwd="examples/platformio/blink") generate_arduino() # Build an application using the Arduino builder. for family, board in [("avr", "nano"), ("avr", "uno"), ("avr", "mega2560"), ("avr", "pro-micro"), ("sam", "arduino_due_x_dbg"), ("esp", "esp01"), ("esp", "esp12e"), ("esp32", "nano32"), ("esp32", "esp32_devkitc"), ("esp32", "maple_esp32")]: command = [ "make", "all", "FAMILY=" + family, "BOARD=" + board ] subprocess.check_call(command, cwd="examples/arduino/blink")
5,344,277
def VerifyLatestAFDOFile(afdo_release_spec, buildroot, gs_context): """Verify that the latest AFDO profile for a release is suitable. Find the latest AFDO profile file for a particular release and check that it is not too stale. The latest AFDO profile name for a release can be found in a file in GS under the name latest-chrome-<arch>-<release>.afdo. Args: afdo_release_spec: architecture and release to find the latest AFDO profile for. buildroot: buildroot where AFDO data should be stored. gs_context: GS context to retrieve data. Returns: The name of the AFDO profile file if a suitable one was found. None otherwise. """ latest_afdo_url = LATEST_CHROME_AFDO_URL % afdo_release_spec # Check if latest-chrome-<arch>-<release>.afdo exists. latest_detail = None if gs_context.Exists(latest_afdo_url): latest_detail = gs_context.LSWithDetails(latest_afdo_url) if not latest_detail: cros_build_lib.Info('Could not find latest AFDO info file %s' % latest_afdo_url) return None # Verify the AFDO profile file is not too stale. mod_date = latest_detail[0][2] curr_date = datetime.datetime.now() allowed_stale_days = datetime.timedelta(days=AFDO_ALLOWED_STALE) if (curr_date - mod_date) > allowed_stale_days: cros_build_lib.Info('Found latest AFDO info file %s but it is too old' % latest_afdo_url) return None # Then get the name of the latest valid AFDO profile file. local_dir = AFDO_BUILDROOT_LOCAL % {'build_root': buildroot } latest_afdo_file = LATEST_CHROME_AFDO_FILE % afdo_release_spec latest_afdo_path = os.path.join(local_dir, latest_afdo_file) gs_context.Copy(latest_afdo_url, latest_afdo_path) return osutils.ReadFile(latest_afdo_path).strip()
5,344,278
def cluster(self, net_cvg, net_boxes): """ Read output of inference and turn into Bounding Boxes """ batch_size = net_cvg.shape[0] boxes = np.zeros([batch_size, MAX_BOXES, 5]) for i in range(batch_size): cur_cvg = net_cvg[i] cur_boxes = net_boxes[i] if (self.is_groundtruth): # Gather proposals that pass a threshold - propose_boxes, propose_cvgs, mask = gridbox_to_boxes( cur_cvg, cur_boxes, self) # Remove duplicates from ground truth new_array = list({tuple(row) for row in propose_boxes}) boxes_cur_image = np.asarray(new_array, dtype=np.float16) else: # Gather proposals that pass a threshold - propose_boxes, propose_cvgs, mask = gridbox_to_boxes(cur_cvg, cur_boxes, self) # Vote across the proposals to get bboxes boxes_cur_image = vote_boxes(propose_boxes, propose_cvgs, mask, self) boxes_cur_image = np.asarray(boxes_cur_image, dtype=np.float16) if (boxes_cur_image.shape[0] != 0): [r, c] = boxes_cur_image.shape boxes[i, 0:r, 0:c] = boxes_cur_image return boxes
5,344,279
def _disable_autopx(self):
    """Disable %autopx by restoring the original runsource."""
    if hasattr(self, 'autopx'):
        if self.autopx:
            self.runsource = self._original_runsource
            self.autopx = False
            print("Auto Parallel Disabled")
5,344,280
def _getMissingResidues(lines): """Returns the missing residues, if applicable.""" try: missing_residues = [] for i, line in lines['REMARK 465']: if len(line.split()) == 5 and int(line.split()[4]) > 0: missing_residues.append("{0:<3s} {1}{2:>4d}".format(line.split()[2], line.split()[3], int(line.split()[4]))) return missing_residues except: return "no missing residue information"
5,344,281
def test_changing_font_size(): """Test that the font_size property properly scales DecimalNumber.""" num = DecimalNumber(0, font_size=12) num.font_size = 48 assert num.height == DecimalNumber(0, font_size=48).height
5,344,282
def register_single_sampler(name): """ A decorator with a parameter. This decorator returns a function which the class is passed. """ name = name.lower() def _register(sampler): if name in _registered_single_sampler: raise ValueError("Name {} already chosen, choose a different name.".format(name)) _registered_single_sampler[name] = sampler return sampler return _register
5,344,283
def export_txt(obj, file_name, two_dimensional=False, **kwargs): """ Exports control points as a text file. For curves the output is always a list of control points. For surfaces, it is possible to generate a 2-D control point output file using ``two_dimensional`` flag. Please see the supported file formats for more details on the text file format. Please see :py:func:`.exchange.import_txt()` for detailed description of the keyword arguments. :param obj: a curve or a surface object :type obj: abstract.Curve, abstract.Surface :param file_name: file name of the text file to be saved :type file_name: str :param two_dimensional: type of the text file (only works for Surface objects) :type two_dimensional: bool :raises IOError: an error occurred writing the file """ # Check if the user has set any control points if obj.ctrlpts is None or len(obj.ctrlpts) == 0: raise ValueError("There are no control points to save!") # Check the usage of two_dimensional flag if isinstance(obj, abstract.Curve) and two_dimensional: # Silently ignore two_dimensional flag two_dimensional = False # File delimiters col_sep = kwargs.get('col_separator', ";") sep = kwargs.get('separator', ",") content = exch.export_text_data(obj, sep, col_sep, two_dimensional) return exch.write_file(file_name, content)
5,344,284
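# Sketch of calling export_txt above with a geomdl-style curve (the abstract.Curve
# check suggests geomdl/NURBS-Python objects); the file name and control points
# below are made up for illustration.
from geomdl import BSpline

crv = BSpline.Curve()
crv.degree = 3
crv.ctrlpts = [[0, 0, 0], [1, 2, 0], [3, 2, 0], [4, 0, 0]]
crv.knotvector = [0, 0, 0, 0, 1, 1, 1, 1]
export_txt(crv, "curve_ctrlpts.txt", separator=", ")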
def add_permissions_to_file(filename, add_stat): """ Adds file permission on a existing file path """ st = os.stat(filename) try: os.chmod(filename, st.st_mode | add_stat) except Exception as e: logging.error("Path '%s' stat is %s", filename, st) raise e
5,344,285
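# Example use of add_permissions_to_file above: mark a freshly written script as
# executable for its owner and group (path and permission bits chosen for illustration).
import os
import stat
import tempfile

fd, path = tempfile.mkstemp(suffix=".sh")
os.close(fd)
add_permissions_to_file(path, stat.S_IXUSR | stat.S_IXGRP)
print(oct(os.stat(path).st_mode))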
def compile(expr): """ Force compilation of expression for the Postgres target """ from .client import PostgresDialect from ibis.sql.alchemy import to_sqlalchemy return to_sqlalchemy(expr, dialect=PostgresDialect)
5,344,286
def build_bar_chart(x_axis_name, request, **kwargs): """This abstract function is used to call submethods/specific model""" base_query = request.GET.get("base_query", None) bar_chart_input = [] if base_query == 'group_users': bar_chart_input = group_users_per_column(x_axis_name) elif base_query == 'group_job_user': user_id = request.GET.get("user_id", None) bar_chart_input = user_jobs_groups(x_axis_name, user_id) elif base_query == 'popular_skills_market': limit_skills = int(request.GET.get("limit_skills", 10)) asc = request.GET.get("asc", "False") bar_chart_input = popular_skills(asc, limit_skills) elif base_query == 'popular_courses_market': limit_skills = int(request.GET.get("limit_courses", 10)) asc = request.GET.get("asc", "False") bar_chart_input = popular_courses(asc, limit_skills) elif base_query == 'popular_skills_users': limit_skills = int(request.GET.get("limit_skills", 10)) asc = request.GET.get("asc", "False") bar_chart_input = popular_user_skills(asc, limit_skills) elif base_query == 'popular_courses_users': limit_skills = int(request.GET.get("limit_courses", 10)) asc = request.GET.get("asc", "False") bar_chart_input = popular_user_courses(asc, limit_skills) elif base_query == 'group_course_professor': limit_professors = int(request.GET.get("limit_professors", 10)) asc_ordering = request.GET.get("asc", "False") bar_chart_input = group_courses_users(limit_professors, asc_ordering) elif base_query == 'salary_info': y_column = request.GET.get('y_column', None) y_var_names = request.GET.getlist("y_var_names[]", []) agg = request.GET.get('agg', 'mean') if y_column and y_var_names: bar_chart_input = salary_information(data=y_var_names, y_column=y_column, aggregation=agg) print(bar_chart_input) else: bar_chart_input = salary_information(aggregation=agg) elif base_query == 'skill_demand_per_column': limit_results = int(request.GET.get("limit_results", 10)) asc_ordering = request.GET.get("asc", "False") y_var_names = request.GET.getlist("y_var_names[]", []) column = request.GET.get("x_axis_name", "specialization") bar_chart_input = skill_demand_per_column(asc_ordering, y_var_names, limit_results, column) elif base_query == 'user_grades': user_id = request.GET.get('user_id', None) bar_chart_input = user_grades(user_id) elif base_query == 'courses_avg_grades': courses = request.GET.getlist('courses[]', []) print(courses) if courses: bar_chart_input = get_avg_course_names(courses) return bar_chart_input
5,344,287
def diff_hours(t1,t2): """ Number of hours between two dates """ return (t2-t1).days*hours_per_day + (t2-t1).seconds/seconds_per_hour
5,344,288
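# diff_hours above relies on module-level constants; with the usual values
# (hours_per_day = 24, seconds_per_hour = 3600) the example below yields 26.5.
import datetime

hours_per_day, seconds_per_hour = 24, 3600
t1 = datetime.datetime(2024, 1, 1, 0, 0)
t2 = datetime.datetime(2024, 1, 2, 2, 30)
print(diff_hours(t1, t2))                     # 26.5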
def fatorial(num=1, show=False):
    """
    Computes the factorial of a number.
    :param num: the number whose factorial is computed.
    :param show: (optional) whether to print the calculation steps.
    :return: the factorial value.
    """
    f = 1
    c = num
    if show:
        while c > 0:
            print(c, end='')
            print(' x ' if c > 1 else f' = {f}', end='')
            f *= c
            c -= 1
        return f
    while c > 0:
        f *= c
        c -= 1
    return f
5,344,289
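# Example calls for fatorial above:
print(fatorial(5))           # 120
fatorial(5, show=True)       # prints: 5 x 4 x 3 x 2 x 1 = 120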
def get_season(msg, info_fields): """find season in message""" seasonDICT = {'2016':['二零一六球季', '二零一六賽季', '2016球季', '2016賽季', '2016年', '2016'], '2017':['二零一七球季', '二零一七賽季', '2017球季', '2017賽季', '2017年', '2017'], '2018':['二零一八球季', '二零一八賽季', '2018球季', '2018賽季', '2018年', '2018'], '2019':['二零一九球季', '二零一九賽季', '2019球季', '2019賽季', '2019年', '2019'], '2020':['二零二零球季', '二零二零賽季', '2020球季', '2020賽季', '2020年', '2020']} for season_key in seasonDICT.keys(): for year in seasonDICT[season_key]: if year in msg: info_fields['season'] = season_key msg = msg.replace(year, '').strip() return msg, info_fields return msg, info_fields
5,344,290
def gray_img(img: 'numpy.ndarray'):
    """
    Convert a loaded image to grayscale.

    :param img: image array read with cv2.imread(imgPath)
    :return: the grayscale image
    """
    grayImage = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return grayImage
5,344,291
def draw_circles(window, points, radius, color): """ What comes in: -- An rg.RoseWindow -- A sequence of rg.Point objects -- A positive number -- A string that can be used as a RoseGraphics color What goes out: Nothing (i.e., None). Side effects: See draw_circles_picture.pdf in this project for pictures that may help you better understand the following specification: For each point in the given sequence of rg.Points, constructs and draws an rg.Circle centered at that point, with the given radius and fill color, on the given rg.RoseWindow. Renders but does NOT close the given rg.RoseWindow. Type hints: :type window: rg.RoseWindow :type points: [rg.Point] :type radius: int | float :type color: str """ # ------------------------------------------------------------------------- # DONE: 6. Implement and test this function. # Tests have been written for you (above). # ------------------------------------------------------------------------- for k in range(len(points)): circle = rg.Circle(points[k], radius) circle.fill_color = color circle.attach_to(window) window.render()
5,344,292
def main():
    """Show the tool window.

    If the window is already open, just close it.
    """
    global window
    window = NN_ToolWindow()

    if pm.window(window.window, exists=True):
        pm.deleteUI(window.window, window=True)
    else:
        window.create()
5,344,293
def class_acc(label_threshold_less): """ Wrapper function to return keras accuracy logger Args: label_threshold_less (int): all label IDs strictly less than this number will be ignored in class accuracy calculations Returns: argument_candidate_acc (function) """ def argument_candidate_acc(y_true, y_pred): """ Function which returns argument candidate accuracy using the Keras backend Args: y_true (np.ndarray): true labels y_pred (np.ndarray): predicted labels Returns: class_accuracy (int): simple accuracy of argument candidates """ class_id_true = K.cast(y_true, 'int64') class_id_preds = K.argmax(y_pred, axis=-1) accuracy_mask = K.cast(K.less(class_id_preds, label_threshold_less), 'float32') accuracy_mask = 1 - accuracy_mask class_acc_tensor = ( K.cast(K.equal(class_id_true, class_id_preds), 'float32') * accuracy_mask) class_accuracy = (K.sum(class_acc_tensor) / K.maximum(K.sum(accuracy_mask), 1)) return class_accuracy return argument_candidate_acc
5,344,294
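# Hypothetical check of class_acc above using the Keras backend directly: predictions
# with ids below 2 (e.g. padding / "O" tags) are excluded from the accuracy. Shapes
# and values are made up; shown with the tf.keras backend as an assumption.
import numpy as np
from tensorflow.keras import backend as K

metric = class_acc(label_threshold_less=2)
y_true = K.constant([0, 2, 3, 2], dtype="float32")
y_pred = K.constant(np.eye(4)[[1, 2, 3, 3]], dtype="float32")  # predicted ids 1, 2, 3, 3
print(K.eval(metric(y_true, y_pred)))   # position 0 ignored (pred id 1 < 2) -> 2/3 correct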
def left_turn(degree: float): """ Turn turtle left (counter-clockwise) by \"degree\" degree. :param degree: the degree to turn """ _check_turtle() _turtle.left_turn(degree)
5,344,295
def write_wordsearch_svg(filename, grid, wordlist): """Save the wordsearch grid as an SVG file to filename.""" width, height = 1000, 1414 with open(filename, 'w') as fo: svg_preamble(fo, width, height) y0, svg_grid = grid_as_svg(grid, width, height) print(svg_grid, file=fo) # If there's room print the word list. if y0 + 25 * len(wordlist) // 2 < height: print(wordlist_svg(wordlist, width, height, y0), file=fo) print('</svg>', file=fo)
5,344,296
def reduce_30Hz(meas_run_30Hz, ref_run_30Hz, ref_data_60Hz, template_30Hz, scan_index=1, template_reference=None): """ Perform 30Hz reduction @param meas_run_30Hz: run number of the data we want to reduce @param ref_run_30Hz: run number of the reference data, take with the same config @param ref_data_60Hz: file path of the reduce data file at 60Hz @param template_30Hz: file path of the template file for 30Hz @param scan_index: scan index to use within the template. """ # Load the template template_data = read_template(template_30Hz, scan_index) # Reduce the quartz at 30Hz ref_ws_30Hz = api.LoadEventNexus("REF_L_%s" % ref_run_30Hz) # Reduce the sample data at 30Hz meas_ws_30Hz = api.LoadEventNexus("REF_L_%s" % meas_run_30Hz) # Load the 60Hz reference data data_60Hz = np.loadtxt(ref_data_60Hz).T return reduce_30Hz_from_ws(meas_ws_30Hz, ref_ws_30Hz, data_60Hz, template_data, scan_index=scan_index, template_reference=template_reference)
5,344,297
def input_bed_message(driver):
    """Fill in the bed information."""
    driver.find_element_class_name_and_click('床铺信息')
    driver.find_element_class_name_and_click('添加床铺')
    driver.find_element_id_and_click_wait(driver.ele.FBXT_tv_title_publish_lu_bed_add_item)
    driver.find_element_id_and_click_wait(driver.ele.FBXT_btn_submit_publish_lu_bed_add)
    driver.find_element_id_and_click_wait(driver.ele.actionbarwidget_back)
5,344,298
def test_sgts_in_database(arg): """Whitelisted SGTs must have Dashboard and ISE IDs in the DB; Default SGTs must have ISE IDs in the DB""" success = True default_vals = [d['value'] for d in sync._config.ise_default_sgts] sgts = Tag.objects.order_by("tag_number") for s in sgts: ds = s.tagdata_set.all() if s.tag_number in sync._config.whitelisted_sgts: for d in ds: if d.source_id is None or d.source_id == "": success = False print("1 (FAIL) :", model_to_dict(s)) else: print("1 (SUCCESS) :", model_to_dict(s)) if s.tag_number in default_vals: for d in ds: if d.iseserver and (d.source_id is None or d.source_id == ""): success = False print("2 (FAIL) :", model_to_dict(s)) else: print("2 (SUCCESS) :", model_to_dict(s)) if len(sgts) != len(sync._config.whitelisted_sgts + sync._config.ise_default_sgts + sync._config.meraki_default_sgts): success = False print("3 (FAIL) : ", sgts, (sync._config.whitelisted_sgts + sync._config.ise_default_sgts + sync._config.meraki_default_sgts)) else: print("3 (SUCCESS) : ", sgts, (sync._config.whitelisted_sgts + sync._config.ise_default_sgts + sync._config.meraki_default_sgts)) assert success
5,344,299