def Str(*args):
    """(s1, s2, ...) -> match s1 or s2 or ..."""
    if len(args) == 1:
        return Str1(args[0])
    return Expression.Alt(tuple(map(Str, args)))
10,400
def image_noise_gaussian(image):
    """ Adds Gaussian noise to the provided image """
    float_img = image.astype(np.float)
    gauss = np.random.normal(0.0, 4.0, (IMG_SIZE, IMG_SIZE, IMG_CHANNELS))
    gauss = gauss.reshape(IMG_SIZE, IMG_SIZE, IMG_CHANNELS).astype(np.float)
    result = float_img + gauss
    result = np.clip(result, 0, 255)
    result = result.astype(np.uint8)
    return result
10,401
def argsort(x: T.FloatTensor, axis: int = None) -> T.LongTensor:
    """
    Get the indices of a sorted tensor.
    If axis=None this flattens x.

    Args:
        x: A tensor.
        axis: The axis of interest.

    Returns:
        tensor (of ints): indices of sorted tensor
    """
    if axis is None:
        return flatten(x).sort()[1]
    else:
        return x.sort(dim=axis)[1]
10,402
def dirPickledSize(obj, exclude=[]):
    """For each attribute of obj (excluding those specified and those that start with '__'),
    compute the size using getPickledSize(obj) and return as a pandas Series of KBs"""
    return pd.Series({o: getPickledSize(getattr(obj, o)) / 1024.
                      for o in dir(obj)
                      if not np.any([o[:2] == '__', o in exclude, getattr(obj, o) is None])})
10,403
def audit_log() -> Any:
    """
    List all events related to the connected member.
    """
    if "member_id" not in session:
        abort(404)

    return render_template(
        "audit_log.html",
        full_audit_log=fetch_audit_log(session["member_id"]),
    )
10,404
def load_comparisonXL(method, evaluate="train", dropna=True):
    """Load comparison table."""
    if evaluate == "test":
        e = "['Test']"
    elif evaluate == "in bag":
        e = "['In Bag']"
    elif evaluate == "out of bag":
        e = "['Out of Bag']"
    else:
        e = "['Train']"

    # Import methods
    table = []
    for i in method:
        table.append(pd.read_excel(i + ".xlsx"))

    # Concatenate table
    df = pd.DataFrame()
    for i in range(len(table)):
        df = pd.concat([df, table[i].loc[table[i]['evaluate'] == e].T.squeeze()], axis=1, sort=False)
    df = df.T.drop(columns="evaluate")

    # Remove [ ] from string
    for i in range(len(df)):
        for j in range(len(df.T)):
            if type(df.iloc[i, j]) is str:
                df.iloc[i, j] = df.iloc[i, j][2:-2]

    # Reset index and add methods column
    method_name = []
    for i in range(len(method)):
        name_i = method[i].rsplit('/', 1)[1]
        method_name.append(name_i)
    df = df.reset_index()
    df = pd.concat([pd.Series(method_name, name="method"), df], axis=1, sort=False)
    df = df.drop("index", 1)
    #df = df.set_index("method")

    # drop columns with just nans
    if dropna is True:
        df = df.dropna(axis=1, how='all')
    return df
10,405
def display_countries():
    """
    Display all countries in Yahoo Finance data. [Source: Finance Database]
    """
    for country in financedatabase_model.get_countries():
        print(country)
10,406
def verify_user_password(user: User, password: str) -> bool:
    """Verify User's password with the one that was given on login page."""
    return pwd_context.verify(password, user.password)
10,407
def __valid_ddb_response_q(response):
    """private function to validate a given DynamoDB query response."""
    if 'ResponseMetadata' in response:
        if 'HTTPStatusCode' in response['ResponseMetadata']:
            if response['ResponseMetadata']['HTTPStatusCode'] == 200:
                return True
    return False
10,408
def GetPipelineResultsPathInGCS(artifacts_path):
    """Gets a full Cloud Storage path to a pipeline results YAML file.

    Args:
        artifacts_path: string, the full Cloud Storage path to the folder containing
            pipeline artifacts, e.g. 'gs://my-bucket/artifacts'.

    Returns:
        A string representing the full Cloud Storage path to the pipeline results
        YAML file.
    """
    return '{0}/results/results.yaml'.format(artifacts_path)
10,409
def Pnm_p(n, m, x):
    """Eq:II.77 """
    return lpmn(m, n, x)[1][-1, -1]
10,410
def _remove_empty_subspace(subspaces, n_clusters, m, P, centers, labels, scatter_matrices):
    """
    Check if after rotation and rearranging the dimensionalities a empty subspaces occurs.
    Empty subspaces will be removed for the next iteration. Therefore all necessary lists
    will be updated.
    :param subspaces: number of subspaces
    :param n_clusters:
    :param m: list containing number of dimensionalities for each subspace
    :param P: list containing projections for each subspace
    :param centers: list containing the cluster centers for each subspace
    :param labels: list containing cluster assignments for each subspace
    :param scatter_matrices: list containing scatter matrices for each subspace
    :return: subspaces, n_clusters, m, P, centers, labels, scatter_matrices
    """
    if 0 in m:
        np_m = np.array(m)
        empty_spaces = np.where(np_m == 0)[0]
        print(
            "[NrKmeans] ATTENTION:\nSubspaces were lost! Number of lost subspaces:\n" + str(
                len(empty_spaces)) + " out of " + str(len(m)))
        subspaces -= len(empty_spaces)
        n_clusters = [x for i, x in enumerate(n_clusters) if i not in empty_spaces]
        m = [x for i, x in enumerate(m) if i not in empty_spaces]
        P = [x for i, x in enumerate(P) if i not in empty_spaces]
        centers = [x for i, x in enumerate(centers) if i not in empty_spaces]
        labels = [x for i, x in enumerate(labels) if i not in empty_spaces]
        scatter_matrices = [x for i, x in enumerate(scatter_matrices) if i not in empty_spaces]
    return subspaces, n_clusters, m, P, centers, labels, scatter_matrices
10,411
def _VerifyExtensionHandle(message, extension_handle):
    """Verify that the given extension handle is valid."""
    if not isinstance(extension_handle, FieldDescriptor):
        raise KeyError('HasExtension() expects an extension handle, got: %s' %
                       extension_handle)

    if not extension_handle.is_extension:
        raise KeyError('"%s" is not an extension.' % extension_handle.full_name)

    if not extension_handle.containing_type:
        raise KeyError('"%s" is missing a containing_type.'
                       % extension_handle.full_name)

    if extension_handle.containing_type is not message.DESCRIPTOR:
        raise KeyError('Extension "%s" extends message type "%s", but this '
                       'message is of type "%s".' %
                       (extension_handle.full_name,
                        extension_handle.containing_type.full_name,
                        message.DESCRIPTOR.full_name))
10,412
def main(): """Evaluate model performance """ # construct the argument parse and parse the arguments args = argparse.ArgumentParser() args.add_argument("-i", "--input", required=True, help="path to input directory of images") args.add_argument("-m", "--model", required=True, help="path to input model") args = vars(args.parse_args()) # load the pre-trained network print("[INFO] loading pre-trained network...") model = load_model(args["model"]) # randomy sample a few of the input images image_paths = list(paths.list_images(args["input"])) image_paths = np.random.choice(image_paths, size=(10,), replace=False) # loop over the image paths for image_path in image_paths: # load the image and convert it to grayscale, then pad the image to ensure digits # caught only the border of the image are retained image = cv2.imread(image_path) gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) gray = cv2.copyMakeBorder(gray, 20, 20, 20, 20, cv2.BORDER_REPLICATE) # threshold the image to reveal the digits thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1] # find contours in the image, keeping only the four largest ones, # then sort them from left-to-right cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) cnts = imutils.grab_contours(cnts) cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:4] cnts = contours.sort_contours(cnts)[0] # initialize the output image as a "grayscale" image with 3 # channels along with the output predictions output = cv2.merge([gray] * 3) predictions = [] # loop over the contours for contour in cnts: # compute the bounding box for the contour then extract the digit (x, y, w, h) = cv2.boundingRect(contour) roi = gray[y - 5 : y + h + 5, x - 5 : x + w + 5] # pre-process the ROI and classify it then classify it roi = preprocess(roi, 28, 28) roi = np.expand_dims(img_to_array(roi), axis=0) / 255.0 pred = model.predict(roi).argmax(axis=1)[0] + 1 predictions.append(str(pred)) # draw the prediction on the output image cv2.rectangle(output, (x - 2, y - 2), (x + w + 4, y + h + 4), (0, 255, 0), 1) cv2.putText(output, str(pred), (x - 5, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 0), 2) # show the output image print("[INFO] captcha: {}".format("".join(predictions))) cv2.imshow("Output", output) cv2.waitKey()
10,413
def main(): """ Main function """ concrete_utim = None cm1 = None session_key = None logging.info("INIT rxQ") rx_queue = queue.Queue() logging.info("INIT txQ") tx_queue = queue.Queue() try: cm1 = ConnectivityManager() cm1.connect(dl_type=DataLinkManager.TYPE_QUEUE, rx=tx_queue, tx=rx_queue) concrete_utim = Utim() concrete_utim.connect(dl_type=DataLinkManager.TYPE_QUEUE, rx=rx_queue, tx=tx_queue) concrete_utim.run() data1 = [TopDataType.DEVICE, Tag.INBOUND.NETWORK_READY] print("cm1 - send {0}".format(data1)) cm1.send(data1) while True: data = cm1.receive() if data: print("RECEIVED DATA: {0}".format(data)) session_key = data[1] concrete_utim.stop() break print("RECEIVED SESSION KEY: {key}".format(key=session_key)) except ConnectivityConnectError: logging.error("Connectivity error") print("Connectivity error") except UtimConnectionException as er: logging.error(er) except UtimInitializationError as er: logging.error(er) print('Invalid UTIM_MASTER_KEY') except (KeyboardInterrupt, EOFError): logging.info('Program interrupted') print('Program interrupted') finally: if concrete_utim: concrete_utim.stop() if cm1: cm1.stop()
10,414
def _get_citekeys_action(elem, doc):
    """
    Panflute action to extract citationId from all Citations in the AST.
    """
    if not isinstance(elem, pf.Citation):
        return None
    manuscript_citekeys = global_variables["manuscript_citekeys"]
    manuscript_citekeys.append(elem.id)
    return None
10,415
def average_gradients(tower_grads):
    """Calculate the average gradient for each shared variable across all towers.

    Note that this function provides a synchronization point across all towers.

    Args:
        tower_grads: List of lists of (gradient, variable) tuples. The outer list
            is over individual gradients. The inner list is over the gradient
            calculation for each tower.
    Returns:
        List of pairs of (gradient, variable) where the gradient has been averaged
        across all towers.
    """
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # Note that each grad_and_vars looks like the following:
        #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
        grads = []
        for each_grad, _ in grad_and_vars:
            # Add 0 dimension to the gradients to represent the tower.
            expanded_g = tf.expand_dims(each_grad, 0)

            # Append on a 'tower' dimension which we will average over below.
            grads.append(expanded_g)

        # Average over the 'tower' dimension.
        grad = tf.concat(axis=0, values=grads)
        grad = tf.reduce_mean(grad, 0)

        # The variables are redundant because they are shared
        # across towers. So we will just return the first tower's pointer to
        # the Variable.
        weights = grad_and_vars[0][1]
        grad_and_var = (grad, weights)
        average_grads.append(grad_and_var)
    return average_grads
10,416
def compute_neighbours_probability_matrix(n_matrix, src, d_matrix, sigma_neigh):
    """Compute neighbours' probability matrix.

    Parameters
    -----------
    n_matrix : :py:class:`~numpy.ndarray` of :py:class:`~int`, shape (n_verts, n_neigh_max)
        The sets of neighbours.
    src : :py:class:`~numpy.ndarray` of :py:class:`~float`, shape (n_verts, 3)
        The coordinates of the points in the brain discretization.
    d_matrix : :py:class:`~numpy.ndarray` of :py:class:`~float`, shape (n_verts x n_verts)
        The Euclidean distance between the points in the brain discretization.
    sigma_neigh : :py:class:`~float`
        The standard deviation of the Gaussian distribution that defines
        the neighbours' probability.

    Returns
    --------
    np_matrix : :py:class:`~numpy.ndarray` of :py:class:`~float`, shape (n_verts, n_neigh_max)
        The neighbours' probability.
    """
    np_matrix = np.zeros(n_matrix.shape, dtype=float)
    for i in range(src.shape[0]):
        n_neig = len(np.where(n_matrix[i] > -1)[0])
        np_matrix[i, 0:n_neig] = \
            np.exp(-d_matrix[i, n_matrix[i, 0:n_neig]] ** 2 / (2 * sigma_neigh ** 2))
        np_matrix[i] = np_matrix[i] / np.sum(np_matrix[i])

    return np_matrix
10,417
def ar_cosmap(inmap):
    """
    Get the cosine map and off-limb pixel map using WCS.
    Generate a map of the solar disk that is 1 at disk center and goes radially outward
    as the cos(angle to LOS), which is = 2 at 60 degrees from LOS.
    Other outputs:
    - rrdeg: gives degrees from disk center
    - offlimb: map of 1=on-disk and 0=off-disk
    """
    ## Take off an extra half percent from the disk to get rid of limb effects
    fudge = 0.999
    #
    ## Get helioprojective_coordinates
    # Below is deprecated so commented out and updated
    #    xx, yy = wcs.convert_pixel_to_data(inmap.data.shape,
    #                                       [inmap.meta["CDELT1"], inmap.meta["CDELT2"]],
    #                                       [inmap.meta["CRPIX1"], inmap.meta["CRPIX2"]],
    #                                       [inmap.meta["CRVAL1"], inmap.meta["CRVAL2"]])
    x, y = (np.meshgrid(*[np.arange(v.value) for v in inmap.dimensions]) * u.pixel)
    hpc = inmap.pixel_to_world(x, y)  # NEED TO CHECK RE WHAT ORIGIN TO USE, origin=1)
    xx = hpc.Tx.value
    yy = hpc.Ty.value
    rr = ((xx**2.) + (yy**2.))**(0.5)
    #
    coscor = np.copy(rr)
    rrdeg = np.arcsin(coscor / inmap.meta["RSUN_OBS"])
    coscor = 1. / np.cos(rrdeg)
    wgt = np.where(rr > (inmap.meta["RSUN_OBS"]*fudge))
    coscor[wgt] = 1.
    #
    offlimb = np.copy(rr)
    wgtrr = np.where(rr >= (inmap.meta["RSUN_OBS"]*fudge))
    offlimb[wgtrr] = 0.
    wltrr = np.where(rr < (inmap.meta["RSUN_OBS"]*fudge))
    offlimb[wltrr] = 1.
    #
    return coscor, rrdeg, offlimb
10,418
def as_array(request: SubRequest) -> bool:
    """
    Boolean fixture to support ExtensionDtype _from_sequence method testing.
    """
    b = request.param
    assert isinstance(b, bool)
    return b
10,419
def test_day_generation_empty():
    """Test that empty day generation is as expected"""
    cal = models.Calendar()
    cal.title = "Test"
    cal.start_date = date(2021, 1, 4)
    cal.end_date = date(2021, 1, 15)
    cal.monday = True
    cal.tuesday = True
    cal.wednesday = True
    cal.thursday = True
    cal.friday = True
    cal.saturday = False
    cal.sunday = False
    cal.save()

    expected = {
        date(2021, 1, 4): None,
        date(2021, 1, 5): None,
        date(2021, 1, 6): None,
        date(2021, 1, 7): None,
        date(2021, 1, 8): None,
        date(2021, 1, 11): None,
        date(2021, 1, 12): None,
        date(2021, 1, 13): None,
        date(2021, 1, 14): None,
        date(2021, 1, 15): None,
    }

    actual = cal.get_date_letter_map()
    assert actual == expected
10,420
def gram_matrix(x):
    """Create the gram matrix of x."""
    b, c, h, w = x.shape
    phi = x.view(b, c, h * w)
    return phi.bmm(phi.transpose(1, 2)) / (c * h * w)
10,421
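A minimal usage sketch for gram_matrix above; the feature-map tensors and the style-loss pairing are hypothetical additions, not part of the original snippet.

import torch

# hypothetical feature maps: batch=1, 8 channels, 16x16 spatial grid
feat_a = torch.randn(1, 8, 16, 16)
feat_b = torch.randn(1, 8, 16, 16)

# each Gram matrix has shape (1, 8, 8); their MSE is a common style-loss term
style_loss = torch.nn.functional.mse_loss(gram_matrix(feat_a), gram_matrix(feat_b))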
def create_presentation(path):
    """Creates ppt report from files in the specified folder.
    """
    import os
    import pandas as pd
    from datetime import date
    from pptx import Presentation
    from pptx.util import Inches, Pt

    report = Presentation()
    #report = Presentation('test_data//templates//ppt_template.pptx')
    #pic = slide.shapes.add_picture('hts_data//templates//company_logo.png', left = Inches(3), top = Inches(0.2))

    slide = report.slides.add_slide(report.slide_layouts[6])
    subtitle = slide.shapes.add_textbox(left = Inches(5.), top = Inches(3.5),
                                        width = Inches(3), height = Inches(0.5),).text_frame
    p = subtitle.paragraphs[0]
    run = p.add_run()
    run.text = 'Technical Report\nGenerated on {:%m-%d-%Y}'.format(date.today())
    font = run.font
    font.size = Pt(18)

    files_list = os.listdir(path)
    for myfile in files_list:
        if 'heatmap.png' in myfile:
            slide = report.slides.add_slide(report.slide_layouts[6])
            left = top = Inches(0.7)
            height = Inches(6)
            pic = slide.shapes.add_picture(path + '//' + myfile, left, top,
                                           width = Inches(5.8), height = Inches(4))
        elif '.png' in myfile and 'heatmap.png' not in myfile:
            slide = report.slides.add_slide(report.slide_layouts[6])
            subtitle = slide.shapes.add_textbox(left = Inches(0.5), top = Inches(0.3),
                                                width = Inches(2), height = Inches(0.5)).text_frame
            subtitle.text = myfile
            left = top = Inches(0.7)
            pic = slide.shapes.add_picture(path + '//' + myfile, left, top = Inches(0.8), height = Inches(6))
            left = Inches(0.7)
        elif 'csv' in myfile:
            try:
                table = pd.read_csv(path + '//' + myfile)
                if table.shape[0] < 30:
                    slide = report.slides.add_slide(report.slide_layouts[6])
                    subtitle = slide.shapes.add_textbox(left = Inches(0.5), top = Inches(0.3),
                                                        width = Inches(2), height = Inches(0.5)).text_frame
                    subtitle.text = myfile
                    slide_table = df_to_table(table, slide, left = Inches(0.3), top = Inches(1),
                                              width = Inches(12.5), height = Inches(0.3))
                    left = Inches(0.7)
            except Exception as e:
                print(e)
    return report
10,422
def flake8_package():
    """Style only checks files that have been modified.

    This fixture makes a small change to the ``flake8`` mock package,
    yields the filename, then undoes the change on cleanup.
    """
    repo = spack.repo.Repo(spack.paths.mock_packages_path)
    filename = repo.filename_for_package_name("flake8")
    tmp = filename + ".tmp"

    try:
        shutil.copy(filename, tmp)
        package = FileFilter(filename)
        package.filter("state = 'unmodified'", "state = 'modified'", string=True)
        yield filename
    finally:
        shutil.move(tmp, filename)
10,423
async def overview(ctx):
    """Describe the rules of the dungeon"""
    rules = """
    I am Alaveus. For the meager price of 10 chalupas, I can transport you deep
    into the heart of a mythic dungeon where treasure and glory await those who
    dare enter!
    """
    await ctx.send(rules)
10,424
def targz_pack(tgz_name: Path, source_path: Path):
    """
    Create a new .tar.gz from the specified folder

    Examples:
        history/current -> history/current.tar.gz
        history/generated/current -> history/generated/current.tar.gz
    """
    with tarfile.open(tgz_name, "w:gz") as tar:
        tar.add(source_path, arcname=source_path.name)
10,425
def publish_engine_py(dirs):
    """ Publish the Python RESTler engine as .py files.
    Will also do a quick compilation of the files to verify that no exception occurs
    """
    # Copy files to a build directory to test for basic compilation failure
    print("Testing compilation of Python files...")
    try:
        copy_python_files(dirs.repository_root_dir, dirs.engine_build_dir)
        output = subprocess.run(f'{dirs.python_path} -m compileall {dirs.engine_build_dir}',
                                shell=True, capture_output=True)
        if output.stderr:
            print("Build failed!")
            print(output.stderr)
            sys.exit(-1)
        stdout = str(output.stdout)
        errors = get_compilation_errors(stdout)
        if errors:
            for err in errors:
                print("\nError found!\n")
                print(err.replace('\\r\\n', '\r\n'))
            print("Build failed!")
            sys.exit(-1)
    finally:
        print("Removing compilation build directory...")
        shutil.rmtree(dirs.engine_build_dir)

    # Copy files to drop
    copy_python_files(dirs.repository_root_dir, dirs.engine_dest_dir)
10,426
async def async_setup_sdm_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
    """Set up the client entities."""

    device_manager: DeviceManager = hass.data[DOMAIN][DATA_DEVICE_MANAGER]
    entities = []
    for device in device_manager.devices.values():
        if ThermostatHvacTrait.NAME in device.traits:
            entities.append(ThermostatEntity(device))
    async_add_entities(entities)
10,427
def wall_clock_time_fedavg_vs_fedfs() -> None:
    """Comparision of FedAvg vs FedFS."""
    bar_chart(
        y_values=[
            np.array(
                [
                    RESULTS_WALL_CLOCK_TIME["fedavg-14"],
                    RESULTS_WALL_CLOCK_TIME["fedavg-16"],
                ]
            ),
            np.array(
                [
                    RESULTS_WALL_CLOCK_TIME["fedfs-t14"],
                    RESULTS_WALL_CLOCK_TIME["fedfs-16"],
                ]
            ),
        ],
        bar_labels=["FedAvg", "FedFS"],
        x_label="Timeout",
        x_tick_labels=["T=14", "T=16"],
        y_label="Completion time",
        filename="fmnist-time_fedavg_vs_fedfs",
    )
10,428
def assign(ctx):
    """Assign spatial objects to brain regions"""
    pass
10,429
def connect_db(app):
    """Connect to our database"""
    db.app = app
    db.init_app(app)
10,430
def open_file(path, mode):
    """
    Attempts to open file at path.

    Tried up to max_attempts times because of intermittent permission errors on Windows
    """
    max_attempts = 100
    f = None
    for _ in range(max_attempts):
        try:
            f = open(path, mode)
        except PermissionError:
            continue
        break
    return f
10,431
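A small usage sketch for open_file above; the path and mode are made up for illustration.

# hypothetical call: retry-tolerant open of a log file on Windows
f = open_file("C:/temp/example.log", "r")
if f is not None:
    contents = f.read()
    f.close()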
def get_sts_token(current_refresh_token):
    """
    Retrieves an authentication token.
    :param current_refresh_token: Refresh token retrieved from a previous authentication,
        used to retrieve a subsequent access token. If not provided (i.e. on the initial
        authentication), the password is used.
    """
    url = 'https://{}:{}/{}'.format(opts.authHostname, opts.authPort, auth_path)

    if not current_refresh_token:
        # First time through, send password
        data = {'username': opts.user, 'password': opts.password, 'grant_type': 'password',
                'takeExclusiveSignOnControl': True, 'scope': scope}
        print("Sending authentication request with password to ", url, "...")
    else:
        # Use the given refresh token
        data = {'username': opts.user, 'refresh_token': current_refresh_token,
                'grant_type': 'refresh_token', 'takeExclusiveSignOnControl': True}
        print("Sending authentication request with refresh token to ", url, "...")

    try:
        r = requests.post(url,
                          headers={'Accept': 'application/json'},
                          data=data,
                          auth=(opts.clientid, client_secret),
                          verify=True)
    except requests.exceptions.RequestException as e:
        print('RDP-GW authentication exception failure:', e)
        return None, None, None

    if r.status_code != 200:
        print('RDP-GW authentication result failure:', r.status_code, r.reason)
        print('Text:', r.text)
        if r.status_code in [401, 400] and current_refresh_token:
            # Refresh token may have expired. Try again using machinedID + password.
            return get_sts_token(None)
        return None, None, None

    auth_json = r.json()
    print("RDP-GW Authentication succeeded. RECEIVED:")
    print(json.dumps(auth_json, sort_keys=True, indent=2, separators=(',', ':')))

    return auth_json['access_token'], auth_json['refresh_token'], auth_json['expires_in']
10,432
async def device_climate_fan(device_climate_mock):
    """Test thermostat with fan device."""
    return await device_climate_mock(CLIMATE_FAN)
10,433
def make_training_config(args):
    """ Create training config by parsing args from command line and YAML config file,
    filling the rest with default values.

    Args
        args : Arguments parsed from command line.

    Returns
        config : Dictionary containing training configuration.
    """
    # Parse the configuration file.
    config = {}
    if args.config:
        config = parse_yaml(args.config)
    config = set_defaults(config, default_training_config)

    # Additional config; start from this so it can be overwritten by the other command line options.
    if args.o:
        config = parse_additional_options(config, args.o)

    if args.backbone:
        config['backbone']['name'] = args.backbone
    if args.generator:
        config['generator']['name'] = args.generator

    # Backbone config.
    if args.freeze_backbone:
        config['backbone']['details']['freeze'] = args.freeze_backbone
    if args.backbone_weights:
        config['backbone']['details']['weights'] = args.backbone_weights

    # Generator config.
    if args.random_transform:
        config['generator']['details']['transform_generator'] = 'random'
    if args.random_visual_effect:
        config['generator']['details']['visual_effect_generator'] = 'random'
    if args.batch_size:
        config['generator']['details']['batch_size'] = args.batch_size
    if args.group_method:
        config['generator']['details']['group_method'] = args.group_method
    if args.shuffle_groups:
        config['generator']['details']['shuffle_groups'] = args.shuffle_groups
    if args.image_min_side:
        config['generator']['details']['image_min_side'] = args.image_min_side
    if args.image_max_side:
        config['generator']['details']['image_max_side'] = args.image_max_side

    # Train config.
    if args.gpu:
        config['train']['gpu'] = args.gpu
    if args.epochs:
        config['train']['epochs'] = args.epochs
    if args.steps:
        config['train']['steps_per_epoch'] = args.steps
    if args.lr:
        config['train']['lr'] = args.lr
    if args.multiprocessing:
        config['train']['use_multiprocessing'] = args.multiprocessing
    if args.workers:
        config['train']['workers'] = args.workers
    if args.max_queue_size:
        config['train']['max_queue_size'] = args.max_queue_size
    if args.weights:
        config['train']['weights'] = args.weights

    return config
10,434
def main(): """Main""" # Create cache dir if it does not exist cachedir = '%s/cache' % os.path.dirname(os.path.realpath(__file__)) if not os.path.exists(cachedir): os.makedirs(cachedir) # Check if MANAGED_INSTALL_REPORT exists if not os.path.exists(MANAGED_INSTALL_REPORT): print '%s is missing.' % MANAGED_INSTALL_REPORT install_report = {} else: install_report = dict_from_plist(MANAGED_INSTALL_REPORT) # Collect Errors, Warnings (as JSON?) # EndTime, StartTime, ManifestName, (Conditions->catalogs?) # ManagedInstallVersion # Some statistics # pylint: disable=E1103 report_list = {} items = ['EndTime', 'StartTime', 'ManifestName', 'ManagedInstallVersion', 'Errors', 'Warnings', 'RunType'] for item in items: if install_report.get(item): report_list[item] = install_report[item] # pylint: enable=E1103 if DEBUG: PP.pprint(report_list) # Write report to cache plistlib.writePlist(report_list, "%s/munkireport.plist" % cachedir)
10,435
def main(): """Generates a results file with the entities names """ #process_results_individual('corpora/go_phenotype_xml_100/', 'results/model_ontologies_go_phenotype_results_100.txt', # 'results/go_phenotype_100_relations_names.tsv', 'results/go_phenotype_100_relations_identifiers.tsv') #process_results_individual('corpora/drug_disease_xml_100/', 'results/model_ontologies_drug_disease_results_100.txt', # 'results/drug_disease_100_relations_names.tsv', # 'results/drug_disease_100_relations_identifiers.tsv') process_results_individual('corpora/drug_disease_xml/', 'results/model_ontologies_drug_disease_results.txt', 'results/drug_disease_relations_names.tsv', 'results/drug_disease_relations_identifiers.tsv') process_results_individual('corpora/go_phenotype_xml/', 'results/model_ontologies_go_phenotype_results.txt', 'results/go_phenotype_relations_names.tsv', 'results/go_phenotype_relations_identifiers.tsv') join_results('results', 'joint_results') return
10,436
def _crown_relu_relaxer(inp: Bound) -> Tuple[LinFun, LinFun]:
    """Obtain the parameters of a linear ReLU relaxation as in CROWN.

    This relaxes the ReLU with the adaptive choice of lower bounds as described
    for CROWN-ada in https://arxiv.org/abs/1811.00866.

    Args:
        inp: Input to the ReLU.
    Returns:
        lb_linfun, ub_linfun: Linear functions bounding the ReLU
    """
    inp_lower, inp_upper = inp.lower, inp.upper
    relu_on = (inp_lower >= 0.)
    relu_amb = jnp.logical_and(inp_lower < 0., inp_upper >= 0.)
    ub_slope = relu_on.astype(jnp.float32)
    ub_slope += jnp.where(relu_amb,
                          inp_upper / jnp.maximum(inp_upper - inp_lower, 1e-12),
                          jnp.zeros_like(inp_lower))
    ub_offset = jnp.where(relu_amb, - ub_slope * inp_lower,
                          jnp.zeros_like(inp_lower))
    lb_slope = (ub_slope >= 0.5).astype(jnp.float32)
    lb_offset = jnp.zeros_like(inp_lower)

    return (eltwise_linfun_from_coeff(lb_slope, lb_offset),
            eltwise_linfun_from_coeff(ub_slope, ub_offset))
10,437
def run_single_measurement(model_name, produce_model, run_model, teardown, inp,
                           criterion, extra_params, use_dtr, use_profiling):
    """
    This function initializes a model and performs a single measurement of the model on the given input.

    While it might seem most reasonable to initialize
    the model outside of the loop, DTR's logs have shown
    that certain constants in the model persist between loop iterations;
    performing these actions in a separate *function scope* turned out to be the only
    way to prevent having those constants hang around.

    Returns a dict of measurements
    """
    torch.cuda.reset_max_memory_allocated()
    # resetting means the count should be reset to
    # only what's in scope, meaning only the input
    input_mem = torch.cuda.max_memory_allocated()
    model = produce_model(extra_params=extra_params)
    params = []
    for m in model:
        if hasattr(m, 'parameters'):
            params.extend(m.parameters())

    model_mem = torch.cuda.max_memory_allocated()

    optimizer = torch.optim.SGD(model[0].parameters(), 1e-3, momentum=0.9, weight_decay=1e-4)
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)

    # start timing
    torch.cuda.synchronize()
    start_time = time.time()
    if use_dtr:
        torch.reset_profile()
    start.record()
    # with torch.autograd.profiler.profile(use_cuda=True) as prof:
    run_model(criterion, *model, *inp, optimizer=optimizer)
    end.record()
    start_sync = time.time()
    torch.cuda.synchronize()
    end_sync = time.time()
    end_time = time.time()
    # end timing

    if use_dtr:
        # operators-only time, tracked by DTR
        cuda_time = torch.compute_time()

    base_compute_time = -1
    remat_compute_time = -1
    search_time = -1
    cost_time = -1
    if use_profiling:
        base_compute_time = torch.base_compute_time()
        remat_compute_time = torch.remat_compute_time()
        search_time = torch.search_time()
        cost_time = torch.cost_time()
        torch.reset_profile()

    total_mem = torch.cuda.max_memory_allocated()
    teardown(*model)
    torch.cuda.reset_max_memory_allocated()
    del model

    if use_dtr:
        torch.toggle_log(False)

    del params

    batch_size = len(inp[0])
    ips = batch_size / (end_time - start_time)

    result = {
        'time': end_time - start_time,
        'sync_time': end_sync - start_sync,
        'gpu_time': start.elapsed_time(end),
        'input_mem': input_mem,
        'model_mem': model_mem,
        'total_mem': total_mem,
        'base_compute_time': base_compute_time,
        'remat_compute_time': remat_compute_time,
        'search_time': search_time,
        'cost_time': cost_time,
        'batch_size': batch_size,
        'ips': ips
    }
    if use_dtr:
        result['cuda_time'] = cuda_time
    else:
        result['cuda_time'] = -1.0

    return result
10,438
def make_noise(fid, snr, decibels=True):
    """Given a synthetic FID, generate an array of normally distributed complex noise
    with zero mean and a variance that abides by the desired SNR.

    Parameters
    ----------
    fid : numpy.ndarray
        Noiseless FID.
    snr : float
        The signal-to-noise ratio.
    decibels : bool, default: True
        If `True`, the snr is taken to be in units of decibels. If `False`, it is taken
        to be simply the ratio of the singal power and noise power.

    Returns
    -------
    noise : numpy.ndarray
    """
    components = [
        (fid, 'fid', 'ndarray'),
        (snr, 'snr', 'float'),
        (decibels, 'decibels', 'bool'),
    ]
    ArgumentChecker(components)

    size = fid.size
    shape = fid.shape

    # Compute the variance of the noise
    if decibels:
        var = np.real((np.sum(np.abs(fid) ** 2)) / (size * (20 ** (snr / 10))))
    else:
        var = np.real((np.sum(np.abs(fid) ** 2)) / (2 * size * snr))

    # Make a number of noise instances and check which two are closest
    # to the desired variance.
    # These two are then taken as the real and imaginary noise components
    instances = []
    var_discrepancies = []
    for _ in range(100):
        instance = nrandom.normal(loc=0, scale=np.sqrt(var), size=shape)
        instances.append(instance)
        var_discrepancies.append(np.abs(np.var(instances) - var))

    # Determine which instance's variance is the closest to the desired
    # variance
    first, second, *_ = np.argpartition(var_discrepancies, 1)

    # The noise is constructed from the two closest arrays in a variance-sense
    # to the desired SNR
    return instances[first] + 1j * instances[second]
10,439
def viterbi(prob_matrix):
    """ find the most likely sequence of labels using the viterbi algorithm on prob_matrix """
    TINY = 1e-6    # to avoid NaNs in logs

    # if prob_matrix is 1D, make it 2D
    if len(np.shape(prob_matrix)) == 1:
        prob_matrix = [prob_matrix]

    length = len(prob_matrix)

    probs = np.zeros_like(prob_matrix)
    backpt = np.ones_like(prob_matrix, dtype=np.int32) * -1

    for i in [0, 1, 2, 3, 4]:
        probs[0][i] = np.log(prob_matrix[0][i] + TINY)

    # {B, M, E, S} <=== 0:begin, 1:middle, 2:end, 3:single
    for t in range(1, length):
        # E, S -> B | B, M -> M | B, M -> E | E, S -> S
        previous_of = [[0, 0], [3, 4], [1, 2], [1, 2], [3, 4]]
        for i in range(5):
            prevs = previous_of[i]
            max_id = prevs[np.argmax([probs[t-1][prevs[0]], probs[t-1][prevs[1]]])]
            backpt[t][i] = max_id
            probs[t][i] = np.log(prob_matrix[t][i] + TINY) + probs[t-1][max_id]

    seq = np.ones(length, 'int32') * -1
    #print(probs[length-1])
    seq[length-1] = np.argmax(probs[length-1])
    #print(seq[length-1])
    max_prob = probs[length-1][seq[length-1]]
    for t in range(1, length):
        seq[length-1-t] = backpt[length-t][seq[length-t]]

    return seq
10,440
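A toy invocation of the viterbi function above; the probability matrix is invented and simply has five columns to match the five states the function iterates over.

import numpy as np

# hypothetical per-step probabilities over the five states (rows = time steps)
toy_probs = np.array([
    [0.7, 0.1, 0.1, 0.05, 0.05],
    [0.1, 0.6, 0.2, 0.05, 0.05],
    [0.1, 0.2, 0.6, 0.05, 0.05],
])
print(viterbi(toy_probs))  # prints the most likely state index for each time step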
def disp_test_res(result_list, base_list, full=True, paired_test=True):
    """ Read in a result list and trace out the disp_curve_list for different methods """
    # initialize
    constraint = "DP"
    err_bar = False

    # First calcuate the baseline loss vector
    base_res = base_list[0]
    dataset = base_res['dataset']
    if dataset == 'law_school':
        x, a, y = parser.clean_lawschool_full()
    elif dataset == 'communities':
        x, a, y = parser.clean_communities_full()
    elif dataset == 'adult':
        x, a, y = parser.clean_adult_full()
    if not full:
        x, a, y = run_exp.subsample(x, a, y, 2000)
    _, _, _, x_test, a_test, y_test = run_exp.train_test_split_groups(x, a, y, random_seed=DATA_SPLIT_SEED)
    n = len(y_test)
    loss = base_res['loss']
    if paired_test:
        base_pred = base_res['base_test_eval']['pred']
        base_loss_vec = evaluate.loss_vec2(base_pred, y_test, loss)
        base_mean_loss = np.mean(base_loss_vec)
        print(base_mean_loss)

    for result in result_list:
        train_eval = result['train_eval']
        test_eval = result['test_eval']
        constraint = result['constraint']
        learner = result['learner']
        dataset = result['dataset']
        eps_vals = train_eval.keys()

        train_disp_dic = {}
        test_disp_dic = {}
        train_err_dic = {}
        test_err_dic = {}
        test_loss_std_dic = {}
        test_disp_dev_dic = {}

        linestyle = '-'
        if learner == 'SVM_LP':
            color = 'orange'
            err_bar = True
        elif learner == 'OLS':
            color = 'red'
            err_bar = True
        elif learner[:2] == "RF":
            color = 'brown'
            err_bar = True
        elif learner == "XGB Classifier":
            color = 'blue'
            err_bar = True
            linestyle = '--'
        elif learner == "XGB Regression":
            color = 'red'
            err_bar = True
            linestyle = '--'
        elif learner == "LR":
            color = "blue"
            err_bar = True
        else:
            color = 'tan'
            err_bar = True

        for eps in eps_vals:
            if constraint == "DP":
                train_disp = train_eval[eps]["DP_disp"]
                test_disp = test_eval[eps]["DP_disp"]
            elif constraint == "QEO":
                train_disp = train_eval[eps]["QEO_disp"]
                test_disp = test_eval[eps]["QEO_disp"]
            else:
                raise Exception('Constraint not supported: ', str(constraint))

            train_disp_dic[eps] = train_disp
            test_disp_dic[eps] = test_disp

            if paired_test:
                test_total_pred = test_eval[eps]['pred']
                test_res_weights = test_eval[eps]['classifier_weights']
                weighted_loss_vec = evaluate.loss_vec(test_total_pred, y_test, test_res_weights, loss)
                diff_vec = weighted_loss_vec - base_loss_vec
                loss_mean, loss_std = norm.fit(diff_vec)
                test_loss_std_dic[eps] = loss_std / np.sqrt(n)
            else:
                test_loss_std_dic[eps] = test_eval[eps]['loss_std']

            test_disp_dev_dic[eps] = test_eval[eps]['disp_std']

            if loss == "square":
                # taking the RMSE
                train_err_dic[eps] = np.sqrt(train_eval[eps]['weighted_loss'])
                test_err_dic[eps] = np.sqrt(test_eval[eps]['weighted_loss']) - np.sqrt(base_mean_loss)
            else:
                train_err_dic[eps] = (train_eval[eps]['weighted_loss'])
                test_err_dic[eps] = (test_eval[eps]['weighted_loss'] - base_mean_loss)

        if _PARETO:
            pareto_epsilons_train = convex_env_train(train_disp_dic, train_err_dic)
            pareto_epsilons_test = convex_env_test(pareto_epsilons_train, test_disp_dic, test_err_dic)
        else:
            pareto_epsilons_train = eps_vals
            pareto_epsilons_test = eps_vals

        # taking the pareto frontier
        train_disp_list = [train_disp_dic[k] for k in pareto_epsilons_train]
        test_disp_list = [test_disp_dic[k] for k in pareto_epsilons_test]
        train_err_list = [train_err_dic[k] for k in pareto_epsilons_train]
        test_err_list = [test_err_dic[k] for k in pareto_epsilons_test]

        # Getting error bars
        if loss == "square":
            err_upperconf = [np.sqrt(test_eval[k]['weighted_loss'] + 2 * test_loss_std_dic[k])
                             - np.sqrt(test_eval[k]['weighted_loss']) for k in pareto_epsilons_test]
            err_lowerconf = [np.sqrt(test_eval[k]['weighted_loss'])
                             - np.sqrt(test_eval[k]['weighted_loss'] - 2 * test_loss_std_dic[k])
                             for k in pareto_epsilons_test]
        else:
            err_upperconf = [2 * test_loss_std_dic[k] for k in pareto_epsilons_test]
            err_lowerconf = [2 * test_loss_std_dic[k] for k in pareto_epsilons_test]
        disp_conf = [test_disp_dev_dic[k] for k in pareto_epsilons_test]

        plt.fill_between(np.array(test_disp_list),
                         np.array(test_err_list) - np.array(err_lowerconf),
                         np.array(test_err_list) + np.array(err_upperconf),
                         alpha=0.2, facecolor=color, antialiased=True)
        plt.errorbar(test_disp_list, test_err_list, color=color, capthick=1,
                     markersize=5, capsize=2, linewidth=2, linestyle=linestyle)

    # Plotting benchmark
    for base_res in base_list:
        base_train_eval = base_res['base_train_eval']
        base_test_eval = base_res['base_test_eval']
        loss = base_res['loss']
        learner = base_res['learner']
        base_test_disp_conf = base_test_eval['disp_std']
        base_test_loss_std = base_test_eval['loss_std']
        dataset = base_res['dataset']
        marker = '^'
        label = 'unconstrained'
        if learner == 'OLS':
            # color = 'red'
            color = 'darksalmon'
            err_bar = True
        elif learner == "SEO":
            marker = 'v'
            color = 'deepskyblue'
            err_bar = True
            label = 'SEO'
        elif learner[:2] == "RF":
            color = 'brown'
            err_bar = True
        elif learner[:3] == "XGB":
            color = "green"
            err_bar = True
        elif learner == "LR":
            color = "darksalmon"
            err_bar = True
        else:
            color = 'tan'
            err_bar = False

        # Getting error bars
        if loss == "square":
            err_upperconf = np.sqrt(base_test_eval['average_loss'] + 2 * base_test_loss_std) - np.sqrt(base_test_eval['average_loss'])
            err_lowerconf = np.sqrt(base_test_eval['average_loss']) - np.sqrt(base_test_eval['average_loss'] - 2 * base_test_loss_std)
        else:
            err_upperconf = 2 * base_test_loss_std
            err_lowerconf = 2 * base_test_loss_std

        if loss == "square":
            if err_bar:
                plt.errorbar([base_test_eval[constraint+'_disp']],
                             [np.sqrt(base_test_eval['average_loss']) - np.sqrt(base_mean_loss)],
                             xerr=base_test_disp_conf, marker=marker, markeredgecolor='black',
                             color='black', markerfacecolor=color, ecolor='black',
                             capthick=1, markersize=11, capsize=2)
            else:
                plt.scatter([base_test_eval[constraint+'_disp']],
                            [np.sqrt(base_test_eval['average_loss']) - np.sqrt(base_mean_loss)],
                            marker=marker, edgecolors='black', s=95, label=label)
        else:
            if err_bar:
                plt.errorbar([base_test_eval[constraint+'_disp']],
                             [base_test_eval['average_loss']] - base_mean_loss,
                             xerr=base_test_disp_conf, marker=marker, markeredgecolor='black',
                             markerfacecolor=color, color='black', ecolor='black',
                             capthick=1, markersize=11, capsize=2)
            else:
                plt.scatter([base_test_eval[constraint+'_disp']],
                            [(base_test_eval['average_loss'])] - base_mean_loss,
                            marker=marker, label=label)
10,441
def slsn_constraint(parameters):
    """
    Place constraints on the magnetar rotational energy being larger than the total output energy,
    and that the nebula phase does not begin till at least 100 days.

    :param parameters: dictionary of parameters
    :return: converted_parameters dictionary where the violated samples are thrown out
    """
    converted_parameters = parameters.copy()
    mej = parameters['mej'] * solar_mass
    vej = parameters['vej'] * km_cgs
    kappa = parameters['kappa']
    mass_ns = parameters['mass_ns']
    p0 = parameters['p0']
    kinetic_energy = 0.5 * mej * vej**2
    rotational_energy = 2.6e52 * (mass_ns/1.4)**(3./2.) * p0**(-2)
    tnebula = np.sqrt(3 * kappa * mej / (4 * np.pi * vej ** 2)) / 86400
    neutrino_energy = 1e51
    total_energy = kinetic_energy + neutrino_energy
    # ensure rotational energy is greater than total output energy
    converted_parameters['erot_constraint'] = rotational_energy - total_energy
    # ensure t_nebula is greater than 100 days
    converted_parameters['t_nebula_min'] = tnebula - 100
    return converted_parameters
10,442
def grover_circuit(n, o, iter):
    """Grover Search Algorithm

    :param n: Number of qubits (not including ancilla)
    :param o: Oracle int to find
    :return qc: Qiskit circuit
    """

    def apply_hadamard(qc, qubits, a=None) -> None:
        """Apply a H-gate to 'qubits' in qc"""
        for q in qubits:
            qc.h(q)
        if a is not None:
            qc.h(a)

    def initialize_bits(qc, qubits, a) -> None:
        "Start qubits at 0 and ancilla bit at 1"
        for q in qubits:
            qc.reset(q)
        qc.reset(a[0])
        qc.x(a[0])

    def apply_mean_circuit(qc, qubits) -> None:
        """Apply a H-gate to 'qubits' in qc"""
        control_qubits = []
        for q in qubits:
            qc.h(q)
            qc.x(q)
            control_qubits.append(q)
        cZ = control_qubits[-1]
        control_qubits.pop()
        qc.h(cZ)
        qc.mcx(control_qubits, cZ)
        qc.h(cZ)
        for q in qubits:
            qc.x(q)
            qc.h(q)

    def create_oracle(qc, qubit, ancilla, oracle, n) -> None:
        """Creates a quantum oracle."""
        test_list = []
        for q in qubit:
            test_list.append(q)
        _oracle_logic(qc, qubit, oracle, n)
        qc.mcx(test_list, ancilla[0])
        _oracle_logic(qc, qubit, oracle, n)

    def _oracle_logic(qc, qubit, oracle, n) -> None:
        if 0 <= oracle <= 2**len(qubit)-1:
            bin_list = [int(i) for i in list('{0:0b}'.format(oracle))]
            if len(bin_list) < n:
                for _ in range(0, n-len(bin_list)):
                    bin_list.insert(0, 0)
            for i in range(0, len(bin_list)):
                if bin_list[i] == 0:
                    qc.x(q[i])
        else:
            raise ValueError('Oracle must be between 0 and 2^n-1')

    # print(f"Creating circuit with {n} qubits")
    q = QuantumRegister(n, 'q')
    a = QuantumRegister(1, 'a')
    c = ClassicalRegister(n, 'c')
    qc = QuantumCircuit(q, a, c)

    i2b = "{0:b}".format(o)
    # print(f"Oracle set to: {o} ({i2b})")
    # print(" ")

    initialize_bits(qc, q, a)
    qc.barrier(q, a)
    apply_hadamard(qc, q, a)

    # print(f"Generating {iter} Grover module(s)")
    # print("=====================================")
    for _ in range(1, iter+1):
        qc.barrier(q, a)
        create_oracle(qc, q, a, o, n)
        qc.barrier(q, a)
        apply_mean_circuit(qc, q)

    qc.barrier(q, a)
    for i in range(0, len(q)):
        qc.measure(q[i], c[len(q)-1-i])

    return qc
10,443
def reference_cluster(envs, in_path):
    """
    Return set of all env in_paths referencing or
    referenced by given in_path.

    >>> cluster = sorted(reference_cluster([
    ...     {'in_path': 'base', 'refs': []},
    ...     {'in_path': 'test', 'refs': ['base']},
    ...     {'in_path': 'local', 'refs': ['test']},
    ... ], 'test'))
    >>> cluster == ['base', 'local', 'test']
    True
    """
    edges = [
        set([env['in_path'], fix_reference_path(env['in_path'], ref)])
        for env in envs
        for ref in env['refs']
    ]
    prev, cluster = set(), set([in_path])
    while prev != cluster:  # While cluster grows
        prev = set(cluster)
        to_visit = []
        for edge in edges:
            if cluster & edge:
                # Add adjacent nodes:
                cluster |= edge
            else:
                # Leave only edges that are out
                # of cluster for the next round:
                to_visit.append(edge)
        edges = to_visit
    return cluster
10,444
def scheme_apply(procedure, args, env):
    """Apply Scheme PROCEDURE to argument values ARGS in environment ENV."""
    if isinstance(procedure, PrimitiveProcedure):
        return apply_primitive(procedure, args, env)
    elif isinstance(procedure, UserDefinedProcedure):
        new_env = make_call_frame(procedure, args, env)
        return eval_all(procedure.body, new_env)
    else:
        raise SchemeError("cannot call: {0}".format(str(procedure)))
10,445
def _validate_num_clusters(num_clusters, initial_centers, num_rows):
    """
    Validate the combination of the `num_clusters` and `initial_centers`
    parameters in the Kmeans model create function. If the combination is
    valid, determine and return the correct number of clusters.

    Parameters
    ----------
    num_clusters : int
        Specified number of clusters.

    initial_centers : SFrame
        Specified initial cluster center locations, in SFrame form. If the
        number of rows in this SFrame does not match `num_clusters`, there is a
        problem.

    num_rows : int
        Number of rows in the input dataset.

    Returns
    -------
    _num_clusters : int
        The correct number of clusters to use going forward
    """
    ## Basic validation
    if num_clusters is not None and not isinstance(num_clusters, int):
        raise _ToolkitError("Parameter 'num_clusters' must be an integer.")

    ## Determine the correct number of clusters.
    if initial_centers is None:
        if num_clusters is None:
            raise ValueError("Number of clusters cannot be determined from " +
                             "'num_clusters' or 'initial_centers'. You must " +
                             "specify one of these arguments.")
        else:
            _num_clusters = num_clusters

    else:
        num_centers = initial_centers.num_rows()

        if num_clusters is None:
            _num_clusters = num_centers
        else:
            if num_clusters != num_centers:
                raise ValueError("The value of 'num_clusters' does not match " +
                                 "the number of provided initial centers. " +
                                 "Please provide only one of these arguments " +
                                 "or ensure the values match.")
            else:
                _num_clusters = num_clusters

    if _num_clusters > num_rows:
        raise ValueError("The desired number of clusters exceeds the number " +
                         "of data points. Please set 'num_clusters' to be " +
                         "smaller than the number of data points.")

    return _num_clusters
10,446
def urlinline(filename, mime=None):
    """
    Load the file at "filename" and convert it into a data URI with the given
    MIME type, or a guessed MIME type if no type is provided. Base-64 encodes
    the data.
    """
    infile = open(filename, 'rb')
    text = infile.read()
    infile.close()
    enc = b64.standard_b64encode(text)
    if mime is None:
        mime, _ = mimetypes.guess_type(filename)
        mime = mime or DEFAULT_MIME_TYPE
    ret = "data:%s;base64,%s" % (mime, enc)
    return ret
10,447
def parse_matching_criteria(filters, filter_operator):
    """
    build the filter criteria, if present
    :param filters: field opr value[;]...
    :param filter_operator: any|all
    :return dictionary of parsed filter settings, True/False for "all"/"any" setting
    """
    LOG.debug("%s %s", filters, filter_operator)

    if filter_operator and filter_operator.strip().lower() not in ('all', 'any'):
        raise ValueError("operator must be 'all' or 'any': {}".format(filter_operator))

    match_operator_and = (filter_operator.strip().lower() == 'all') if filter_operator else True

    # parse the filters and produce a tuple of (field, operator, value)
    match_list = {}
    if filters:
        for filter_str in filters.split(';'):
            m = REGEX_OPERATORS.match(filter_str.strip())
            if not m or len(m.groups()) != 3:
                raise ValueError("Unable to parse filter '{}'".format(filter_str))

            match_field = m.group(1)
            match_opr = m.group(2)
            # correct mistyped comparison
            if match_opr.strip() == '=':
                match_opr = '=='

            match_value = m.group(3)
            # restore lists to actual lists
            if match_value.startswith("["):
                try:
                    match_value = json.loads(match_value.replace("'", '"'))  # make sure correct json format
                except Exception as err:
                    LOG.error(str(err))
                    pass
            # determine if working with a string, boolean, or int
            elif match_value in ["true", "True", "false", "False"]:
                match_value = str_to_bool(match_value)
            elif match_value == 'None':
                match_value = None
            else:
                try:
                    match_value = int(match_value)  # this will fail for numbers, which will be trapped
                except:
                    pass

            compare_tuple = (match_field, match_opr, match_value)
            LOG.debug(compare_tuple)

            match_list[match_field] = compare_tuple

    return match_list, match_operator_and
10,448
def star_rating(new_rating=None, prev_rating=None):
    """
    Generates the query to update the product's star ratings.
    Inc method is from
    https://docs.mongodb.com/manual/reference/operator/update/inc/
    """
    add_file = {
        1: {"one_star": 1},
        2: {"two_stars": 1},
        3: {"three_stars": 1},
        4: {"four_stars": 1},
        5: {"five_stars": 1}
    }
    delete_file = {
        1: {"one_star": -1},
        2: {"two_stars": -1},
        3: {"three_stars": -1},
        4: {"four_stars": -1},
        5: {"five_stars": -1}
    }
    if new_rating and prev_rating:
        # merge the increment and decrement fields into a single $inc document
        # (a set literal of two dicts, as originally written, would raise TypeError)
        return {"$inc": {**add_file[new_rating], **delete_file[prev_rating]}}
    elif new_rating:
        return {"$inc": add_file[new_rating]}
    else:
        return {"$inc": delete_file[prev_rating]}
10,449
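An illustrative call to star_rating above, showing the shape of the returned MongoDB update document; the collection.update_one line is a hypothetical pymongo usage, not part of the original snippet.

# a review changed from 2 stars to 5 stars
update = star_rating(new_rating=5, prev_rating=2)
# update == {"$inc": {"five_stars": 1, "two_stars": -1}}
# e.g. collection.update_one({"_id": product_id}, update)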
def import_references(directory="./", disable_progress_bar=False):
    """
    Import references file.

    Args:
        directory (str): Directory where the file is located.
        scopus_file (str): Name of the file.

    Returns:
        None

    """
    documents = read_all_records(directory)

    #
    references = read_raw_csv_files(join(directory, "raw", "references"))
    references = _delete_and_rename_columns(references)
    references = _process__authors_id__column(references)
    references = _process__raw_authors_names__column(references)
    references = _disambiguate_authors(references)
    references = _process__doi__column(references)
    references = _process__source_name__column(references)
    references = _process__iso_source_name__column(references)
    references = _search_for_new_iso_source_name(references)
    references = _complete__iso_source_name__colum(references)
    references = _repair__iso_source_name__column(references)
    references = _create__record_no__column(references)
    references = _create__document_id__column(references)

    #
    cited_references_table = _create_references_file(
        documents=documents,
        references=references,
        directory=directory,
        disable_progress_bar=disable_progress_bar,
    )

    #
    cited_references_frequency = cited_references_table.groupby(
        "cited_id", as_index=True
    ).count()

    references = references.assign(local_citations=0)
    references.index = references.record_no
    references.loc[
        cited_references_frequency.index, "local_citations"
    ] = cited_references_frequency.citing_id
    references = references.reset_index(drop=True)
    references["local_citations"].fillna(1, inplace=True)
    references["local_citations"] = references["local_citations"].astype(int)

    file_name = join(directory, "processed", "references.csv")
    references.to_csv(file_name, index=False)

    logging.info(f"References table saved to {file_name}")
10,450
def bsp_split_recursive(
    node: tcod.bsp.BSP,
    randomizer: Optional[tcod.random.Random],
    nb: int,
    minHSize: int,
    minVSize: int,
    maxHRatio: int,
    maxVRatio: int,
) -> None:
    """
    .. deprecated:: 2.0
        Use :any:`BSP.split_recursive` instead.
    """
    node.split_recursive(
        nb, minHSize, minVSize, maxHRatio, maxVRatio, randomizer
    )
10,451
def add_rpaths(env, install_off, set_cgo_ld, is_bin):
    """Add relative rpath entries"""
    if GetOption('no_rpath'):
        if set_cgo_ld:
            env.AppendENVPath("CGO_LDFLAGS", env.subst("$_LIBDIRFLAGS "), sep=" ")
        return
    env.AppendUnique(RPATH_FULL=['$PREFIX/lib64'])
    rpaths = env.subst("$RPATH_FULL").split()
    prefix = env.get("PREFIX")
    if not is_bin:
        path = r'\$$ORIGIN'
        env.AppendUnique(RPATH=[DaosLiteral(path)])
    for rpath in rpaths:
        if rpath.startswith('/usr'):
            env.AppendUnique(RPATH=[rpath])
            continue
        if install_off is None:
            env.AppendUnique(RPATH=[os.path.join(prefix, rpath)])
            continue
        relpath = os.path.relpath(rpath, prefix)
        if relpath != rpath:
            joined = os.path.normpath(os.path.join(install_off, relpath))
            path = r'\$$ORIGIN/%s' % (joined)
            if set_cgo_ld:
                env.AppendENVPath("CGO_LDFLAGS", "-Wl,-rpath=$ORIGIN/%s/%s" % (install_off, relpath), sep=" ")
            else:
                env.AppendUnique(RPATH=[DaosLiteral(path)])
    for rpath in rpaths:
        path = os.path.join(prefix, rpath)
        if is_bin:
            # NB: Also use full path so intermediate linking works
            env.AppendUnique(LINKFLAGS=["-Wl,-rpath-link=%s" % path])
        else:
            # NB: Also use full path so intermediate linking works
            env.AppendUnique(RPATH=[path])
    if set_cgo_ld:
        env.AppendENVPath("CGO_LDFLAGS", env.subst("$_LIBDIRFLAGS $_RPATH"), sep=" ")
10,452
def check_encoder(value: EncoderArg) -> EncoderFactory:
    """Checks value and returns EncoderFactory object.

    Returns:
        d3rlpy.encoders.EncoderFactory: encoder factory object.

    """
    if isinstance(value, EncoderFactory):
        return value
    if isinstance(value, str):
        return create_encoder_factory(value)
    raise ValueError("This argument must be str or EncoderFactory object.")
10,453
def mypy(ctx):
    """Runs mypy against the codebase"""
    ctx.run("mypy --config mypy.ini")
10,454
def apply_dep_update(recipe_dir, dep_comparison):
    """Update a recipe given a dependency comparison.

    Parameters
    ----------
    recipe_dir : str
        The path to the recipe dir.
    dep_comparison : dict
        The dependency comparison.

    Returns
    -------
    update_deps : bool
        True if deps were updated, False otherwise.
    """
    recipe_pth = os.path.join(recipe_dir, "meta.yaml")
    with open(recipe_pth) as fp:
        lines = fp.readlines()

    sections_to_update = ["host", "run"]

    if _ok_for_dep_updates(lines) and any(
        len(dep_comparison.get(s, {}).get("df_minus_cf", set())) > 0
        for s in sections_to_update
    ):
        recipe = CondaMetaYAML("".join(lines))

        updated_deps = _update_sec_deps(
            recipe,
            dep_comparison,
            sections_to_update,
        )

        if updated_deps:
            with open(recipe_pth, "w") as fp:
                recipe.dump(fp)
10,455
def allowed_task_name(name: str) -> bool:
    """Determine whether a task, which is a non-core-OSCAL activity/directory, is allowed.

    args:
        name: the task name which is assumed may take the form of a relative path for task/subtasks.

    Returns:
        Whether the task name is allowed or not allowed (interferes with assumed project directories such as catalogs).
    """
    # Task must not use an OSCAL directory
    # Task must not self-interfere with a project
    pathed_name = pathlib.Path(name)

    root_path = pathed_name.parts[0]
    if root_path in const.MODEL_TYPE_TO_MODEL_DIR.values():
        logger.error('Task name is the same as an OSCAL schema name.')
        return False
    elif root_path[0] == '.':
        logger.error('Task name must not start with "."')
        return False
    elif pathed_name.suffix != '':
        # Does it look like a file
        logger.error('tasks name must not look like a file path (e.g. contain a suffix')
        return False

    return True
10,456
def get_ignored_classes(uppercase, lowercase, digit):
    """
    get tuple of ignored classes based on selected classes
    :param uppercase: whether to keep uppercase classes
    :param lowercase: whether to keep lowercase classes
    :param digit: whether to keep digit classes
    :return: tuple of ignored classes
    """
    # result placeholder
    ignored = []

    # add digit classes to the ignore list
    if not digit:
        ignored.append(dataset.get_classes('digit'))

    # add uppercase classes to the ignore list
    if not uppercase:
        ignored.append(dataset.get_classes('uppercase'))

    # add lowercase classes to the ignore list
    if not lowercase:
        ignored.append(dataset.get_classes('lowercase'))

    # return tuple
    return tuple(ignored)
10,457
def unscaled_prediction_rmse(model, input_tensor, label_tensor, scalar, loading_length=0,
                             return_loading_error=False, device=None):
    """
    Prediction RMSE.

    :param model: model
    :param input_tensor: input tensor
    :param label_tensor: label tensor
    :param scalar: scalar for transforming output data
    :param loading_length: time length used for loading the NARX
    :param return_loading_error: return the loading RMSE with the multi-step ahead RMSE
    :param device: specified device to use (Default: None - select what is available)
    :return: prediction rmse
    """
    # Create Network on GPU/CPU
    if device is None:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)

    # Training Data on GPU/CPU
    input_tensor, label_tensor = input_tensor.to(device), label_tensor.to(device)

    # Sort data for loading and k-step ahead predictions.
    inputs, labels, itdl, otdl = init_tdl(model, input_tensor, label_tensor, device)
    loading_labels, k_step_labels = labels[:, :loading_length, :], labels[:, loading_length:, :]

    # Perform a k-step ahead prediction
    k_step_outputs, loading_outputs = __multi_step_ahead_prediction(model, input_tensor,
                                                                    label_tensor, loading_length, device)

    if return_loading_error:
        # Combine loading and multi-step predictions/labels
        outputs = torch.cat([loading_outputs, k_step_outputs], dim=1)
        labels = torch.cat([loading_labels, k_step_labels], dim=1)
    else:
        # Use the multi-step predictions/labels
        outputs = k_step_outputs
        labels = k_step_labels

    labels = labels.cpu().data.numpy()
    labels = labels.reshape((labels.shape[0], labels.shape[1]))
    labels = (labels - scalar.min_[1]) / scalar.scale_[1]

    outputs = outputs.cpu().data.numpy()
    outputs = outputs.reshape((outputs.shape[0], outputs.shape[1]))
    outputs = (outputs - scalar.min_[1]) / scalar.scale_[1]

    error = labels - outputs
    error = np.sqrt((np.power(error, 2)).mean(axis=0))

    return error
10,458
def __copyList__(fromList, initialValues=None):
    """
    Returns a copy of the provided list. Initial values must either be a single value,
    or a list of exactly the same size as the provided list.
    """
    if __isListType__(fromList) is False:
        raise ValueError('The provided value to copy was not a list!')

    fromList = copy.deepcopy(fromList)
    if initialValues is not None:
        initialValues = copy.deepcopy(initialValues)

    if initialValues is None or __isNonStringIterableType__(initialValues) is False:
        copySingleValue = True
    elif __isNonStringIterableType__(initialValues) and len(initialValues) == 1 or __isListType__(initialValues) is False:
        # Treat an initialValue object with 1 element the same as a non-iterable, so we could
        # set every value to a list, or to a non-list value
        copySingleValue = True
    else:
        if len(initialValues) != len(fromList):
            raise ValueError('The initial values list must be the same size as the list to copy!')
        else:
            copySingleValue = False

    returnList = fromList[:]
    for itemIndex in range(len(returnList)):
        if copySingleValue is True:
            returnList[itemIndex] = initialValues
        else:
            returnList[itemIndex] = initialValues[itemIndex]

    return returnList
10,459
def build_or_pattern(patterns, escape=False):
    """Build a or pattern string from a list of possible patterns
    """
    or_pattern = []
    for pattern in patterns:
        if not or_pattern:
            or_pattern.append('(?:')
        else:
            or_pattern.append('|')
        or_pattern.append('(?:%s)' % re.escape(pattern) if escape else pattern)
    or_pattern.append(')')
    return ''.join(or_pattern)
10,460
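A brief usage sketch for build_or_pattern above; the resolution tokens and filename are invented examples.

import re

pattern = build_or_pattern(["720p", "1080p", "2160p"])
# pattern == '(?:720p|1080p|2160p)'
assert re.search(pattern, "Movie.2019.1080p.mkv")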
def concatenate_and_process_data(
    data_consent: pd.DataFrame,
    data_noconsent: pd.DataFrame,
    conversion_column: str = CONVERSION_COLUMN,
    drop_columns: Tuple[Any, ...] = DROP_COLUMNS,
    non_dummy_columns: Tuple[Any, ...] = NON_DUMMY_COLUMNS
) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Concatenates consent and no-consent data and preprocesses them.

    Args:
        data_consent: Dataframe of consent customers.
        data_noconsent: Dataframe of no-consent customers.
        conversion_column: Name of the conversion column in the data.
        drop_columns: Names of columns that should be dropped from the data.
        non_dummy_columns: Names of (categorical) columns that should be kept,
            but not dummy-coded.

    Raises:
        ValueError: if concatenating consent and no-consent data doesn't match
            the expected length.

    Returns:
        Processed dataframes for consent and no-consent customers.
    """
    data_noconsent["consent"] = 0
    data_consent["consent"] = 1
    data_concat = pd.concat([data_noconsent, data_consent])
    data_concat.reset_index(inplace=True, drop=True)
    if len(data_concat) != (len(data_noconsent) + len(data_consent)):
        raise ValueError(
            "Length of concatenated data does not match sum of individual dataframes."
        )
    data_preprocessed = preprocess_data(
        data=data_concat,
        drop_columns=list(drop_columns),
        non_dummy_columns=list(non_dummy_columns),
        conversion_column=conversion_column)
    data_noconsent_processed = data_preprocessed[data_preprocessed["consent"] == 0]
    data_consent_processed = data_preprocessed[data_preprocessed["consent"] == 1]
    return data_consent_processed, data_noconsent_processed
10,461
def plot(nRows=1, nCols=1, figSize=5): """ Generate a matplotlib plot and axis handle Parameters ----------------- nRows : An int, number of rows for subplotting nCols : An int, number of columns for subplotting figSize : Numeric or array (xFigSize, yFigSize). The size of each axis. """ if isinstance(figSize, (list, tuple)): xFigSize, yFigSize = figSize elif isinstance(figSize, (int, float)): xFigSize = yFigSize = figSize else: raise Exception('figSize type {} not recognised'.format(type(figSize))) fig, axs = plt.subplots(nRows, nCols, figsize=(nCols * xFigSize, nRows * yFigSize)) if nRows * nCols > 1: axs = axs.ravel() return fig, axs
10,462
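# A small usage sketch for the plot() helper above (illustrative, not from the
# source): request a 2x2 grid, draw into each flattened axis, and save the figure.
import numpy as np

fig, axs = plot(nRows=2, nCols=2, figSize=4)
x = np.linspace(0, 2 * np.pi, 100)
for i, ax in enumerate(axs):
    ax.plot(x, np.sin(x + i))
fig.savefig("demo_grid.png")  # hypothetical output path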
def make_rate_data(grp, valuevars, query="none == 'All'", data=ob):
    """Filters, Groups, and Calculates Rates

    Params:
        grp [list]: A list detailing the names of the variables to group by.
        valuevars [list]: A list detailing the names of the quantitative
            variables to summarise and calculate rates for (as a function of
            population).
        query [string]: A query string used to subset the data prior to
            aggregation.
        data [pd.DataFrame]: The obesity dataset.

    Returns:
        [pd.DataFrame]: A pandas data frame containing the grouping variables
            and rates for the value variables (carrying the same column name).
            Cells where a rate could not be calculated due to missing
            information are returned as np.NaN.
    """
    grp_plus = grp + ["none"]

    ratedata = (
        data.query(query)
        .loc[:, grp + ["pop"] + valuevars]
        .melt(id_vars=grp + ["pop"], var_name="variable", value_name="value")
        .dropna()
        .groupby(grp + ["variable"])[["pop", "value"]]
        .sum()
        .reset_index()
        .assign(rate=lambda x: x["value"] / x["pop"])
        .drop(columns=["value", "pop"])
        .pivot(index=grp, columns="variable", values="rate")
        .reset_index()
    )
    return ratedata
10,463
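# Illustrative call of make_rate_data above with a tiny stand-in for the `ob`
# obesity dataset; the column names here are hypothetical, only "pop" and "none"
# are required by the function's defaults.
import pandas as pd

ob_demo = pd.DataFrame({
    "region": ["A", "A", "B", "B"],
    "none":   ["All", "All", "All", "All"],
    "pop":    [100, 200, 150, 250],
    "obese":  [20, 60, 30, 100],
})
rates = make_rate_data(["region"], ["obese"], data=ob_demo)
# rates now holds one obesity rate per region, e.g. 80/300 for region A.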
def test_compare_sir_vs_seir(sir_data_wo_policy, seir_data, monkeypatch): """Checks if SEIR and SIR return same results if the code enforces * alpha = gamma * E = 0 * dI = dE """ x_sir, pars_sir = sir_data_wo_policy x_seir, pars_seir = seir_data pars_seir["alpha"] = pars_sir["gamma"] # will be done by hand def mocked_seir_step(data, **pars): data["exposed"] = 0 new_data = SEIRModel.simulation_step(data, **pars) new_data["infected"] += new_data["exposed_new"] return new_data seir_model = SEIRModel() monkeypatch.setattr(seir_model, "simulation_step", mocked_seir_step) sir_model = SIRModel() predictions_sir = sir_model.propagate_uncertainties(x_sir, pars_sir) predictions_seir = seir_model.propagate_uncertainties(x_seir, pars_seir) assert_frame_equal( predictions_sir[COLS_TO_COMPARE], predictions_seir[COLS_TO_COMPARE], )
10,464
def login(client, username='', password=''): """ Log a specific user in. :param client: Flask client :param username: The username :type username: str :param password: The password :type password: str :return: Flask response """ user = dict(login=username, password=password) response = client.post(url_for('blog.login'), data=user, follow_redirects=True) return response
10,465
def test_liststatements(): """Turn list of dictionaries into list of Statement objects.""" csvrows_list = [ {"shape_id": "@a", "prop_id": "dct:creator", "value_type": "URI"}, {"shape_id": "@a", "prop_id": "dct:subject", "value_type": "URI"}, {"shape_id": "@a", "prop_id": "dct:date", "value_type": "String"}, {"shape_id": "@b", "prop_id": "foaf:name", "value_type": "String"}, ] assert list_statements(csvrows_list) == [ Statement(start=True, shape_id="@a", prop_id="dct:creator", value_type="URI"), Statement(start=True, shape_id="@a", prop_id="dct:subject", value_type="URI"), Statement(start=True, shape_id="@a", prop_id="dct:date", value_type="String"), Statement(start=False, shape_id="@b", prop_id="foaf:name", value_type="String"), ]
10,466
def func(x, params): """The GNFW radial profile. Args: x (:obj:`np.ndarray`): Radial coordinate. params (:obj:`dict`): Dictionary with keys `alpha`, `beta`, `gamma`, `c500`, and `P0` that defines the GNFW profile shape. Returns: Profile (1d :obj:`np.ndarray`). """ G, A, B, c500, P0 = params['gamma'], params['alpha'], params['beta'], params['c500'], params['P0'] prof=np.zeros(x.shape) mask=np.greater(x, 0) prof[mask]=P0*((x[mask]*c500)**-G * (1+(x[mask]*c500)**A)**((G-B)/A)) #prof[x == 0]=np.inf return prof
10,467
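# A short sketch evaluating the GNFW profile above on a radial grid; the shape
# parameters are illustrative values only, loosely in the range of published
# GNFW fits, not taken from this code base.
import numpy as np

params = {"P0": 8.4, "c500": 1.18, "gamma": 0.31, "alpha": 1.05, "beta": 5.49}
x = np.linspace(0.0, 2.0, 5)  # radii in units of R500
profile = func(x, params)
# profile[0] stays 0.0 because x == 0 is excluded by the mask in func().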
def get_double_image_blob(roidb): """Builds an input blob from the images in the roidb at the specified scales. """ num_images = len(roidb) # Sample random scales to use for each image in this batch scale_inds = np.random.randint( 0, high=len(cfg.TRAIN.SCALES), size=num_images) processed_ims = [] im_scales = [] pad = cfg.TRAIN.PADDING for i in range(num_images): # Process A image im = get_a_img(roidb[i]) other_im = cv2.imread(roidb[i]['b_image']) # Process B image h, w, _ = im.shape b_y1, b_x1, b_y2, b_x2 = roidb[i]['b_bbox'] if cfg.TRAIN.AUG_LRV_BBOX: b_x1, b_y1, b_x2, b_y2 = boxes_utils.aug_align_box((b_x1,b_y1,b_x2,b_y2), (other_im.shape[1], other_im.shape[0]), pad=0) tmp = other_im[b_y1:b_y2, b_x1:b_x2, 0] other_im = cv2.resize(other_im[b_y1:b_y2, b_x1:b_x2, 0], dsize=(w-2*pad, h-2*pad), interpolation=cv2.INTER_CUBIC) if cfg.HIST_EQ: if cfg.A_HIST_EQ: clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) other_im = clahe.apply(other_im) else: other_im = cv2.equalizeHist(other_im) other_im = pad_image(other_im, pad=pad)[:, ::-1] other_im = np.tile(other_im, (3, 1, 1)) other_im = np.transpose(other_im, (1, 2, 0)).astype(np.uint8) assert im is not None, \ 'Failed to read image \'{}\''.format(roidb[i]['image']) # If NOT using opencv to read in images, uncomment following lines # if len(im.shape) == 2: # im = im[:, :, np.newaxis] # im = np.concatenate((im, im, im), axis=2) # # flip the channel, since the original one using cv2 # # rgb -> bgr # im = im[:, :, ::-1] if roidb[i]['flipped']: im = im[:, ::-1, :] other_im = other_im[:, ::-1, :] # generate the cropped image if cfg.TRAIN.ONLINE_RANDOM_CROPPING: x1, y1, x2, y2 = roidb[i]['cropped_bbox'] im = im[y1:y2+1, x1:x2+1, :] other_im = other_im[y1:y2+1, x1:x2+1, :] target_size = cfg.TRAIN.SCALES[scale_inds[i]] if cfg.TRAIN.AUGMENTATION: transform_cv = cv_transforms.Compose([ cv_transforms.ColorJitter(brightness=0.5, contrast=0.25, gamma=0.5)]) else: transform_cv = None # TODO: add augmentation im, im_scale = blob_utils.prep_im_for_blob( im, cfg.PIXEL_MEANS, [target_size], cfg.TRAIN.MAX_SIZE, transform_cv) other_im, other_im_scale = blob_utils.prep_im_for_blob( other_im, cfg.PIXEL_MEANS, [target_size], cfg.TRAIN.MAX_SIZE, transform_cv) im_scales.append(im_scale[0]) processed_ims.append(im[0]) processed_ims.append(other_im[0]) # Create a blob to hold the input images [n, c, h, w] blob = blob_utils.im_list_to_blob(processed_ims) return blob, im_scales
10,468
def test_successful_delete_sbi(): """Test deleting an SBI successfully"""
10,469
def _readFromSettings(self, key): """Loads the settings object associated with the program and returns the value at the key.""" COMPANY, APPNAME, _ = SELMAGUISettings.getInfo() COMPANY = COMPANY.split()[0] APPNAME = APPNAME.split()[0] settings = QtCore.QSettings(COMPANY, APPNAME) val = None try: val = settings.value(key) except: self._signalObject.errorMessageSignal.emit( "Wrong setting accessed.") return val #Return the right type if val == "true": return True if val == "false": return False return float(val)
10,470
def run_overlay_resources_score_motifs(normal_expression_per_tissue_origin_per_TF, matching_cell_name_representative_dict, motifTFName_TFNames_matches_dict, cells_assays_dict, cell_tfs, tf_cells, assay_cells_datatypes, header): """pairs matching chromosomes in motif_sites_input_dir and all_chromatin_makrs_all_cells_input_dir and calls overlay_resources_score_motifs Input: moitf instances input dir (one file per chr) chromatin data collection dir (one file per chr, bed4 format; track pos, track cell#assaytype#value or cell#TFname in case of chip-seq) Return: a list of motif_overlapping_track files Precondition: files in motif_sites_input_dir and chromatin_tracks_input_dir should have the same names Recommended: name files in both dirs as chrNumber, chrX or chrY (where number is between 1-22) """ motif_files = [] if not os.path.isdir(params['motif_sites_dir']) and os.path.isfile(params['motif_sites_dir']): motif_files = [params['motif_sites_dir']] params['motif_sites_dir'] = "." else: motif_files = os.listdir(params['motif_sites_dir']) chromatin_tracks_files = os.listdir(params['all_chromatin_makrs_all_cells_combined_dir_path']) if not os.path.exists(params['motifs_overlapping_tracks_output_dir']): os.mkdir(params['motifs_overlapping_tracks_output_dir']) motifs_overlapping_tracks_files = [] scored_motifs_overlapping_tracks_files = [] if get_value(params['run_in_parallel_param']) and len(motif_files)>1: p = Pool(int(params['number_processes_to_run_in_parallel'])) for motif_file in motif_files: if motif_file.split('/')[-1] in chromatin_tracks_files:#it is assumed for every motif file name there exists a matching file name in the chromatin_tracks_input_dir motifs_overlapping_tracks_file = params['motifs_overlapping_tracks_output_dir']+'/' + '.'.join(motif_file.split('/')[-1].split('.')[0:-1])+'_overlapping_tracks' + '.bed7' scored_motifs_chromatin_tracks_output_file = '.'.join(motifs_overlapping_tracks_file.split('.')[0:-1]) + '_scored.bed10' if not (os.path.exists(motifs_overlapping_tracks_file) and os.path.exists(scored_motifs_chromatin_tracks_output_file)): if get_value(params['run_in_parallel_param']) and len(motif_files)>1: p.apply_async(overlay_resources_score_motifs, args=(params['motif_sites_dir']+'/'+motif_file, params['all_chromatin_makrs_all_cells_combined_dir_path']+'/'+motif_file.split('/')[-1], scored_motifs_chromatin_tracks_output_file, motifs_overlapping_tracks_file, normal_expression_per_tissue_origin_per_TF, matching_cell_name_representative_dict, motifTFName_TFNames_matches_dict, cells_assays_dict, cell_tfs, tf_cells, assay_cells_datatypes, header)) else: overlay_resources_score_motifs(params['motif_sites_dir']+'/'+motif_file, params['all_chromatin_makrs_all_cells_combined_dir_path']+'/'+motif_file.split('/')[-1], scored_motifs_chromatin_tracks_output_file, motifs_overlapping_tracks_file, normal_expression_per_tissue_origin_per_TF, matching_cell_name_representative_dict, motifTFName_TFNames_matches_dict, cells_assays_dict, cell_tfs, tf_cells, assay_cells_datatypes, header) motifs_overlapping_tracks_files.append(motifs_overlapping_tracks_file) scored_motifs_overlapping_tracks_files.append(scored_motifs_chromatin_tracks_output_file) if get_value(params['run_in_parallel_param']) and len(motif_files)>1: p.close() p.join() return motifs_overlapping_tracks_files, scored_motifs_overlapping_tracks_files
10,471
def compare(file1, file2):
    """Sub-main function: diff two files and write an HTML report."""
    try:
        f1 = 'C:\\Users\\seamus\\Downloads\\configure_data\\' + file1
        f2 = 'C:\\Users\\seamus\\Downloads\\configure_data\\' + file2
    except Exception as e:
        print("Error: " + str(e))
        print("Usage : python compareFile.py filename1 filename2")
        sys.exit()
    if f1 == "" or f2 == "":
        # Not enough arguments
        print("Usage : python compareFile.py filename1 filename2")
        sys.exit()
    tf1 = readFile(f1)
    tf2 = readFile(f2)
    d = difflib.HtmlDiff()  # Create a difflib.HtmlDiff instance
    writeFile(d.make_file(tf1, tf2))
10,472
def history_directory(repo_loc: str) -> str: """Retrieve the directory containing job logs for the specified repository Parameters ---------- repo_loc : str FAIR-CLI repository path Returns ------- str location of the job logs directory """ return os.path.join( fdp_com.find_fair_root(repo_loc), fdp_com.FAIR_FOLDER, "logs" )
10,473
def test_calc_node_coords_1(): """ Attempt to run on edges which are out of order. This should raise an exception. """ test_input_tp_as_text = test_1_lines['000000F'] + """\n000000F 000062240:B 000083779:B 000083779 862 0 30696 99.79""" all_sl = [line.strip().split() for line in test_input_tp_as_text.splitlines()] edges = [mod.TilingPathEdge(sl) for sl in all_sl] with pytest.raises(Exception): coord_map, contig_len = mod.calc_node_coords(edges, first_node_offset=0)
10,474
def validate_input_path(path: str) -> None: """ Validate input file/directory path argument, raises a ValueError if the path is not valid .sql file or directory. :param path: input path argument specified by the user """ if not os.path.isdir(path) and not (os.path.isfile(path) and is_pathname_valid(path) and path.endswith('.sql')): raise ValueError(f'{path} is not a valid directory or .sql file path')
10,475
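# Minimal usage sketch for validate_input_path above; the path is hypothetical.
# The helper raises ValueError for anything that is not an existing directory
# or an existing, well-formed .sql file.
try:
    validate_input_path("queries/report.sql")
except ValueError as err:
    print(f"rejected: {err}")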
def _h5_overwrite_array_slice(model, h5_key_pair, slice_tuple, array_slice): """Overwrites (updates) a slice of an hdf5 array.""" h5_root = _h5_access(model, h5_key_pair[0], mode = 'a') dset = h5_root[h5_key_pair[1]] dset[slice_tuple] = array_slice
10,476
def is_statu(search_data):
    """
    Check whether a search value was supplied and map the status text
    '正常' (enabled) / '停用' (disabled) to '1' / '0'.
    :param search_data:
    :return:
    """
    logging.info('is_statu')
    if search_data:
        if search_data == '正常':
            return '1'
        elif search_data == '停用':
            return '0'
        else:
            return search_data
    else:
        return ''
10,477
def get_mysql_exception(errno, msg, sqlstate=None):
    """Get the exception matching the MySQL error

    This function will return an exception based on the SQLState. The given
    message will be passed on in the returned exception.

    The exception returned can be customized using the
    mysql.connector.custom_error_exception() function.

    Returns an Exception
    """
    try:
        return _CUSTOM_ERROR_EXCEPTIONS[errno](
            msg=msg, errno=errno, sqlstate=sqlstate)
    except KeyError:
        # Error was not mapped to particular exception
        pass

    try:
        return _ERROR_EXCEPTIONS[errno](
            msg=msg, errno=errno, sqlstate=sqlstate)
    except KeyError:
        # Error was not mapped to particular exception
        pass

    if not sqlstate:
        return DatabaseError(msg=msg, errno=errno)

    try:
        return _SQLSTATE_CLASS_EXCEPTION[sqlstate[0:2]](
            msg=msg, errno=errno, sqlstate=sqlstate)
    except KeyError:
        # Return default DatabaseError
        return DatabaseError(msg=msg, errno=errno, sqlstate=sqlstate)
10,478
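# Hedged usage sketch for get_mysql_exception above: build (rather than raise)
# an exception object for MySQL error 1146 ("table doesn't exist"); the exact
# class returned depends on the module's internal errno/SQLSTATE mappings.
exc = get_mysql_exception(1146, "Table 'test.t1' doesn't exist", sqlstate="42S02")
print(type(exc).__name__)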
def teardown_module(module): """ teardown any state that was previously setup with a setup_module method. """ import time # temp file will be removed soon time.sleep(1.0) for p in ["1.zip", "2.zip", "3.zip"]: try: os.remove(p) except: pass
10,479
def clear_pkt_loss(): """ :return: """ pkt_loss_file_path = os.path.join(os.getcwd(), 'pkt_loss.yaml') if os.path.isfile(pkt_loss_file_path): os.remove(pkt_loss_file_path) return pkt_loss_file_path
10,480
def get_image_resize_transform_steps(config, dataset) -> List: """ Resizes the image to a slightly larger square. """ assert dataset.original_resolution is not None assert config.resize_scale is not None scaled_resolution = tuple( int(res * config.resize_scale) for res in dataset.original_resolution ) return [ transforms.Resize(scaled_resolution) ]
10,481
def get_page_title(page_src, meta_data): """Returns the title of the page. The title in the meta data section will take precedence over the H1 markdown title if both are provided.""" return ( meta_data['title'] if 'title' in meta_data and isinstance(meta_data['title'], str) else get_markdown_title(page_src) )
10,482
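# Small usage sketch for get_page_title above (hypothetical inputs): the meta
# "title" wins over the markdown H1 when both are present.
page_src = "# Quick start\n\nSome content."
meta_data = {"title": "Getting Started"}
print(get_page_title(page_src, meta_data))  # -> "Getting Started"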
def get_args():
    """Parse command-line arguments.

    Returns:
        argparse.Namespace: parsed argument values
    """
    parser = argparse.ArgumentParser(
        prog="app.py",
        usage="realtime or audio file",
        description="detect music change point.",
        add_help=True
    )
    parser.add_argument(
        "--cfg", type=str, default="./settings.yaml", help="setting file path"
    )
    parser.add_argument(
        "--file", type=str, default=None, help="audio file path"
    )
    return parser.parse_args()
10,483
def make_3dplot(fname_inp, fname_fig, clim=[None,None], vnames=[], data_processor=None, verbose='debug', **kws): """ make 3D plot with a radial and longitudinal cuts """ logger.setLevel(getattr(logging, verbose.upper())) assert len(vnames)>0, ' [-] We need names in vnames!\n' # we'll obtain: # - same data but in structured way; friendly for plot_surface(). # - fill 'vdict'' with original ASCII data d = get_array_vars(fname_inp, checks=False, complete_domain_walk=True, vnames=vnames, data_processor=data_processor) # NOTE: d['data'] is processed sutff built from the original (from # the ASCII file) simulation data. Such processing was made # by 'data_processor()'. # NOTE: a this point, 'vdict' has the original data from the ASCII file. r, ph, th = d['coords'] Bmod = d['data']; print ' [+] global extremes:', np.nanmin(Bmod), np.nanmax(Bmod) cbmin, cbmax = clim if clim is not [None,None] else (np.nanmin(Bmod),np.nanmax(Bmod)) figsize = kws.get('figsize', None) # [inches] 2-tuple if figsize is None: # Deduce the 'figsize' as a function of: # * the dpi of the monitor # * the desired size in pixels of the figure # Grab the dpi value of this monitor. Source: # * https://stackoverflow.com/questions/13714454/specifying-and-saving-a-figure-with-exact-size-in-pixels#13714720 # * https://stackoverflow.com/questions/3129322/how-do-i-get-monitor-resolution-in-python/45467999#45467999 tk = Tkinter.Tk() dpi_w = tk.winfo_screenwidth()/(tk.winfo_screenmmwidth()/25.4) dpi_h = tk.winfo_screenheight()/(tk.winfo_screenmmheight()/25.4) # size in pixels pixels = kws.get('pixels', [128.,100.]) figsize = (pixels[0]/dpi_w, pixels[1]/dpi_h) # [inches] #--- figure fig_stuff = { 'fig' : figure(1, figsize=figsize), } fig_stuff.update({ 'ax' : fig_stuff['fig'].add_subplot(111, projection='3d'), 'norm' : LogNorm(cbmin,cbmax) if kws.get('cscale','log')=='log' else Normalize(cbmin,cbmax), }) fig = fig_stuff['fig'] ax = fig_stuff['ax'] norm = fig_stuff['norm'] #--- plot for fixed "r" o__fixed_r = PlotCut_fixed_r(fig_stuff, d, ro = kws.get('ro', 5.0), pazim = kws.get('pazim',-60.), verbose = verbose, ) fig, ax = o__fixed_r['FigAx'] r_plot = o__fixed_r['r_plot'] surf_r = o__fixed_r['surf'] #--- plot for fixed "ph" r_range = kws.get('r_range', [1.0,7.0]) pho = kws.get('pho', 10.0) o__fixed_r = PlotCut_fixed_ph(fig_stuff, d, pho = pho, r_range=r_range, pazim = kws.get('pazim',-60.), verbose = verbose, ) fig, ax = o__fixed_r['FigAx'] ph_plot = o__fixed_r['ph_plot'] surf_ph = o__fixed_r['surf'] # uniform axis limits axmin = np.min([getattr(ax,'get_%slim'%dim)() for dim in ('x','y','z')]) axmax = np.max([getattr(ax,'get_%slim'%dim)() for dim in ('x','y','z')]) ax.set_xlim(axmin,axmax) ax.set_ylim(axmin,axmax) ax.set_zlim(axmin,axmax) # perspective azimuth ax.azim = kws.get('pazim', -60.) 
sm = cm.ScalarMappable(cmap=surf_r.cmap, norm=fig_stuff['norm']) sm.set_array(d['data']); #surf.set_array(var) # labels && title ax.set_xlabel('X [Ro]') ax.set_ylabel('Y [Ro]') ax.set_zlabel('Z [Ro]') TITLE = '$r_o$ = %.2g $R_o$' % r_plot +\ '\n($\phi_o$,r1,r2) : ($%g^o,%g\,Ro,%g\,Ro$)' % (pho,r_range[0],r_range[1]) # extract the step number from the input filename if kws.get('wtimelabel',False): tlabel = fname_inp.split('/')[-1].split('.h5')[0].split('_')[-1].replace('n','') TITLE += '\n step: '+tlabel ax.set_title(TITLE) #--- colorbar cb_label = '|B| [G]' cb_fontsize = 13 axcb = fig.colorbar(sm, ax=ax) axcb.set_label(cb_label, fontsize=cb_fontsize) sm.set_clim(vmin=cbmin, vmax=cbmax) # save figure #show() fig.savefig(fname_fig, dpi=kws.get('dpi',100), bbox_inches='tight') close(fig) del fig return None
10,484
def saveReplayBuffer(): """ Flush and save the contents of the Replay Buffer to disk. This is basically the same as triggering the "Save Replay Buffer" hotkey. Will return an `error` if the Replay Buffer is not active. """ return __createJSON("SaveReplayBuffer", {})
10,485
def view_or_basicauth(view, request, test_func, realm = "", *args, **kwargs): """ This is a helper function used by both 'logged_in_or_basicauth' and 'has_perm_or_basicauth' that does the nitty of determining if they are already logged in or if they have provided proper http-authorization and returning the view if all goes well, otherwise responding with a 401. """ if request.user is None or not request.user.is_authenticated() or not user_has_student(request.user) or ALWAYS_LOGIN: key = 'HTTP_AUTHORIZATION' if key not in request.META: key = 'REDIRECT_HTTP_AUTHORIZATION' if key not in request.META: key = 'HTTP_X_AUTHORIZATION' if key in request.META: auth = request.META[key].split() if len(auth) == 2: if auth[0].lower() == "basic": # Basic authentication - this is not an API client uname, passwd = base64.b64decode(auth[1]).split(':') user = authenticate(username=uname, password=passwd) permissions = APIClient.universal_permission_flag() elif auth[0].lower() == "bearer": # The client bears a FireRoad-issued token user, permissions, error = extract_token_info(request, auth[1]) if error is not None: return HttpResponse(json.dumps(error), status=401, content_type="application/json") user.backend = 'django.contrib.auth.backends.ModelBackend' else: raise PermissionDenied request.session['permissions'] = permissions if user is not None: if user.is_active: login(request, user) request.user = user return view(request, *args, **kwargs) raise PermissionDenied #return redirect('login') else: if 'permissions' not in request.session: print("Setting universal permission flag - this should only occur in dev or from FireRoad-internal login.") request.session['permissions'] = APIClient.universal_permission_flag() return view(request, *args, **kwargs)
10,486
def evaluate_srl_1step(find_preds_automatically=False, gold_file=None): """ Evaluates the network on the SRL task performed with one step for id + class. """ md = Metadata.load_from_file('srl') nn = taggers.load_network(md) r = taggers.create_reader(md, gold_file=gold_file) itd = r.get_inverse_tag_dictionary() if find_preds_automatically: tagger = taggers.SRLTagger() else: iter_predicates = iter(r.predicates) for sent in iter(r.sentences): # the other elements in the list are the tags for each proposition actual_sent = sent[0] if find_preds_automatically: pred_positions = tagger.find_predicates(sent) else: pred_positions = iter_predicates.next() verbs = [(position, actual_sent[position].word) for position in pred_positions] sent_codified = np.array([r.converter.convert(token) for token in actual_sent]) answers = nn.tag_sentence(sent_codified, pred_positions) tags = [convert_iob_to_iobes([itd[x] for x in pred_answer]) for pred_answer in answers] print(prop_conll(verbs, tags, len(actual_sent)))
10,487
def ring_bells(): """Rings the school bells in a pattern for the given schedule/time.""" # Need to get the pattern for this time slot and apply it. curTime = time.strftime("%H:%M") if curTime not in jsonConfig["schedules"][curSchedule]: logging.error("Couldn't find time record for time " + curTime + " in schedule " + curSchedule) return # Obtain the pattern to use. pattern = jsonConfig["schedules"][curSchedule][curTime] if pattern not in jsonConfig["patterns"]: logging.error("Could not find pattern '" + pattern + "'.") return # Play the pattern. logging.debug("Playing bell: " + pattern) bellRings = jsonConfig["patterns"][pattern]["rings"] bellDuration = jsonConfig["patterns"][pattern]["duration"] bellSpacing = jsonConfig["patterns"][pattern]["spacing"] for _ in range(bellRings): power_bells(True) time.sleep(bellDuration) power_bells(False) time.sleep(bellSpacing)
10,488
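# A hypothetical jsonConfig layout that ring_bells above appears to expect
# (field names inferred from the lookups in the function, not from the source):
# schedules map a schedule name to {"HH:MM": pattern-name}, and each pattern
# defines how many rings to play, how long each ring lasts, and the gap between rings.
jsonConfig = {
    "schedules": {
        "regular": {"08:00": "long", "12:00": "short"},
    },
    "patterns": {
        "long":  {"rings": 3, "duration": 2.0, "spacing": 1.0},
        "short": {"rings": 1, "duration": 1.5, "spacing": 0.0},
    },
}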
def _subtract_background_one_line(data_line, e_off, e_lin, e_quad, width):
    """
    Subtract background from spectra in a single line of the image

    Parameters
    ----------
    data_line : ndarray
        spectra for one line of an image, size NxM, N-the number of pixels in the line,
        M - the number of points in one spectrum (typically 4096)
    e_off : float
        offset - coefficient for polynomial approximation of energy axis
    e_lin : float
        linear coefficient of polynomial approximation of energy axis
    e_quad : float
        quadratic coefficient of polynomial approximation of energy axis
    width : float
        parameter of snip algorithm for background estimation

    Returns
    -------
    ndarray of the same shape as data_line. Contains spectra with subtracted background.
    """
    data_line = np.copy(data_line)
    xx, _ = data_line.shape
    for n in range(xx):
        bg = snip_method(data_line[n, :],
                         e_off=e_off,
                         e_lin=e_lin,
                         e_quad=e_quad,
                         width=width)
        data_line[n, :] -= bg
    return data_line
10,489
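# Illustrative call of _subtract_background_one_line above on synthetic data,
# assuming snip_method (e.g. from scikit-beam) is importable in this module;
# the calibration coefficients below are placeholder values.
import numpy as np

line = np.random.rand(10, 4096)  # 10 pixels, 4096-point spectra
cleaned = _subtract_background_one_line(line, e_off=0.0, e_lin=0.01, e_quad=0.0, width=0.5)
assert cleaned.shape == line.shape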
def extract_module(fo, modname): """Extract a single modules test plan documents.""" mod = __import__(modname) mod_doc(fo, mod)
10,490
def clean_principals_output(sql_result, username, shell=False): """ Transform sql principals into readable one """ if not sql_result: if shell: return username return [username] if shell: return sql_result return sql_result.split(',')
10,491
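# Minimal usage sketch for clean_principals_output above: an empty SQL result
# falls back to the requesting username, otherwise the comma-separated
# principals string is split into a list (or returned raw when shell=True).
print(clean_principals_output("", "alice"))                       # -> ['alice']
print(clean_principals_output("alice,bob", "alice"))              # -> ['alice', 'bob']
print(clean_principals_output("alice,bob", "alice", shell=True))  # -> 'alice,bob'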
def update_globals(column_dict: Dict[str, Tuple[Hashable, Hashable]]) -> None: """Update the column names stored in the global variable ``_GLOBVAR``. Parameters ---------- column_dict: :class:`dict` [:class:`str`, :class:`tuple` [:class:`Hashable`, :class:`Hashable`]] A dictionary which maps column names, present in ``_GLOBVAR``, to new values. Tuples, consisting of two hashables, are expected as values (*e.g.* ``("info", "new_name")``). The following keys (and default values) are available in ``_GLOBVAR``: ===================== ============================== Key Value ===================== ============================== ``"ACTIVE"`` ``("info", "active")`` ``"NAME"`` ``("info", "name")`` ``"PI"`` ``("info", "PI")`` ``"PROJECT"`` ``("info", "project")`` ``"SBU_REQUESTED"`` ``("info", "SBU requested")`` ``"TMP"`` ``("info", "tmp")`` ===================== ============================== Raises ------ TypeError Raised if a value in **column_dict** does not consist of a tuple of hashables. ValueError Raised if the length of a value in **column_dict** is not equal to ``2``. """ for k, v in column_dict.items(): name = v.__class__.__name__ if not isinstance(v, tuple): raise TypeError(f"Invalid type: '{name}'. " "A 'tuple' consisting of two hashables was expected.") elif len(v) != 2: raise ValueError(f"Invalid tuple length: '{len(v):d}'. '2' hashables were expected.") elif not isinstance(v[0], Hashable) or not isinstance(v[1], Hashable): raise TypeError(f"Invalid type: '{name}'. A hashable was expected.") for k, v in column_dict.items(): _GLOBVAR[k] = v _populate_globals()
10,492
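# Hedged usage sketch for update_globals above: remap the "NAME" column to a
# new two-level key. The key/value shown here are purely illustrative.
update_globals({"NAME": ("info", "display name")})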
def table(ctx): """CRM configuration for ACL table resource""" ctx.obj["crm"].res_type = 'acl_table'
10,493
def test_versions(gen3_index): """ Test creation of a record and a new version of it index.py functions tested: create_record create_new_version get_versions get_latest_version """ # put a new record in the index newrec = gen3_index.create_record( acl=["prog1", "proj1"], hashes={"md5": "437283456782738abcfe387492837483"}, size=0, version="1", ) # update the record newversion = gen3_index.create_new_version( newrec["did"], acl=["prog1", "proj1"], hashes={"md5": "437283456782738abcfe387492837483"}, size=1, version="2", ) newrec = get_rec(gen3_index, newrec["did"]) newversion = get_rec(gen3_index, newversion["did"]) assert newrec["did"] != newversion["did"] assert newrec["baseid"] == newversion["baseid"] # These functions do not recognize the records for some reason! versions = gen3_index.get_versions(newversion["did"]) latest_version = gen3_index.get_latest_version(newrec["did"], "false") assert versions[0]["did"] == newrec["did"] assert versions[1]["did"] == newversion["did"] assert latest_version["did"] == newversion["did"] assert latest_version["version"] == "2" drec = gen3_index.delete_record(newrec["did"]) assert drec._deleted drec = gen3_index.delete_record(newversion["did"]) assert drec._deleted
10,494
def matchesType(value, expected): """ Returns boolean for whether the given value matches the given type. Supports all basic JSON supported value types: primitive, integer/int, float, number/num, string/str, boolean/bool, dict/map, array/list, ... """ result = type(value) expected = expected.lower() if result is int: return expected in ("integer", "number", "int", "num", "primitive") elif result is float: return expected in ("float", "number", "num", "primitive") elif result is str: return expected in ("string", "str", "primitive") elif result is bool: return expected in ("boolean", "bool", "primitive") elif result is dict: return expected in ("dict", "map") elif result is list: return expected in ("array", "list") return False
10,495
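# A few illustrative checks for matchesType above; note that bool is matched
# before int, so True/False never count as integers here.
assert matchesType(3, "integer")
assert matchesType(3.0, "number")
assert not matchesType(True, "int")
assert matchesType({"a": 1}, "map")
assert matchesType("hi", "primitive")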
def __find_surplus_locks_and_remove_them(datasetfiles, locks, replicas, source_replicas, rseselector, rule, source_rses, session=None, logger=logging.log):
    """
    Find surplus locks for a rule and delete them.

    :param datasetfiles:         Dict holding all datasets and files.
    :param locks:                Dict holding locks.
    :param replicas:             Dict holding replicas.
    :param source_replicas:      Dict holding all source replicas.
    :param rseselector:          The RSESelector to be used.
    :param rule:                 The rule.
    :param source_rses:          RSE ids for eligible source RSEs.
    :param session:              Session of the db.
    :param logger:               Optional decorated logger that can be passed from the calling daemons or servers.
    :raises:                     InsufficientAccountLimit, IntegrityError, InsufficientTargetRSEs
    :attention:                  This method modifies the contents of the locks and replicas input parameters.
    """
    logger(logging.DEBUG, "Finding surplus locks for rule %s [%d/%d/%d]", str(rule.id), rule.locks_ok_cnt, rule.locks_replicating_cnt, rule.locks_stuck_cnt)

    account_counter_decreases = {}  # {'rse_id': [file_size, file_size, file_size]}

    # Put all the files in one dictionary
    files = {}
    for ds in datasetfiles:
        for file in ds['files']:
            files[(file['scope'], file['name'])] = True

    for key in locks:
        if key not in files:
            # The lock needs to be removed
            for lock in deepcopy(locks[key]):
                if lock.rule_id == rule.id:
                    __delete_lock_and_update_replica(lock=lock, purge_replicas=rule.purge_replicas, nowait=True, session=session)
                    if lock.rse_id not in account_counter_decreases:
                        account_counter_decreases[lock.rse_id] = []
                    account_counter_decreases[lock.rse_id].append(lock.bytes)
                    if lock.state == LockState.OK:
                        rule.locks_ok_cnt -= 1
                    elif lock.state == LockState.REPLICATING:
                        rule.locks_replicating_cnt -= 1
                    elif lock.state == LockState.STUCK:
                        rule.locks_stuck_cnt -= 1
                    locks[key].remove(lock)

    logger(logging.DEBUG, "Finished finding surplus locks for rule %s [%d/%d/%d]", str(rule.id), rule.locks_ok_cnt, rule.locks_replicating_cnt, rule.locks_stuck_cnt)
10,496
def csv_logging(record): """generate output in csv format""" csv_record = ('{ts},{si},{di},{sp},{dp},{t},"{p}",{h},{v},"{ha}",' '"{k}","{e}","{m}","{c}"') if 'hassh' in record: hasshType = 'client' kexAlgs = record['ckex'] encAlgs = record['ceacts'] macAlgs = record['cmacts'] cmpAlgs = record['ccacts'] hassh = record['hassh'] hasshAlgorithms = record['hasshAlgorithms'] identificationString = record['client'] elif 'hasshServer' in record: hasshType = 'server' kexAlgs = record['skex'] encAlgs = record['seastc'] macAlgs = record['smastc'] cmpAlgs = record['scastc'] hassh = record['hasshServer'] hasshAlgorithms = record['hasshServerAlgorithms'] identificationString = record['server'] csv_record = csv_record.format( ts=record['timestamp'], si=record['sourceIp'], di=record['destinationIp'], sp=record['sourcePort'], dp=record['destinationPort'], t=hasshType, p=identificationString, h=hassh, v=HASSH_VERSION, ha=hasshAlgorithms, k=kexAlgs, e=encAlgs, m=macAlgs, c=cmpAlgs) return csv_record
10,497
def harvest_outfile_pass(outtext): """Function to read NWChem output file *outtext* and parse important quantum chemical information from it in """ psivar = PreservingDict() psivar_coord = None psivar_grad = None version = "" module = None error = "" # TODO (wardlt): The error string is never used. NUMBER = r"(?x:" + regex.NUMBER + ")" # Process version mobj = re.search( r"^\s+" + r"Northwest Computational Chemistry Package (NWChem)" + r"\s+" + r"(?:<version>\d+.\d+)" + r"\s*$", outtext, re.MULTILINE, ) if mobj: logger.debug("matched version") version = mobj.group("version") # Process SCF # 1)Fail to converge mobj = re.search(r"^\s+" + r"(?:Calculation failed to converge)" + r"\s*$", outtext, re.MULTILINE) if mobj: logger.debug("failed to converge") # 2)Calculation converged else: mobj = re.search(r"^\s+" + r"(?:Total SCF energy)" + r"\s+=\s*" + NUMBER + r"s*$", outtext, re.MULTILINE) if mobj: logger.debug("matched HF") psivar["HF TOTAL ENERGY"] = mobj.group(1) # Process Effective nuclear repulsion energy (a.u.) mobj = re.search( r"^\s+" + r"Effective nuclear repulsion energy \(a\.u\.\)" + r"\s+" + NUMBER + r"\s*$", outtext, re.MULTILINE, ) if mobj: logger.debug("matched NRE") # logger.debug (mobj.group(1)) psivar["NUCLEAR REPULSION ENERGY"] = mobj.group(1) # Process DFT dispersion energy (a.u.) mobj = re.search(r"^\s+" + r"(?:Dispersion correction)" + r"\s+=\s*" + NUMBER + r"\s*$", outtext, re.MULTILINE) if mobj: logger.debug("matched Dispersion") logger.debug(mobj.group(1)) psivar["DISPERSION CORRECTION ENERGY"] = mobj.group(1) # Process DFT (RDFT, RODFT,UDFT, SODFT [SODFT for nwchem versions before nwchem 6.8]) mobj = re.search(r"^\s+" + r"(?:Total DFT energy)" + r"\s+=\s*" + NUMBER + r"\s*$", outtext, re.MULTILINE) if mobj: logger.debug("matched DFT") logger.debug(mobj.group(1)) psivar["DFT TOTAL ENERGY"] = mobj.group(1) # SODFT [for nwchem 6.8+] mobj = re.search( # fmt: off r'^\s+' + r'Total SO-DFT energy' + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'Nuclear repulsion energy' + r'\s+' + NUMBER + r'\s*$', # fmt: on outtext, re.MULTILINE, ) if mobj: logger.debug("matched DFT") # print (mobj.group(1)) psivar["DFT TOTAL ENERGY"] = mobj.group(1) psivar["NUCLEAR REPULSION ENERGY"] = mobj.group(2) # MCSCF mobj = re.search( # fmt: off r'^\s+' + r'Total MCSCF energy' + r'\s+=\s+' + NUMBER + r'\s*$', # fmt: off outtext, re.MULTILINE | re.DOTALL, ) if mobj: logger.debug("matched mcscf 2") # MCSCF energy calculation psivar["MCSCF TOTAL ENERGY"] = mobj.group(1) mobj = re.findall( # fmt: off r'^\s+' + r'Total SCF energy' + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'One-electron energy' + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'Two-electron energy' + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'Total MCSCF energy' + r'\s+' + NUMBER + r'\s*$', # fmt: on outtext, re.MULTILINE | re.DOTALL, ) # for mobj_list in mobj: if mobj: # Need to change to accommodate find all instances logger.debug("matched mcscf") # MCSCF energy calculation psivar["HF TOTAL ENERGY"] = mobj.group(1) psivar["ONE-ELECTRON ENERGY"] = mobj.group(2) psivar["TWO-ELECTRON ENERGY"] = mobj.group(3) psivar["MCSCF TOTAL ENERGY"] = mobj.group(4) # for mobj_list in mobj: # for i in mobj_list: # count += 0 # logger.debug('matched mcscf iteration %i', count) # psivar['HF TOTAL ENERGY'] = mobj.group(1) # psivar['ONE-ELECTRON ENERGY'] = mobj.group(2) # psivar['TWO-ELECTRON ENERGY'] = mobj.group(3) # psivar['MCSCF TOTAL ENERGY'] = mobj.group(4) # Process MP2 (Restricted, Unrestricted(RO n/a)) # 1)SCF-MP2 mobj = re.search( # fmt: off r'^\s+' + r'SCF energy' + 
r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'Correlation energy' + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'Singlet pairs' + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'Triplet pairs' + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'Total MP2 energy' + r'\s+' + NUMBER + r'\s*$', # fmt: on outtext, re.MULTILINE, ) # MP2 if mobj: logger.debug("matched scf-mp2") module = "mp2grad" psivar["HF TOTAL ENERGY"] = mobj.group(1) psivar["MP2 CORRELATION ENERGY"] = mobj.group(2) psivar["MP2 TOTAL ENERGY"] = mobj.group(5) # SCS-MP2 mobj = re.search( # fmt: off r'^\s+' + r'Same spin pairs' + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'Same spin scaling factor' + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'Opposite spin pairs' + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'Opposite spin scaling fact.' + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'SCS-MP2 correlation energy' + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'Total SCS-MP2 energy' + r'\s+' + NUMBER + r'\s*$', # fmt: on outtext, re.MULTILINE, ) if mobj: logger.debug("matched scs-mp2", mobj.groups()) psivar["MP2 SAME-SPIN CORRELATION ENERGY"] = mobj.group(1) psivar["MP2 OPPOSITE-SPIN CORRELATION ENERGY"] = mobj.group(3) logger.debug(mobj.group(1)) # ess logger.debug(mobj.group(2)) # fss logger.debug(mobj.group(3)) # eos logger.debug(mobj.group(4)) # fos logger.debug(mobj.group(5)) # scs corl logger.debug(mobj.group(6)) # scs-mp2 # 2) DFT-MP2 mobj = re.search( # fmt: off r'^\s+' + r'DFT energy' + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'Unscaled MP2 energy' + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'Total DFT+MP2 energy' + r'\s+' + NUMBER + r'\s*$', # fmt: on outtext, re.MULTILINE, ) if mobj: logger.debug("matched dft-mp2") psivar["DFT TOTAL ENERGY"] = mobj.group(1) psivar["MP2 CORRELATION ENERGY"] = mobj.group(2) psivar["MP2 TOTAL ENERGY"] = mobj.group(3) # 3) MP2 with CCSD or CCSD(T) calculation (through CCSD(T) directive) mobj = re.search( # fmt: off r'^\s+' + r'MP2 Energy \(coupled cluster initial guess\)' + r'\s*' + r'^\s+' + r'------------------------------------------' + r'\s*' + r'^\s+' + r'Reference energy:' + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'MP2 Corr\. 
energy:' + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'Total MP2 energy:' + r'\s+' + NUMBER + r'\s*$', # fmt: on outtext, re.MULTILINE, ) if mobj: logger.debug("matched coupled cluster-mp2") psivar["MP2 CORRELATION ENERGY"] = mobj.group(2) psivar["MP2 TOTAL ENERGY"] = mobj.group(3) mobj3 = re.search(r"Final RHF results", outtext) if mobj3: psivar["MP2 DOUBLES ENERGY"] = mobj.group(2) # 4) Direct MP2 mobj = re.search( # fmt: off r'^\s+' + r'SCF energy' + r'\s+' + r"(?P<hf>" + NUMBER + r")" + r'\s*' + r'^\s+' + r'Correlation energy' + r'\s+' + r"(?P<mp2corl>" + NUMBER + r")" + r'\s*' + r'^\s+' + r'Total MP2 energy' + r'\s+' + r"(?P<mp2>" + NUMBER + r")" + r'\s*$', # fmt: on outtext, re.MULTILINE, ) mobj2 = re.search(r"Direct MP2", outtext) if mobj and mobj2: logger.debug("matched direct-mp2") module = "directmp2" psivar["HF TOTAL ENERGY"] = mobj.group("hf") psivar["MP2 CORRELATION ENERGY"] = mobj.group("mp2corl") psivar["MP2 TOTAL ENERGY"] = mobj.group("mp2") # direct-mp2 is RHF only psivar["MP2 DOUBLES ENERGY"] = mobj.group("mp2corl") # 5) RI-MP2 # Process calculation through tce [dertype] command tce_cumm_corl = 0.0 for cc_name in [r"MBPT\(2\)", r"MBPT\(3\)", r"MBPT\(4\)"]: mobj = re.search( # fmt: off r'^\s+' + cc_name + r'\s+' + r'correlation energy / hartree' + r'\s+=\s*' + NUMBER + r'\s*' + r'^\s+' + cc_name + r'\s+' + r'total energy / hartree' + r'\s+=\s*' + NUMBER + r'\s*$', # fmt: on outtext, re.MULTILINE, ) mobj3 = re.search(r"Wavefunction type : Restricted open-shell Hartree-Fock", outtext, re.MULTILINE) if mobj: mbpt_plain = cc_name.replace("\\", "").replace("MBPT", "MP").replace("(", "").replace(")", "") logger.debug(f"matched tce mbpt {mbpt_plain}", mobj.groups()) tce_cumm_corl += float(mobj.group(1)) if mbpt_plain == "MP2": mobj3 = re.search(r"Wavefunction type : Restricted open-shell Hartree-Fock", outtext, re.MULTILINE) if mobj3: psivar[f"{mbpt_plain} DOUBLES ENERGY"] = mobj.group(1) psivar[f"CURRENT CORRELATION ENERGY"] = mobj.group(1) psivar[f"CURRENT ENERGY"] = Decimal(mobj.group(1)) + psivar[f"HF TOTAL ENERGY"] else: psivar[f"{mbpt_plain} DOUBLES ENERGY"] = mobj.group(1) psivar[f"{mbpt_plain} CORRELATION ENERGY"] = mobj.group(1) else: psivar[f"{mbpt_plain} CORRECTION ENERGY"] = mobj.group(1) if not mobj3 and mbpt_plain not in ["MP4"]: psivar[f"{mbpt_plain} DOUBLES ENERGY"] = tce_cumm_corl psivar[f"{mbpt_plain} TOTAL ENERGY"] = mobj.group(2) module = "tce" # TCE dipole- MBPT(n) mobj2 = re.search( # fmt: off r'^\s+' + r'dipole moments / hartree & Debye' + r'\s*' + r'^\s+' + r'X' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'Y' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'Z' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'Total' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$', # fmt: on outtext, re.MULTILINE, ) if mobj2: mbpt_plain = cc_name.replace("\\", "").replace("MBPT", "MP").replace("(", "").replace(")", "") logger.debug(f"matched tce {mbpt_plain} dipole moment") # only pulling Debye psivar[f"{mbpt_plain} DIPOLE"] = np.array([mobj2.group(1), mobj2.group(3), mobj2.group(5)]) # TCE with () or [] for cc_name in [ r"CCSD\(T\)", r"CCSD\[T\]", r"CCSD\(2\)_T", r"CCSD\(2\)", r"CCSDT\(2\)_Q", r"CR-CCSD\[T\]", r"CR-CCSD\(T\)", r"LR-CCSD\(T\)", r"LR-CCSD\(TQ\)-1", r"CREOMSD\(T\)", ]: mobj = re.search( # fmt: off r'^\s+' + cc_name + r'\s+' + r'correction energy / hartree' + r'\s+=\s*' + NUMBER + r'\s*' + r'^\s+' + cc_name + r'\s+' + r'correlation energy / hartree' + r'\s+=\s*' + NUMBER + r'\s*' + r'^\s+' + cc_name + r'\s+' + r'total energy / 
hartree' + r'\s+=\s*' + NUMBER + r'\s*$', # fmt: on outtext, re.MULTILINE, ) if mobj: cc_plain = cc_name.replace("\\", "") cc_corr = cc_plain.replace("CCSD", "") logger.debug(f"matched tce cc {cc_plain}") if cc_plain == "CCSD[T]": psivar[f"CCSD+T(CCSD) CORRELATION ENERGY"] = mobj.group(2) psivar[f"CCSD+T(CCSD) TOTAL ENERGY"] = mobj.group(3) else: # psivar[f"{cc_corr} CORRECTION ENERGY"] = mobj.group(1) psivar[f"{cc_plain} CORRELATION ENERGY"] = mobj.group(2) psivar[f"{cc_plain} TOTAL ENERGY"] = mobj.group(3) module = "tce" # TCE dipole with () or [] mobj2 = re.search( # fmt: off r'^\s+' + cc_name + r'dipole moments / hartree & Debye' + r'\s*' + r'^\s+' + r'X' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'Y' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'Z' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'Total' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$', # fmt: on outtext, re.MULTILINE, ) if mobj2: cc_plain = cc_name.replace("\\", "") cc_corr = cc_plain.replace("CCSD", "") logger.debug(f"matched tce {cc_plain} dipole moment") # only pulling Debye psivar[f"{cc_plain} DIPOLE"] = np.array([mobj2.group(1), mobj2.group(3), mobj2.group(5)]) # Process other TCE cases for cc_name in [ r"CISD", r"QCISD", r"CISDT", r"CISDTQ", r"CCD", r"CC2", r"CCSD", r"CCSDT", r"CCSDTQ", r"LCCSD", r"LCCD", r"CCSDTA", ]: mobj = re.search( # fmt: off r'^\s+' + r'Iterations converged' + r'\s*' + r'^\s+' + cc_name + r'\s+' + r'correlation energy / hartree' + r'\s+=\s*' + NUMBER + r'\s*' + r'^\s+' + cc_name + r'\s+' + r'total energy / hartree' + r'\s+=\s*' + NUMBER + r'\s*$', # fmt: on outtext, re.MULTILINE, ) if mobj: mobj3 = re.search(r"Wavefunction type : Restricted open-shell Hartree-Fock", outtext, re.MULTILINE) logger.debug(f"matched {cc_name}", mobj.groups()) if mobj3: pass else: psivar[f"{cc_name} DOUBLES ENERGY"] = mobj.group(1) psivar[f"{cc_name} CORRELATION ENERGY"] = mobj.group(1) psivar[f"{cc_name} TOTAL ENERGY"] = mobj.group(2) module = "tce" # TCE dipole mobj2 = re.search( # fmt: off r'^\s+' + r'dipole moments / hartree & Debye' + r'\s*' + r'^\s+' + r'X' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'Y' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'Z' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'Total' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$', # fmt: on outtext, re.MULTILINE, ) if mobj2: logger.debug(f"matched tce dipole moment") # only pulling Debye psivar[f"CURRENT DIPOLE"] = np.array([mobj2.group(1), mobj2.group(3), mobj2.group(5)]) # Process CCSD/CCSD(T) using nwchem CCSD/CCSD(T) [dertype] command mobj = re.search( # fmt: off r'^\s+' + r'-----------' + r'\s*' + r'^\s+' + r'CCSD Energy' + r'\s*' + r'^\s+' + r'-----------' + r'\s*' + r'^\s+' + r'Reference energy:' + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'CCSD corr\. energy:' + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'Total CCSD energy:' + r'\s+' + NUMBER + r'\s*$', # fmt: on outtext, re.MULTILINE | re.DOTALL, ) if mobj: logger.debug("matched ccsd") psivar["CCSD CORRELATION ENERGY"] = mobj.group(2) psivar["CCSD TOTAL ENERGY"] = mobj.group(3) module = "cc" mobj = re.search( # fmt: off r'^\s+' + r'T\(CCSD\) corr\. 
energy:' + r'\s+' + r"(?P<tccsdcorr>" + NUMBER + r")" + r'\s*' + r'^\s+' + r'Total CCSD\+T\(CCSD\) energy:' + r'\s+' + r"(?P<tccsdtot>" + NUMBER + r")" + r'\s*$', # fmt: on outtext, re.MULTILINE | re.DOTALL, ) if mobj: logger.debug("matched ccsd+t(ccsd)") psivar["T(CCSD) CORRECTION ENERGY"] = mobj.group("tccsdcorr") psivar["CCSD+T(CCSD) CORRELATION ENERGY"] = Decimal(mobj.group("tccsdtot")) - psivar["HF TOTAL ENERGY"] psivar["CCSD+T(CCSD) TOTAL ENERGY"] = mobj.group("tccsdtot") module = "cc" mobj = re.search( # fmt: off r'^\s+' + r'--------------' + r'\s*' + r'^\s+' + r'CCSD\(T\) Energy' + r'\s*' + r'^\s+' + r'--------------' + r'\s*' + r'(?:.*?)' + r'^\s+' + r'\(T\) corr\. energy:' + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'Total CCSD\(T\) energy:' + r'\s+' + NUMBER + r'\s*$', # fmt: on outtext, re.MULTILINE | re.DOTALL, ) if mobj: logger.debug("matched ccsd(t)") psivar["(T) CORRECTION ENERGY"] = mobj.group(1) psivar["CCSD(T) CORRELATION ENERGY"] = Decimal(mobj.group(2)) - psivar["HF TOTAL ENERGY"] psivar["CCSD(T) TOTAL ENERGY"] = mobj.group(2) module = "cc" mobj = re.search( # fmt: off r'^\s+' + r'Spin Component Scaled \(SCS\) CCSD' + r'\s*' + r'^\s+' + r'-*' + r'\s*' + r'^\s+' + r'Same spin contribution:' + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'Same spin scaling factor:' + r'\s+' + NUMBER + r'\s*' r'^\s+' + r'Opposite spin contribution:' + r'\s+' + NUMBER + r'\s*' + #r'^\s+' + r'Opposite spin scaling factor' + r'\s+' + NUMBER + r'\s*' r'^\s+' + r'Opposite spin scaling fact.:' + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'SCS-CCSD correlation energy:' + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'Total SCS-CCSD energy:' + r'\s+' + NUMBER + r'\s*$', # fmt: on outtext, re.MULTILINE | re.DOTALL, ) # SCS-CCSD included if mobj: logger.debug("matched scs-ccsd", mobj.groups()) psivar["CCSD SAME-SPIN CORRELATION ENERGY"] = mobj.group(1) psivar["CCSD OPPOSITE-SPIN CORRELATION ENERGY"] = mobj.group(3) # psivar['CCSD SAME-SPIN CORRELATION ENERGY'] = psivar['SCS-CCSD SAME-SPIN CORRELATION ENERGY'] = ( # Decimal(mobj.group(1)) * Decimal(mobj.group(2))) # psivar['CCSD OPPOSITE-SPIN CORRELATION ENERGY'] = psivar['SCS-CCSD OPPOSITE-SPIN CORRELATION ENERGY'] = ( # Decimal(mobj.group(4)) * Decimal(mobj.group(3))) # psivar['SCS-CCSD CORRELATION ENERGY'] = mobj.group(5) # psivar['SCS-CCSD TOTAL ENERGY'] = mobj.group(6) # psivar['CUSTOM SCS-CCSD CORRELATION ENERGY'] = 0.5 * (float( # psivar['CCSD SAME-SPIN CORRELATION ENERGY']) + float(psivar['CCSD OPPOSITE-SPIN CORRELATION ENERGY'])) # psivar['CUSTOM SCS-CCSD TOTAL ENERGY'] = float(mobj.group(6)) + float( # psivar['CUSTOM SCS-CCSD CORRERLATION ENERGY']) # Process EOM-[cc_name] #nwchem_tce_dipole = false # Parsed information: each symmetry, root excitation energy in eV and total energy in hartree # psivar name might need to be fixed # each root excitation energy is extracted from the last iteration of right hand side mobj = re.findall( # fmt: off r'^\s+(?:Excited-state calculation \( )(.*)\s+(?:symmetry\))\s+(?:.*\n)*^\s+EOM-' + cc_name + # (..) 
captures symmetry r'right-hand side iterations\s+(?:.*\n)*(?:Excited state root)\s+' + NUMBER + #root r'\s*(?:Excitation energy / hartree)\s+.\s+' + NUMBER + #excitation energy hartree r'\s*(?:/ eV)\s+.\s+' + NUMBER + r'\s*$', # excitation energy eV # fmt: on outtext, re.MULTILINE | re.DOTALL, ) # regex should be more dynamic in finding values, need to revisit # mobj.group(0) = symmetry value # mobj.group(1) = cc_name # mobj.group(2) = root number # mobj.group(3) = excitation energy (hartree) # mobj.group(4) = excitation energy (eV) if mobj: logger.debug(mobj) ext_energy = {} # dic ext_energy_list = [] logger.debug(f"matched eom-{cc_name}") for mobj_list in mobj: logger.debug("matched EOM-%s - %s symmetry" % (cc_name, mobj_list[0])) # cc_name, symmetry logger.debug(mobj_list) count = 0 for line in mobj_list[1].splitlines(): lline = line.split() logger.debug(lline[1]) # in hartree logger.debug(lline[2]) # in eV count += 1 logger.debug("matched excitation energy #%d - %s symmetry" % (count, mobj_list[0])) ext_energy_list.append(lline[1]) # Collect all excitation energies sym = str(mobj_list[0]) ext_energy.setdefault(sym, []) ext_energy[sym].append(lline[1]) # Dictionary: symmetries(key), energies(value) ext_energy_list.sort(key=float) for nroot in range(len(ext_energy_list)): for k, e_val in ext_energy.items(): if ext_energy_list[nroot] in e_val: symm = k # in hartree psivar[ f"EOM-{cc_name} ROOT 0 -> ROOT {nroot + 1} EXCITATION ENERGY - {symm} SYMMETRY" ] = ext_energy_list[nroot] psivar[f"EOM-{cc_name} ROOT 0 -> ROOT {nroot + 1} TOTAL ENERGY - {symm} SYMMETRY"] = psivar[ f"{cc_name} TOTAL ENERGY" ] + Decimal(ext_energy_list[nroot]) gssym = "" gs = re.search(r"^\s+" + r"Ground-state symmetry is" + gssym + r"\s*$", outtext, re.MULTILINE) if gs: logger.debug("matched ground-state symmetry") psivar["GROUND-STATE SYMMETRY"] = gssym.group(1) # Process TDDFT # 1) Spin allowed mobj = re.findall( # fmt: off r'^\s+(?:Root)\s+(\d+)\s+(.*?)\s+' + NUMBER + r'\s+(?:a\.u\.)\s+' + NUMBER + r"\s+eV\s*" + r"^\s+" + r"<S2>\s+=\s+" + NUMBER + r"\s*" #Root | symmetry | a.u. | eV # unkn units for dip/quad + r'\s+(?:.*\n)\s+Transition Moments\s+X\s+'+ NUMBER + r'\s+Y\s+'+ NUMBER+ r'\s+Z\s+'+ NUMBER #dipole + r'\s+Transition Moments\s+XX\s+'+ NUMBER + r'\s+XY\s+'+ NUMBER+ r'\s+XZ\s+'+ NUMBER #quadrople + r'\s+Transition Moments\s+YY\s+'+ NUMBER + r'\s+YZ\s+'+ NUMBER+ r'\s+ZZ\s+'+ NUMBER #quadrople + r"\s+" + r"Dipole Oscillator Strength" + r"\s+" + NUMBER + r"\s*$", # fmt: on outtext, re.MULTILINE, ) if mobj: logger.debug("matched TDDFT with transition moments") for mobj_list in mobj: logger.debug(mobj_list) iroot = mobj_list[0] sym = mobj_list[1] # in eV psivar[f"TDDFT ROOT {iroot} EXCITATION ENERGY - {sym} SYMMETRY"] = mobj_list[2] psivar[f"TDDFT ROOT {iroot} EXCITED STATE ENERGY - {sym} SYMMETRY"] = psivar[ "DFT TOTAL ENERGY" ] + Decimal(mobj_list[2]) psivar[f"TDDFT ROOT 0 -> ROOT {iroot} DIPOLE"] = [ float(mobj_list[5]), float(mobj_list[6]), float(mobj_list[7]), ] psivar[f"TDDFT ROOT 0 -> ROOT {iroot} QUADRUPOLE"] = [ float(mobj_list[8]), float(mobj_list[9]), float(mobj_list[10]), float(mobj_list[9]), float(mobj_list[11]), float(mobj_list[12]), float(mobj_list[10]), float(mobj_list[12]), float(mobj_list[13]), ] psivar[f"TDDFT ROOT 0 -> ROOT {iroot} OSCILLATOR STRENGTH (LEN)"] = mobj_list[14] # 2) Spin forbidden mobj = re.findall( # fmt: off r'^\s+(?:Root)\s+(\d+)\s+(.*?)\s+' + NUMBER + r'\s(?:a\.u\.)\s+' + NUMBER + r'\s+(?:\w+)' # Root | symmetry | a.u. 
| eV + r'\s+(?:.\w+.\s+.\s+\d+.\d+)' # s2 value + r'\s+Transition Moments\s+(?:Spin forbidden)' + r'\s*$', # fmt: on outtext, re.MULTILINE, ) # mobj.group(0) = Root # mobj.group(1) = symmetry # mobj.group(2) a.u. # mobj.group(3) e.V # mobj.group(4) Excitation energy # mobj.group(5) Excited state energy if mobj: logger.debug("matched TDDFT - spin forbidden") for mobj_list in mobj: #### temporary psivars #### # in eV psivar[f"TDDFT ROOT {mobj_list[0]} EXCITATION ENERGY - {mobj_list[2]} SYMMETRY"] = mobj_list[4] psivar[f"TDDFT ROOT {mobj_list[0]} EXCITED STATE ENERGY - {mobj_list[2]} SYMMETRY"] = psivar[ "DFT TOTAL ENERGY" ] + qcel.constants.converstion_factor("eV", "hartree") * Decimal(mobj_list[4]) # psivar['TDDFT ROOT %s %s %s EXCITATION ENERGY' % # (mobj_list[0], mobj_list[1], mobj_list[2])] = mobj_list[3] # in a.u. # psivar['TDDFT ROOT %s %s %s EXCITED STATE ENERGY' %(mobj_list[0], mobj_list[1], mobj_list[2])] = \ # psivar['DFT TOTAL ENERGY'] + Decimal(mobj_list[3]) if mobj: logger.debug("Non-variation initial energy") # prints out energy, 5 counts # Process geometry # 1) CHARGE # Read charge from SCF module mobj = re.search( r"^\s+" + r"charge =" + r"\s+" + NUMBER + r"\s*$", outtext, re.MULTILINE | re.IGNORECASE ) if mobj: logger.debug("matched charge") out_charge = int(float(mobj.group(1))) # Read charge from General information (not scf module) mobj = re.search( r"^\s+" + r"Charge :" + r"\s+" + r"(-?\d+)" + r"\s*$", outtext, re.MULTILINE | re.IGNORECASE ) if mobj: logger.debug("matched charge") out_charge = int(float(mobj.group(1))) # 2) MULTIPLICITY # Read multiplicity from SCF module mobj = re.search( r"^\s+" + r"open shells =" + r"\s+" + r"(\d+)" + r"\s*$", outtext, re.MULTILINE | re.IGNORECASE ) calcinfo = False if mobj: logger.debug("matched multiplicity") out_mult = int(mobj.group(1)) + 1 # Read multiplicity from SCF module through alpha, beta electrons mobj = re.search( # fmt: off r'^\s+' + r'alpha electrons =' + r'\s+' + r'(\d+)' + r'\s*' + r'^\s+' + r'beta electrons =' + r'\s+' + r'(\d+)' + r'\s*$', # fmt: on outtext, re.MULTILINE | re.IGNORECASE, ) if mobj: logger.debug("matched multiplicity via alpha and beta electrons 0") out_mult = int(mobj.group(1)) - int(mobj.group(2)) + 1 # nopen + 1 psivar["N ALPHA ELECTRONS"] = mobj.group(1) psivar["N BETA ELECTRONS"] = mobj.group(2) mobj = re.search( # fmt: off r"^\s+" + r"Basis functions" + r"\s+=\s+" + r"(?P<nbf>\d+)" + r"\s*" + r"^\s+" + r"Molecular orbitals" + r"\s+=\s+" + r"(?P<nmo>\d+)" + r"\s*" + r"^\s+" + r"Frozen core" + r"\s+=\s+" + r"(?P<nfc>\d+)" + r"\s*" + r"^\s+" + r"Frozen virtuals" + r"\s+=\s+" + r"(?P<nfv>\d+)" + r"\s*" + r"^\s+" + r"Active alpha occupied" + r"\s+=\s+" + r"(?P<nao>\d+)" + r"\s*" + r"^\s+" + r"Active beta occupied" + r"\s+=\s+" + r"(?P<nbo>\d+)" + r"\s*" + r"^\s+" + r"Active alpha virtual" + r"\s+=\s+" + r"(?P<nav>\d+)" + r"\s*" + r"^\s+" + r"Active beta virtual" + r"\s+=\s+" + r"(?P<nbv>\d+)" + r"\s*", # fmt: on outtext, re.MULTILINE | re.IGNORECASE, ) if mobj: logger.debug("matched alpha and beta electrons 1", mobj.groups()) calcinfo = True psivar["N BASIS FUNCTIONS"] = mobj.group("nbf") psivar["N MOLECULAR ORBITALS"] = mobj.group("nmo") psivar["N ALPHA ELECTRONS"] = int(mobj.group("nao")) + int(mobj.group("nfc")) psivar["N BETA ELECTRONS"] = int(mobj.group("nbo")) + int(mobj.group("nfc")) mobj = re.search( # fmt: off r"^\s+" + "No. 
of electrons" + r"\s+:\s+" + r"(?P<ne>\d+)" + r"\s*" + r"^\s+" + "Alpha electrons" + r"\s+:\s+" + r"(?P<nae>\d+)" + r"\s*" + r"^\s+" + "Beta electrons" + r"\s+:\s+" + r"(?P<nbe>\d+)" + r"\s*" + r"^\s+" + "No. of orbitals" + r"\s+:\s+" + r"(?P<no>\d+)" + r"\s*" + r"^\s+" + "Alpha orbitals" + r"\s+:\s+" + r"(?P<namo>\d+)" + r"\s*" + r"^\s+" + "Beta orbitals" + r"\s+:\s+" + r"(?P<nbmo>\d+)" + r"\s*" + r"^\s+" + "Alpha frozen cores" + r"\s+:\s+" + r"(?P<nafc>\d+)" + r"\s*" + r"^\s+" + "Beta frozen cores" + r"\s+:\s+" + r"(?P<nbfc>\d+)" + r"\s*" + r"^\s+" + "Alpha frozen virtuals" + r"\s+:\s+" + r"(?P<nafv>\d+)" + r"\s*" + r"^\s+" + "Beta frozen virtuals" + r"\s+:\s+" + r"(?P<nbfv>\d+)" + r"\s*" + r"^\s+" + "Spin multiplicity" + r"\s+:\s+\w+" + r"\s*" + r"^\s+" + "Number of AO functions" + r"\s+:\s+" + r"(?P<nbf>\d+)" + r"\s*", # fmt: on outtext, re.MULTILINE | re.IGNORECASE, ) if mobj and not calcinfo: logger.debug("matched alpha and beta electrons 2", mobj.groups()) calcinfo = True psivar["N BASIS FUNCTIONS"] = mobj.group("nbf") psivar["N MOLECULAR ORBITALS"] = (int(mobj.group("namo")) + int(mobj.group("nbmo"))) / 2 psivar["N ALPHA ELECTRONS"] = mobj.group("nae") psivar["N BETA ELECTRONS"] = mobj.group("nbe") mobj = re.search( # fmt: off r"^\s+" + "functions" + r"\s+=\s+" + r"(?P<nbf>\d+)" + r"\s*" + r"^\s+" + "atoms" + r"\s+=\s+" + r"(?P<nat>\d+)" + r"\s*" + r"^\s+" + "alpha electrons" + r"\s+=\s+" + r"(?P<nae>\d+)" + r"\s*" + r"^\s+" + "beta electrons" + r"\s+=\s+" + r"(?P<nbe>\d+)" + r"\s*", # fmt: on outtext, re.MULTILINE | re.IGNORECASE, ) if mobj and not calcinfo: logger.debug("matched alpha and beta electrons 3", mobj.groups()) calcinfo = True psivar["N BASIS FUNCTIONS"] = mobj.group("nbf") psivar["N MOLECULAR ORBITALS"] = mobj.group("nbf") psivar["N ALPHA ELECTRONS"] = mobj.group("nae") psivar["N BETA ELECTRONS"] = mobj.group("nbe") mobj = re.search( # fmt: off r"^\s+" + "functions" + r"\s+=\s+" + r"(?P<nbf>\d+)" + r"\s*" + r"^\s+" + "atoms" + r"\s+=\s+" + r"(?P<nat>\d+)" + r"\s*" + r"^\s+" + "closed shells" + r"\s+=\s+" + r"(?P<ncl>\d+)" + r"\s*" + r"^\s+" + "open shells" + r"\s+=\s+" + r"(?P<nop>\d+)" + r"\s*", # fmt: on outtext, re.MULTILINE | re.IGNORECASE, ) if mobj and not calcinfo: logger.debug("matched alpha and beta electrons 4", mobj.groups()) calcinfo = True psivar["N BASIS FUNCTIONS"] = mobj.group("nbf") psivar["N MOLECULAR ORBITALS"] = mobj.group("nbf") # BAD! 
TODO psivar["N ALPHA ELECTRONS"] = int(mobj.group("ncl")) + int(mobj.group("nop")) psivar["N BETA ELECTRONS"] = mobj.group("ncl") # Read multiplicity from General information (not scf module) mobj = re.search( r"^\s+" + r"Spin multiplicity:" + r"\s+" + r"(\d+)" + r"\s*$", outtext, re.MULTILINE | re.IGNORECASE ) if mobj: logger.debug("matched multiplicity") out_mult = int(mobj.group(1)) # 3) Initial geometry mobj = re.search( # fmt: off r'^\s+' + r'Geometry' + r'.*' + r'\s*' + r'^\s+' + r'(?:-+)\s*' + r'\s+' + r'\n' + r'^\s' + r'Output coordinates in ' + r'(.*?)' + r'\s' + r'\(scale by' + r'.*' + r'\s' + r'to convert to a\.u\.\)' + r'\s+' + r'\n' + r'^\s+' + r'No\.\ Tag Charge X Y Z' + r'\s*' + r'^\s+' + r'---- ---------------- ---------- -------------- -------------- --------------' + r'\s*' + r'((?:\s+([1-9][0-9]*)+\s+([A-Z][a-z]*)+\s+\d+\.\d+\s+[-+]?\d+\.\d+\s+[-+]?\d+\.\d+\s+[-+]?\d+\.\d+\s*\n)+)' + r'\s*$', # fmt: on outtext, re.MULTILINE | re.IGNORECASE, ) if mobj: logger.debug("matched geom") # dinky molecule w/ charge and multiplicity if mobj.group(1) == "angstroms": molxyz = "%d \n%d %d tag\n" % (len(mobj.group(2).splitlines()), out_charge, out_mult) # unit = angstrom for line in mobj.group(2).splitlines(): lline = line.split() molxyz += "%s %16s %16s %16s\n" % (lline[-5], lline[-3], lline[-2], lline[-1]) # Jiyoung was collecting charge (-4)? see if this is ok for ghosts # Tag , X, Y, Z psivar_coord = Molecule( validate=False, **qcel.molparse.to_schema( qcel.molparse.from_string(molxyz, dtype="xyz+", fix_com=True, fix_orientation=True)["qm"], dtype=2, ), ) else: # unit = a.u. molxyz = "%d au\n%d %d tag\n" % (len(mobj.group(2).splitlines()), out_charge, out_mult) for line in mobj.group(2).splitlines(): lline = line.split() molxyz += "%s %16s %16s %16s\n" % (int(float(lline[-4])), lline[-3], lline[-2], lline[-1]) # Tag , X, Y, Z psivar_coord = Molecule( validate=False, **qcel.molparse.to_schema( qcel.molparse.from_string(molxyz, dtype="xyz+", fix_com=True, fix_orientation=True)["qm"], dtype=2, ), ) # Process gradient mobj = re.search( # fmt: off r'^\s+' + r'.*' + r'ENERGY GRADIENTS' + r'\s*' + r'\s+' + r'\n' + r'^\s+' + r'atom coordinates gradient' + r'\s*' + r'^\s+' + r'x y z x y z' + r'\s*' + r'((?:\s+([1-9][0-9]*)+\s+([A-Z][a-x]*)+\s+[-+]?\d+\.\d+\s+[-+]?\d+\.\d+\s+[-+]?\d+\.\d+\s+[-+]?\d+\.\d+\s+[-+]?\d+\.\d+\s+[-+]?\d+\.\d+\s*\n)+)' + r'\s*$', # fmt: on outtext, re.MULTILINE, ) if mobj: logger.debug("matched molgrad") atoms = [] psivar_grad = [] for line in mobj.group(1).splitlines(): lline = line.split() # Num, Tag, coord x, coord y, coord z, grad x, grad y, grad z # print (lline) if lline == []: pass else: atoms.append(lline[1]) # Tag psivar_grad.append([float(lline[-3]), float(lline[-2]), float(lline[-1])]) psivar_grad = np.array(psivar_grad).reshape((-1, 3)) # Process dipole (Properties) mobj = re.search( # fmt: off r'^\s+' + r'Dipole moment' + r'\s+' + NUMBER + r'\s+' + r'A\.U\.' + r'\s*' + r'^\s+' + r'DMX' + r'\s+' + NUMBER + r'.*' + r'^\s+' + r'DMY' + r'\s+' + NUMBER + r'.*' + r'^\s+' + r'DMZ' + r'\s+' + NUMBER + r'.*' + r'^\s+' + r'.*' + r'^\s+' + r'Total dipole' + r'\s+' + NUMBER + r'\s+' + r'A\.U\.' 
+ r'\s*' + r'^\s+' + r'Dipole moment' + r'\s+' + NUMBER + r'\s' + r'Debye\(s\)' + r'\s*' + r'^\s+' + r'DMX' + r'\s+' + NUMBER + r'.*' + r'^\s+' + r'DMY' + r'\s+' + NUMBER + r'.*' + r'^\s+' + r'DMZ' + r'\s+' + NUMBER + r'.*' + r'^\s+' + r'.*' + r'^\s+' + r'Total dipole' + r'\s+' + NUMBER + r'\s' + r'DEBYE\(S\)' + r'\s*$', # fmt: on outtext, re.MULTILINE, ) if mobj: logger.debug("matched total dipole") # UNIT = DEBYE(S) psivar[f"CURRENT DIPOLE"] = d2au * np.array([mobj.group(7), mobj.group(8), mobj.group(9)]) # total? # Process error code mobj = re.search( # fmt: off r'^\s+' + r'current input line \:' + r'\s*' + r'^\s+' + r'([1-9][0-9]*)' + r'\:' + r'\s+' + r'(.*)' + r'\s*' + r'^\s+' r'------------------------------------------------------------------------' + r'\s*' + r'^\s+' r'------------------------------------------------------------------------' + r'\s*' + r'^\s+' + r'There is an error in the input file' + r'\s*$', # fmt: on outtext, re.MULTILINE, ) if mobj: logger.debug("matched error") # print (mobj.group(1)) #error line number # print (mobj.group(2)) #error reason psivar["NWCHEM ERROR CODE"] = mobj.group(1) # TODO process errors into error var # Get the size of the basis sets, etc mobj = re.search(r"No. of atoms\s+:\s+(\d+)", outtext, re.MULTILINE) if mobj: psivar["N ATOMS"] = mobj.group(1) mobj = re.search( r"No. of electrons\s+:\s+(\d+)\s+Alpha electrons\s+:\s+(\d+)\s+Beta electrons\s+:\s+(\d+)", outtext, re.MULTILINE, ) if mobj: psivar["N ALPHA ELECTRONS"] = mobj.group(2) psivar["N BETA ELECTRONS"] = mobj.group(3) if psivar["N ALPHA ELECTRONS"] == psivar["N BETA ELECTRONS"]: # get HOMO and LUMO energy mobj = re.search( r"Vector" + r"\s+" + r"%d" % (psivar["N ALPHA ELECTRONS"]) + r"\s+" + r"Occ=" + r".*" + r"\s+" + r"E=" + r"([+-]?\s?\d+[.]\d+)" + r"[D]" + r"([+-]0\d)", outtext, re.MULTILINE, ) if mobj: homo = float(mobj.group(1)) * (10 ** (int(mobj.group(2)))) psivar["HOMO"] = np.array([round(homo, 10)]) mobj = re.search( r"Vector" + r"\s+" + r"%d" % (psivar["N ALPHA ELECTRONS"] + 1) + r"\s+" + r"Occ=" + r".*" + r"\s+" + r"E=" + r"([+-]?\s?\d+[.]\d+)" + r"[D]" + r"([+-]0\d)", outtext, re.MULTILINE, ) if mobj: lumo = float(mobj.group(1)) * (10 ** (int(mobj.group(2)))) psivar["LUMO"] = np.array([round(lumo, 10)]) mobj = re.search(r"AO basis - number of functions:\s+(\d+)\s+number of shells:\s+(\d+)", outtext, re.MULTILINE) if mobj: psivar["N MOLECULAR ORBITALS"] = mobj.group(1) psivar["N BASIS FUNCTIONS"] = mobj.group(1) # Search for Center of charge mobj = re.search( r"Center of charge \(in au\) is the expansion point" + r"\n" + r"\s+" + r"X\s+=\s+([+-]?\d+[.]\d+)" + r"\s+" + r"Y\s+=\s+([+-]?\d+[.]\d+)" + r"\s+" + r"Z\s+=\s+([+-]?\d+[.]\d+)", outtext, re.MULTILINE, ) if mobj: psivar["CENTER OF CHARGE"] = np.array([mobj.group(1), mobj.group(2), mobj.group(3)]) mobj = re.search( r"Dipole moment" + r".*?" + r"A\.U\." + r"\s+" + r"DMX\s+([+-]?\d+[.]\d+)\s+" + r"DMXEFC\s+[+-]?\d+[.]\d+\s+" + r"DMY\s+([+-]?\d+[.]\d+)\s+" + r"DMYEFC\s+[+-]?\d+[.]\d+\s+" + r"DMZ\s+([+-]?\d+[.]\d+)\s+" + r"DMZEFC\s+[+-]?\d+[.]\d+\s+" + r"\-EFC\-" + r".*?" 
+ r"A\.U\.\s+" + r"Total dipole\s+([+-]?\d+[.]\d+\s+)", outtext, re.MULTILINE, ) # + r"DMY\s+" + r"([+-]?\d+[.]\d+)", outtext, re.MULTILINE) if mobj: psivar["DIPOLE MOMENT"] = np.array([mobj.group(1), mobj.group(2), mobj.group(3)]) psivar["TOTAL DIPOLE MOMENT"] = mobj.group(4) # Process CURRENT energies (TODO: needs better way) if "HF TOTAL ENERGY" in psivar: psivar["SCF TOTAL ENERGY"] = psivar["HF TOTAL ENERGY"] psivar["CURRENT REFERENCE ENERGY"] = psivar["HF TOTAL ENERGY"] psivar["CURRENT ENERGY"] = psivar["HF TOTAL ENERGY"] if "MCSCF TOTAL ENERGY" in psivar: psivar["CURRENT REFERENCE ENERGY"] = psivar["MCSCF TOTAL ENERGY"] psivar["CURRENT CORRELATION ENERGY"] = 0.0 psivar["CURRENT ENERGY"] = psivar["MCSCF TOTAL ENERGY"] if "MP2 TOTAL ENERGY" in psivar and "MP2 CORRELATION ENERGY" in psivar: psivar["CURRENT CORRELATION ENERGY"] = psivar["MP2 CORRELATION ENERGY"] psivar["CURRENT ENERGY"] = psivar["MP2 TOTAL ENERGY"] if "MP3 TOTAL ENERGY" in psivar and "MP3 CORRECTION ENERGY" in psivar: psivar["CURRENT CORRELATION ENERGY"] = psivar["MP3 TOTAL ENERGY"] - psivar["HF TOTAL ENERGY"] psivar["CURRENT ENERGY"] = psivar["MP3 TOTAL ENERGY"] if "MP4 TOTAL ENERGY" in psivar and "MP4 CORRECTION ENERGY" in psivar: psivar["CURRENT CORRELATION ENERGY"] = psivar["MP4 TOTAL ENERGY"] - psivar["HF TOTAL ENERGY"] psivar["CURRENT ENERGY"] = psivar["MP4 TOTAL ENERGY"] if "CISD TOTAL ENERGY" in psivar and "CISD CORRELATION ENERGY" in psivar: psivar["CURRENT CORRELATION ENERGY"] = psivar["CISD CORRELATION ENERGY"] psivar["CURRENT ENERGY"] = psivar["CISD TOTAL ENERGY"] if "QCISD TOTAL ENERGY" in psivar and "QCISD CORRELATION ENERGY" in psivar: psivar["CURRENT CORRELATION ENERGY"] = psivar["QCISD CORRELATION ENERGY"] psivar["CURRENT ENERGY"] = psivar["QCISD TOTAL ENERGY"] if "LCCD TOTAL ENERGY" in psivar and "LCCD CORRELATION ENERGY" in psivar: psivar["CURRENT CORRELATION ENERGY"] = psivar["LCCD CORRELATION ENERGY"] psivar["CURRENT ENERGY"] = psivar["LCCD TOTAL ENERGY"] if "LCCSD TOTAL ENERGY" in psivar and "LCCSD CORRELATION ENERGY" in psivar: psivar["CURRENT CORRELATION ENERGY"] = psivar["LCCSD CORRELATION ENERGY"] psivar["CURRENT ENERGY"] = psivar["LCCSD TOTAL ENERGY"] if "DFT TOTAL ENERGY" in psivar: psivar["CURRENT REFERENCE ENERGY"] = psivar["DFT TOTAL ENERGY"] psivar["CURRENT ENERGY"] = psivar["DFT TOTAL ENERGY"] # Process TCE CURRENT energies # Need to be fixed # HOW TO KNOW options['NWCHEM']['NWCHEM_TCE']['value']? 
# TODO: CURRENT ENERGY = TCE ENERGY if "%s TOTAL ENERGY" % (cc_name) in psivar and ("%s CORRELATION ENERGY" % (cc_name) in psivar): psivar["CURRENT CORRELATION ENERGY"] = psivar["%s CORRELATION ENERGY" % (cc_name)] psivar["CURRENT ENERGY"] = psivar["%s TOTAL ENERGY" % (cc_name)] if "CCD TOTAL ENERGY" in psivar and "CCD CORRELATION ENERGY" in psivar: psivar["CURRENT CORRELATION ENERGY"] = psivar["CCD CORRELATION ENERGY"] psivar["CURRENT ENERGY"] = psivar["CCD TOTAL ENERGY"] if "CCSD TOTAL ENERGY" in psivar and "CCSD CORRELATION ENERGY" in psivar: psivar["CURRENT CORRELATION ENERGY"] = psivar["CCSD CORRELATION ENERGY"] psivar["CURRENT ENERGY"] = psivar["CCSD TOTAL ENERGY"] if "CCSD+T(CCSD) TOTAL ENERGY" in psivar and "CCSD+T(CCSD) CORRELATION ENERGY" in psivar: psivar["CURRENT CORRELATION ENERGY"] = psivar["CCSD+T(CCSD) CORRELATION ENERGY"] psivar["CURRENT ENERGY"] = psivar["CCSD+T(CCSD) TOTAL ENERGY"] if "CCSD(T) TOTAL ENERGY" in psivar and "CCSD(T) CORRELATION ENERGY" in psivar: psivar["CURRENT CORRELATION ENERGY"] = psivar["CCSD(T) CORRELATION ENERGY"] psivar["CURRENT ENERGY"] = psivar["CCSD(T) TOTAL ENERGY"] if "CCSDT TOTAL ENERGY" in psivar and "CCSDT CORRELATION ENERGY" in psivar: psivar["CURRENT CORRELATION ENERGY"] = psivar["CCSDT CORRELATION ENERGY"] psivar["CURRENT ENERGY"] = psivar["CCSDT TOTAL ENERGY"] if ("EOM-%s TOTAL ENERGY" % (cc_name) in psivar) and ("%s EXCITATION ENERGY" % (cc_name) in psivar): psivar["CURRENT ENERGY"] = psivar["EOM-%s TOTAL ENERGY" % (cc_name)] psivar["CURRENT EXCITATION ENERGY"] = psivar["%s EXCITATION ENERGY" % (cc_name)] psivar[f"N ATOMS"] = len(psivar_coord.symbols) return psivar, psivar_coord, psivar_grad, version, module, error
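# A minimal sketch (not part of the harvester above) of the conversion the HOMO/LUMO
# block performs: NWChem prints orbital energies in Fortran D-notation, e.g.
# "E=-2.433173D-01", so the mantissa and the signed exponent are captured separately
# and the float is rebuilt by hand. The helper name below is hypothetical; the same
# result follows from replacing "D" with "E" before calling float().
def _d_notation_to_float(token):
    """Convert a Fortran D-notation literal such as '-2.433173D-01' to a Python float."""
    return float(token.replace("D", "E").replace("d", "e"))

assert abs(_d_notation_to_float("-2.433173D-01") - (-0.2433173)) < 1e-12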
10,498
import os

import nibabel as nib
import numpy as np
import pandas as pd

# Helper routines (display_all_slices, inner_mask, cen_rotation, phantom_motion,
# create_mean_slices, simulate_inner, scanner_output) are assumed to be defined
# elsewhere in this module.


def create_denoising_dataset(epi_path, log_path, acqtimes_path, rot_dir=-1, interactive=True, img_dir=None,
                             slice_indices=None, inner_mask_level=0.004):
    """Generates masks and timeseries for analysis.

    Parameters
    ----------
    epi_path : str
        Path to the phantom data.
    log_path : str
        Path to the BrainDancer movement log file.
    acqtimes_path : str
        Path to the slice timing file.
    interactive : bool, optional
        If True (default), prompt user for decision inputs.
    img_dir : str, optional
        If specified, displays will be saved to file. Otherwise, display onscreen.
    slice_indices : tuple, optional
        Start and end indices of slices to include for processing.
    inner_mask_level : float
        Threshold for segmenting the phantom.
    """
    data_read = nib.load(epi_path)

    if interactive:
        display_all_slices(data_read, 0)
        start = int(input('Enter the first good slice: '))
        end = int(input('Enter the last good slice: '))
    elif slice_indices is not None:
        # non-interactive: select the specified slices
        start = slice_indices[0]
        end = slice_indices[1]
        slice_img_path = os.path.join(img_dir, 'slices.png')
        display_all_slices(data_read, 0, file=slice_img_path, subset=np.arange(start, end + 1))
    else:
        # non-interactive, but no slice list was given
        raise TypeError('slice_indices cannot be None in non-interactive mode')

    with open(log_path, 'r') as fp:
        line = fp.readline()
    if line.startswith('Sequence'):
        # skip the header lines
        log = pd.read_csv(log_path, header=2)
    else:
        log = pd.read_csv(log_path)

    motion_time = np.max(log['Tmot'].values)
    acq_times = pd.read_csv(acqtimes_path)
    motionfree = acq_times[acq_times['Time'] > motion_time]['Slice'].values

    total_slices = []
    for i in list(motionfree):
        if start <= i <= end:
            total_slices.append(i)
    print('Selected Slices for Analysis are: ', total_slices)

    imask = []
    cen = []
    imask_metrics = []
    center_rotation_all = []
    omask = []
    detect_remove = []
    updated_total_slices = []
    good_slices = []

    for i in range(len(total_slices)):
        if interactive:
            level_img_path = None
            center_img_path = None
        else:
            level_img_path = os.path.join(img_dir, f'contours_{i:03d}.png')
            center_img_path = os.path.join(img_dir, f'centers_{i:03d}.png')

        img_complete, cy_complete, cx_complete, radii_complete = inner_mask(
            epi_path, total_slices[i], volume_num=0, lvl=inner_mask_level, rad1=7, rad2=50, step=1,
            img_path=level_img_path)
        center_rotation = cen_rotation(
            epi_path, total_slices[i], img_complete, cy_complete, cx_complete, radii_complete,
            canny_sgm=1, img_path=center_img_path)

        if interactive:
            detect = int(input('Enter 1 if this slice is good: '))
            good_slices.append(detect)

        center_rotation_all.append(center_rotation)
        imask.append(img_complete)
        updated_total_slices.append(total_slices[i])

        # TO DO - Include the option to generate outer mask and corresponding time_series,
        # with something like below:
        # out_mask = outer_mask(data_read, findcartridge(data_read, total_slices[i], 0), total_slices[i], 0)
        # omask.append(out_mask)

    # update good slices: in non-interactive mode, keep only slices whose detected
    # center of rotation lies within +/-1 of the median row and column
    if not interactive:
        row_med = np.median([x[0] for x in center_rotation_all])
        col_med = np.median([x[1] for x in center_rotation_all])
        for row_cor, col_cor in center_rotation_all:
            if np.all([row_cor <= row_med + 1, row_cor >= row_med - 1,
                       col_cor <= col_med + 1, col_cor >= col_med - 1]):
                good_slices.append(1)
            else:
                good_slices.append(0)

    print(good_slices)
    print(center_rotation_all)
    center_rotation_all = [x for good, x in zip(good_slices, center_rotation_all) if good == 1]
    imask = [x for good, x in zip(good_slices, imask) if good == 1]
    updated_total_slices = [x for good, x in zip(good_slices, updated_total_slices) if good == 1]
    print(good_slices)
    print(center_rotation_all)

    if img_dir is not None:
        motion_img = os.path.join(img_dir, 'motion.png')
    else:
        motion_img = None

    positions = phantom_motion(log_path, img_path=motion_img)
    synth = create_mean_slices(data_read, updated_total_slices, imask, 200)
    simulated_data = simulate_inner(synth, positions, updated_total_slices, imask, center_rotation_all, rot_dir)
    scanner_inner = scanner_output(data_read, positions, updated_total_slices, imask, 200)

    # add omask in future for outer cylinder
    return simulated_data, scanner_inner, imask, center_rotation_all, updated_total_slices
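# A minimal non-interactive usage sketch, assuming placeholder file names (these paths
# are illustrative, not taken from the source). The slice range must be supplied
# explicitly and QC figures are written to img_dir instead of being shown onscreen:
simulated, scanner, masks, centers, kept_slices = create_denoising_dataset(
    epi_path="phantom_epi.nii.gz",        # placeholder path
    log_path="braindancer_log.csv",       # placeholder path
    acqtimes_path="slice_acq_times.csv",  # placeholder path
    interactive=False,
    img_dir="qc_figs",
    slice_indices=(10, 20),
)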
10,499