content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
from typing import Tuple


def watermark_pdf(input_file: str, wm_text: str, pages: Tuple = None):
    """Overlay a text watermark onto pages of a PDF file.

    Args:
        input_file: Path to the source PDF.
        wm_text: Text to render as the watermark.
        pages: Optional collection of page numbers (as strings) to
            watermark; when falsy, every page is watermarked.

    Returns:
        Tuple of (success, pdf_reader, pdf_writer). On failure the reader
        and writer are both None.
    """
    result, wm_buffer = create_watermark(wm_text)
    if not result:
        # Bug fix: the original fell through to the final return and
        # referenced pdf_reader/pdf_writer before assignment (NameError)
        # whenever create_watermark failed.
        return False, None, None
    wm_reader = PdfFileReader(wm_buffer)
    # NOTE(review): the file object is intentionally left open because the
    # returned PdfFileReader reads from the stream lazily — confirm callers
    # dispose of it appropriately.
    pdf_reader = PdfFileReader(open(input_file, 'rb'), strict=False)
    pdf_writer = PdfFileWriter()
    try:
        for page_num in range(pdf_reader.getNumPages()):
            # If required to watermark specific pages, not all document pages.
            if pages and str(page_num) not in pages:
                continue
            page = pdf_reader.getPage(page_num)
            page.mergePage(wm_reader.getPage(0))
            pdf_writer.addPage(page)
    except Exception as e:
        print("Exception = ", e)
        return False, None, None
    return True, pdf_reader, pdf_writer
3fb4d51a88db9c509842ee76b7fee22af30a358d
3,637,000
def wrapper_configuration_get():  # noqa: E501
    """Return configuration details on the current wrapper configuration.  # noqa: E501

    :rtype: object
    """
    # Stub endpoint: the code generator emits this placeholder response
    # until a real implementation is wired in.
    placeholder = 'do some magic!'
    return placeholder
85ac6abbf09f93a08295584d7051aad2e8cad8d6
3,637,001
def update_qgs():
    """Generate QGIS project files."""
    try:
        # Build a ConfigGenerator and write out the project files.
        writer_log = config_generator().write_qgs()
    except Exception as exc:
        # Report the failure to the caller instead of raising.
        return {'error': str(exc)}
    return {
        'message': "Finished writing QGIS project files",
        'log': writer_log,
    }
1cb7f6f844fc40b611dc49b8f2b5a8de795e04e0
3,637,002
from typing import Tuple
from typing import List
import warnings


def time_evolution_derivatives(
    hamiltonian: pyquil.paulis.PauliSum,
    time: float,
    method: str = "Trotter",
    trotter_order: int = 1,
) -> Tuple[List[circuits.Circuit], List[float]]:
    """Generates derivative circuits for the time evolution operator defined
    in function time_evolution.

    Args:
        hamiltonian: The Hamiltonian to be evolved under. It should contain
            numeric coefficients, symbolic expressions aren't supported.
        time: time duration of the evolution.
        method: time evolution method. Currently the only option is 'Trotter'.
        trotter_order: order of Trotter evolution

    Returns:
        Tuple of (derivative circuits, multiplicative factors), one entry
        per (term, ±) shift — and per Trotter step when trotter_order > 1.
    """
    if method != "Trotter":
        raise ValueError(f"The method {method} is currently not supported.")

    single_trotter_derivatives = []
    # Each term yields two shifted circuits, one per sign.
    factors = [1.0, -1.0]
    output_factors = []

    if isinstance(hamiltonian, QubitOperator):
        terms = list(hamiltonian.get_operators())
    elif isinstance(hamiltonian, pyquil.paulis.PauliSum):
        warnings.warn(
            "PauliSum as an input to time_evolution_derivatives will be depreciated, "
            "please change to QubitOperator instead.",
            DeprecationWarning,
        )
        terms = hamiltonian.terms

    for i, term_1 in enumerate(terms):
        for factor in factors:
            output = circuits.Circuit()
            try:
                # Extract the numeric coefficient of the current term.
                if isinstance(term_1, QubitOperator):
                    r = list(term_1.terms.values())[0] / trotter_order
                else:
                    r = complex(term_1.coefficient).real / trotter_order
            except TypeError:
                raise ValueError(
                    "Term coefficients need to be numerical. "
                    f"Offending term: {term_1}"
                )
            output_factors.append(r * factor)
            # Shift applied only to term i's evolution time (parameter-shift
            # style offset).
            shift = factor * (np.pi / (4.0 * r))
            for j, term_2 in enumerate(terms):
                output += time_evolution_for_term(
                    term_2,
                    (time + shift) / trotter_order if i == j else time / trotter_order,
                )
            single_trotter_derivatives.append(output)

    if trotter_order > 1:
        output_circuits = []
        final_factors = []
        # Unshifted single-Trotter-step circuit; repeated around each shifted
        # copy at every position in the Trotter sequence.
        repeated_circuit = time_evolution(
            hamiltonian, time, method="Trotter", trotter_order=1
        )
        for position in range(trotter_order):
            for factor, different_circuit in zip(
                output_factors, single_trotter_derivatives
            ):
                output_circuits.append(
                    _generate_circuit_sequence(
                        repeated_circuit, different_circuit, trotter_order, position
                    )
                )
                final_factors.append(factor)
        return output_circuits, final_factors
    else:
        return single_trotter_derivatives, output_factors
fe793657d9fa199df174a288f59a390c7787598c
3,637,003
def had_cells_strength(strmfunc, min_plev=None, max_plev=None,
                       lat_str=LAT_STR, lev_str=LEV_STR):
    """Location and signed magnitude of both Hadley cell centers.

    Args:
        strmfunc: streamfunction DataArray with latitude and level coords.
        min_plev, max_plev: optional pressure-level bounds passed through to
            `had_cell_strength`.
        lat_str, lev_str: names of the latitude and level coordinates.

    Returns:
        DataArray of the two cell strengths, labeled "had_cell_sh" and
        "had_cell_nh", with center latitude and level as coordinates.
    """
    lat = strmfunc[lat_str]
    # Sometimes the winter Ferrel cell is stronger than the summer Hadley cell.
    # So find the global extremal negative and positive values as well as the
    # opposite-signed cell on either side.  The Hadley cells will be the two of
    # these whose centers are nearest the equator.
    cell_pos_max_strength = had_cell_strength(
        strmfunc, min_plev=min_plev, max_plev=max_plev, lev_str=lev_str,
    )
    lat_pos_max = cell_pos_max_strength.coords[lat_str]
    cell_south_of_pos_strength = -1*had_cell_strength(
        -1*strmfunc.where(lat < lat_pos_max), min_plev=min_plev,
        max_plev=max_plev, lev_str=lev_str,
    )
    cell_north_of_pos_strength = -1*had_cell_strength(
        -1*strmfunc.where(lat > lat_pos_max), min_plev=min_plev,
        max_plev=max_plev, lev_str=lev_str,
    )
    cell_neg_max_strength = had_cell_strength(
        -1*strmfunc, min_plev=min_plev, max_plev=max_plev, lev_str=lev_str,
    )
    lat_neg_max = cell_neg_max_strength.coords[lat_str]
    cell_south_of_neg_strength = had_cell_strength(
        strmfunc.where(lat < lat_neg_max), min_plev=min_plev,
        max_plev=max_plev, lev_str=lev_str,
    )
    cell_north_of_neg_strength = had_cell_strength(
        strmfunc.where(lat > lat_neg_max), min_plev=min_plev,
        max_plev=max_plev, lev_str=lev_str,
    )
    # The above procedure generates 6 cells, of which 2 are duplicates.  Now,
    # get rid of the duplicates.
    strengths = [
        cell_pos_max_strength,
        cell_south_of_pos_strength,
        cell_north_of_pos_strength,
        cell_neg_max_strength,
        cell_south_of_neg_strength,
        cell_north_of_neg_strength,
    ]
    cell_strengths = xr.concat(strengths, dim=lat_str, coords=[lev_str])
    # Bug fix: use the `lat_str` argument rather than the module-level
    # LAT_STR constant, so non-default latitude coordinate names work.
    dupes = cell_strengths.get_index(lat_str).duplicated()
    cell_strengths = cell_strengths[~dupes]
    # Pick the two cells closest to the equator.
    center_lats = cell_strengths[lat_str]
    hc_strengths = cell_strengths.sortby(np.abs(center_lats))[:2]
    # Order the cells from south to north.
    hc_strengths = hc_strengths.sortby(hc_strengths[lat_str])
    # Create DataArray with one label for each cell, the cell strengths
    # as the values, and the cell center latitudes and levels as coords.
    coords_out = {"cell": ["had_cell_sh", "had_cell_nh"]}
    ds_strengths = xr.Dataset(coords=coords_out)
    arr_lat_center = xr.DataArray(hc_strengths[lat_str].values,
                                  dims=["cell"], coords=coords_out)
    arr_lev_center = xr.DataArray(hc_strengths[lev_str].values,
                                  dims=["cell"], coords=coords_out)
    arr_strength = xr.DataArray(hc_strengths.values,
                                dims=["cell"], coords=coords_out)
    ds_strengths.coords[lat_str] = arr_lat_center
    ds_strengths.coords[lev_str] = arr_lev_center
    ds_strengths["cell_strength"] = arr_strength
    return ds_strengths["cell_strength"]
ba8b4840a3e7e851a7156cd6aed1e3969e362692
3,637,004
def d_enter_waste_cooler(W_mass, rho_waste, w_drift):
    """
    Calculates the tube's diameter of enter waste to waste cooler.

    Parameters
    ----------
    W_mass : float
        The mass flow rate of waste, [kg/s]
    rho_waste : float
        The density of liquid at boilling temperature, [kg/m**3]
    w_drift : float
        The speed of steam at the tube, [m/s]

    Returns
    -------
    d_enter_waste_cooler : float
        The tube's diameter of enter waste to waste cooler, [m]

    References
    ----------
    &&&
    """
    # Bug fix: the coefficient was written with a decimal comma ("0,785"),
    # which made the denominator a tuple and raised TypeError at runtime.
    # 0.785 ≈ pi/4 (circular cross-section factor).
    # NOTE(review): this returns W/(0.785*rho*w); if a diameter [m] is
    # expected a square root may be missing — confirm against the source
    # formula.
    return W_mass / (0.785 * rho_waste * w_drift)
651c1adc0b90a286c2c8685c389268bc8834ad73
3,637,005
async def finalize_round(request, persistence):
    """Finalize an owned round."""
    game_id = request.match_info['game_id']
    round_name = request.match_info['round_name']
    user_session = await get_session(request)
    # Only the moderator of the game may finalize its rounds.
    if not client_owns_game(game_id, user_session, persistence):
        return json_response({'error': 'The user is not the moderator of this game.'}, status=403)
    try:
        persistence.finalize_round(game_id, round_name)
    except NoSuchRound:
        message, status = 'Round does not exist.', 404
    except NoActivePoll:
        message, status = 'There is no active poll in this round.', 404
    except RoundFinalized:
        message, status = 'This round has already been finalized.', 409
    else:
        # Success: return the updated game state.
        return json_response({'game': persistence.serialize_game(game_id)})
    return json_response({'error': message}, status=status)
21c07b35eb366d1ca78a90940bfb85772469683f
3,637,006
def _arrs_to_ds(arrs, names=None):
    """Combine DataArrays into a single Dataset."""
    if names is None:
        # Default to positional names: "0", "1", ...
        names = [str(index) for index in range(len(arrs))]
    data_vars = {name: arr for name, arr in zip(names, arrs)}
    return xr.Dataset(data_vars=data_vars)
5672ba30c43d646a637d1db5735df23f916f012b
3,637,007
from datetime import datetime
import time


def formatTimeFromNow(secs=0):
    """ Properly Format Time that is `x` seconds in the future

    :param int secs: Seconds to go in the future (`x>0`) or the
                     past (`x<0`)
    :return: Properly formated time for Graphene (`%Y-%m-%dT%H:%M:%S`)
    :rtype: str
    """
    # Offset the current UTC instant by the requested number of seconds.
    target_timestamp = time.time() + int(secs)
    return datetime.utcfromtimestamp(target_timestamp).strftime(timeFormat)
b36e68466c05eb33f178d2568b3c2ff21bc9c707
3,637,008
def exitFlow(x, n_classes):
    """ Create the exit flow section
        x        : input to the exit flow section
        n_classes: number of output classes
    """

    def classifier(x, n_classes):
        """ The output classifier
            x        : input to the classifier
            n_classes: number of output classes
        """
        # Global Average Pooling will flatten the 10x10 feature maps into 1D
        # feature maps
        x = GlobalAveragePooling2D()(x)
        # Fully connected output layer (classification)
        x = Dense(n_classes, activation='softmax')(x)
        return x

    # Remember the input
    shortcut = x

    # Strided convolution to double number of filters in identity link to
    # match output of residual block for the add operation (projection shortcut)
    shortcut = Conv2D(1024, (1, 1), strides=(2, 2), padding='same')(shortcut)
    shortcut = BatchNormalization()(shortcut)

    # First Depthwise Separable Convolution
    # Dimensionality reduction - reduce number of filters
    x = SeparableConv2D(728, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)

    # Second Depthwise Separable Convolution
    # Dimensionality restoration
    x = SeparableConv2D(1024, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)

    # Create pooled feature maps, reduce size by 75%
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    # Add the projection shortcut to the output of the pooling layer
    x = Add()([x, shortcut])

    # Third Depthwise Separable Convolution
    # NOTE(review): the Xception reference architecture uses 1536 filters at
    # this layer; 1556 may be a typo — confirm against the original model.
    x = SeparableConv2D(1556, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)

    # Fourth Depthwise Separable Convolution
    x = SeparableConv2D(2048, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)

    # Create classifier section
    x = classifier(x, n_classes)

    return x
95ac0696e03cb6e3320cebd20790e2f07c69d4ee
3,637,009
import os


def get_files_for_variable(cmake_path, variables, variable):
    """ Returns the path values associated with |variable| and relative to the
    |cmake_path| directory. """
    if not variable in variables:
        raise Exception('Variable %s does not exist' % variable)

    # Cmake file directory.
    cmake_dirname = os.path.dirname(cmake_path) + '/'

    # Return path values relative to the cmake file directory.
    # Example 1:
    #   cmake file    = "/path/to/libcef_dll/CMakeLists.txt"
    #   include path  = "/path/to/libcef_dll/wrapper/cef_browser_info_map.h"
    #   return path   = "wrapper/cef_browser_info_map.h"
    # Example 2:
    #   cmake file    = "/path/to/libcef_dll/CMakeLists.txt"
    #   include path  = "/path/to/include/internal/cef_export.h"
    #   return path   = "../include/internal/cef_export.h"
    new_paths = []
    paths = variables[variable]
    for path in paths:
        # NOTE(review): `cef_dir` is not defined in this function or its
        # parameters — presumably a module-level global pointing at the
        # source root; confirm before reusing this helper elsewhere.
        abspath = os.path.join(cef_dir, path)
        newpath = normalize_path(os.path.relpath(abspath, cmake_dirname))
        new_paths.append(newpath)
    return new_paths
aaaeec33895b9cea3856f7ab9d304b15c34b152c
3,637,010
from typing import Optional


def SingleChannelDDR4_2400(size: Optional[str] = "1024MB") -> SingleChannel:
    """
    A single channel DDR4_2400.

    :param size: The size of the memory system. Default value of 1024MB.
    """
    # "DDR4_4Gb_x8_2400": 4Gb x8 DDR4 device at 2400 MT/s.
    return SingleChannel("DDR4_4Gb_x8_2400", size)
10a83cd74b55f5ec93812fd1d52c8d753d9024d4
3,637,011
def convert_Pa_to_dBSPL(pa):
    """ Converts units of Pa to dB re 20e-6 Pa (dB SPL) """
    # 20 micropascals is the standard reference pressure for SPL.
    reference_pressure = 20e-6
    return 20. * np.log10(pa / reference_pressure)
a14991c7923b7ceb46f279a95b3ef64ff648ae57
3,637,012
def isPalindromic(seq):
    """ is a sequence palindromic? returns True or False """
    # Idiom fix: lower-case once and return the comparison directly instead
    # of the if/return(True)/return(False) pattern.
    lowered = seq.lower()
    return rc_expanded(lowered) == lowered
bbe011e0b599f8df417ffc10eef0ace0d8f08d37
3,637,013
import os


def get_corpus(data_dir):
    """Get list of words in the text.

    Args:
        data_dir: data directory.
    Returns:
        list of str words.
    """
    corpus = []
    for entry in os.listdir(data_dir):
        entry_path = os.path.join(data_dir, entry)
        # Skip sub-directories and other non-file entries.
        if not os.path.isfile(entry_path):
            continue
        with open(entry_path, 'r') as handle:
            text = handle.read().strip('\n')
        corpus.extend(del_useless_char(text))
    return corpus
57cbc16f3ab2767e5a44fd261e6a31b14b8ddb06
3,637,014
import numpy
import random


def randomPairsMatch(n_records_A: int, n_records_B: int, sample_size: int) -> IndicesIterator:
    """ Return random combinations of indices for record list A and B """
    total: int = n_records_A * n_records_B
    if not sample_size:
        # Nothing requested: empty iterator.
        return iter([])
    if sample_size >= total:
        # Sample covers the whole cross product; enumerate every pair.
        chosen = numpy.arange(total)
    else:
        chosen = numpy.array(random.sample(range(total), sample_size))

    rows, cols = numpy.unravel_index(chosen, (n_records_A, n_records_B))
    return zip(rows, cols)
2cd6f905933149b4f23f656e9db44f57830e1eb9
3,637,015
import logging


def GetScaffoldLengths(genome_fna_fp):
    """ This function gets the lengths of the scaffolds, returns a dict

    Args:
        genome_fna_fp: (str) Path to genome fna file (FASTA)
    Returns:
        Scaffold_To_Length: (dict) scaffold_name: (str) -> length (int)
    """
    Scaffold_To_Length = {}
    c_scaffold_name = ""
    # Robustness fix: initialise the running length so a malformed file that
    # starts with a sequence line no longer raises NameError.
    cs_len = 0
    # Resource fix: use a context manager so the handle is closed even if
    # parsing raises (the original leaked the file object on exceptions).
    with open(genome_fna_fp) as FNA_FH:
        c_line = FNA_FH.readline().strip()
        # NOTE: like the original, reading stops at the first blank line.
        while c_line != "":
            if c_line[0] == ">":
                # Flush the previous scaffold before starting a new one.
                if c_scaffold_name != "":
                    Scaffold_To_Length[c_scaffold_name] = cs_len
                if " " in c_line:
                    logging.warning(f"A space found in scaffold name: '{c_line}'."
                                    " This might cause an error.")
                    c_scaffold_name = (c_line.split(' ')[0])[1:]
                    logging.warning(f"Instead using scaffold name {c_scaffold_name}")
                else:
                    c_scaffold_name = c_line[1:]
                # Current scaffold length is reset
                cs_len = 0
            else:
                cs_len += len(c_line)
            c_line = FNA_FH.readline().strip()
    # Flush the final scaffold.
    if c_scaffold_name != "":
        Scaffold_To_Length[c_scaffold_name] = cs_len
    if len(Scaffold_To_Length.keys()) == 0:
        logging.warning("No Scaffolds found in " + genome_fna_fp)
    return Scaffold_To_Length
cee4c6a3d9171dc86563e5f74dae6fbdfcb0556a
3,637,016
import re
import os
import time


def approx_version_number():
    """Best-effort "version number" when git and the VERSION file are absent.

    Precedence:
      1. A d.d.d version embedded in the viral-ngs root directory name
         (tagged GitHub release archives extract into such directories).
      2. The modification time of this file (unix timestamp) — release
         archives carry creation-time mtimes, a rough release-date proxy.
      3. The current time (unix timestamp) — better than nothing.
    """
    version_re = re.compile(r"(?:(\d+)\.)?(?:(\d+)\.)?(?:(\d+))")

    # Root directory name, two levels above this file (path is relative to
    # version.py).
    viral_ngs_path = os.path.basename(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

    match = version_re.search(viral_ngs_path)
    if match and len([group for group in match.groups() if group]) == 3:
        # All three numeric components present: treat as the version.
        return ".".join(str(group) for group in match.groups())

    try:
        mtime = os.path.getmtime(__file__)
    except OSError:
        mtime = 0

    if mtime > 0:
        # Fall back to this file's modification time.
        return str(int(mtime))
    # Last resort: the current time.
    return str(int(time.time()))
16adf3e5c274cf86bc9a7b3b53889e5340021798
3,637,017
def flip_mesh(mesh):
    """ It flips the mesh of a shape.
    ----------------------------
    Args:
        mesh (obj: 'base.Trimesh'): The mesh of a shape
    Returns:
        mesh (obj: 'base.Trimesh'): The flipped mesh of the shape
    """
    triangles = np.zeros((3, len(mesh.faces)))
    # Bug fix: the original iterated mesh.faces[1:], which skipped the first
    # face and left the final column of `triangles` as zeros.
    for i, face in enumerate(mesh.faces):
        # Centroid of the triangle: mean of its three vertices per axis.
        face_vertices = np.asarray([mesh.vertices[num] for num in face])
        triangles[:, i] = face_vertices.mean(axis=0)
    # Per-axis moment sign decides whether to mirror along that axis.
    f_x = calculate_f(triangles[0])
    f_y = calculate_f(triangles[1])
    f_z = calculate_f(triangles[2])
    R = np.array([[np.sign(f_x), 0, 0],
                  [0, np.sign(f_y), 0],
                  [0, 0, np.sign(f_z)]])
    mesh.vertices = np.matmul(mesh.vertices, R)
    return mesh
a527b47a4f1c184a97d4ee7005d05be7926e0258
3,637,018
def leaky_twice_relu6(x, alpha_low=0.2, alpha_high=0.2, name="leaky_relu6"):
    """:func:`leaky_twice_relu6` can be used through its shortcut:
    :func:`tl.act.ltrelu6`.

    This activation function is a modified version :func:`leaky_relu`
    introduced by the following paper:
    `Rectifier Nonlinearities Improve Neural Network Acoustic Models
    [A. L. Maas et al., 2013]
    <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__

    This activation function also follows the behaviour of
    :func:`tf.nn.relu6` introduced by:
    `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010]
    <http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf>`__

    It pushes the logic further by adding `leaky` behaviour both below zero
    and above six:

    - When x < 0: ``f(x) = alpha_low * x``.
    - When x in [0, 6]: ``f(x) = x``.
    - When x > 6: ``f(x) = 6 + (alpha_high * (x-6))``.

    Parameters
    ----------
    x : Tensor
        Support input type ``float``, ``double``, ``int32``, ``int64``,
        ``uint8``, ``int16``, or ``int8``.
    alpha_low : float
        Slope for x < 0: ``f(x) = alpha_low * x``.
    alpha_high : float
        Slope for x > 6: ``f(x) = 6 + (alpha_high * (x-6))``.
    name : str
        The function name (optional).

    Examples
    --------
    >>> import tensorlayer as tl
    >>> net = tl.layers.DenseLayer(net, 100, act=lambda x : tl.act.leaky_twice_relu6(x, 0.2, 0.2), name='dense')

    Returns
    -------
    Tensor
        A ``Tensor`` in the same type as ``x``.
    """
    # NOTE(review): the checks below reject 0 (strict `0 <`), but the error
    # messages say "[0, 1]" — confirm whether 0 should be allowed.
    if not isinstance(alpha_high, tf.Tensor) and not (0 < alpha_high <= 1):
        raise ValueError("`alpha_high` value must be in [0, 1]`")

    if not isinstance(alpha_low, tf.Tensor) and not (0 < alpha_low <= 1):
        raise ValueError("`alpha_low` value must be in [0, 1]`")

    with tf.name_scope(name, "leaky_twice_relu6") as name_scope:
        x = tf.convert_to_tensor(x, name="features")

        # Upper piece: identity on [0, 6], slope alpha_high above 6.
        x_is_above_0 = tf.minimum(x, 6 * (1 - alpha_high) + alpha_high * x)
        # Lower piece: slope alpha_low below 0, clipped at 0.
        x_is_below_0 = tf.minimum(alpha_low * x, 0)
        return tf.maximum(x_is_above_0, x_is_below_0, name=name_scope)
17c4fce9bd8803cda254fb28cde72e5401760c3d
3,637,019
def fully_connected(inputs, num_outputs, scope, use_xavier=True, stddev=1e-3,
                    weight_decay=0.0, activation_fn=tf.nn.relu, bn=False,
                    bn_decay=None, is_training=None):
    """ Fully connected layer with non-linear operation.

    Args:
        inputs: 2-D tensor BxN
        num_outputs: int
        scope: str, variable scope name
        use_xavier: bool, use Xavier initialization for the weights
        stddev: float, truncated-normal stddev when not using Xavier
        weight_decay: float, L2 weight-decay coefficient (0.0 disables)
        activation_fn: callable applied to the output, or None for linear
        bn: bool, whether to apply batch normalization
        bn_decay: float or None, decay for batch-norm moving averages
        is_training: bool tensor, batch-norm training/inference switch

    Returns:
        Variable tensor of size B x num_outputs.
    """
    with tf.variable_scope(scope) as sc:
        num_input_units = inputs.get_shape()[-1].value
        # Weight matrix, optionally L2-regularized via `wd`.
        weights = _variable_with_weight_decay('weights',
                                              shape=[num_input_units, num_outputs],
                                              use_xavier=use_xavier,
                                              stddev=stddev,
                                              wd=weight_decay)
        outputs = tf.matmul(inputs, weights)
        biases = tf.get_variable('biases', [num_outputs],
                                 initializer=tf.constant_initializer(0.0))
        outputs = tf.nn.bias_add(outputs, biases)

        if bn:
            # NOTE(review): tf.contrib is TensorFlow-1.x-only.
            outputs = tf.contrib.layers.batch_norm(outputs,
                                                   decay=bn_decay,
                                                   updates_collections=None,
                                                   epsilon=1e-5,
                                                   scale=True,
                                                   is_training=is_training,
                                                   scope='bn')

        if activation_fn is not None:
            outputs = activation_fn(outputs)
        return outputs
01646dd4d18a210b298c313b13a03274c69fd127
3,637,020
def _x_orientation_rep_dict(x_orientation): """"Helper function to create replacement dict based on x_orientation""" if x_orientation.lower() == 'east' or x_orientation.lower() == 'e': return {'x': 'e', 'y': 'n'} elif x_orientation.lower() == 'north' or x_orientation.lower() == 'n': return {'x': 'n', 'y': 'e'} else: raise ValueError('x_orientation not recognized.')
83434a8aef7003146a19c470b831e8e9cfa85f19
3,637,021
def move_at_objc_to_access_note(access_notes_file, arg, offset, access_note_name):
    """Write an @objc attribute into an access notes file, then return the
    string that will replace the attribute and trailing comment."""
    # Record the attribute in the access notes file.
    access_notes_file.write(u"""
- Name: '{}'
  ObjC: true""".format(access_note_name))
    if arg:
        access_notes_file.write(u"""
  ObjCName: '{}'""".format(arg))

    if offset is None:
        # Default to shifting expected diagnostics down 1 line.
        offset = 1

    # Replacement comment that adjusts the expected diagnostics.
    return u"".join([
        u"// access-note-adjust",
        offsetify(offset),
        u" [attr moved] ",
        u"expected-remark{{access note for fancy tests adds attribute 'objc' to ",
        u"this }} expected-note{{add attribute explicitly to silence this warning}}",
    ])
6037b6db15188ce43771d47f01518994f562d409
3,637,022
def test_idempotent_lambda_with_validator_util(
    config_without_jmespath: IdempotencyConfig,
    persistence_store: DynamoDBPersistenceLayer,
    lambda_apigw_event,
    timestamp_future,
    serialized_lambda_response,
    deserialized_lambda_response,
    hashed_idempotency_key_with_envelope,
    mock_function,
    lambda_context,
):
    """
    Test idempotent decorator where event with matching event key has already
    been successfully processed, using the validator utility to unwrap the
    event.
    """
    stubber = stub.Stubber(persistence_store.table.meta.client)
    # Record of the previously-completed invocation as stored in DynamoDB.
    ddb_response = {
        "Item": {
            "id": {"S": hashed_idempotency_key_with_envelope},
            "expiration": {"N": timestamp_future},
            "data": {"S": serialized_lambda_response},
            "status": {"S": "COMPLETED"},
        }
    }

    expected_params = {
        "TableName": TABLE_NAME,
        "Key": {"id": hashed_idempotency_key_with_envelope},
        "ConsistentRead": True,
    }
    # put_item fails because the record already exists; the decorator then
    # falls back to get_item and must return the stored response.
    stubber.add_client_error("put_item", "ConditionalCheckFailedException")
    stubber.add_response("get_item", ddb_response, expected_params)
    stubber.activate()

    @validator(envelope=envelopes.API_GATEWAY_HTTP)
    @idempotent(config=config_without_jmespath, persistence_store=persistence_store)
    def lambda_handler(event, context):
        mock_function()
        return "shouldn't get here!"

    # The wrapped handler body must never run for an already-completed key.
    mock_function.assert_not_called()
    lambda_resp = lambda_handler(lambda_apigw_event, lambda_context)
    assert lambda_resp == deserialized_lambda_response

    stubber.assert_no_pending_responses()
    stubber.deactivate()
75e0d3a8aabb3e3520c06a0268a7e2d1e534d249
3,637,023
import os
import shutil


def load_pdbbind_fragment_coordinates(frag1_num_atoms, frag2_num_atoms,
                                      complex_num_atoms, max_num_neighbors,
                                      neighbor_cutoff, pdbbind_dir, base_dir,
                                      datafile="INDEX_core_data.2013"):
    """Featurize PDBBind dataset.

    Parameters
    ----------
    frag1_num_atoms: int
        Maximum number of atoms in fragment 1.
    frag2_num_atoms: int
        Maximum number of atoms in fragment 2.
    complex_num_atoms: int
        Maximum number of atoms in complex.
    max_num_neighbors: int
        Maximum number of neighbors per atom.
    neighbor_cutoff: float
        Interaction cutoff [Angstrom].
    pdbbind_dir: str
        Location of PDBbind datafile.
    base_dir: str
        Location for storing featurized dataset.
    datafile: str
        Name of PDBbind datafile, optional (Default "INDEX_core_data.2013").

    Returns
    -------
    tasks: list
        PDBbind tasks.
    dataset: dc.data.DiskDataset
        PDBbind featurized dataset.
    transformers: list
        dc.trans.NLP objects.
    """
    # Create some directories for analysis
    # The base_dir holds the results of all analysis
    # NOTE(review): `reload` is not defined in this function or its
    # parameters — presumably a module-level flag; confirm, otherwise this
    # raises NameError (or truth-tests the builtin on Python 2).
    if not reload:
        if os.path.exists(base_dir):
            shutil.rmtree(base_dir)
    if not os.path.exists(base_dir):
        os.makedirs(base_dir)
    current_dir = os.path.dirname(os.path.realpath(__file__))
    # Make directories to store the raw and featurized datasets.
    data_dir = os.path.join(base_dir, "dataset")

    # Load PDBBind dataset
    labels_file = os.path.join(pdbbind_dir, datafile)
    tasks = ["-logKd/Ki"]
    print("About to load contents.")
    contents_df = load_pdbbind_labels(labels_file)
    ids = contents_df["PDB code"].values
    y = np.array([float(val) for val in contents_df["-logKd/Ki"].values])

    # Define featurizers
    featurizer = ComplexNeighborListFragmentAtomicCoordinates(
        frag1_num_atoms, frag2_num_atoms, complex_num_atoms, max_num_neighbors,
        neighbor_cutoff)
    w = np.ones_like(y)

    # Currently featurizes with shard_size=1
    # Dataset can be reshard: dataset = dataset.reshard(48) for example
    def shard_generator():
        # Yields one (X, y, w, id) shard per PDB complex.
        for ind, pdb_code in enumerate(ids):
            print("Processing %s" % str(pdb_code))
            pdb_subdir = os.path.join(pdbbind_dir, pdb_code)
            computed_feature = compute_pdbbind_coordinate_features(
                featurizer, pdb_subdir, pdb_code)
            if computed_feature[0] is None:
                print("Bad featurization")
                continue
            else:
                # NOTE(review): the feature vector is assumed to flatten to
                # exactly 9 entries here — confirm against the featurizer.
                X_b = np.reshape(np.array(computed_feature), (1, 9))
                y_b = y[ind]
                w_b = w[ind]
                y_b = np.reshape(y_b, (1, -1))
                w_b = np.reshape(w_b, (1, -1))
                yield (X_b, y_b, w_b, pdb_code)

    dataset = dc.data.DiskDataset.create_dataset(
        shard_generator(), data_dir=data_dir, tasks=tasks)
    transformers = []

    return tasks, dataset, transformers
b1ad0cba65ad7e199d167aed9a0c0b569d4cfedd
3,637,024
def get_version_if_modified(gh_type, repo_name, typ, force=False):
    """
    Return the latest version if the latest version is different from the
    previously indexed version. Return None if no change.
    If force is True, always return the latest version.
    """
    latest_version = get_latest_version(gh_type, repo_name, typ)
    if force:
        return latest_version
    indexed_version = get_indexed_version(gh_type, repo_name, typ)
    if indexed_version == latest_version:
        # Portability fix: the Python-2-only `print` statement is replaced
        # with the function-call form (valid on both Python 2 and 3).
        print('%s (%s): skipping %s' % (repo_name, gh_type, typ))
        return None
    return latest_version
51dcd251dece6e6e401261f79007be8fcd653844
3,637,025
import requests
import json


def do_rest_request(**kwargs):
    """Perform a GET request and return the decoded JSON response.

    Expects either "full_url" or, in its absence, a combination of "url"
    and "query_params".

    Raises:
        Exception: if neither argument combination is supplied, or the
            server does not answer with HTTP 200.
    """
    if 'full_url' in kwargs:
        query_url = kwargs['full_url']
    elif 'url' in kwargs and 'query_params' in kwargs:
        # Bug fix: the guard previously checked 'rest_url' while the body
        # read kwargs['url'] (guaranteed KeyError); 'url' matches both the
        # docstring and the error message below.  Also use the stdlib
        # urllib.parse.urlencode — `urlparse.urlencode` was never imported
        # and does not exist.
        from urllib.parse import urlencode
        query_url = kwargs['url'] + '?' + urlencode(kwargs['query_params'])
    else:
        raise Exception('Provide either "full_url" or a combination of "url" and "query_params"')
    print("Querying {}".format(query_url))
    rest_response = requests.get(query_url, headers={'content-type': 'application/json'})
    if rest_response.status_code != 200:
        raise Exception('Cannot fetch info: {}'.format(rest_response.status_code))
    return json.loads(rest_response.text)
0ee2d7e20ca98e2c73b8d4b3e89d709a1b14a911
3,637,026
def variable(init_val, lb=None, ub=None):
    """
    Initialize a scalar design variable.
    :param init_val: Initial guess
    :param lb: Optional lower bound
    :param ub: Optional upper bound
    :return: The created variable
    """
    new_var = opti.variable()
    # Seed the solver with the caller's initial guess.
    opti.set_initial(new_var, init_val)
    # Register the optional box constraints.
    if lb is not None:
        opti.subject_to(new_var >= lb)
    if ub is not None:
        opti.subject_to(new_var <= ub)
    return new_var
6cd346effba937a43c555e3e0e1e7b3fecf231e3
3,637,027
def current_user() -> str:
    """Return the username of the current (session-cookie) user."""
    session_id = request.get_cookie(cookie_session_name())
    cursor = get_cursor()
    # Look up the session row; the session id comes from the client cookie.
    cursor.execute(
        """
        select username
          from sessions
         where session_id = :session_id
        """,
        {"session_id": session_id},
    )
    row = cursor.fetchone()
    return row["username"]
ec2b16f671a9fd11762160bcb73f770d9bc5eb7a
3,637,028
async def async_setup(hass, hassconfig):
    """Setup Component."""
    domain_data = hass.data.setdefault(DOMAIN, {})
    config = hassconfig.get(DOMAIN) or {}
    domain_data['config'] = config
    # Seed the shared per-domain storage with empty containers.
    for key, default in (
        ('entities', {}),
        ('configs', {}),
        ('miot_main_entity', {}),
        ('micloud_devices', []),
        ('cloud_instance_list', []),
        ('event_fetcher_list', []),
        ('add_handler', {}),
    ):
        domain_data.setdefault(key, default)

    component = EntityComponent(_LOGGER, DOMAIN, hass, SCAN_INTERVAL)
    domain_data['component'] = component
    await component.async_setup(config)
    return True
5708286ac76bc01ff8b979632d8d030192600e3f
3,637,029
def mplplot(peaklist, w=1, y_min=-0.01, y_max=1, points=800, limits=None): """ A no-frills routine that plots spectral simulation data. Arguments --------- peaklist : [(float, float)...] a list of (frequency, intensity) tuples. w : float peak width at half height y_max : float or int maximum intensity for the plot. points : int number of data points limits : (float, float) frequency limits for the plot """ # import matplotlib.pyplot as plt peaklist.sort() # Could become costly with larger spectra if limits: try: l_limit, r_limit = limits l_limit = float(l_limit) r_limit = float(r_limit) except Exception as e: print(e) print('limits must be a tuple of two numbers') # return None raise if l_limit > r_limit: l_limit, r_limit = r_limit, l_limit else: l_limit = peaklist[0][0] - 50 r_limit = peaklist[-1][0] + 50 x = np.linspace(l_limit, r_limit, points) plt.ylim(y_min, y_max) plt.gca().invert_xaxis() # reverses the x axis y = add_signals(x, peaklist, w) # noinspection PyTypeChecker plt.plot(x, y) plt.show() return x, y # TODO: or return plt? Decide behavior
92e6d268d1a2fcd818ca6f28e84ad0b925e09c7a
3,637,030
def get_node_centroids(mesh):
    """
    Calculate the node centroids of the given elements.

    Parameters
    ----------
    mesh : list of dicts or single dict
        each dict containing at least the following keywords
        nodes : ndarray
            Array with all node postions.
        elements : dict of ndarrays
            Contains array of nodes for elements sorted by element types.

    Returns
    -------
    result : list of dictionaries or single dict of ndarrays (like 'mesh')
        Centroids of elements sorted by element types.
    """
    # Normalize the input to a list; remember whether to unwrap on return.
    single = not isinstance(mesh, (list, tuple))
    meshes = [mesh] if single else mesh

    result = []
    for mesh_i in meshes:
        centroids = {}
        for elem in ELEM_NAMES:
            if elem in mesh_i["elements"]:
                # Average the node positions of each element of this type.
                points = mesh_i["nodes"][mesh_i["elements"][elem]]
                centroids[elem] = np.mean(points, axis=1)
        result.append(centroids)

    return result[0] if single else result
eb7244184921a9728ce12e0f7eaf46bd52cf2399
3,637,031
def find_saas_replication_price(package, tier=None, iops=None):
    """Find the price in the given package for the desired replicant volume

    :param package: The product package of the endurance storage type
    :param tier: The tier of the primary storage volume
    :param iops: The IOPS of the primary storage volume
    :return: Returns the replication price, or an error if not found
    """
    # Tier-based and IOPS-based replication use different items/restrictions.
    if tier is None:
        target_value = iops
        target_item_keyname = 'REPLICATION_FOR_IOPSBASED_PERFORMANCE'
        target_restriction_type = 'IOPS'
    else:
        target_value = ENDURANCE_TIERS.get(tier)
        target_item_keyname = 'REPLICATION_FOR_TIERBASED_PERFORMANCE'
        target_restriction_type = 'STORAGE_TIER_LEVEL'

    for item in package['items']:
        if item['keyName'] != target_item_keyname:
            continue
        price_id = _find_price_id(
            item['prices'],
            'performance_storage_replication',
            target_restriction_type,
            target_value,
        )
        if price_id:
            return price_id

    raise ValueError("Could not find price for replicant volume")
5f3abdd4a2edd24abd8c19752316b06e76212532
3,637,032
def _get_option_of_highest_precedence(config, option_name): """looks in the config and returns the option of the highest precedence This assumes that there are options and flags that are equivalent Args: config (_pytest.config.Config): The pytest config object option_name (str): The name of the option Returns: str: The value of the option that is of highest precedence None: no value is present """ # Try to get configs from CLI and ini try: cli_option = config.getoption("--{}".format(option_name)) except ValueError: cli_option = None try: ini_option = config.getini(option_name) except ValueError: ini_option = None highest_precedence = cli_option or ini_option return highest_precedence
4f3bca4ff5b0a1eb04fbdc7a5d22bc09dbc95df6
3,637,033
def get_industry_categories():
    """Return a {industry_id: name} mapping, grouped by compiling department."""
    expr = STOCK_DB.industries.drop_field('last_updated')
    df = odo(expr, pd.DataFrame)
    # One {industry_id: name} dict per department.
    return {
        department: group.set_index('industry_id').to_dict()['name']
        for department, group in df.groupby('department')
    }
5b50dc2845a903e56071b57b0ee8d307f5e52f27
3,637,034
def rate(t, y, dt, elph_tau, pol_tau, delay, start):
    """Rate equations for the two-state model.

    y[0] is the charge-transfer state population and y[1] the polaron
    state population.  elph_tau is the electron-phonon scattering
    constant and pol_tau the polaron formation constant; the optical
    excitation is supplied by ``pulse(t, dt, delay, start)``.
    Returns [d y0/dt, d y1/dt, d y2/dt].
    """
    scattering = (y[0] - y[1]) / elph_tau   # electron-phonon relaxation
    formation = y[0] * y[1] / pol_tau       # polaron formation
    drive = pulse(t, dt, delay, start)      # excitation pulse term
    return [drive - scattering - formation,
            scattering - formation,
            formation]
dd98606f0ab4dd5c3334acfbd080959ac4921030
3,637,035
import copy
def trace_module(no_print=True):
    """Trace exceptions raised by ``docs.support.my_module``.

    Exercises the module's callables inside an ``ExDocCxt`` tracing
    context so their exception documentation can be harvested.

    :param no_print: when False, also print the generated Sphinx
        exception documentation for each traced callable.
    :return: a shallow copy of the populated ExDoc context object.
    :raises RuntimeError: when any of the traced calls fails.
    """
    with putil.exdoc.ExDocCxt() as exdoc_obj:
        try:
            # Exercise the function and the property getter/setter so the
            # tracer records every exception they can raise.
            docs.support.my_module.func('John')
            obj = docs.support.my_module.MyClass()
            obj.value = 5
            obj.value
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
        except:
            raise RuntimeError(
                'Tracing did not complete successfully'
            )
    if not no_print:
        module_prefix = 'docs.support.my_module.'
        callable_names = ['func', 'MyClass.value']
        for callable_name in callable_names:
            callable_name = module_prefix+callable_name
            print('\nCallable: {0}'.format(callable_name))
            print(exdoc_obj.get_sphinx_doc(callable_name, width=70))
            print('\n')
    # Shallow copy: safe to use after the context has exited.
    return copy.copy(exdoc_obj)
f407cba3f2ae8582bdaa685ae5bcba1ca908e9a9
3,637,036
def data_to_seq(X, Y, t_lag=8, t_future_shift=1, t_future_steps=1, t_sw_step=1,
                X_pad_with=None):
    """Slice X and Y into sequences using a sliding window.

    Arguments:
    ----------
        X : np.ndarray with ndim == 2
        Y : np.ndarray with ndim == 2
        t_sw_step : uint (default: 1)
            Time step of the sliding window.
        t_lag : uint (default: 8)
            (t_lag - 1) past time steps used to construct a sequence of inputs.
        t_future_shift : uint (default: 1)
            How far in the future predictions are supposed to be made.
        t_future_steps : uint (default: 1)
            How many steps to be predicted from t + t_future_shift.
        X_pad_with : unused; kept for interface compatibility.

    The sequences are constructed in a way that the model can be trained
    to predict Y[t_future:t_future+t_future_steps] from X[t-t_lag:t]
    where t_future = t + t_future_shift.

    Returns:
    --------
        [X_seq, Y_seq] with shapes (n, t_lag, X.shape[1]) and
        (n, t_future_steps, Y.shape[1]).
    """
    # Assume that provided X and Y are matrices and are aligned in time.
    assert X.ndim == 2 and Y.ndim == 2
    assert len(X) == len(Y)

    # Pad X from the beginning so the first window already has t_lag rows.
    X_padding_left = np.zeros((t_lag - 1, X.shape[1]))
    X = np.vstack([X_padding_left, X])

    # Last valid window start such that the Y slice stays inside bounds.
    nb_t_steps = 1 + len(X) - (t_future_shift + (t_future_steps - 1))

    X_seq, Y_seq = [], []
    # range(), not Python-2 xrange(): the original broke under Python 3.
    for t in range(t_lag, nb_t_steps, t_sw_step):
        t_past = t - t_lag
        t_future = t_past + t_future_shift
        X_seq.append(X[t_past:t])
        Y_seq.append(Y[t_future:t_future + t_future_steps])

    X_seq = np.asarray(X_seq)
    Y_seq = np.asarray(Y_seq)

    return [X_seq, Y_seq]
477366408309483eb1c9dcc2d90c70f7bd3ab143
3,637,037
import time
import os
def masterbias(files,med=False,outfile=None,clobber=True,verbose=False):
    """
    Load the bias images.  Overscan correct and trim them.  Then average them.

    Parameters
    ----------
    files : list
       List of bias FITS files.
    med : boolean, optional
       Use the median of all the files.  By default med=False and the mean
         is calculated.
    outfile : string, optional
       Filename to write the master bias image to.
    clobber : boolean, optional
       If the output file already exists, then overwrite it.  Default is True.
    verbose : boolean, optional
       Verbose output to the screen.  Default is False.

    Returns
    -------
    aim : numpy image
       The 2D master bias image (float32).
    ahead : header dictionary
       The master bias header (copied from the first frame, annotated with
       the combined file names and combine history).

    Example
    -------

    bias, bhead = masterbias(bias_files)

    """
    nfiles = len(files)
    if verbose: print('Creating master bias using '+str(nfiles)+' files')
    # File loop
    for i in range(nfiles):
        im,head = fits.getdata(files[i],0,header=True)
        sh = im.shape
        if verbose: print(str(i+1)+' '+files[i]+' ['+str(sh[1])+','+str(sh[0])+']')
        # Fix header, if necessary.  `|` here is a bitwise-or of two bools,
        # which behaves like `or` for these operands.
        if (head.get('TRIMSEC') is None) | (head.get('BIASSEC') is None):
            head = fixheader(head)
        # Check image type: reject frames that are neither bias/zero typed
        # nor zero-exposure.
        imagetyp = head.get('IMAGETYP')
        exptime = head.get('EXPTIME')
        if imagetyp is not None:
            if 'bias' not in imagetyp.lower() and 'zero' not in imagetyp.lower() and exptime != 0.0:
                raise ValueError(files[i]+' is not a bias')
        # Image processing, overscan+trim
        im2,head2 = ccdproc(im,head)
        # Initialize accumulator on first frame: a 3D stack for median
        # combining, or a running sum for mean combining.
        if i==0:
            ny,nx = im2.shape
            if med:
                imarr = np.zeros((ny, nx, nfiles),float)
            else:
                totim = np.zeros(im2.shape,float)
        if med:
            imarr[:,:,i] = im2
        else:
            totim += im2
        # Keep the first frame's (processed) header as the output header and
        # record every combined file name in it.
        if i==0:
            ahead=head2.copy()
        ahead['CMB'+str(i+1)] = files[i]
    # Final calculation
    if med:
        aim = np.median(imarr,axis=2)
        ahead['HISTORY'] = 'Median combine'
    else:
        aim = totim/nfiles
        ahead['HISTORY'] = 'Mean combine'
    ahead['NCOMBINE'] = nfiles
    ahead['HISTORY'] = time.ctime()+' bias combine'
    aim = aim.astype(np.float32)  # convert to 32 bit

    # Output file
    if outfile is not None:
        if os.path.exists(outfile):
            if clobber is False:
                raise ValueError(outfile+' already exists and clobber=False')
            else:
                os.remove(outfile)
        if verbose: print('Writing master bias to '+outfile)
        hdu = fits.PrimaryHDU(aim,ahead).writeto(outfile)

    return aim, ahead
43b6e905545d57e65b44b16787d67a5c8e2964e4
3,637,038
def get_model_kind(model):
    """Return the datastore "kind" of the given model.

    NOTE: A model's kind is usually, but not always, the same as its class
    name; a model may overwrite the _get_kind() class method, and raw Cloud
    Datastore entities carry their kind directly.  Both sources are
    consulted instead of relying on ``__name__``.

    Args:
        model: base_models.Model|cloud_datastore_types.Entity. The model
            class, model instance, or raw entity to inspect.

    Returns:
        bytes. The model's kind.

    Raises:
        TypeError. When the argument is not a model.
    """
    is_model_class = (
        isinstance(model, type) and issubclass(model, base_models.BaseModel))
    if is_model_class or isinstance(model, base_models.BaseModel):
        return model._get_kind()  # pylint: disable=protected-access
    if isinstance(model, cloud_datastore_types.Entity):
        return model.kind
    raise TypeError('%r is not a model type or instance' % model)
58465fd8d9a7893aeb046b5e05e713a912ff4a2f
3,637,039
def get_Zvalence_from_pseudo(pseudo):
    """Extract the number of valence electrons from a pseudopotential file.

    The first line containing the word ``valence`` is inspected.  The
    UPF-style ``z_valence="..."`` attribute is tried first; failing that,
    the legacy layout where the count precedes a literal ``Z`` is tried.
    Returns None when no value can be parsed.
    """
    with open(pseudo.get_file_abs_path(), 'r') as handle:
        content = handle.readlines()

    for row in content:
        if 'valence' not in row:
            continue
        # Modern UPF attribute: ... z_valence="4.000E+00" ...
        try:
            return int(float(row.split("z_valence=\"")[-1].split("\"")[0].strip()))
        except (ValueError, IndexError):
            pass
        # Legacy layout: the number sits just before a literal 'Z'.
        try:
            return int(float(row.split("Z")[0].strip()))
        except (ValueError, IndexError):
            return None
aade59ef7d9d7d517c19f95d237993433f21ed7a
3,637,040
import ruptures as rpt
def detect_data_shifts(time_series, filtering=True, use_default_models=True,
                       method=None, cost=None, penalty=40):
    """
    Detect data shifts in the time series, and return a boolean mask of the
    dates where these data shifts occur.

    Parameters
    ----------
    time_series : Pandas series with datetime index.
        Daily time series of a PV data stream, which can include irradiance
        and power data streams. This series represents the summed daily
        values of the particular data stream.
    filtering : Boolean, default True.
        Whether or not to filter out outliers and stale data from the time
        series. If True, this data is filtered out before running the data
        shift detection sequence. If False, this data is not filtered out.
    use_default_models: Boolean, default True
        If True, then default change point detection search parameters are
        used. For time series shorter than 2 years in length, the search
        function is `rpt.Window` with `model='rbf'`, `width=50` and
        `penalty=30`. For time series 2 years or longer in length, the
        search function is `rpt.BottomUp` with `model='rbf'` and
        `penalty=40`.
    method: ruptures search method instance or None, default None.
        Ruptures search method instance. See
        https://centre-borelli.github.io/ruptures-docs/user-guide/.
    cost: str or None, default None
        Cost function passed to the ruptures changepoint search instance.
    penalty: int, default 40
        Penalty value passed to the ruptures changepoint detection method.

    Returns
    -------
    Pandas Series
        Series of boolean values with a datetime index, where detected
        changepoints are labeled as True, and all other values are labeled
        as False.

    .. warning:: If the passed time series is less than 2 years in length,
        it will not be corrected for seasonality. Data shift detection will
        be run on the min-max normalized time series with no seasonality
        correction.

    References
    ----------
    .. [1] Perry K., and Muller, M. "Automated shift detection in sensor-based
       PV power and irradiance time series", 2022 IEEE 48th Photovoltaic
       Specialists Conference (PVSC). Submitted.
    """
    # Lazy import keeps ruptures an optional dependency of this routine.
    try:
        import ruptures as rpt
    except ImportError:
        raise ImportError("data_shifts() requires ruptures.")
    # Run data checks on cleaned data to make sure that the data can be run
    # successfully through the routine.
    _run_data_checks(time_series)
    # Run the filtering sequence, if marked as True
    if filtering:
        time_series_filtered = _erroneous_filter(time_series)
    else:
        # Without filtering, analyze the raw series directly (previously
        # this path left the variable unbound and raised NameError).
        time_series_filtered = time_series
    # Drop any duplicated data from the time series
    time_series_filtered = time_series_filtered.drop_duplicates()
    # Check if the time series is more than 2 years long. If so, remove
    # seasonality. If not, run analysis on the normalized time series
    if (time_series_filtered.index.max() -
            time_series_filtered.index.min()).days <= 730:
        time_series_processed = _preprocess_data(time_series_filtered,
                                                 remove_seasonality=False)
        seasonality_rmv = False
    else:
        # Perform pre-processing on the time series, to get the
        # seasonality-removed time series.
        time_series_processed = _preprocess_data(time_series_filtered,
                                                 remove_seasonality=True)
        seasonality_rmv = True
    points = np.array(time_series_processed.dropna())
    # If seasonality has been removed and default model is used, run
    # BottomUp method
    if (seasonality_rmv) & (use_default_models):
        algo = rpt.BottomUp(model='rbf').fit(points)
        result = algo.predict(pen=40)
    # If there is no seasonality but default model is used, run
    # Window-based method
    elif (not seasonality_rmv) & (use_default_models):
        algo = rpt.Window(model='rbf', width=50).fit(points)
        result = algo.predict(pen=30)
    # Otherwise run changepoint detection with the passed parameters
    else:
        algo = method(model=cost).fit(points)
        result = algo.predict(pen=penalty)
    # ruptures always reports the series end as a breakpoint; drop it.
    if len(points) in result:
        result.remove(len(points))
    # Return a boolean mask of dates where changepoints are detected.
    time_series_processed.index.name = "datetime"
    mask = pd.Series(False, index=time_series_processed.index)
    mask.iloc[result] = True
    # Re-index the mask to include any timestamps that were
    # filtered out as outliers
    mask = mask.reindex(time_series.index, fill_value=False)
    return mask
d924d36a53f965b76943f1a466d3b88649cbe0ef
3,637,041
def read_rds(filepath):
    """Read an RDS-format matrix into a Pandas dataframe.

    Location can be data, scratch, or results.  The first column of the
    stored matrix becomes the index.

    :raises ValueError: when the matrix contains any NaN entries.
    """
    frame = pyreadr.read_r(filepath)[None]
    if frame.isnull().values.any():
        raise ValueError("NaN's were found in the data matrix.")
    index_column = frame.columns[0]
    return frame.set_index(index_column, drop=True)
c4b171638883fc2c3b32397e79a413a9441567f0
3,637,042
def history(): """Show history of transactions.""" # Read Transactions database for desired elements transactions = db.execute("SELECT symbol, share, price, method, timestamp FROM Transactions WHERE id = :uid", uid = session["user_id"]) # Convert prices to 2 decimal places for transaction in transactions: transaction["price"] = usd(transaction["price"]) return render_template("history.html", transactions = transactions)
5eac4a49c473467db851fe2ea6e58b29cc1a9bfe
3,637,043
import os
import urllib.request
import torch
def _load_expert_models(scenario_name, run_id, len_stream):
    """Load the stream of pretrained ExML expert models.

    Each expert checkpoint is downloaded from the project web server on
    first use and cached under the default dataset location; subsequent
    calls load straight from disk.  Models are moved to CPU and set to
    eval mode.

    :param scenario_name: benchmark scenario identifier used in paths.
    :param run_id: run number used in paths.
    :param len_stream: number of experts (one per experience) to load.
    :return: list of loaded torch models.
    """
    base_dir = default_dataset_location(
        f"EXML_CLVISION22_PRETRAINED_EXPERTS/{scenario_name}/run{run_id}"
    )
    weburl = (
        f"http://131.114.50.174/data/EXML_CLVISION22_PRETRAINED_EXPERTS"
        f"/{scenario_name}/run{run_id}"
    )

    experts_stream = []
    for i in range(len_stream):
        fname_i = f"{base_dir}/model_e{i}.pt"
        weburl_i = f"{weburl}/model_e{i}.pt"
        if not os.path.exists(fname_i):
            # Cache miss: fetch the checkpoint into the local cache dir.
            # `import urllib.request` (not bare `import urllib`) is required
            # for urlretrieve to be reliably available.
            os.makedirs(base_dir, exist_ok=True)
            print(f"Downloading expert model {i}")
            urllib.request.urlretrieve(weburl_i, fname_i)
        model = torch.load(fname_i).to("cpu")
        model.eval()
        experts_stream.append(model)
    return experts_stream
a972a04997ebf7f092c4ec4256a48f25eb8f3f0c
3,637,044
from typing import get_args
def generate_args(job_name, common, cloud_provider, image, k8s_version, test_suite, job):
    """Returns a flat list of args collected from every given field.

    Each field is passed through ``get_args`` together with the job name,
    and the results are concatenated in field order.
    """
    sources = (common, cloud_provider, image, k8s_version, test_suite, job)
    collected = []
    for source in sources:
        collected.extend(get_args(job_name, source))
    return collected
7f53dcf66269b0d14f9fad1c1079cf1716529f09
3,637,045
import math
def isPrime(n):
    """Primality test for integers via trial division.

    Returns True when ``n`` is prime, False otherwise (including for
    n <= 1 and negative inputs).  Divides only by odd candidates and
    stops at sqrt(n) using the exact ``i*i <= n`` bound, which avoids
    the float-rounding pitfalls of ``math.ceil(math.sqrt(n))`` for very
    large n.
    """
    if n <= 1:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    i = 3
    while i * i <= n:
        if n % i == 0:
            return False
        i += 2
    return True
458775fbd324dc976c91a035898b3122e6bc1109
3,637,046
from typing import Tuple
import torch
def reconstruction_loss(loss_type: str,
                        in_dim: Tuple[int],
                        x: torch.Tensor,
                        x_reconstr: torch.Tensor,
                        logits: bool = True,
                        ) -> torch.Tensor:
    """Per-sample reconstruction loss without mean reduction (VAE objectives).

    :param loss_type: "mse" for 0.5 * squared error summed over features,
        or "ce" for binary cross-entropy summed over the last axis.
    :param in_dim: input dimensions; the first two are flattened for the
        "ce" reshape, a third (e.g. channels) is kept as a separate axis.
    :param x: target tensor, first axis is the batch.
    :param x_reconstr: reconstruction; raw logits when ``logits`` is True.
    :param logits: select the with-logits cross-entropy variant.
    :return: 1-D tensor of per-sample losses.
    :raises NotImplementedError: for any other ``loss_type``.
    """
    batch_dim = x.size(0)
    if loss_type == "mse":
        diff = x_reconstr.reshape(batch_dim, -1) - x.reshape(batch_dim, -1)
        reconstr_loss = 0.5 * torch.sum(diff ** 2, 1)
    elif loss_type == "ce":
        # np.prod, not np.product: the latter alias was removed in NumPy 2.0.
        rs = (np.prod(in_dim[:2]),)
        if len(in_dim) == 3:
            rs = rs + (in_dim[-1],)
        xe = (F.binary_cross_entropy_with_logits if logits
              else F.binary_cross_entropy)
        reconstr_loss = xe(x_reconstr.reshape(-1, *rs), x.reshape(-1, *rs),
                           reduction='none').sum(-1)
    else:
        raise NotImplementedError("Reconstruction loss must be 'mse' or 'ce'")
    return reconstr_loss
30dbd75eddbc7f2d0994f867e2f9492b24f707b1
3,637,047
def NOR(*variables):
    """NOR.

    Return the boolean expression for the NOR of the variables; equivalent
    to ``NOT(OR(*variables))``.

    Parameters
    ----------
    *variables : arguments of arbitrary length.  Each variable can be a
        hashable object (the label of a boolean variable) or a dict (or
        dict subclass) representing a boolean expression.

    Return
    ------
    P : ``qubovert.PUBO`` object or same type as ``type(variables[0])``.
        The boolean expression for the logic operation; the concrete type
        follows the first argument, as with ``OR``.

    Example
    -------
    >>> from qubovert.sat import NOR
    >>> P = NOR(0, 1)
    >>> P.value({0: 0, 1: 0})
    1
    >>> P.value({0: 1, 1: 0})
    0
    """
    disjunction = OR(*variables)
    return NOT(disjunction)
e3b9d5eb3c167ac04de66609828583bb5eeb7004
3,637,048
import io
import time
def timing_run(args, shell: bool = False, stdin=None, stdout=None, stderr=None,
               environ=None, cwd=None, resources=None, identification=None,
               shuffle=False) -> RunResult:
    """
    Create a timing process, feeding timestamped stdin lines and collecting
    timestamped stdout/stderr.

    :param args: arguments for execution
    :param shell: use shell to execute args
    :param stdin: stdin stream (none means nothing)
    :param stdout: stdout stream (none means nothing)
    :param stderr: stderr stream (none means nothing)
    :param environ: environment variables
    :param cwd: new work dir
    :param resources: resource limit
    :param identification: user and group for execution
    :param shuffle: Shuffle the inputs with similar timestamp.
    :return: run result of this time
    """
    # Default any missing stream to an in-memory buffer we own (and close).
    stdin_need_close = not stdin
    stdin = stdin or io.BytesIO()
    stdout_need_close = not stdout
    stdout = stdout or io.BytesIO()
    stderr_need_close = not stderr
    stderr = stderr or io.BytesIO()

    with eclosing(stdin, stdin_need_close) as stdin, \
            eclosing(stdout, stdout_need_close) as stdout, \
            eclosing(stderr, stderr_need_close) as stderr:
        # Parse the (timestamp, line) input script from the stdin stream.
        _stdin = TimingStdin.loads(_try_read_to_bytes(stdin))
        if shuffle:
            _stdin = _stdin.to_shuffled()

        with interactive_process(
                args=args, shell=shell, environ=environ, cwd=cwd,
                resources=resources, identification=identification,
        ) as ip:
            # Replay each input line at its scheduled offset from process
            # start, sleeping in <=0.2s slices so completion is noticed.
            for _time, _line in _stdin.lines:
                _target_time = ip.start_time + _time
                while time.time() < _target_time and not ip.completed:
                    time.sleep(max(min(0.2, _target_time - time.time()), 0.0))
                try:
                    ip.print_stdin(_line)
                except BrokenPipeError:
                    # Process closed its stdin early; stop feeding input.
                    break

            ip.close_stdin()

            # Demultiplex the timestamped output stream by tag.
            _stdout, _stderr = [], []
            for _time, _tag, _line in ip.output_yield:
                if _tag == 'stdout':
                    _stdout.append((_time, _line))
                elif _tag == 'stderr':
                    _stderr.append((_time, _line))
                else:
                    # NOTE(review): the message formats repr(_time); _tag was
                    # probably intended here - confirm before changing.
                    raise ValueError('Unknown output type - {type}.'.format(type=repr(_time)))  # pragma: no cover

            ip.join()
            _try_write(stdout, TimingStdout.loads(_stdout).dumps())
            _try_write(stderr, TimingStderr.loads(_stderr).dumps())

            return ip.result
936d9611769dc5e04381131cd7bf18be73580bb3
3,637,049
def alterMethods(cls):
    """
    Alter Monte methods on behalf of AutoHelp.

    Builds a generated ``recvNamed`` dispatcher for the class from the
    ``_monteMethods_`` tables harvested along the MRO, installing unwrap
    and wrap conversions around each Monte method.

    Return the signatures (atoms) of the altered methods.

    NOT_RPYTHON
    """
    atoms = []
    imports = set()
    # Fresh local names for the generated source; the default-arg list is a
    # deliberate mutable-default counter shared across calls to nextName.
    def nextName(nameIndex=[0]):
        name = "_%d" % nameIndex[0]
        nameIndex[0] += 1
        return name
    execNames = {"Refused": Refused}
    dispatchClauses = []
    d = {}
    # Walk the MRO and harvest Monte methods. The repacker has already placed
    # them in the correct location.
    for c in reversed(cls.__mro__):
        if hasattr(c, "_monteMethods_"):
            d.update(c._monteMethods_)
    for attr, (f, verb, args, kwargs, rv) in d.iteritems():
        # The verb is now Unicode.
        verb = verb.decode("utf-8")
        assignments = []
        if isStarArgs(args):
            # Star-args methods match on verb only and take the raw args list.
            atomTest = "atom.verb == %r" % verb
            call = "self.%s(args)" % attr
        else:
            # Fixed-arity methods match on the interned atom identity, plus
            # per-argument type predicates for non-Any guards.
            atomName = nextName()
            execNames[atomName] = atom = getAtom(verb, len(args))
            atoms.append(atom)
            atomTest = "atom is %s" % atomName
            argNames = []
            for i, arg in enumerate(args):
                argName = nextName()
                argNames.append(argName)
                assignments.append("%s = args[%d]" % (argName, i))
                if arg != "Any":
                    unwrapperModule = wrappers[arg]
                    pred = "is" + arg
                    imports.add("from %s import %s" % (unwrapperModule, pred))
                    atomTest += " and %s(args[%d])" % (pred, i)
                    unwrapper = "unwrap" + arg
                    imports.add("from %s import %s" % (unwrapperModule, unwrapper))
                    assignments.append("%s = %s(%s)" % (argName, unwrapper, argName))
            # Named arguments are pulled from namedArgs and unwrapped when a
            # concrete guard is declared.
            for k, v in kwargs.iteritems():
                kwargName = nextName()
                argNames.append("%s=%s" % (k, kwargName))
                assignments.append("%s = namedArgs.extractStringKey(%r, None)" % (kwargName, k.decode("utf-8")))
                if v != "Any":
                    unwrapperModule = wrappers[v]
                    unwrapper = "unwrap" + v
                    imports.add("from %s import %s" % (unwrapperModule, unwrapper))
                    assignments.append("%s = %s(%s) if %s is None else None" % (kwargName, unwrapper, kwargName, kwargName))
            call = "self.%s(%s)" % (attr, ",".join(argNames))
        # Build the return-value conversion for the generated clause.
        retvals = []
        if rv == "Any":
            # No wrapping.
            retvals.append("return rv")
        elif rv == "Void":
            # Enforced correctness. Disobedience will not be tolerated.
            retvals.append("assert rv is None, 'habanero'")
            retvals.append("from typhon.objects.constants import NullObject")
            retvals.append("return NullObject")
        else:
            wrapperModule = wrappers[rv]
            wrapper = "wrap" + rv
            imports.add("from %s import %s" % (wrapperModule, wrapper))
            retvals.append("return %s(rv)" % wrapper)
        # NOTE(review): the exact template whitespace below is reconstructed;
        # py.code.Source deindents, but confirm against the original file.
        dispatchClauses.append("""
    if %s:
        %s
        rv = %s
        %s
""" % (atomTest, ";".join(assignments), call, ";".join(retvals)))
        setattr(cls, attr, f)
    # Temporary. Soon, all classes shall receive AutoHelp, and no class will
    # have a handwritten recv().
    if dispatchClauses:
        # Python 2 exec statement: compile the generated dispatcher into
        # execNames, then install it on the class.
        exec py.code.Source("""
def recvNamed(self, atom, args, namedArgs):
    %s
    %s
    rv = self.mirandaMethods(atom, args, namedArgs)
    if rv is None:
        raise Refused(self, atom, args)
    else:
        return rv
""" % (";".join(imports), "\n".join(dispatchClauses))).compile() in execNames
        cls.recvNamed = execNames["recvNamed"]
    return atoms
9c1dcbda1a96196bdde3f31563d53f8c2be6eeb1
3,637,050
from typing import List
from typing import Union
def make_multiclouds(docs: List[Union[dict, object, str, tuple]],
                     opts: dict = None,
                     ncols: int = 3,
                     title: str = None,
                     labels: List[str] = None,
                     show: bool = True,
                     figure_opts: dict = None,
                     round: int = None
                     ):
    """Make multiclouds.

    Accepts data from a string, list of lists or tuples, a dict with terms
    as keys and counts/frequencies as values, or a dataframe. The best
    input is a dtm produced by `get_dtm_table()`.

    Args:
        docs (List[Union[dict, object, str, tuple]]): The data. Accepts a list of text strings, a list
            of tuples, or dicts with the terms as keys and the counts/frequencies as values,
            or a dataframe with "term" and "count" or "frequency" columns.
        opts (dict): The WordCloud() options.
            For testing, try {"background_color": "white", "max_words": 2000,
            "contour_width": 3, "contour_width": "steelblue"}
        ncols (int): The number of columns in the grid.
        title (str): The title of the grid.
        labels (List[str]): The document labels for each subplot.
        show (bool): Whether to show the plotted word cloud or return it as a WordCloud object.
        figure_opts (dict): A dict of matplotlib figure options.
        round (int): An integer (generally between 100-300) to apply a mask that rounds
            the word cloud.  Note: this parameter shadows the builtin ``round``.

    Returns:
        object: A list of WordCloud objects if show is set to False.

    Notes:
        - For a full list of options, see
          https://amueller.github.io/word_cloud/generated/wordcloud.WordCloud.html#wordcloud-wordcloud.
        - If `show=False` the function expects to be called with something like
          `wordcloud = make_wordcloud(data, show=False)`. This returns WordCloud object which can
          be manipulated by any of its methods, such as `to_file()`. See the WordCloud
          documentation for a list of methods.
    """
    # Process the docs data into a list
    if isinstance(docs, pd.core.frame.DataFrame):
        # Assumes a df with columns: Terms, Doc_Label, DocLabel,...
        # Transpose the df
        docs = docs.T
        # Grab the first row for the header
        new_header = docs.iloc[0]
        # Drop the first row
        docs = docs[1:]
        # Set the header row as the df header
        docs.columns = new_header
        # Return a dict
        docs = docs.to_dict(orient="records")
    # Ensure that anything that is not a list of strings is converted
    # to the appropriate format.
    elif isinstance(docs, list):
        if all(isinstance(s, str) for s in docs):
            pass
        else:
            # NOTE(review): x[0:1]/x[1:2] are slices, so the dict keys/values
            # here are tuples, not scalars - confirm this is intended.
            docs = [{x[0:1]: x[1:2] for x in data} for data in docs]
    # List for multiple word clouds if they are to be returned.
    multiclouds = []
    # Create a rounded mask.
    if round:
        x, y = np.ogrid[:300, :300]
        mask = (x - 150) ** 2 + (y - 150) ** 2 > round ** 2
        mask = 255 * mask.astype(int)
        # NOTE(review): mutates the caller-supplied opts dict; raises
        # TypeError when opts is None and round is set - confirm.
        opts["mask"] = mask
    # Constrain the layout
    figure_opts["constrained_layout"] = True
    # Create the figure.
    fig = plt.figure(**figure_opts)
    # Add the title
    if title:
        fig.suptitle(title)
    # Calculate the number of rows and columns.
    nrows = int(np.ceil(len(docs) / ncols))
    spec = fig.add_gridspec(nrows, ncols)
    # Divide the data into rows.
    rows = list(get_rows(docs, ncols))
    # Set an index for labels
    i = 0
    # Loop through the rows.
    for row, doc in enumerate(rows):
        # Loop through the documents in the row.
        for col, data in enumerate(doc):
            # Create a subplot.
            ax = fig.add_subplot(spec[row, col])
            # Generate the subplot's word cloud.
            if isinstance(data, str):
                wordcloud = WordCloud(**opts).generate_from_text(data)
            else:
                wordcloud = WordCloud(**opts).generate_from_frequencies(data)
            # If `show=True`, show the word cloud.
            if show:
                ax.imshow(wordcloud)
                ax.axis("off")
                # Set the image title from the label
                if labels:
                    ax.set_title(labels[i])
                    i += 1
            # Otherwise, add the word cloud to the multiclouds list.
            else:
                multiclouds.append(wordcloud)
    # If `show=False`, return the multiclouds list.
    if not show:
        return multiclouds
9c1f6363d1cc6cd0e20591c1ab54b1761414d29c
3,637,051
def action_prop(param, val=1):
    """Build a parameter-setting action.

    Returns a one-argument callable that, when invoked with an object
    exposing ``setter(param, value)``, sets ``param`` to ``val``.
    """
    def _do_action(self):
        self.setter(param, val)
    return _do_action
6a4f6e7e178e62755113d6b93a59534675dfa2dd
3,637,052
def find_or_create(find, create):
    """Given a find and a create function, create a resource only when the
    lookup yields nothing truthy; otherwise return the found resource."""
    existing = find()
    if existing:
        return existing
    return create()
ffe608bf2da1b83d662b93266f4309976424300f
3,637,053
import math
def Gsigma(sigma):
    """Return a 1-D Gaussian function G(x) for the given sigma.

    The returned closure evaluates the normalized density
    exp(-x^2 / (2 sigma^2)) / sqrt(2 pi sigma^2).
    """
    def G(x):
        exponent = -(x**2) / (2*sigma**2)
        normalization = (2 * math.pi * sigma**2) ** 0.5
        return math.e ** exponent / normalization
    return G
77eac3ca8b6ced0063074527b83c50e8681f980d
3,637,054
from indico.modules.events.contributions.ical import generate_contribution_component
def session_to_ical(session, detailed=False):
    """Serialize a session into an iCal.

    :param session: The session to serialize
    :param detailed: If True, iCal will include the session's scheduled
        contributions as individual components instead of a single
        session component
    :return: an ``icalendar.Calendar`` rendered with ``to_ical()``
    """
    calendar = icalendar.Calendar()
    calendar.add('version', '2.0')
    calendar.add('prodid', '-//CERN//INDICO//EN')

    # RELATED-TO uid tying every component back to the parent event.
    related_event_uid = f'indico-event-{session.event.id}@{url_parse(config.BASE_URL).host}'

    if not detailed:
        component = generate_session_component(session, related_event_uid)
        calendar.add_component(component)
    else:
        # Only contributions that are actually scheduled get a component.
        contributions = (Contribution.query.with_parent(session)
                         .filter(Contribution.is_scheduled)
                         .all())
        components = [generate_contribution_component(contribution, related_event_uid)
                      for contribution in contributions]
        for component in components:
            calendar.add_component(component)

    return calendar.to_ical()
9f0cb5a5ce6f31c6690b71948fbe6e8eeb2f7080
3,637,055
def _normalize_hosts(hosts):
    """
    Helper function to transform hosts argument to
    :class:`~elasticsearch.Elasticsearch` to a list of dicts.
    """
    # if hosts are empty, just defer to defaults down the line
    if hosts is None:
        return [{}]

    # passed in just one string
    if isinstance(hosts, string_types):
        hosts = [hosts]

    normalized = []
    for entry in hosts:
        # Non-string entries are assumed to already be host dicts.
        if not isinstance(entry, string_types):
            normalized.append(entry)
            continue

        # Bare "host[:port]" needs a scheme marker for urlparse to see a netloc.
        if "://" not in entry:
            entry = "//%s" % entry
        parsed = urlparse(entry)

        spec = {"host": parsed.hostname}
        if parsed.port:
            spec["port"] = parsed.port
        if parsed.scheme == "https":
            spec["port"] = parsed.port or 443
            spec["use_ssl"] = True
        if parsed.username or parsed.password:
            spec["http_auth"] = "%s:%s" % (
                unquote(parsed.username),
                unquote(parsed.password),
            )
        if parsed.path and parsed.path != "/":
            spec["url_prefix"] = parsed.path
        normalized.append(spec)
    return normalized
ef3a6cfadd6a297f31afdfec4b8a77a0f88cd08f
3,637,056
def data(self: Client) -> DataProxy:
    """Delegates to a :py:class:`mcipc.rcon.je.commands.data.DataProxy`
    """
    # Accessor binding this client to the 'data' command family.
    return DataProxy(self, 'data')
072806ad6f27e8bd645bd04cf34619946a83bf06
3,637,057
def proximal_policy_optimization_loss(advantage, old_prediction, loss_clipping=0.2, entropy_loss=5e-3):
    """Clipped-surrogate PPO loss factory (Keras backend).

    https://github.com/LuEE-C/PPO-Keras/blob/master/Main.py
    Only clipping for the surrogate loss is implemented, since the paper
    reported it as best; an entropy bonus scaled by ``entropy_loss`` is
    added on top.

    :param advantage: advantage estimates for the sampled actions
    :param old_prediction: action probabilities under the behavior policy
    :param loss_clipping: clip range epsilon for the probability ratio
    :param entropy_loss: entropy bonus coefficient
    :return: a Keras-compatible ``loss(y_true, y_pred)`` function
    """
    def loss(y_true, y_pred):
        # y_true is one-hot, so the sums select the taken action's probability.
        new_prob = K.sum(y_true * y_pred, axis=-1)
        behavior_prob = K.sum(y_true * old_prediction, axis=-1)
        ratio = new_prob / (behavior_prob + 1e-10)
        clipped_ratio = K.clip(ratio, min_value=1 - loss_clipping, max_value=1 + loss_clipping)
        surrogate = K.minimum(ratio * advantage, clipped_ratio * advantage)
        entropy = -(new_prob * K.log(new_prob + 1e-10))
        return -K.mean(surrogate + entropy_loss * entropy)
    return loss
ca7e1a602a6da6236fbd85facb373fa623fc62d5
3,637,058
import re
def tokenize_string(string):
    """Split a string up into analyzable character units.

    The string is normalized first, then broken into units of one base
    character followed by any combining accent marks (the full accent
    range is U+0300-U+036F), so each unit can be matched against the
    regex patterns independently.
    """
    normalized = normalize_string(string)
    pattern = '.[\u0300-\u036F]*'
    return re.findall(pattern, normalized)
f3757e190f99d3430dee17ca51ea6a6d7fa70ff9
3,637,059
def compute_final_metrics(source_waveforms,
                          separated_waveforms,
                          mixture_waveform):
    """Permutation-invariant SI-SNR, powers, and under/equal/over-separation.

    Separated estimates are first permuted to best match the references,
    then per-pair SI-SNR and activity statistics are computed.
    Returns a dict of tensors (see the keys at the bottom).
    """
    # Resolve the source/estimate permutation by maximizing SI-SNR.
    perm_inv_loss = wrap(lambda tar, est: -signal_to_noise_ratio_gain_invariant(est, tar))
    _, separated_waveforms = perm_inv_loss(source_waveforms, separated_waveforms)

    # Compute separated and source powers.
    power_separated = tf.reduce_mean(separated_waveforms ** 2, axis=-1)
    power_sources = tf.reduce_mean(source_waveforms ** 2, axis=-1)

    # Compute weights for active (separated, source) pairs where source is nonzero
    # and separated power is above threshold of quietest source power - 20 dB.
    weights_active_refs = _weights_for_nonzero_refs(source_waveforms)
    weights_active_seps = _weights_for_active_seps(
        tf.boolean_mask(power_sources, weights_active_refs), power_separated)
    weights_active_pairs = tf.logical_and(weights_active_refs,
                                          weights_active_seps)

    # Compute SI-SNR.
    sisnr_separated = signal_to_noise_ratio_gain_invariant(separated_waveforms,
                                                           source_waveforms)
    num_active_refs = tf.math.reduce_sum(tf.cast(weights_active_refs, tf.int32))
    num_active_seps = tf.math.reduce_sum(tf.cast(weights_active_seps, tf.int32))
    num_active_pairs = tf.math.reduce_sum(tf.cast(weights_active_pairs, tf.int32))
    # Baseline: SI-SNR of the raw mixture (tiled per source) vs each source.
    sisnr_mixture = signal_to_noise_ratio_gain_invariant(
        tf.tile(mixture_waveform, (1, source_waveforms.shape[1], 1)),
        source_waveforms)

    # Compute under/equal/over separation (as 0/1 floats).
    under_separation = tf.cast(tf.less(num_active_seps, num_active_refs),
                               tf.float32)
    equal_separation = tf.cast(tf.equal(num_active_seps, num_active_refs),
                               tf.float32)
    over_separation = tf.cast(tf.greater(num_active_seps, num_active_refs),
                              tf.float32)

    return {'sisnr_separated': sisnr_separated,
            'sisnr_mixture': sisnr_mixture,
            'sisnr_improvement': sisnr_separated - sisnr_mixture,
            'power_separated': power_separated,
            'power_sources': power_sources,
            'under_separation': under_separation,
            'equal_separation': equal_separation,
            'over_separation': over_separation,
            'weights_active_refs': weights_active_refs,
            'weights_active_seps': weights_active_seps,
            'weights_active_pairs': weights_active_pairs,
            'num_active_refs': num_active_refs,
            'num_active_seps': num_active_seps,
            'num_active_pairs': num_active_pairs}
3e7a6a52b8a26c4a4fa7fec9de17559617e4d467
3,637,060
import numpy
import pandas
def gen_sdc_pandas_series_rolling_impl(pop, put,
                                       get_result=result_or_nan,
                                       init_result=numpy.nan):
    """Generate series rolling methods implementations based on pop/put funcs.

    ``put(value, nfinite, result)`` folds a value entering the window and
    ``pop(value, nfinite, result)`` removes a value leaving it; both return
    an updated ``(nfinite, result)`` pair.  ``get_result(nfinite, minp,
    result)`` turns the accumulator into an output value (NaN when fewer
    than ``min_periods`` finite values are present).
    """
    def impl(self):
        win = self._window
        minp = self._min_periods

        input_series = self._data
        input_arr = input_series._data
        length = len(input_arr)
        output_arr = numpy.empty(length, dtype=float64)

        # Each chunk is processed independently in parallel (prange); a
        # chunk re-reads up to win-1 trailing values of the previous chunk
        # (the "prelude") to warm up its accumulator.
        chunks = parallel_chunks(length)
        for i in prange(len(chunks)):
            chunk = chunks[i]
            nfinite = 0
            result = init_result

            # Degenerate zero-width window: every output is the empty result.
            if win == 0:
                for idx in range(chunk.start, chunk.stop):
                    output_arr[idx] = get_result(nfinite, minp, result)
                continue

            # Prelude: warm-up values before the chunk (no output written).
            prelude_start = max(0, chunk.start - win + 1)
            prelude_stop = chunk.start

            # Interlude: windows still growing to full width (put only).
            interlude_start = prelude_stop
            interlude_stop = min(prelude_start + win, chunk.stop)

            for idx in range(prelude_start, prelude_stop):
                value = input_arr[idx]
                nfinite, result = put(value, nfinite, result)

            for idx in range(interlude_start, interlude_stop):
                value = input_arr[idx]
                nfinite, result = put(value, nfinite, result)
                output_arr[idx] = get_result(nfinite, minp, result)

            # Steady state: slide the full window (put new, pop oldest).
            for idx in range(interlude_stop, chunk.stop):
                put_value = input_arr[idx]
                pop_value = input_arr[idx - win]
                nfinite, result = put(put_value, nfinite, result)
                nfinite, result = pop(pop_value, nfinite, result)
                output_arr[idx] = get_result(nfinite, minp, result)

        return pandas.Series(output_arr, input_series._index,
                             name=input_series._name)
    return impl
8fb25c10e862d21af75b244053ac96075c1efa19
3,637,061
def gen_random_colors(num_groups, colors=None):
    """
    Generates random colors.

    Parameters
    ----------
    num_groups : int
        The number of groups for which colors should be generated.

    colors : list : optional (contains strs)
        Hex based colors that should be appended if not enough have been
        provided.

    Returns
    -------
    colors or colors + new_colors : list (contains strs)
        Randomly generated colors for figures and plotting.
    """
    if colors is None:
        colors = []

    if len(colors) < num_groups:
        while len(colors) < num_groups:
            # four OS-entropy floats in [0, 1) form an RGBA tuple
            cryptogen = SystemRandom()
            random_rgba = [cryptogen.random() for i in range(4)]
            colors.append(random_rgba)
        # NOTE(review): side effect — this sets the global seaborn palette
        sns.set_palette(colors)

        if isinstance(colors[0][0], float):
            # Convert over for non-sns use.
            # NOTE(review): the alpha channel is dropped here; if hex strings
            # were passed in via *colors*, this branch is skipped entirely
            colors = [mpl.colors.to_hex([c[0], c[1], c[2]]).upper() for c in colors]

    return colors
462835c6bacd5024ac20bab960d2c2e9d95e4dab
3,637,062
import os


def get_file_creation_date(path):
    """Return the creation time of *path* as a ``datetime``.

    ``assert_file`` validates that *path* points to an existing file
    before the timestamp is read.
    """
    assert_file(path)
    ctime = os.path.getctime(path)
    return dt.datetime.fromtimestamp(ctime)
87d5c985269448b1fc549fb03fb1cb09e7113f4f
3,637,063
import os
import subprocess


def eval_moses_bleu(ref, hyp):
    """
    Score a hypothesis file against a reference file with the Moses
    multi-bleu script.

    Returns the BLEU score as a float, or -1 when the script output
    cannot be parsed.
    """
    assert os.path.isfile(hyp)
    assert os.path.isfile(ref) or os.path.isfile(ref + "0")
    assert os.path.isfile(BLEU_SCRIPT_PATH)
    command = BLEU_SCRIPT_PATH + " %s < %s"
    p = subprocess.Popen(command % (ref, hyp), stdout=subprocess.PIPE, shell=True)
    result = p.communicate()[0].decode("utf-8")
    if not result.startswith("BLEU"):
        logger.warning('Impossible to parse BLEU score! "%s"' % result)
        return -1
    # output looks like "BLEU = 12.34, ..." — grab the number before the comma
    return float(result[7 : result.index(",")])
c15259875549a9d447b0270b1202bb62c290aa42
3,637,064
def build_graph(sorted_sequence):
    """
    Each node points to a list of the nodes that are reachable from it
    (i.e. members of the sequence at offsets +1, +2 or +3).
    """
    members = set(sorted_sequence)
    adjacency = defaultdict(list)
    for node in sorted_sequence:
        successors = [node + step for step in (1, 2, 3) if node + step in members]
        if successors:
            adjacency[node].extend(successors)
    return adjacency
a14d2278909df459856e23c7073d551b354f258d
3,637,065
from numpy import std


def _findCentralBond(mol, distmat):
    """
    Helper function to identify the atoms of the most central bond.

    Arguments:
    - mol:      the molecule of interest
    - distmat:  distance matrix of the molecule

    Return: atom indices of the two most central atoms (in order)
    """
    # get the most central atom = atom with the least STD of shortest distances
    stds = []
    for i in range(mol.GetNumAtoms()):
        # only consider non-terminal atoms
        if len(_getHeavyAtomNeighbors(mol.GetAtomWithIdx(i))) < 2:
            continue
        tmp = [d for d in distmat[i]]
        tmp.pop(i)  # drop the zero self-distance before taking the spread
        stds.append((std(tmp), i))
    stds.sort()
    aid1 = stds[0][1]

    # find the second most central atom that is bonded to aid1
    # NOTE(review): assumes some later candidate is bonded to aid1;
    # otherwise this loop walks off the end of `stds` — confirm upstream
    # guarantees (e.g. molecule is connected).
    i = 1
    while 1:
        if mol.GetBondBetweenAtoms(aid1, stds[i][1]) is None:
            i += 1
        else:
            aid2 = stds[i][1]
            break

    return aid1, aid2  # most central atom comes first
bbaca8c48bf8c5e1a5d2ffa317448f05235c834e
3,637,066
def transform(data, transformer):
    """Apply the trained transformer to new data for scoring.

    DataRobot calls this hook when the task is used inside a blueprint;
    *transformer* is the object extracted from the artifact produced by
    ``fit()`` (a callable in this example).

    Parameters
    ----------
    data: pd.DataFrame
        Data that DataRobot passes for transformation.
    transformer: Any
        Trained object extracted from the fit artifact.

    Returns
    -------
    pd.DataFrame
        The transformed data.
    """
    transformed = data.apply(transformer)
    return transformed
b52577c0b2a3f3edb1297dcf9c567f9845f04bd5
3,637,067
import asyncio
import base64


async def sign_params(params, certificate_file, private_key_file):
    """
    Signs params adding client_secret key, containing signature based on
    `scope`, `timestamp`, `client_id` and `state` keys values.

    :param dict params: requests parameters
    :param str certificate_file: path to certificate file
    :param str private_key_file: path to private key file
    :return: signed request parameters
    :rtype: dict
    :raises OpenSSLError: if the openssl subprocess exits non-zero
    """
    # Concatenate the values to sign in the order mandated by the API.
    plaintext = ''.join([
        params.get(key, '') for key in ['scope', 'timestamp', 'client_id', 'state']
    ])

    cmd = 'openssl smime -sign -md md_gost12_256 -signer {cert} -inkey {key} -outform DER'.format(
        cert=certificate_file,
        key=private_key_file
    )

    proc = await asyncio.create_subprocess_shell(
        cmd,
        stdin=asyncio.subprocess.PIPE,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.STDOUT,
    )

    stdout, stderr = await proc.communicate(input=plaintext.encode())

    if proc.returncode != 0:
        raise OpenSSLError

    # Fix: a stray trailing comma previously made client_secret a 1-tuple;
    # it must be a plain str.
    client_secret = base64.urlsafe_b64encode(stdout).decode('utf-8')

    return {**params, 'client_secret': client_secret}
be9980e5fb0b60da8a21c77b4ac7c9795560b557
3,637,068
def sum_of_fourth_powers(matrix):
    """
    :param matrix: (numpy.ndarray) A numpy array.
    :return: The fourth power of the four-norm of the matrix. In other
        words, the sum of the fourth power of all of its entries.
    """
    fourth_powers = np.square(np.square(matrix))
    return np.sum(fourth_powers)
51039a259594205a88b223b1e3d8387e05581c0f
3,637,069
from typing import Dict


def key_in_direction(start: Key, direction: str, keypad: Keypad) -> Key:
    """Return the key adjacent to *start* in *direction*.

    Movement is clamped at the keypad edges; a falsy neighbour (padding)
    leaves the position unchanged.
    """
    row = next(r for r in keypad if start in r)
    x = row.index(start)
    column = [r[x] for r in keypad]
    y = column.index(start)
    neighbours: Dict[str, Key] = {
        "U": column[max(y - 1, 0)],
        "D": column[min(y + 1, len(column) - 1)],
        "L": row[max(x - 1, 0)],
        "R": row[min(x + 1, len(row) - 1)],
    }
    return neighbours[direction] or start
c0a8909517ec1de29325d0acc18e0c8968bda3b5
3,637,070
def vectorize_args(nums):
    """
    Decorator for vectorization of arguments of a function.

    The positions of the vectorizable arguments are given in *nums*.
    Arguments at those positions may be scalars, lists or numpy arrays;
    lists are converted to arrays and the wrapped function is applied
    element-wise (see numpy.vectorize). When all relevant arguments are
    scalar, the function is called directly.
    """
    def wrap(func):
        @wraps(func)
        def wrapped(*args, ** kwargs):
            args = list(args)
            # promote lists at vectorized positions to arrays
            for i, arg in enumerate(args):
                if i in nums and type(arg) == list:
                    args[i] = np.array(arg)
            # find the first array argument to drive the iteration
            for i, arg in enumerate(args):
                if i in nums and type(arg) == np.ndarray:
                    shape = np.shape(arg)
                    ind = np.transpose(np.ones(shape).nonzero())
                    break
                if i == len(args) - 1:
                    # no need for vectorization as all relevant
                    # arguments are scalars
                    return func(*args, ** kwargs)
            res = np.array([func(
                * [arg[tuple(j)] if type(arg) == np.ndarray and i in nums else arg
                   for i, arg in enumerate(args)],
                ** kwargs) for j in ind])
            # Fix: the Python 2 `<>` operator is a SyntaxError in Python 3.
            if np.shape(res) != shape:
                # func returns more than 1 result, this means the array has to
                # be ordered differently
                res = res.transpose()
            if len(shape) > 1:
                # more than 1D arrays, the shape of the list has to be rearranged
                res = res.reshape((res.shape[0],) + shape)
            return res
        return wrapped
    return wrap
cd9b13bdcd26f1c74a2eaa18396ebfb11ed02446
3,637,071
def parse_lambda_config(x):
    """
    Parse the configuration of lambda coefficient (for scheduling).
    x = "3"                   # lambda will be a constant equal to x
    x = "0:1,1000:0"          # lambda will start from 1 and linearly
                              # decrease to 0 during the first 1000 iterations
    x = "0:0,1000:0,2000:1"   # lambda will be equal to 0 for the first 1000
                              # iterations, then will linearly increase
                              # to 1 until iteration 2000
    """
    if isinstance(x, float):
        return x, None
    pieces = x.split(',')
    if len(pieces) == 1:
        # constant coefficient, no schedule
        return float(x), None
    pairs = [piece.split(':') for piece in pieces]
    assert all(len(pair) == 2 for pair in pairs)
    assert all(step.isdigit() for step, _ in pairs)
    steps = [int(step) for step, _ in pairs]
    # iterations must be strictly increasing
    assert all(a < b for a, b in zip(steps, steps[1:]))
    schedule = [(int(step), float(value)) for step, value in pairs]
    return schedule[0][1], schedule
d85980c2efd46284de8e939f42ef4f5dd49dfd73
3,637,072
def format_cols(colname, direction='in'):
    """Convert a column name between human-readable and pandas-friendly forms.

    Parameters
    ----------
    colname : str
        Column name to convert.
    direction : {'in', 'out'}, default 'in'
        'in'  : lower-case, spaces to underscores, parentheses stripped.
        'out' : underscores to spaces, title-cased.

    Returns
    -------
    str
        The converted column name.

    Raises
    ------
    ValueError
        If *direction* is neither 'in' nor 'out'.
    """
    # Fix: removed stray pasted code that referenced undefined names
    # (`imag`, `real`, `complex_zero`) and raised NameError on every call;
    # the docstring was also for a different function.
    if direction == 'in':
        return (colname
                .lower()
                .replace(' ', '_')
                .replace('(', '')
                .replace(')', '')
                )
    elif direction == 'out':
        return (colname.replace('_', ' ')
                .title()
                )
    raise ValueError('Direction must be "in" or "out"')
a61dbedb2e08c4de03c719c4daff10de41e19304
3,637,073
def convert_decimal_to_binary(number):
    """
    Render an integer as a binary string without the '0b' prefix.

    Parameters
    ----------
    number: int

    Returns
    -------
    out: str

    >>> convert_decimal_to_binary(10)
    '1010'
    """
    with_prefix = bin(number)
    return with_prefix[2:]
01a9be2e70c87091adc1d85759075668da9270f2
3,637,074
from typing import Optional
import pathlib
import tarfile


def fetch_tgz(
    dataname: str,
    urlname: str,
    subfolder: Optional[str] = None,
    data_home: Optional[str] = None,
) -> pathlib.Path:
    """Fetch tgz dataset.

    Downloads a tgz file from *urlname*, unpacks it and stores it under
    the dataset directory by delegating to :func:`fetch_compressed`
    configured for gzip-compressed tar archives.

    Parameters
    ----------
    dataname: string
        Dataset name.
    urlname: string
        Dataset url.
    subfolder: string, default=None
        The subfolder where to put the data, if any.
    data_home: string, default=None
        Dataset directory. If None, use the default of scikit-learn.

    Returns
    -------
    data_home: Path
        Directory.
    """
    tgz_options = dict(
        dataname=dataname,
        urlname=urlname,
        compression_open=tarfile.open,
        subfolder=subfolder,
        data_home=data_home,
        open_format='r:gz',
    )
    return fetch_compressed(**tgz_options)
00c4f91a657e37767a43b3af0766b5b407144617
3,637,075
def choisir_action():
    """Ask the user whether to encrypt or decrypt.

    Returns True for encryption (user pressed <Enter> only),
    False for decryption (any other input).
    """
    reponse = input("Quelle est l'action, crypter ou décrypter ? \n<Entrée> pour crypter, autre touche pour decrypter, ou <Crtl> + Z ou X pour arréter.\n")
    # empty answer means "encrypt"
    return not reponse
c0bceb748afb1fc32b865136c4a477f06a6412b2
3,637,076
def σ(u, p, μ):
    """Stress tensor of isotropic Newtonian fluid.

        σ = 2 μ (symm ∇)(u) - p I

    This method returns a UFL expression the whole stress tensor. If you
    want to plot, extract and interpolate or project what you need.

    For example, to plot the von Mises stress::

        from dolfin import tr, Identity, sqrt, inner
        from fenics import project, plot

        # scalar function space
        W = V.sub(0).collapse()  # use the space of the first comp. of `V`
        # W = FunctionSpace(mesh, 'P', 2)  # or create your own space

        def dev(T):
            '''Deviatoric (traceless) part of rank-2 tensor `T`.

            This assumes, for 2D, that `T` is actually 3D, but the third row
            and column of `T` are zero.
            '''
            return T - (1 / 3) * tr(T) * Identity(T.geometric_dimension())

        # `solver._μ` is the UFL `Constant` object
        σ = σ(solver.u_, solver.p_, solver._μ)
        s = dev(σ)
        vonMises = sqrt(3 / 2 * inner(s, s))
        plot(project(vonMises, W))

    Parameters
    ----------
    u : UFL expression — velocity field
    p : UFL expression — pressure field
    μ : UFL ``Constant`` or scalar — dynamic viscosity

    Returns
    -------
    UFL expression for the stress tensor.
    """
    # ε is presumably the symmetric-gradient helper (symm ∇) — confirm;
    # Identity is sized from p's geometric dimension so the same
    # expression works in 2D and 3D.
    return 2 * μ * ε(u) - p * Identity(p.geometric_dimension())
03f61ea7c128503ee930714107a8f7a007641cee
3,637,077
async def cycle(command: Command, switches: PowerSwitch, name: str, portnum: int):
    """cycle power to an Outlet

    Looks up the first switch that reports a status for (name, portnum),
    refuses to cycle an outlet that is currently off, and delegates the
    actual cycling to ``switch_control``.
    """
    command.info(text=f"Cycle port {name}...")

    for switch in switches:
        current_status = await switch.statusAsJson(name, portnum)
        if current_status:
            break
    # NOTE(review): if no switch reports a status, current_status is falsy
    # and the subscript below raises an uncaught TypeError/KeyError — confirm
    # callers guarantee the outlet exists.

    # print(current_status)
    # status |= await switch.statusAsJson(name, portnum) works only with python 3.9
    # current_status = await switch.statusAsJson(name, portnum)

    try:
        # STATE == 1 means the outlet is on and can be cycled
        if current_status[name]["STATE"] == 1:
            current_status = await switch_control(
                "cycle", switches, False, name, portnum
            )
        elif current_status[name]["STATE"] == 0:
            return command.fail(text=f"The Outlet {name} is OFF")
        else:
            return command.fail(text=f"The Outlet {name} returns wrong value")
    except PowerException as ex:
        return command.fail(error=str(ex))

    return command.finish(text="done")
7b5a17eaeecb4d8f1072f014de716bb1bb95dc97
3,637,078
def zk_delete_working_node(zk_client, server):
    """Delete this server's working node from ZooKeeper.

    (Original docstring in Chinese: "删除服务节点" — delete the service node.)
    """
    node_path, root_path = get_path_to_current_working_node(server)
    zk_client.ensure_path(root_path)
    # NOTE(review): kazoo's KazooClient.delete() accepts no `ephemeral`
    # keyword (that belongs to create()); presumably zk_client is a project
    # wrapper with an extended signature — confirm, otherwise this raises
    # TypeError.
    result = zk_client.delete(node_path, ephemeral=True)
    return result
45effe39d8cd5eb22742c6eed19984ae40b0e192
3,637,079
import torch


def construct_filters_from_2d(matrix, filter_starts, decomp_level):
    """
    construct the filters in the proper shape for the DWT inverse forward step

    Parameters
    ----------
    matrix : torch.Tensor
        Flattened coefficients: the first ``filter_starts[0] ** 2`` rows
        hold the low-pass band, followed by three square high-pass bands
        per decomposition level; the last axis indexes channels.
    filter_starts : sequence of int
        Side length of the (square) band at each level.
    decomp_level : int
        Number of decomposition levels to unpack.

    Returns
    -------
    low : torch.Tensor
        Low-pass band, shaped ``(1, C, exp, exp)``.
    highs : list of torch.Tensor
        Per-level detail bands, reversed so the coarsest level comes
        first, each shaped ``(1, C, 3, exp, exp)``.
    """
    exp = filter_starts[0]
    low = matrix[: exp ** 2].reshape((exp, exp, matrix.shape[-1]))
    low = low.permute(2, 0, 1).unsqueeze(0)
    highs = []
    last_end = exp ** 2
    for lvl in range(decomp_level):
        exp = filter_starts[lvl]
        lp_list = [None, None, None]
        # three detail bands per level (presumably LH/HL/HH — confirm order)
        for i in range(1, 4):
            next_end = last_end + exp ** 2
            lp_list[i - 1] = (
                matrix[last_end:next_end]
                .reshape((exp, exp, matrix.shape[-1]))
                .permute(2, 0, 1)
                .unsqueeze(0)
                .unsqueeze(2)
            )
            last_end = next_end
        highs.append(torch.cat(lp_list, dim=2))
    highs.reverse()
    return low, highs
10411e774dc654586cd9b88b40e405b695a12919
3,637,080
def minpoly(firstterms):
    """
    Return the minimal polynomial having at most degree n of
    of the linearly recurrent sequence whose first 2n terms are given.
    """
    field = ring.getRing(firstterms[0])
    # r_0 = x^(2n); r_1 encodes the given terms as a polynomial.
    r_0 = uniutil.polynomial({len(firstterms):field.one}, field)
    r_1 = uniutil.polynomial(enumerate(reversed(firstterms)), field)
    poly_ring = r_0.getRing()
    v_0 = poly_ring.zero
    v_1 = poly_ring.one
    n = len(firstterms) // 2
    # Half-extended Euclidean algorithm: stop once the remainder degree
    # drops below n; the cofactor v_1 is then an annihilating polynomial.
    while n <= r_1.degree():
        q, r = divmod(r_0, r_1)
        v_0, v_1 = v_1, v_0 - q*v_1
        r_0, r_1 = r_1, r
    # normalize to a monic polynomial
    return v_1.scalar_mul(v_1.leading_coefficient().inverse())
8cad899aa40859884b4cdbe01b0734de84782804
3,637,081
def scale_gradient(tensor, scale):
    """Identity in the forward pass; scales the gradient by *scale* on backward."""
    scaled_path = tensor * scale
    blocked_path = tf.stop_gradient(tensor) * (1 - scale)
    return tf.add(scaled_path, blocked_path)
e3ea3a7baf06ebab5de0510ea13260e89b9397ca
3,637,082
import torch
import os
import pickle


def get_cluster_assignments(args, model, dataset, groups):
    """Hierarchically cluster dataset features across distributed workers.

    First partitions the data into ``args.nmb_super_clusters`` super-clusters
    (computed once and cached on disk), then runs a distributed k-means into
    ``args.k // args.nmb_super_clusters`` sub-clusters within the super-cluster
    this worker group is responsible for. Intermediate feature caches,
    assignments and centroids are exchanged through files under
    ``args.dump_path`` and synchronised with ``dist.barrier()``.

    Returns the sub-cluster assignments for this worker's super-cluster.
    """
    # pseudo-labels are confusing
    dataset.sub_classes = None

    # swith to eval mode
    model.eval()

    # this process deals only with a subset of the dataset
    local_nmb_data = len(dataset) // args.world_size
    indices = torch.arange(args.rank * local_nmb_data, (args.rank + 1) * local_nmb_data).int()

    if os.path.isfile(os.path.join(args.dump_path, 'super_class_assignments.pkl')):
        # super-class assignments have already been computed in a previous run
        super_class_assignements = pickle.load(open(os.path.join(args.dump_path, 'super_class_assignments.pkl'), 'rb'))
        logger.info('loaded super-class assignments')

        # dump cache
        where_helper = get_indices_sparse(super_class_assignements[indices])
        nmb_data_per_super_cluster = torch.zeros(args.nmb_super_clusters).cuda()
        for super_class in range(len(where_helper)):
            nmb_data_per_super_cluster[super_class] = len(where_helper[super_class][0])
    else:
        sampler = Subset_Sampler(indices)
        # we need a data loader
        loader = torch.utils.data.DataLoader(
            dataset,
            batch_size=args.batch_size,
            sampler=sampler,
            num_workers=args.workers,
            pin_memory=True,
        )

        # initialize cache, pca and centroids
        cache, centroids = initialize_cache(args, loader, model)

        # empty cuda cache (useful because we're about to use faiss on gpu)
        torch.cuda.empty_cache()

        ## perform clustering into super_clusters
        super_class_assignements, centroids_sc = distributed_kmeans(
            args,
            args.size_dataset,
            args.nmb_super_clusters,
            cache,
            args.rank,
            args.world_size,
            centroids,
        )

        # dump activations in the cache, one file per (super_class, rank)
        where_helper = get_indices_sparse(super_class_assignements[indices])
        nmb_data_per_super_cluster = torch.zeros(args.nmb_super_clusters).cuda()
        for super_class in range(len(where_helper)):
            ind_sc = where_helper[super_class][0]
            np.save(open(os.path.join(
                args.dump_path,
                'cache/',
                'super_class' + str(super_class) + '-' + str(args.rank),
            ), 'wb'), cache[ind_sc])
            nmb_data_per_super_cluster[super_class] = len(ind_sc)
        dist.barrier()

        # dump super_class assignment and centroids of super_class
        if not args.rank:
            pickle.dump(
                super_class_assignements,
                open(os.path.join(args.dump_path, 'super_class_assignments.pkl'), 'wb'),
            )
            pickle.dump(
                centroids_sc,
                open(os.path.join(args.dump_path, 'super_class_centroids.pkl'), 'wb'),
            )

    # size of the different super clusters (gathered from every rank)
    all_counts = [torch.zeros(args.nmb_super_clusters).cuda() for _ in range(args.world_size)]
    dist.all_gather(all_counts, nmb_data_per_super_cluster)
    all_counts = torch.cat(all_counts).cpu().long()
    all_counts = all_counts.reshape(args.world_size, args.nmb_super_clusters)
    logger.info(all_counts.sum(dim=0))

    # what are the data belonging to this super class
    dataset.subset_indexes = np.where(super_class_assignements == args.clustering_local_world_id)[0]
    # truncate so the subset is divisible by batch_size * local world size
    div = args.batch_size * args.clustering_local_world_size
    dataset.subset_indexes = dataset.subset_indexes[:len(dataset) // div * div]
    dist.barrier()

    # which files this process is going to read
    local_nmb_data = int(len(dataset) / args.clustering_local_world_size)
    low = np.long(args.clustering_local_rank * local_nmb_data)
    high = np.long(low + local_nmb_data)
    curr_ind = 0
    cache = torch.zeros(local_nmb_data, args.dim_pca, dtype=torch.float32)
    cumsum = torch.cumsum(all_counts[:, args.clustering_local_world_id].long(), 0).long()
    for r in range(args.world_size):
        # data in this bucket r: [cumsum[r - 1] : cumsum[r] - 1]
        low_bucket = np.long(cumsum[r - 1]) if r else 0

        # this bucket is empty
        if low_bucket > cumsum[r] - 1:
            continue
        if cumsum[r] - 1 < low:
            continue
        if low_bucket >= high:
            break

        # which are the data we are interested in inside this bucket ?
        ind_low = np.long(max(low, low_bucket))
        ind_high = np.long(min(high, cumsum[r]))
        cache_r = np.load(open(os.path.join(args.dump_path, 'cache/', 'super_class' + str(args.clustering_local_world_id) + '-' + str(r)), 'rb'))
        cache[curr_ind: curr_ind + ind_high - ind_low] = torch.FloatTensor(cache_r[ind_low - low_bucket: ind_high - low_bucket])
        curr_ind += (ind_high - ind_low)

    # randomly pick some centroids and dump them (local rank 0 only)
    centroids_path = os.path.join(args.dump_path, 'centroids' + str(args.clustering_local_world_id) + '.pkl')
    if not args.clustering_local_rank:
        centroids = cache[np.random.choice(
            np.arange(cache.shape[0]),
            replace=cache.shape[0] < args.k // args.nmb_super_clusters,
            size=args.k // args.nmb_super_clusters,
        )]
        pickle.dump(centroids, open(centroids_path, 'wb'), -1)
    dist.barrier()

    # read centroids
    centroids = pickle.load(open(centroids_path, 'rb')).cuda()

    # distributed kmeans into sub-classes
    cluster_assignments, centroids = distributed_kmeans(
        args,
        len(dataset),
        args.k // args.nmb_super_clusters,
        cache,
        args.clustering_local_rank,
        args.clustering_local_world_size,
        centroids,
        world_id=args.clustering_local_world_id,
        group=groups[args.clustering_local_world_id],
    )

    # free RAM
    del cache

    # write cluster assignments and centroids
    if not args.clustering_local_rank:
        pickle.dump(
            cluster_assignments,
            open(os.path.join(args.dump_path, 'cluster_assignments' + str(args.clustering_local_world_id) + '.pkl'), 'wb'),
        )
        pickle.dump(
            centroids,
            open(centroids_path, 'wb'),
        )
    dist.barrier()
    return cluster_assignments
ece4965e17720ca03c2c34b093a01ce446cf833b
3,637,083
import ast
import random


def t_rename_local_variables(the_ast, all_sites=False):
    """
    Replace local variables with VAR<i> placeholder names.

    Picks one assigned variable at random (or all of them when
    *all_sites* is true) and renames every occurrence. Returns
    ``(changed, the_ast)``.
    """
    changed = False

    # collect one representative Name node per distinct assigned id,
    # in walk order
    seen_ids = set()
    candidates = []
    for node in ast.walk(the_ast):
        if isinstance(node, ast.Name) and isinstance(node.ctx, ast.Store):
            if node.id not in seen_ids:
                seen_ids.add(node.id)
                candidates.append(node)

    if not candidates:
        return False, the_ast

    selected = candidates if all_sites else [random.choice(candidates)]
    replacement_idx = {node.id: idx for idx, node in enumerate(selected, start=1)}

    for node in ast.walk(the_ast):
        if isinstance(node, ast.Name) and node.id in replacement_idx:
            node.id = 'VAR' + str(replacement_idx[node.id])
            changed = True

    return changed, the_ast
8faeea81faac55d5d45b897776cd87cb508404a5
3,637,084
from typing import List


def get_scale(notes: List[str]) -> int:
    """Convert a list of notes to a scale constant.

    # Args
    - *notes*: list of notes in the scale, in ABC notation. Sharps are a
      pound sign before the note (e.g. '#A'), flats a lower-case b
      (e.g. 'bB').

    # Returns
    An integer bitmask representing the musical key or scale, usable as
    an argument to any of the MusicalHash methods.

    # Raises
    A ValueError if an invalid string is included in the input list.
    """
    note_map = {'A': 0x1,
                '#A': 0x2, 'bB': 0x2,
                'B': 0x4,
                'C': 0x8,
                '#C': 0x10, 'bD': 0x10,
                'D': 0x20,
                '#D': 0x40, 'bE': 0x40,
                'E': 0x80,
                'F': 0x100,
                '#F': 0x200, 'bG': 0x200,
                'G': 0x400,
                '#G': 0x800, 'bA': 0x800}
    mask = 0x0
    for note in notes:
        if note not in note_map:
            raise ValueError(
                'The string {} is not a valid musical note'.format(note))
        mask |= note_map[note]
    return mask
91cbcc7bfa05df52adf741b85f78beeabf819966
3,637,085
import math


def slurm_format_bytes_ceil(n):
    """
    Format bytes as text.

    SLURM expects KiB, MiB or Gib, but names it KB, MB, GB. SLURM does
    not handle Bytes, only starts at KB.

    >>> slurm_format_bytes_ceil(1)
    '1K'
    >>> slurm_format_bytes_ceil(1234)
    '2K'
    >>> slurm_format_bytes_ceil(12345678)
    '12M'
    >>> slurm_format_bytes_ceil(1234567890)
    '2G'
    >>> slurm_format_bytes_ceil(15000000000)
    '14G'
    """
    if n >= (1024 ** 3):
        return "%dG" % math.ceil(n / (1024 ** 3))
    if n >= (1024 ** 2):
        return "%dM" % math.ceil(n / (1024 ** 2))
    if n >= 1024:
        return "%dK" % math.ceil(n / 1024)
    # Below one KiB, round up to the 1K minimum SLURM accepts.
    # Fix: was `return "1K" % n`, which raises TypeError ('%' with no
    # conversion specifier); the doctest for 12345678 was also wrong
    # (ceil(12345678 / 2**20) == 12, not 13).
    return "1K"
ce48c778b9605105ed9b66a55d27796fb90499cc
3,637,086
def factory_payment_account(corp_number: str = 'CP0001234',
                            corp_type_code: str = 'CP',
                            payment_system_code: str = 'PAYBC'):
    """Build a PaymentAccount fixture with fixed party/account/site numbers."""
    fixed_fields = dict(
        party_number='11111',
        account_number='4101',
        site_number='29921',
    )
    return PaymentAccount(corp_number=corp_number,
                          corp_type_code=corp_type_code,
                          payment_system_code=payment_system_code,
                          **fixed_fields)
896fe2ac0162455c4da97bd629d0e3f2d9b2a1e2
3,637,087
import glob


def p_l_species_input_geos(wd, ver='1.7', rm_multiple_tagged_rxs=False,
                           debug=False):
    """
    Extract prod/loss species (input.geos) and reaction tags (globchem.dat)

    Parameters
    ----------
    wd (str): Specify the wd to get the results from a run.
    debug (boolean): legacy debug option, replaced by python logging
    ver (str): The GEOS-Chem halogen version that is being used
    rm_multiple_tagged_rxs(boolean): only return one tag per rxn.

    Returns
    -------
    (list) globchem.dat tags and prod/loss ("PD") vars from input.geos

    Notes
    -----
     - This function is useful, but update to GEOS-Chem flexchem ( in >v11)
    will make it redundent and therefore this is not being maintained.
    """
    # find and open input.geos file
    fn = glob.glob(wd+'/*input.geos*')[0]
    if any([(i in fn) for i in ('~', '#')]):
        print(('Trying next "input.geos" file - as FAIL for :', fn))
        fn = glob.glob(wd+'/*input.geos*')[1]
    if debug:
        print(('p_l_species_input_geos called using : ', wd, fn))
    # NOTE(review): the file is opened in binary mode, so each `row.split()`
    # below yields bytes while the comparison tokens are str — under
    # Python 3 the matches never fire and `rxns` stays unbound
    # (NameError at the list comprehension); presumably written for
    # Python 2 — confirm.
    file_ = open(fn, 'rb')

    # Read in just the prod loss section
    strs_in_1st_line = 'Number', 'of', 'P/L', 'families'
    section_line_divider = '------------------------+----------' + \
        '--------------------------------------------'
    readrxn = False
    for row in file_:
        row = row.split()
        # once at prod/loss section, start added to list
        if all([i in row for i in strs_in_1st_line]):
            readrxn = True
        # if not at end of prod/loss section, add to list
        if section_line_divider in row:
            readrxn = False
        if readrxn:
            # first append raises NameError, which seeds the list in except
            try:
                rxns.append(row)
            except:
                rxns = [row]

    # -- Only consider 'Family' ( no headers e.g. 'families' )
    rxns = [i for i in rxns if ('families' not in i)]
    rxns = [[i.replace(':', '') for i in r] for r in rxns]

    # Kludge, adjust for extra space 12-99
    # ( This is no longer required for 1.7 + )
    if ver == '1.6':
        [i.pop(0) for i in rxns if ('th' not in i[0])]

    # Extract just PD (input.geos) and vars (globchem.dat vars )
    PD = [rxn[4] for rxn in rxns]
    vars = [rxn[5:] for rxn in rxns]
    if debug:
        print((rxns, PD, vars, ver))

    # remove p/l with muliple values ( start from 12th input) - Kludge?
    if rm_multiple_tagged_rxs:
        PD, vars = [i[11:] for i in (PD, vars)]
        vars = [i[0] for i in vars]

    return PD, vars
f316fe2616c8c37e129b88d50b7e1db1330bfd80
3,637,088
def foo():
    """Demonstrate argument layout for calls with many keyword arguments.

    (Original docstring in Chinese: formatting conventions for passing
    multiple parameters to a function and for class instantiation.)
    """
    ret = foo_long(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8)

    # Multi-line layout when instantiating a class with many keyword args.
    object_ = ClassName(
        a=1,
        b=2,
        c=3,
        d=4,
        e=5,
        f=6,
        g=7,
        h=8,
    )

    return ret
4571ef723cab1601acfa01eb0765eaf8002df2e0
3,637,089
def posture_seq(directory, postures, sampling_fraction):
    """posture_seq grabs sampled locomotion files from a directory and
    converts them to strings of posture sequences.

    Input:
        directory = the directory containing locomotion files
        postures = the mat file or numpy array of template postures
        sampling_fraction = the fraction of files you want to sample
    Output:
        all_postures = a list of posture sequences (of type string)
    """
    num_postures = len(postures)
    angle_data = loading_data(directory, sampling_fraction)[0]

    # Fix: `all_postures` was never initialized (NameError on first
    # append), and the inner `for i in range(len(angles))` clobbered the
    # outer `while i` counter, corrupting the traversal.
    all_postures = []
    for angles, m_a in angle_data:
        # skip short recordings
        if len(m_a) <= 1000:
            continue
        posture_sequence = ''
        for frame in angles:
            # nearest template posture by Euclidean distance
            distances = [np.linalg.norm(frame - postures[:, j])
                         for j in range(num_postures)]
            nearest = distances.index(min(distances))
            posture_sequence = posture_sequence + ' ' + str(nearest)
        all_postures.append(posture_sequence)
    return all_postures
7e1554f85dfc68b293c9db5a5db3aa5bd6414bff
3,637,090
from ._finite_differences import _window1d, _lincomb
import torch


def membrane_diag(voxel_size=1, bound='dct2', dim=None, weights=None):
    """Diagonal of the membrane regulariser.

    If no weight map is provided, the diagonal of the membrane
    regulariser is a scaled identity with scale `2 * alpha`, where
    `alpha = vx.reciprocal().square().sum()`

    However, is a weight map is provided, the diagonal of the regulariser
    is a convolved version of the weight map. In 2D, the convolution
    kernel has a first order "diamond" shape:
                                    b0
                                b1  a   b1
                                    b0

    Parameters
    ----------
    weights : (..., *spatial) tensor
        Weights from the reweighted least squares scheme
    voxel_size : float or sequence[float], default=1
        Voxel size
    bound : str, default='dct2'
        Boundary condition.
    dim : int, optional
        Number of spatial dimensions. Default: from voxel_size

    Returns
    -------
    diag : () or (..., *spatial) tensor
        Convolved weight map if provided. Else, central convolution weight.
    """
    vx = core.utils.make_vector(voxel_size)
    if dim is None:
        dim = len(vx)
    vx = core.utils.make_vector(vx, dim)
    if weights is not None:
        weights = torch.as_tensor(weights)
        backend = dict(dtype=weights.dtype, device=weights.device)
        # move spatial dimensions to the front
        spdim = list(range(weights.dim() - dim, weights.dim()))
        weights = core.utils.movedim(weights, spdim, list(range(dim)))
    else:
        backend = dict(dtype=vx.dtype, device=vx.device)
    vx = vx.to(**backend)
    # squared reciprocal voxel size = per-axis kernel weight
    vx = vx.square().reciprocal()

    if weights is None:
        # no weight map: the diagonal is the constant central weight
        return 2 * vx.sum()

    # convolve the weight map with the diamond kernel: centre weight
    # 2 * sum(1/vx^2), plus 1/vx_d^2 for each of the two neighbours
    # along every spatial axis d
    values = [[weights]]
    dims = [None] + [d for d in range(dim) for _ in range(2)]
    kernel = [2 * vx.sum()]
    for d in range(dim):
        values.extend(_window1d(weights, d, [-1, 1], bound=bound))
        kernel += [vx[d], vx[d]]
    weights = _lincomb(values, kernel, dims, ref=weights)

    # send spatial dimensions to the back
    weights = core.utils.movedim(weights, list(range(dim)), spdim)
    return weights
3329c43aa5ae025a14660e1ddd4c1f658740e1d4
3,637,091
import os import tty def openpty(mode=None, winsz=None, name=False): """openpty() -> (master_fd, slave_fd) Open a pty master/slave pair, using os.openpty() if possible.""" master_fd, slave_fd = os.openpty() if mode: tty.tcsetattr(slave_fd, tty.TCSAFLUSH, mode) if tty.HAVE_WINSZ and winsz: tty.tcsetwinsize(slave_fd, winsz) if name: return master_fd, slave_fd, os.ttyname(slave_fd) else: return master_fd, slave_fd
08dcc90967b32509775e86d6da338788e15014c4
3,637,092
def get_groups(parsed, store, conf):
    """
    Return groups based on argument provided

    :param Namespace parsed: arguments parsed
    :param store: Otter scaling group collection
    :param dict conf: config

    :return: Deferred fired with list of {"tenantId": .., "groupId": ..} dict
    """
    log = mock_log()
    if parsed.group:
        groups = [g.split(":") for g in parsed.group]
        return succeed(
            [{"tenantId": tid, "groupId": gid} for tid, gid in groups])
    elif parsed.all:
        d = store.get_all_valid_groups()
    elif parsed.tenant_id:
        d = get_groups_of_tenants(log, store, parsed.tenant_id)
    elif parsed.disabled_tenants:
        # Fix: previously `d.addCallback(filter(<pred>))` — the builtin
        # filter() requires an iterable argument, so that call raised
        # TypeError before the callback ever ran.
        non_conv_tenants = set(conf["non-convergence-tenants"])
        d = store.get_all_valid_groups()
        d.addCallback(
            lambda groups: [g for g in groups
                            if g["tenantId"] not in non_conv_tenants])
    elif parsed.conf_conv_tenants:
        d = get_groups_of_tenants(log, store, conf["convergence-tenants"])
    else:
        raise SystemExit("Unexpected group selection")
    return d
0441863984173236b09b50987c6f22838679a497
3,637,093
import json


def get_content_details(site_code, release_uuid, content_type, content_key):
    """Fetch a document from a content release and load its dynamic elements.

    Resolves the release from *release_uuid* (or the live release when it is
    falsy), retrieves the requested document, falling back to the base
    release for preview releases (status == 0), and returns the parsed
    document dict. On a publisher-API error, the raw error response dict is
    returned instead.
    """
    publisher_api = PublisherAPI()
    content_release = None
    try:
        if release_uuid:
            # get ContentRelease
            content_release = WSSPContentRelease.objects.get(
                site_code=site_code,
                uuid=release_uuid,
            )
        else:
            # get live ContentRelease
            response = publisher_api.get_live_content_release(site_code)
            if response['status'] == 'error':
                return response
            else:
                release = response['content']
                content_release = WSSPContentRelease.objects.get(id=release.id)
                release_uuid = content_release.uuid
    except WSSPContentRelease.DoesNotExist:
        # NOTE(review): content_release stays None here; the fallback
        # branch below would then raise AttributeError — confirm intended.
        pass

    # Fetch document from the content release.
    response = publisher_api.get_document_from_content_release(
        site_code,
        release_uuid,
        content_key,
        content_type,
    )

    base_content_release = None
    if response['status'] == 'error' and response['error_code'] == 'release_document_does_not_exist':
        # Release doc not found, try in the base release for preview releases.
        if content_release.status == 0:
            if content_release.use_current_live_as_base_release:
                response = publisher_api.get_live_content_release(site_code)
                if response['status'] == 'success':
                    release = response['content']
                    base_content_release = WSSPContentRelease.objects.get(id=release.id)
            else:
                base_content_release = content_release.base_release

        if base_content_release != None:
            # Fetch document from the base content release if available (should only happen for preview releases).
            response = publisher_api.get_document_from_content_release(
                site_code,
                base_content_release.uuid,
                content_key,
                content_type,
            )

    if response['status'] == 'success':
        data = json.loads(response['content'].document_json)
        # enrich the document with its dynamic elements, if any
        response_extra = publisher_api.get_document_extra_from_content_release(
            site_code,
            release_uuid,
            content_key,
            content_type,
        )
        if response_extra['status'] == 'success':
            # best-effort: missing/invalid dynamic_element_keys are ignored
            try:
                dynamic_element_keys = json.loads(response_extra['content'].get(key='dynamic_element_keys').content)
                data, updated = document_load_dynamic_elements(content_release, data, dynamic_element_keys)
            except:
                pass
    else:
        return response
    return data
f71a4e4584474e24cfb6d25aad2465538575cbdf
3,637,094
import scipy def _czt(x, M=None, W=None, A=1.0): """Calculate CZT (Stripped down to the basics).""" # Unpack arguments N = len(x) if M is None: M = N if W is None: W = np.exp(-2j * np.pi / M) A = np.complex128(A) W = np.complex128(W) # CZT algorithm k = np.arange(max(M, N)) Wk22 = W ** (-(k ** 2) / 2) r = Wk22[:N] c = Wk22[:M] X = A ** -k[:N] * x / r X = scipy.linalg.matmul_toeplitz((c, r), X) X /= c return X
a0852eacd8d4e35e0c6e96cc59e8692d9d806c5d
3,637,095
import subprocess
import threading


def measure_link_vsize(output_file, args):
    """
    Execute |args|, and measure the maximum virtual memory usage of the
    process, printing it to stdout when finished.

    Returns the linker process's exit code.
    """
    linker = subprocess.Popen(args)
    watcher = threading.Thread(
        target=measure_vsize_threadfunc, args=(linker, output_file)
    )
    watcher.start()
    # Wait for the linker to finish, then let the monitor thread drain.
    status = linker.wait()
    watcher.join()
    return status
ca918a111dd5c8a627538f30554cae21331f9558
3,637,096
from typing import Any from typing import Optional def build_obs_act_forward_fc( n_out: int, depth: int, hidden: int, act_layer: Any, last_layer: Optional[Any] = None, ) -> hk.Transformed: """Build a simple fully-connected forward step that takes an observation & an action. Args: n_out (int): Number of outputs. depth (int): Depth of layers. hidden (int): # of hidden units of fc. act_layer (Any): Activation layer. last_layer (Any): Last activation layer. Returns: hk.Transformed: Takes [batch x ?] observation and [batch x ?] actions. Returns [batch x n_out] Array. """ @jax.vmap def forward(obs: Array, act: Array) -> Array: # concat observation and action chex.assert_equal_rank((obs, act)) obs_act = jnp.hstack((obs, act)) # set up layers modules = [] if depth > 0: modules.append(hk.Linear(hidden)) for _ in range(depth - 1): modules += [act_layer, hk.Linear(hidden)] modules += [act_layer, hk.Linear(n_out)] else: modules.append(hk.Linear(n_out)) if last_layer is not None: modules.append(last_layer) return hk.Sequential(modules)(obs_act.astype(float)) return hk.without_apply_rng(hk.transform(forward))
0d330910730ccf80213852aa7cd08950f09e6300
3,637,097
def update_nested(key, d, other): """Update *d[key]* with the *other* dictionary preserving data. If *d* doesn't contain the *key*, it is updated with *{key: other}*. If *d* contains the *key*, *d[key]* is inserted into *other[key]* (so that it is not overriden). If *other* contains *key* (and possibly more nested *key*-s), then *d[key]* is inserted into the deepest level of *other.key.key...* Finally, *d[key]* becomes *other*. Example: >>> context = {"variable": {"name": "x"}} >>> new_var_context = {"name": "n"} >>> update_nested("variable", context, copy.deepcopy(new_var_context)) >>> context == {'variable': {'name': 'n', 'variable': {'name': 'x'}}} True >>> >>> update_nested("variable", context, {"name": "top"}) >>> context == { ... 'variable': {'name': 'top', ... 'variable': {'name': 'n', 'variable': {'name': 'x'}}} ... } True *other* is modified in general. Create that on the fly or use *copy.deepcopy* when appropriate. Recursive dictionaries (containing references to themselves) are strongly discouraged and meaningless when nesting. If *other[key]* is recursive, :exc:`.LenaValueError` may be raised. """ # there was an idea to add a keyword argument copy_other # (by default True), but the user can do that him/herself # with copy.deepcopy when needed. Otherwise it would be # unnecessary complication of this interface. # Only one key is nested. This encourages design when # 1) elements combine their contexts into one key # (like {"split_into_bins": {"variable": {}, "histogram": {}}}) # 2) elements change only one key ("variable", "histogram",...). def get_most_nested_subdict_with(key, d): nested_dicts = [] while True: if key in d: if d in nested_dicts: raise lena.core.LenaValueError( "recursive *other* is forbidden" ) nested_dicts.append(d) d = d[key] else: return d if key in d: other_most_nested = get_most_nested_subdict_with(key, other) # insert d[key] at the lowest other.key.key.... other_most_nested[key] = d[key] d[key] = other
efbbfd576652710c92939581c48e32edce1a956e
3,637,098
def quicksort(arr, low, high): """ Quicksort function uses the partition helper function. """ if low < high: pi = partition(arr, low, high) quicksort(arr, low, pi-1) quicksort(arr, pi+1, high) return arr
aa51f8536f47f8529c2bda74ea96138062d939e7
3,637,099