Dataset columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, range 0 to 3.66M).
import time
import hmac
import hashlib
import json

import pandas as pd
import requests


def bittrex_get_balance(api_key, api_secret):
    """Get the total balances for your Bittrex account.

    Args (required):
        api_key (str)
        api_secret (str)

    Returns:
        results (DataFrame): balance information for each crypto
    """
    nonce = int(time.time() * 1000)
    url = "https://bittrex.com/api/v1.1/account/getbalances?apikey={}&nonce={}".format(api_key, nonce)
    # Sign the full URL (including the query string) with the API secret.
    sign = hmac.new(api_secret.encode('utf-8'), url.encode('utf-8'), hashlib.sha512).hexdigest()
    headers = {'apisign': sign}
    r = requests.get(url, headers=headers)
    j = json.loads(r.text)
    results = j['result']
    df = pd.DataFrame.from_dict(results)
    return df
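A minimal usage sketch for the function above; the key values are placeholders (the call performs a live, authenticated request against the Bittrex v1.1 API, which has since been retired):

if __name__ == '__main__':
    # Hypothetical credentials; substitute your own API key pair.
    balances = bittrex_get_balance(api_key='YOUR_KEY', api_secret='YOUR_SECRET')
    print(balances.head())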
a90979a495fe9410d76996006063ca01fdcfe04c
22,700
import libvirt

# Note: db, logger, get_snapshot_type and log_exception are assumed to be
# provided by the surrounding application (a web2py-style context).

def check_vm_snapshot_sanity(vm_id):
    """Checks if the snapshot information of a VM is in sync with the actual snapshots of the VM."""
    vm_data = db.vm_data[vm_id]
    snapshot_check = []
    try:
        conn = libvirt.openReadOnly('qemu+ssh://root@' + vm_data.host_id.host_ip.private_ip + '/system')
        domain = conn.lookupByName(vm_data.vm_identity)
        dom_snapshot_names = domain.snapshotListNames(0)
        logger.debug(dom_snapshot_names)
        conn.close()
        snapshots = db(db.snapshot.vm_id == vm_id).select()
        for snapshot in snapshots:
            if snapshot.snapshot_name in dom_snapshot_names:
                snapshot_check.append({'snapshot_name': snapshot.snapshot_name,
                                       'snapshot_type': get_snapshot_type(snapshot.type),
                                       'message': 'Snapshot present',
                                       'operation': 'None'})
                dom_snapshot_names.remove(snapshot.snapshot_name)
            else:
                snapshot_check.append({'snapshot_id': snapshot.id,
                                       'snapshot_name': snapshot.snapshot_name,
                                       'snapshot_type': get_snapshot_type(snapshot.type),
                                       'message': 'Snapshot not present',
                                       'operation': 'Undefined'})
        # Any names still left on the domain have no database record.
        for dom_snapshot_name in dom_snapshot_names:
            snapshot_check.append({'vm_name': vm_data.vm_identity,
                                   'snapshot_name': dom_snapshot_name,
                                   'snapshot_type': 'Unknown',
                                   'message': 'Orphan Snapshot',
                                   'operation': 'Orphan'})
    except Exception:
        log_exception()
    logger.debug(snapshot_check)
    return (vm_data.id, vm_data.vm_name, snapshot_check)
c73985741c154178151aa6e2b2e3a9a64b0eee12
22,701
def evaluate_functions(payload,
                       context,
                       get_node_instances_method,
                       get_node_instance_method,
                       get_node_method):
    """Evaluate functions in payload.

    :param payload: The payload to evaluate.
    :param context: Context used during evaluation.
    :param get_node_instances_method: A method for getting node instances.
    :param get_node_instance_method: A method for getting a node instance.
    :param get_node_method: A method for getting a node.
    :return: payload.
    """
    context = PostProcessingContext(None,
                                    context,
                                    get_node_instances_method,
                                    get_node_instance_method,
                                    get_node_method)
    return context.evaluate(payload)
2cac04f35ac6032ec0d06ff5c25da9b64c700f7c
22,702
from os.path import join

import numpy as np
import torch
from torch.distributions.categorical import Categorical
from torchvision.utils import save_image

# Note: env, vae, mdrnn, transform, device, a_rollout, dream_point,
# time_limit, mdir and LATENT_RECURRENT_SIZE are assumed to be defined at
# module level.

def rollout(render=False):
    """Execute a rollout and return the minus cumulative reward.

    Loads parameters into the controller and executes a single rollout.
    This is the main API of this class.

    :returns: minus cumulative reward
    """
    print('a rollout dims', len(a_rollout))
    obs = env.reset()
    # This first render is required!
    env.render()
    next_hidden = [torch.zeros(1, LATENT_RECURRENT_SIZE).to(device) for _ in range(2)]
    cumulative = 0
    i = 0
    rollout_dict = {k: [] for k in ['obs', 'rew', 'act', 'term']}
    obs = transform(obs).unsqueeze(0).to(device)
    mu, logsigma = vae.encoder(obs)
    next_z = mu + logsigma.exp() * torch.randn_like(mu)
    while True:
        action = torch.Tensor(a_rollout[i]).to(device).unsqueeze(0)
        # The reward and done outputs of the MDRNN are ignored here.
        mus, sigmas, logpi, _, _, next_hidden = mdrnn(action, next_z, next_hidden)
        # Decode the current z to see what it looks like.
        recon_obs = vae.decoder(next_z)
        if i > dream_point:
            if type(obs) != torch.Tensor:
                obs = transform(obs).unsqueeze(0)
            to_save = torch.cat([obs, recon_obs.cpu()], dim=0)
            save_image(to_save, join(mdir, 'dream/sample_' + str(i) + '.png'))
        obs, reward, done, _ = env.step(a_rollout[i])
        if i < dream_point or np.random.random() > 0.95:
            print('using real obs at point:', i)
            obs = transform(obs).unsqueeze(0).to(device)
            mu, logsigma = vae.encoder(obs)
            next_z = mu + logsigma.exp() * torch.randn_like(mu)
        else:
            # Sample the next z from the mixture density output.
            g_probs = Categorical(probs=torch.exp(logpi).permute(0, 2, 1))
            which_g = g_probs.sample()
            mus_g = torch.gather(mus.squeeze(), 0, which_g)
            sigs_g = torch.gather(sigmas.squeeze(), 0, which_g)
            next_z = mus_g + sigs_g * torch.randn_like(mus_g)
        if render:
            env.render()
        cumulative += reward
        if done or i >= time_limit:
            return -cumulative
        i += 1
9165642f37ef1ea6882c6be0a6fe493d2aff342c
22,703
import requests
import json


def get_request(url, access_token, origin_address: str = None):
    """Create an HTTP GET request."""
    api_headers = {
        'Authorization': 'Bearer {0}'.format(access_token),
        'X-Forwarded-For': origin_address
    }
    response = requests.get(url, headers=api_headers)
    if response.status_code == 200:
        return json.loads(response.text)
    else:
        raise Exception(response.text)
0c7f577132b1fb92a8ea9073cb68e9b7bf3cd2a5
22,704
import pandas as pd

# Note: make_agent_sweep is assumed to be defined elsewhere in the module.

def join_metadata(df: pd.DataFrame) -> pd.DataFrame:
    """Joins data including 'agent_id' to work out agent settings."""
    assert 'agent_id' in df.columns
    sweep = make_agent_sweep()
    data = []
    for agent_id, agent_ctor_config in enumerate(sweep):
        agent_params = {'agent_id': agent_id}
        agent_params.update(agent_ctor_config.settings)
        data.append(agent_params)
    agent_df = pd.DataFrame(data)
    # Suffixes should not be needed... but added to be safe in case of clash.
    return pd.merge(df, agent_df, on='agent_id', suffixes=('', '_agent'))
c09a524484424fb0fdf329fd73e3ea489b8ae523
22,705
def mocked_get_release_by_id(id_, includes=[], release_status=[], release_type=[]):
    """Mimic musicbrainzngs.get_release_by_id, accepting only a restricted
    list of MB ids (ID_RELEASE_0, ID_RELEASE_1). The returned dict differs
    only in the release title and artist name, so that ID_RELEASE_0 is a
    closer match to the items created by ImportHelper._create_import_dir()."""
    # Map IDs to (release title, artist), so the distances are different.
    releases = {ImportMusicBrainzIdTest.ID_RELEASE_0: ('VALID_RELEASE_0', 'TAG ARTIST'),
                ImportMusicBrainzIdTest.ID_RELEASE_1: ('VALID_RELEASE_1', 'DISTANT_MATCH')}
    return {
        'release': {
            'title': releases[id_][0],
            'id': id_,
            'medium-list': [{
                'track-list': [{
                    'id': 'baz',
                    'recording': {
                        'title': 'foo',
                        'id': 'bar',
                        'length': 59,
                    },
                    'position': 9,
                    'number': 'A2'
                }],
                'position': 5,
            }],
            'artist-credit': [{
                'artist': {
                    'name': releases[id_][1],
                    'id': 'some-id',
                },
            }],
            'release-group': {
                'id': 'another-id',
            }
        }
    }
647b9bf54f27353834a30ec907ecc5114a782b93
22,706
import os
import pickle
import warnings

import numpy as np
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor

# Note: load and gen_random_forest are assumed to be provided by the
# surrounding module.

def mp_rf_optimizer_func(fn_tuple):
    """Creates a random forest; executed in parallel across worker processes."""
    fn, flags, file_suffix = fn_tuple
    n_trees = flags["n_trees"]
    is_regressor = flags["is_regressor"]
    sample_size = flags["sample_size"]
    n_features = flags["n_features"]
    max_depth = flags["max_depth"]
    if not file_suffix:
        file_suffix = "none"
    path_split = fn.split("/")
    path = "/".join(path_split[:-1]) + "/"
    fn_split = path_split[-1].split(".")
    cv_file = path + ".".join(fn_split[0:-2] + [file_suffix])
    rfb_file = path + ".".join(fn_split[0:-2] + ["rb", "bin"])
    # Compressing the table first would make the job easier for the random
    # forest; compression can usually achieve a ratio of 50x or more.
    train = load(fn)
    n_features = "auto" if not n_features else float(n_features)
    if max_depth:
        max_depth = int(max_depth)
    print("... creating random forest for " + os.path.basename(fn) +
          " with " + str(sample_size) + " samples")
    if is_regressor:
        rf = RandomForestRegressor(
            n_estimators=n_trees,
            max_depth=max_depth,
            max_features=n_features,
            bootstrap=True,
            random_state=42,
            n_jobs=1)
    else:
        rf = RandomForestClassifier(
            n_estimators=n_trees,
            max_depth=max_depth,
            max_features=n_features,
            bootstrap=True,
            random_state=42,
            n_jobs=1)
    if sample_size and train.shape[0] >= 10000:
        # Hold out sample_size rows for testing, train on the rest.
        sample_size = int(sample_size)
        np.random.seed(42)
        idx = np.random.choice(train.shape[0], train.shape[0], replace=False)
        x = train[idx[sample_size:], 0:-1]
        y = train[idx[sample_size:], -1]
        x_test = train[idx[0:sample_size], 0:-1]
        y_test = train[idx[0:sample_size], -1]
    else:
        x = train[:, 0:-1]
        y = train[:, -1]
        x_test = x
        y_test = y
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        rf.fit(x, y)
    func_name = fn_split[0]
    # Bit widths and sign flags for the inputs and outputs.
    bits = np.ceil(np.log2(np.abs(
        np.amax(x, axis=0) - np.amin(x, axis=0) + 1))).astype(np.int32)
    is_neg = (np.amin(x, axis=0) < 0).astype(np.int8)
    o_bits = np.ceil(np.log2(np.abs(
        np.amax(y, axis=0) - np.amin(y, axis=0) + 1))).astype(np.int32)
    o_is_neg = (np.amin(y, axis=0) < 0).astype(np.int8)
    rf.bits = bits
    rf.is_neg = is_neg
    rf.o_bits = o_bits
    rf.o_is_neg = o_is_neg
    code = gen_random_forest(
        rf, func_name, bits, is_neg, o_bits, o_is_neg,
        is_regressor=is_regressor, is_top_level=False,
        is_cc=file_suffix == "cc")
    open(cv_file, "w").write("\n".join(code))
    p = 1.0 * np.round(rf.predict(x_test))
    dy = np.max(train[:, -1]) - np.min(train[:, -1])
    error = np.sum(np.abs(y_test - p)) / (1.0 * p.shape[0] * dy)
    score = np.sum(y_test == p) / p.shape[0]
    print("y:", np.max(y_test), y_test[0:30].astype(np.int32))
    print("p:", np.max(p), p[0:30].astype(np.int32))
    print("... model {} with score of {:.2f}% and error of {:.2f}%".format(
        func_name, 100.0 * score, 100.0 * error))
    print("... saving model in {}".format(rfb_file))
    pickle.dump(rf, open(rfb_file, "wb"))
    return rfb_file
b046a3dc6d49f4f39ee72893ae5fe751fff3f437
22,707
import os

# Note: cloudstorage, common.clovis_paths, clovis_logger and
# LoadingTraceDatabase come from the surrounding App Engine project.

def GetTracePaths(bucket):
    """Returns a list of trace files in a bucket.

    Finds and loads the trace databases, and returns their content as a list
    of paths. This function assumes a specific structure for the files in the
    bucket. These assumptions must match the behavior of the backend:
    - The trace databases are located in the bucket.
    - The trace database files are the only objects with the
      TRACE_DATABASE_PREFIX prefix in their name.

    Returns:
        list: The list of paths to traces, as strings.
    """
    traces = []
    prefix = os.path.join('/', bucket, common.clovis_paths.TRACE_DATABASE_PREFIX)
    file_stats = cloudstorage.listbucket(prefix)
    for file_stat in file_stats:
        database_file = file_stat.filename
        clovis_logger.info('Loading trace database: ' + database_file)
        with cloudstorage.open(database_file) as remote_file:
            json_string = remote_file.read()
        if not json_string:
            clovis_logger.warning('Failed to download: ' + database_file)
            continue
        database = LoadingTraceDatabase.FromJsonString(json_string)
        if not database:
            clovis_logger.warning('Failed to parse: ' + database_file)
            continue
        for path in database.ToJsonDict():
            traces.append(path)
    return traces
a067b21d570de398034ad74bbc04f5aca028ac88
22,708
import typing
import pathlib
import os

# Note: RUNFILES_PATTERN is assumed to be a module-level compiled regex.

def FindRunfilesDirectory() -> typing.Optional[pathlib.Path]:
    """Find the '.runfiles' directory, if there is one.

    Returns:
        The absolute path of the runfiles directory, else None if not found.
    """
    # Follow symlinks, looking for my module space.
    stub_filename = os.path.abspath(__file__)
    module_space = stub_filename + '.runfiles'
    if os.path.isdir(module_space):
        return pathlib.Path(module_space)
    match = RUNFILES_PATTERN.match(os.path.abspath(__file__))
    if match:
        return pathlib.Path(match.group(1))
    return None
44390f1a4e973e8b5ee8fa1060706447ae07263f
22,709
# Note: CursorKind (from clang.cindex), get_tokens and extract_bracketed are
# assumed to be available in the surrounding module.

def get_name_with_template_specialization(node):
    """node is a class; returns the name, possibly with the <..> of the specialization appended."""
    if node.kind not in (CursorKind.CLASS_DECL,
                         CursorKind.STRUCT_DECL,
                         CursorKind.CLASS_TEMPLATE_PARTIAL_SPECIALIZATION):
        return None
    tokens = get_tokens(node)
    name = node.spelling
    if tokens and tokens[0] == 'template':
        t = tokens[len(extract_bracketed(tokens[1:])) + 3:]
        if t and t[0] == '<':
            name = name + ''.join(extract_bracketed(t))
    return name
4cf19f9f383174789c7ff3003bc1144167d3e84d
22,710
from typing import Optional
from typing import Union

import numpy as np
import pandas as pd

# Note: TimeSeries and _generate_index come from the darts library internals.

def linear_timeseries(
    start_value: float = 0,
    end_value: float = 1,
    start: Optional[Union[pd.Timestamp, int]] = pd.Timestamp("2000-01-01"),
    end: Optional[Union[pd.Timestamp, int]] = None,
    length: Optional[int] = None,
    freq: str = "D",
    column_name: Optional[str] = "linear",
    dtype: np.dtype = np.float64,
) -> TimeSeries:
    """
    Creates a univariate TimeSeries with a starting value of `start_value`
    that increases linearly such that it takes on the value `end_value` at
    the last entry of the TimeSeries. This means that the difference between
    two adjacent entries will be equal to
    (`end_value` - `start_value`) / (`length` - 1).

    Parameters
    ----------
    start_value
        The value of the first entry in the TimeSeries.
    end_value
        The value of the last entry in the TimeSeries.
    start
        The start of the returned TimeSeries' index. If a pandas Timestamp
        is passed, the TimeSeries will have a pandas DatetimeIndex. If an
        integer is passed, the TimeSeries will have a pandas Int64Index.
        Works only with either `length` or `end`.
    end
        Optionally, the end of the returned index. Works only with either
        `start` or `length`. If `start` is set, `end` must be of the same
        type as `start`. Else, it can be either a pandas Timestamp or an
        integer.
    length
        Optionally, the length of the returned index. Works only with either
        `start` or `end`.
    freq
        The time difference between two adjacent entries in the returned
        TimeSeries. Only effective if `start` is a pandas Timestamp. A
        DateOffset alias is expected; see `docs
        <https://pandas.pydata.org/pandas-docs/stable/user_guide/TimeSeries.html#dateoffset-objects>`_.
    column_name
        Optionally, the name of the value column for the returned TimeSeries.
    dtype
        The desired NumPy dtype (np.float32 or np.float64) for the resulting series.

    Returns
    -------
    TimeSeries
        A linear TimeSeries created as indicated above.
    """
    index = _generate_index(start=start, end=end, freq=freq, length=length)
    values = np.linspace(start_value, end_value, len(index), dtype=dtype)
    return TimeSeries.from_times_and_values(
        index, values, freq=freq, columns=pd.Index([column_name])
    )
ae8ef8252beee1e799182d0aaa499167c1abb78d
22,711
from typing import Dict

# Note: Matrix, Scalar, MultiReturn and VALID_INPUT_TYPES come from the
# SystemDS Python API.

def outlierBySd(X: Matrix, max_iterations: int, **kwargs: Dict[str, VALID_INPUT_TYPES]):
    """Builtin function for detecting and repairing outliers using standard deviation.

    :param X: Matrix X
    :param k: threshold values 1, 2, 3 for 68%, 95%, 99.7% respectively (3-sigma rule)
    :param repairMethod: values: 0 = delete rows having outliers,
        1 = replace outliers as zeros, 2 = replace outliers as missing values
    :param max_iterations: values: 0 = arbitrary number of iterations until
        all outliers are removed, n = any constant defined by user
    :return: Matrix X with no outliers
    """
    params_dict = {'X': X, 'max_iterations': max_iterations}
    params_dict.update(kwargs)
    vX_0 = Matrix(X.sds_context, '')
    vX_1 = Matrix(X.sds_context, '')
    vX_2 = Matrix(X.sds_context, '')
    vX_3 = Scalar(X.sds_context, '')
    vX_4 = Scalar(X.sds_context, '')
    output_nodes = [vX_0, vX_1, vX_2, vX_3, vX_4]
    op = MultiReturn(X.sds_context, 'outlierBySd', output_nodes, named_input_nodes=params_dict)
    vX_0._unnamed_input_nodes = [op]
    vX_1._unnamed_input_nodes = [op]
    vX_2._unnamed_input_nodes = [op]
    vX_3._unnamed_input_nodes = [op]
    vX_4._unnamed_input_nodes = [op]
    return op
eda872a6dd6f8de22620ecf599381d186641a772
22,712
from typing import Dict


def encode_address(address: Dict) -> bytes:
    """Creates a bytes representation of address data.

    Args:
        address: Dictionary containing the address data.

    Returns:
        Bytes to be saved as the address value in the DB.
    """
    address_str = ''
    address_str += address['balance'] + '\0'
    address_str += address['code'] + '\0'
    address_str += str(address['inputTxIndex']) + '\0'
    address_str += str(address['outputTxIndex']) + '\0'
    address_str += str(address['minedIndex']) + '\0'
    address_str += address['tokenContract'] + '\0'
    address_str += str(address['inputTokenTxIndex']) + '\0'
    address_str += str(address['outputTokenTxIndex']) + '\0'
    address_str += str(address['inputIntTxIndex']) + '\0'
    address_str += str(address['outputIntTxIndex']) + '\0'
    return address_str.encode()
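A quick, self-contained usage sketch of the encoder above; the field values are made up, and the NUL-delimited layout is simply what the function produces:

sample = {
    'balance': '1000', 'code': '0x',
    'inputTxIndex': 3, 'outputTxIndex': 1, 'minedIndex': 0,
    'tokenContract': '', 'inputTokenTxIndex': 0, 'outputTokenTxIndex': 0,
    'inputIntTxIndex': 0, 'outputIntTxIndex': 0,
}
encoded = encode_address(sample)
# Each field is terminated by a NUL byte; split (dropping the trailing empty
# element) to recover the ten fields.
print(encoded.decode().split('\0')[:-1])
# ['1000', '0x', '3', '1', '0', '', '0', '0', '0', '0']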
fcf05da104551561e44b7ab9c2bf54a9bfcf801e
22,713
def analyze_image(image_url, tag_limit=10):
    """Given an image_url and a tag_limit, make requests to both the Clarifai
    API and the Microsoft Cognitive Services API to return two things:
    (1) a list of tags, limited by tag_limit, and (2) a description of the image.
    """
    clarifai_tags = clarifai_analysis(image_url)
    ms_tags, ms_caption = oxford_project_analysis(image_url)
    # Materialize as lists: map() returns a one-shot iterator in Python 3,
    # which would otherwise be exhausted by the membership tests below.
    clarifai_tags = [s.lower() for s in clarifai_tags]
    ms_tags = [s.lower() for s in ms_tags]
    # Tags that occur in both sources come first...
    merged_tags = [tag for tag in clarifai_tags if tag in ms_tags]
    # ...followed by the remaining tags from each source.
    merged_tags += [tag for tag in clarifai_tags if tag not in merged_tags]
    merged_tags += [tag for tag in ms_tags if tag not in merged_tags]
    # Limit the tags.
    merged_tags = merged_tags[:tag_limit]
    return merged_tags, ms_caption
8d3337c34369d69c9ae48f43100ecb2b930f8a15
22,714
# Note: apply_cast, apply_add and the tensor types come from skl2onnx.

def _decision_function(scope, operator, container, model, proto_type):
    """Predict for a linear model: score = X * coefficient + intercept."""
    coef_name = scope.get_unique_variable_name('coef')
    intercept_name = scope.get_unique_variable_name('intercept')
    matmul_result_name = scope.get_unique_variable_name('matmul_result')
    score_name = scope.get_unique_variable_name('score')
    coef = model.coef_.T
    container.add_initializer(coef_name, proto_type, coef.shape, coef.ravel())
    container.add_initializer(intercept_name, proto_type,
                              model.intercept_.shape, model.intercept_)
    input_name = operator.inputs[0].full_name
    if type(operator.inputs[0].type) in (BooleanTensorType, Int64TensorType):
        cast_input_name = scope.get_unique_variable_name('cast_input')
        apply_cast(scope, operator.input_full_names, cast_input_name,
                   container, to=proto_type)
        input_name = cast_input_name
    container.add_node(
        'MatMul', [input_name, coef_name], matmul_result_name,
        name=scope.get_unique_operator_name('MatMul'))
    apply_add(scope, [matmul_result_name, intercept_name], score_name,
              container, broadcast=0)
    return score_name
e5f105bfb09ac0b5aba0c7adcfd6cb6538911040
22,715
def create_mapping(dico):
    """Create a mapping (item to ID / ID to item) from a dictionary.
    Items are ordered by decreasing frequency.
    """
    sorted_items = sorted(dico.items(), key=lambda x: (-x[1], x[0]))
    id_to_item = {i: v[0] for i, v in enumerate(sorted_items)}
    item_to_id = {v: k for k, v in id_to_item.items()}
    return item_to_id, id_to_item
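A short, runnable check of the ordering contract (decreasing frequency, ties broken alphabetically), using a made-up frequency dictionary:

freqs = {'the': 10, 'sat': 3, 'cat': 3}
item_to_id, id_to_item = create_mapping(freqs)
print(id_to_item)   # {0: 'the', 1: 'cat', 2: 'sat'}
print(item_to_id)   # {'the': 0, 'cat': 1, 'sat': 2}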
cdfb0bd9ffa047e0214486a1b2e63b45e437cf22
22,716
import os


def _splitall(path):
    """Splits a path /a/b/c into a list [/, a, b, c]."""
    allparts = []
    while True:
        parts = os.path.split(path)
        if parts[0] == path:  # absolute root reached, e.g. '/'
            allparts.insert(0, parts[0])
            break
        if parts[1] == path:  # leading relative component reached
            allparts.insert(0, parts[1])
            break
        path = parts[0]
        allparts.insert(0, parts[1])
    return allparts
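For reference, a couple of example calls (POSIX paths; os.path.split handles Windows separators differently):

print(_splitall('/a/b/c'))   # ['/', 'a', 'b', 'c']
print(_splitall('a/b/c'))    # ['a', 'b', 'c']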
fe9d2124dad684b9a2e7a3366b80784c0779c105
22,717
# Note: Pedigree and HaplotypeLibrary are package-local modules.

def read_library(args):
    """Read in a haplotype library.

    Returns a HaplotypeLibrary() and an allele coding array."""
    assert args.library or args.libphase
    filename = args.library if args.library else args.libphase
    print(f'Reading haplotype library from: {filename}')
    library = Pedigree.Pedigree()
    if args.library:
        library.readInPed(args.library, args.startsnp, args.stopsnp,
                          haps=True, update_coding=True)
    elif args.libphase:
        library.readInPhase(args.libphase, args.startsnp, args.stopsnp)
    else:
        # This shouldn't happen
        raise ValueError('No library specified')
    print(f'Haplotype library contains {len(library)} individuals with {library.nLoci} markers')
    haplotype_library = HaplotypeLibrary.HaplotypeLibrary(library.nLoci)
    for individual in library:
        for haplotype in individual.haplotypes:
            haplotype_library.append(haplotype, individual.idx)
    haplotype_library.freeze()
    return haplotype_library, library.allele_coding
e96b156db9cdcf0b70dfcdb2ba155f26a59f8d44
22,718
from typing import Callable

# Note: AsyncSubject, ObservableBase and timeout_scheduler come from RxPY.

def to_async(func: Callable, scheduler=None) -> Callable:
    """Converts the function into an asynchronous function. Each invocation
    of the resulting asynchronous function causes an invocation of the
    original synchronous function on the specified scheduler.

    Example:
        res = Observable.to_async(lambda x, y: x + y)(4, 3)
        res = Observable.to_async(lambda x, y: x + y, Scheduler.timeout)(4, 3)
        res = Observable.to_async(lambda x: log.debug(x), Scheduler.timeout)('hello')

    Keyword arguments:
    func -- Function to convert to an asynchronous function.
    scheduler -- [Optional] Scheduler to run the function on. If not
        specified, defaults to Scheduler.timeout.

    Returns asynchronous function.
    """
    scheduler = scheduler or timeout_scheduler

    def wrapper(*args) -> ObservableBase:
        subject = AsyncSubject()

        def action(scheduler, state):
            try:
                result = func(*args)
            except Exception as ex:
                subject.on_error(ex)
                return
            subject.on_next(result)
            subject.on_completed()

        scheduler.schedule(action)
        return subject.as_observable()
    return wrapper
aa66337b7463d97c47e8c42ea044157cb5bf616c
22,719
import numpy as np
import sympy


def get_symbolic_quaternion_from_axis_angle(axis, angle, convention='xyzw'):
    """Get the symbolic quaternion associated with the axis/angle representation.

    Args:
        axis (np.array[float[3]], np.array[sympy.Symbol[3]]): 3d axis vector.
        angle (float, sympy.Symbol): angle.
        convention (str): convention to be adopted when representing the
            quaternion. You can choose between 'xyzw' or 'wxyz'.

    Returns:
        np.array[float[4]]: symbolic quaternion.
    """
    w = sympy.cos(angle / 2.)
    x, y, z = sympy.sin(angle / 2.) * axis
    if convention == 'xyzw':
        return np.array([x, y, z, w])
    elif convention == 'wxyz':
        return np.array([w, x, y, z])
    else:
        raise NotImplementedError("Asking for a convention that has not been implemented")
8d753c72fc775de38b349e2bf77e3a61a84b07e9
22,720
import boto3

# Note: sts_client is assumed to be a module-level boto3 STS client,
# e.g. sts_client = boto3.client('sts').

def get_session(role_arn, session_name, duration_seconds=900):
    """Returns a boto3 session for the specified role."""
    response = sts_client.assume_role(
        RoleArn=role_arn,
        RoleSessionName=session_name,
        DurationSeconds=duration_seconds,
    )
    creds = response["Credentials"]
    return boto3.Session(
        aws_access_key_id=creds["AccessKeyId"],
        aws_secret_access_key=creds["SecretAccessKey"],
        aws_session_token=creds["SessionToken"],
    )
d60b0b1c6288a8a594e0a1fe4175c69da80ffe29
22,721
import traceback

# Note: base_service_role is assumed to be defined in the same module.

def base_kinesis_role(construct, resource_name: str, principal_resource: str, **kwargs):
    """Generates an IAM Role with a Policy allowing Kinesis PutRecord.

    :param construct: Custom construct that will use this function. From the
        external construct it is usually 'self'.
    :param resource_name: Name of the resource. Used for naming purposes.
    :param principal_resource: Resource used to define a Service Principal.
        Has to match an AWS Resource. For example, 'iot' -> 'iot.amazonaws.com'.
    :param kwargs: Other parameters that could be used by the construct.
    :return: IAM Role with an IAM Policy attached.
    """
    try:
        actions = ["kinesis:PutRecord"]
        resources = [construct._kinesis_stream.stream_arn]
        role = base_service_role(construct, resource_name, principal_resource,
                                 actions=actions, resources=resources)
    except Exception:
        print(traceback.format_exc())
    else:
        return role
82c2d7b7b32857baa619ed7891956930e206bf78
22,722
import pandas as pd

# Note: world_age_distribution, mdm and get_param are assumed to come from
# the surrounding package.

def set_age_distribution_default(dic, value=None, drop=False):
    """Set the "age_distribution" key of a dictionary to the given value or
    to the World's age distribution.
    """
    ages = dic.pop("age_distribution", None)
    if ages is None:
        ages = world_age_distribution() if value is None else value
        if isinstance(ages, str):
            ages = mdm.age_distribution(value)
        elif not isinstance(ages, (pd.Series, pd.DataFrame)):
            ages = get_param("age_distribution", value)
    if not drop:
        dic["age_distribution"] = ages
    return ages
98ca6e784b240ee76ddb4a9d77b691ef08fa7057
22,723
from flask import render_template


def home():
    """Home page"""
    return render_template('home.html')
dc63ced89e5176de1f77ea995678c2f5c37c2593
22,724
import copy

import pandas as pd


def csv2dict(file_csv, delimiter=','):
    """Load a csv file and return a dict containing its information.
    The first row of the csv file contains the column names.

    Parameters
    ----------
    file_csv : str
        The input filename, including the path, of the csv file.
    delimiter : str
        The field delimiter of the csv file.

    Returns
    -------
    outdic : dict
        The returned dict, which contains all information in the csv file.
    """
    # Load station information: SED COSEISMIQ CSV format (temporary format).
    df = pd.read_csv(file_csv, delimiter=delimiter, header="infer",
                     skipinitialspace=True, encoding='utf-8')
    outdic = {}
    for column in df:
        outdic[column] = copy.deepcopy(df[column].values)
    return outdic
d971941b7c5f0bbf021c64cf7c30d1dcee710b9d
22,725
def make_bb_coord_l(contour_l, img, IMG_HEIGHT):
    """Take in a list of contour arrays and return a list of four-coordinate
    bounding boxes, one for each contour array.
    """
    assert isinstance(contour_l, list)
    coord_l = []
    for i in range(len(contour_l)):
        c = contour_l[i]
        bb = get_bb_coord(contour=c, img=img, IMG_HEIGHT=IMG_HEIGHT)
        # Extend if bb is a list (i.e. a split bounding box).
        if isinstance(bb, list):
            coord_l.extend(bb)
        else:
            coord_l.append(bb)
    return coord_l
4f30c95db8a7d2ef81376aa0fa77d7cedc0a913c
22,726
def calc_pair_scale(seqs, obs1, obs2, weights1, weights2):
    """Return entropies and weights for a comparable alignment.

    A comparable alignment is one in which, for each paired state ij, all
    alternate observable paired symbols are created. For instance, let the
    symbols {A,C} be observed at position i and {A,C} at position j, and
    suppose we observe the paired types {AC, AA}. A comparable alignment
    would involve replacing an AC pair with a CC pair."""
    # scale is calculated as the product of mi from col1 with alternate
    # characters. This means the number of states is changed by swapping
    # between the original and selected alternate, calculating the new mi
    pair_freqs = CategoryCounter(seqs)
    weights1 = dict(weights1)
    weights2 = dict(weights2)
    scales = []
    for a, b in list(pair_freqs.keys()):
        weights = weights1[a]
        pr = a + b
        pair_freqs -= pr
        obs1 -= a
        # make comparable alignments by mods to col 1
        for c, w in list(weights.items()):
            new_pr = c + b
            pair_freqs += new_pr
            obs1 += c
            entropy = mi(obs1.entropy, obs2.entropy, pair_freqs.entropy)
            scales += [(pr, entropy, w)]
            pair_freqs -= new_pr
            obs1 -= c
        obs1 += a
        # make comparable alignments by mods to col 2
        weights = weights2[b]
        obs2 -= b
        for c, w in list(weights.items()):
            new_pr = a + c
            pair_freqs += new_pr
            obs2 += c
            entropy = mi(obs1.entropy, obs2.entropy, pair_freqs.entropy)
            scales += [(pr, entropy, w)]
            obs2 -= c
            pair_freqs -= new_pr
        obs2 += b
        pair_freqs += pr
    return scales
6781b86719d669970d67753eabc91c60bc258dcc
22,727
from math import sqrt


def distance_to_line(pt, line_pt_pair):
    """Returns the (signed) perpendicular distance of point 'pt' to the line
    given by the pair of points in the second argument."""
    x = pt[0]
    y = pt[1]
    p, q = line_pt_pair
    q0_m_p0 = q[0] - p[0]
    q1_m_p1 = q[1] - p[1]
    denom = sqrt(q0_m_p0 * q0_m_p0 + q1_m_p1 * q1_m_p1)
    return (q0_m_p0 * p[1] - q1_m_p1 * p[0] - q0_m_p0 * y + q1_m_p1 * x) / denom
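A quick sanity check: for the horizontal line through (0, 0) and (2, 0), the point (1, 1) is at distance 1; the sign encodes which side of the line the point lies on:

d = distance_to_line((1, 1), ((0, 0), (2, 0)))
print(abs(d))  # 1.0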
93500c0f8a4d8d11435647e1868fb929128d1273
22,728
# Note: Instrument and construct_SA1_wavefront come from the surrounding package.

def define_wfr(ekev):
    """Defines the wavefront in the plane prior to the mirror, i.e., after d1.

    :param ekev: energy of the source
    """
    spb = Instrument()
    spb.build_elements(focus='nano')
    spb.build_beamline(focus='nano')
    spb.crop_beamline(element1="d1")
    bl = spb.get_beamline()
    wfr = construct_SA1_wavefront(512, 512, ekev, 0.25)
    bl.propagate(wfr)
    return wfr
8ccf7880ff22dc13f979b45c288a58bbb4e3c5c9
22,729
import chemparse

# Note: html_chemname_format is assumed to be defined in the same module.

def ratlab(top="K+", bottom="H+", molality=False):
    """Python wrapper for the ratlab() function in CHNOSZ.
    Produces an expression for the activity ratio between the ions in the
    top and bottom arguments. The default is a ratio with H+, i.e.,
    (activity of the ion) / [(activity of H+) ^ (charge of the ion)]

    Parameters
    ----------
    top : str, default "K+"
        The ion in the numerator of the ratio.

    bottom : str, default "H+"
        The ion in the denominator of the ratio.

    molality : bool, default False
        Use molality (m) instead of activity (a) for aqueous species?

    Returns
    -------
    A formatted string representing the activity ratio.
    """
    top_formula = chemparse.parse_formula(top)
    if "+" in top_formula.keys():
        top_charge = top_formula["+"]
    elif "-" in top_formula.keys():
        top_charge = top_formula["-"]
    else:
        raise Exception("Cannot create an ion ratio involving one or more neutral species.")

    bottom_formula = chemparse.parse_formula(bottom)
    if "+" in bottom_formula.keys():
        bottom_charge = bottom_formula["+"]
    elif "-" in bottom_formula.keys():
        # Fixed: this previously assigned to top_charge, leaving
        # bottom_charge undefined for anions.
        bottom_charge = bottom_formula["-"]
    else:
        raise Exception("Cannot create an ion ratio involving one or more neutral species.")

    if top_charge.is_integer():
        top_charge = int(top_charge)
    if bottom_charge.is_integer():
        bottom_charge = int(bottom_charge)

    if top_charge != 1:
        top_charge = "<sup>" + str(top_charge) + "</sup>"
    else:
        top_charge = ""
    if bottom_charge != 1:
        bottom_charge = "<sup>" + str(bottom_charge) + "</sup>"
    else:
        bottom_charge = ""

    if molality:
        sym = "m"
    else:
        sym = "a"

    return ("log(" + sym + bottom_charge + "<sub>" + html_chemname_format(top) + "</sub>/" +
            sym + top_charge + "<sub>" + html_chemname_format(bottom) + "</sub>)")
69c6a5fbbb344e5b0e063ea438994c3ce7e6cafb
22,730
import pandas as pd
from numpy.linalg import lstsq
from pandas import get_dummies


def demean_dataframe_two_cat(df_copy, consist_var, category_col, is_unbalance):
    """Demean on two categories of fixed effects.

    Reference: Baltagi, http://library.wbi.ac.id/repository/27.pdf,
    page 176, equation (9.30).

    :param df_copy: DataFrame
    :param consist_var: List of columns needing centering on fixed effects
    :param category_col: List of fixed effects
    :return: Demeaned dataframe
    """
    if is_unbalance:
        # First determine which category has the most items (e.g. uid).
        max_ncat = df_copy[category_col[0]].nunique()
        max_cat = category_col[0]
        for cat in category_col:
            if df_copy[cat].nunique() >= max_ncat:
                max_ncat = df_copy[cat].nunique()
                max_cat = cat
        min_cat = category_col.copy()
        min_cat.remove(max_cat)
        min_cat = min_cat[0]
        df_copy.sort_values(by=[max_cat, min_cat], inplace=True)
        # Demean on the first category variable, max_cat.
        for consist in consist_var:
            df_copy[consist] = df_copy[consist] - df_copy.groupby(max_cat)[consist].transform('mean')
        dummies = get_dummies(df_copy[min_cat])  # time dummies
        dummies[max_cat] = df_copy[max_cat]
        dummies[min_cat] = df_copy[min_cat]
        dummies[max_cat] = dummies[max_cat].apply(str)
        dummies[min_cat] = dummies[min_cat].apply(str)
        dummies.set_index([max_cat, min_cat], inplace=True)
        group_mu = dummies.groupby(level=max_cat).transform("mean")
        out = dummies - group_mu
        # q_delta_1 @ delta_2
        e = df_copy[consist_var].values
        d = out.values
        resid = e - d @ lstsq(d, e, rcond=None)[0]
        df_out = pd.DataFrame(data=resid, columns=consist_var)
        df_out[max_cat] = df_copy[max_cat]
        df_out[min_cat] = df_copy[min_cat]
    else:
        # Balanced panel: demean sequentially on each category.
        for consist in consist_var:
            for cat in category_col:
                df_copy[consist] = df_copy[consist] - df_copy.groupby(cat)[consist].transform('mean')
        df_out = df_copy
    return df_out
a6a3f0bd56be214660eca857f0fb8630879bb2a8
22,731
import datetime
import time

# Note: STRING_FORMAT is assumed to be a module-level strftime format string.

def get_time_string(time_obj=None):
    """The canonical time string format (in UTC).

    :param time_obj: an optional datetime.datetime or time.struct_time
        (defaults to gmtime)

    Note: Changing this function will change all times that this project
    uses in the returned data.
    """
    if isinstance(time_obj, datetime.datetime):
        if time_obj.tzinfo:
            offset = time_obj.tzinfo.utcoffset(time_obj)
            # Convert local time to UTC by subtracting the UTC offset.
            utc_dt = time_obj - offset
            return datetime.datetime.strftime(utc_dt, STRING_FORMAT)
        return datetime.datetime.strftime(time_obj, STRING_FORMAT)
    elif isinstance(time_obj, time.struct_time):
        return time.strftime(STRING_FORMAT, time_obj)
    elif time_obj is not None:
        raise TypeError("get_time_string takes only a time_struct, none, or a "
                        "datetime. It was given a %s" % type(time_obj))
    return time.strftime(STRING_FORMAT, time.gmtime())
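For illustration, with a hypothetical ISO-style format string (STRING_FORMAT is whatever the project actually defines), the helper behaves as follows:

STRING_FORMAT = '%Y-%m-%dT%H:%M:%SZ'  # hypothetical value for this demo
print(get_time_string())  # current UTC time, e.g. '2020-01-01T12:00:00Z'
print(get_time_string(datetime.datetime(2020, 1, 1, 12, 0, 0)))  # '2020-01-01T12:00:00Z'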
73a623474e70850dc4194e2657b7e15aaa53996f
22,732
from functools import partial

# Note: checkpoint_wrapper, _recursive_wrap and lambda_auto_wrap_policy come
# from torch.distributed's checkpoint/FSDP wrapping utilities.

def apply_activation_checkpointing_wrapper(
    model, checkpoint_wrapper_fn=checkpoint_wrapper, check_fn=lambda _: True
):
    """
    Applies :func:`checkpoint_wrapper` to modules within `model` based on a
    user-defined configuration. For each module within `model`, the
    `check_fn` is used to decide whether `module` should be wrapped with
    :func:`checkpoint_wrapper` or not.

    Note::
        This function modifies `model` in place and replaces appropriate
        layers with their checkpoint-wrapped modules.

    Note::
        This function will not wrap the overall root module. If this is
        needed, please directly use :class:`CheckpointWrapper`.

    Usage::
        model = nn.Sequential(
            nn.Linear(10, 10), nn.Linear(10, 10), nn.Linear(10, 10)
        )
        check_fn = lambda l: isinstance(l, nn.Linear)
        apply_activation_checkpointing(model,
                                       checkpoint_wrapper_fn=checkpoint_wrapper,
                                       check_fn=check_fn)

    Args:
        model (nn.Module):
            The model whose submodules (or self) should be wrapped with
            activation checkpointing.
        checkpoint_wrapper_fn (Optional[Callable[nn.Module]]):
            A ``Callable`` which will wrap modules.
        check_fn (Optional[Callable[nn.Module, nn.Module]]):
            A lambda function which will be passed the current layer and
            returns ``True`` or ``False`` depending on whether the input
            layer should be wrapped.
    Returns: None (`model` is modified in place)
    """
    return _recursive_wrap(
        module=model,
        auto_wrap_policy=partial(lambda_auto_wrap_policy, lambda_fn=check_fn),
        wrapper_cls=checkpoint_wrapper_fn,
        ignored_modules=set(),
        ignored_params=set(),
        only_wrap_children=True,
    )
ea4f7efef1f1c1c49a7cd078cae95be754f68c93
22,733
from sklearn.model_selection import train_test_split


def train_validation_split(x, y):
    """Prepare validation data with a proper size.

    Args:
        x: (pandas.DataFrame) Feature set / affecting features
        y: (pandas.DataFrame) Target set / dependent feature

    Returns:
        x_train: (pandas.DataFrame) Feature set for training
        y_train: (pandas.DataFrame) Target set for training
        x_val: (pandas.DataFrame) Feature set for validation
        y_val: (pandas.DataFrame) Target set for validation
    """
    if x.shape[0] > 100000:    # large datasets
        val_ratio = 0.2
    elif x.shape[0] > 1000:    # medium-size datasets
        val_ratio = 0.15
    else:                      # small datasets
        val_ratio = 0.1
    # Splitting dataset into train and validation
    x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=val_ratio, random_state=42)
    print(f"Validation data prepared."
          f" Train - Validation ratio taken {int(100 - val_ratio * 100)} % - {int(val_ratio * 100)} % .")
    return x_train, y_train, x_val, y_val
eac70c13b2ebd592681dfdc5cc181b558a93b233
22,734
from typing import List, Tuple, Union

import numpy as np

# Note: _check_label_predictions_preconditions is a module-local validator.

def topk_accuracy(
    rankings: np.ndarray,
    labels: np.ndarray,
    ks: Union[Tuple[int, ...], int] = (1, 5),
) -> List[float]:
    """Computes top-k accuracies for different values of k.

    Args:
        rankings: 2D rankings array: shape = (instance_count, label_count)
        labels: 1D correct labels array: shape = (instance_count,)
        ks: The k values in top-k, either an int or a list of ints.

    Returns:
        list of float: top-k accuracy for each k in ks

    Raises:
        ValueError: If the dimensionality of the rankings or labels is
            incorrect, or if the lengths of rankings and labels aren't equal.
    """
    if isinstance(ks, int):
        ks = (ks,)
    _check_label_predictions_preconditions(rankings, labels)
    # Trim to max k to avoid extra computation.
    maxk = np.max(ks)
    # Compute true positives in the top-maxk predictions.
    tp = rankings[:, :maxk] == labels.reshape(-1, 1)
    # Trim to selected ks and compute accuracies.
    accuracies = [tp[:, :k].max(1).mean() for k in ks]
    if any(np.isnan(accuracies)):
        raise ValueError(f"NaN present in accuracies {accuracies}")
    return accuracies
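A small worked example (assuming the module's validator is available); each row lists labels ordered best-first, so only the second instance misses in both top-1 and top-2:

rankings = np.array([[2, 0, 1],
                     [1, 2, 0]])
labels = np.array([2, 0])
print(topk_accuracy(rankings, labels, ks=(1, 2)))  # [0.5, 0.5]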
36d3ac84b69b7d0f8764ad1213f56f82da717482
22,735
def has_substr(line, chars):
    """Checks whether the line contains one of the given substrings."""
    for char in chars:
        if char in line:
            return True
    return False
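Two illustrative calls; the helper returns as soon as any candidate substring matches:

print(has_substr("hello world", ["xyz", "wor"]))  # True
print(has_substr("hello world", ["xyz"]))         # False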
cf438600894ca43c177af1661a95447daa8b6b0d
22,736
def multifiltertestmethod(testmethod, strfilters):
    """Returns a version of testmethod that operates on strings filtered by multifilter."""
    def filteredmethod(str1, str2):
        return testmethod(multifilter(str1, strfilters), multifilter(str2, strfilters))
    filteredmethod.__doc__ = testmethod.__doc__
    filteredmethod.name = getattr(testmethod, 'name', testmethod.__name__)
    return filteredmethod
eec5e580bcc2987f8abfc23dd118897ed5d2b4c4
22,737
import numpy as np


def getbasins(basin, Nx, Ny, Nz, S1, S2, S3):
    """Extend the CHGCAR basin labels over an (S1, S2, S3) supercell
    (used only while mode is 'all').

    Args:
        basin (numpy array): basin labels of the original (Nz, Ny, Nx) grid.

    Returns:
        1D numpy array of basin labels for the extended grid.
    """
    temp = np.zeros(Nx * Ny * Nz * S1 * S2 * S3)
    basins = np.resize(temp, (Nz * S3, Ny * S2, Nx * S1))
    block = np.resize(temp, (Nz * S3, Ny * S2, Nx * S1))
    flag = 0
    b = 1
    for kss in range(Nz * S3):
        for jss in range(Ny * S2):
            for iss in range(Nx * S1):
                flag += 1
                # Advance to the next block after every Nx*Ny*Nz voxels.
                if flag == Nx * Ny * Nz + 1:
                    b += 1
                    flag = 1
                block[kss, jss, iss] = b
                basins[kss, jss, iss] = int(S1 * S2 * S3 * (basin[kss % Nz, jss % Ny, iss % Nx] - 1)) + b
    basins_1D = np.resize(basins, Nx * Ny * Nz * S1 * S2 * S3)
    return basins_1D
97c489a7453aced3624f6898896924647f387d55
22,738
# Note: STREAM_FLAGS, FRAME_TYPES, FRAME_TYPE_FLAGS, cborutil, stringutil and
# makeframe come from the surrounding module (Mercurial's wire protocol code).

def makeframefromhumanstring(s):
    """Create a frame from a human readable string.

    Strings have the form:

        <request-id> <stream-id> <stream-flags> <type> <flags> <payload>

    This can be used by user-facing applications and tests for creating
    frames easily without having to type out a bunch of constants.

    Request ID and stream IDs are integers.

    Stream flags, frame type, and flags can be specified by integer or
    named constant. Flags can be delimited by `|` to bitwise OR them
    together.

    If the payload begins with ``cbor:``, the following string will be
    evaluated as a Python literal and the resulting object will be fed into
    a CBOR encoder. Otherwise, the payload is interpreted as a Python byte
    string literal.
    """
    fields = s.split(b' ', 5)
    requestid, streamid, streamflags, frametype, frameflags, payload = fields

    requestid = int(requestid)
    streamid = int(streamid)

    finalstreamflags = 0
    for flag in streamflags.split(b'|'):
        if flag in STREAM_FLAGS:
            finalstreamflags |= STREAM_FLAGS[flag]
        else:
            finalstreamflags |= int(flag)

    if frametype in FRAME_TYPES:
        frametype = FRAME_TYPES[frametype]
    else:
        frametype = int(frametype)

    finalflags = 0
    validflags = FRAME_TYPE_FLAGS[frametype]
    for flag in frameflags.split(b'|'):
        if flag in validflags:
            finalflags |= validflags[flag]
        else:
            finalflags |= int(flag)

    if payload.startswith(b'cbor:'):
        payload = b''.join(
            cborutil.streamencode(stringutil.evalpythonliteral(payload[5:]))
        )
    else:
        payload = stringutil.unescapestr(payload)

    return makeframe(
        requestid=requestid,
        streamid=streamid,
        streamflags=finalstreamflags,
        typeid=frametype,
        flags=finalflags,
        payload=payload,
    )
b588e61fb8b67b4160ac673eb3e70f373c7027b4
22,739
from nameparser import HumanName


def display_full_name_with_correct_capitalization(full_name):
    """
    See documentation here: https://github.com/derek73/python-nameparser

    :param full_name:
    :return:
    """
    # str.strip() returns a new string, so the result must be assigned.
    full_name = full_name.strip()
    full_name_parsed = HumanName(full_name)
    full_name_parsed.capitalize()
    full_name_capitalized = str(full_name_parsed)
    return full_name_capitalized
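An illustrative call; nameparser's capitalize() only adjusts names that are all-upper or all-lower, so the exact output depends on the library version:

print(display_full_name_with_correct_capitalization('  JOHN F. KENNEDY '))
# e.g. 'John F. Kennedy'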
05133fc04631a39a19f2e27355456418ab7c78a7
22,740
import collections
from typing import Any, Dict, Iterable, Optional

# Note: yield_experiment_artifacts is assumed to be defined in the same module.

def load_experiment_artifacts(
    src_dir: str,
    file_name: str,
    selected_idxs: Optional[Iterable[int]] = None,
) -> Dict[int, Any]:
    """
    Load all the files in dirs under `src_dir` that match `file_name`.

    This function assumes subdirectories within `dst_dir` have the following
    structure:
        {dst_dir}/result_{idx}/{file_name}
    where `idx` denotes an integer encoded in the subdirectory name.

    The function returns the contents of the files, indexed by the integer
    extracted from the subdirectory name.

    :param src_dir: directory containing subdirectories of experiment
        results. It is the directory that was specified as `--dst_dir` in
        `run_experiment.py` and `run_notebook.py`
    :param file_name: the file name within each run results subdirectory to
        load, e.g., `result_bundle.pkl`
    :param selected_idxs: specific experiment indices to load; `None`
        (default) loads all available indices
    """
    artifact_tuples = yield_experiment_artifacts(src_dir, file_name, selected_idxs)
    artifacts = collections.OrderedDict()
    for key, artifact in artifact_tuples:
        artifacts[key] = artifact
    return artifacts
67d8b9aba64f79b0361e7ed175ae597b22367f8a
22,741
import datetime
from typing import Tuple

import pandas as pd

# Note: calc_excess_returns, calc_sharpe, calc_sortino, annualize_metric,
# write_to_logs and config are assumed to come from the surrounding project.

def get_market_metrics(market_portfolio: pd.DataFrame, t_costs: float, index_id: str, index_name: str,
                       test_data_start_date: datetime.date, test_data_end_date: datetime.date,
                       market_logs=False) -> Tuple[pd.Series, pd.Series, pd.Series]:
    """Get performance metrics for the full equal-weighted market portfolio.

    :param market_logs: Write log data for market portfolio
    :param test_data_start_date: Start date (with regard to test set)
    :param test_data_end_date: End date (with regard to test set)
    :param index_name: Index name
    :param index_id: Index ID
    :param t_costs: Transaction costs per half-turn
    :param market_portfolio: DataFrame including full test set (market portfolio)
    :return: Tuple of market portfolio metrics (Series) and cumulative returns series (Series)
    """
    market_portfolio_metrics = pd.Series([]).rename('Market')
    market_portfolio_metrics.index.name = 'Metrics'

    excess_return_series = calc_excess_returns(
        market_portfolio.loc[:, 'daily_return'].groupby(level=['datadate']).mean()).rename('daily_excess_return')
    excess_return_series = excess_return_series.reset_index()
    excess_return_series.loc[:, 'datadate'] = excess_return_series['datadate'].dt.strftime('%Y-%m-%d')
    excess_return_series.set_index('datadate', inplace=True)
    cumulative_excess_return = (excess_return_series.get('daily_excess_return') + 1).cumprod().rename(
        'Cumulative Market Return')
    cumulative_excess_return.index.name = 'Time'

    # JOB: Calculate metrics
    annualized_sharpe = calc_sharpe(
        market_portfolio.loc[:, ['daily_return']].groupby(level=['datadate']).mean(), annualize=True)
    annualized_sharpe_atc = calc_sharpe(
        market_portfolio.loc[:, ['daily_return']].groupby(level=['datadate']).mean() - 4 * t_costs, annualize=True)
    annualized_sortino = calc_sortino(
        market_portfolio.loc[:, ['daily_return']].groupby(level=['datadate']).mean(), annualize=True)
    annualized_sortino_atc = calc_sortino(
        market_portfolio.loc[:, ['daily_return']].groupby(level=['datadate']).mean() - 4 * t_costs, annualize=True)

    mean_daily_return = market_portfolio.groupby(level=['datadate'])['daily_return'].mean().mean()
    mean_daily_excess_return = calc_excess_returns(
        market_portfolio.groupby(level=['datadate'])['daily_return'].mean().rename('daily_return')).mean()

    market_portfolio_metrics.loc['Mean Daily Return'] = mean_daily_return
    market_portfolio_metrics.loc['Annualized Return'] = annualize_metric(mean_daily_return)
    market_portfolio_metrics.loc['Mean Daily Excess Return'] = mean_daily_excess_return
    market_portfolio_metrics.loc['Annualized Excess Return'] = annualize_metric(mean_daily_excess_return)
    market_portfolio_metrics.loc['Annualized Sharpe'] = annualized_sharpe
    market_portfolio_metrics.loc['Annualized Sortino'] = annualized_sortino

    # JOB: Add metrics incl. transaction costs of 5 bps per half-turn
    market_portfolio_metrics.loc['Mean Daily Return_atc'] = mean_daily_return - 4 * t_costs
    market_portfolio_metrics.loc['Annualized Return_atc'] = annualize_metric(mean_daily_return - 4 * t_costs)
    market_portfolio_metrics.loc['Mean Daily Excess Return_atc'] = mean_daily_excess_return - 4 * t_costs
    market_portfolio_metrics.loc['Annualized Excess Return_atc'] = annualize_metric(
        mean_daily_excess_return - 4 * t_costs)
    market_portfolio_metrics.loc['Annualized Sharpe_atc'] = annualized_sharpe_atc
    market_portfolio_metrics.loc['Annualized Sortino_atc'] = annualized_sortino_atc

    data_record = {
        'ID': config.run_id,
        'Experiment Run End': datetime.datetime.now().isoformat(),
        'Parent Model Type': 'Market',
        'Model Type': 'Market',
        'Index ID': index_id,
        'Index Name': index_name,
        'Study Period ID': config.study_period_id,
        'Study Period Length': None,
        'Period Range': None,
        'Study Period Start Date': None,
        'Study Period End Date': None,
        'Test Set Size': None,
        'Days Test Set': None,
        'Constituent Number': None,
        'Average Cross Section Size': None,
        'Test Set Start Date': test_data_start_date.isoformat(),
        'Test Set End Date': test_data_end_date.isoformat(),
        'Total Accuracy': None,
        'Top-k Accuracy Scores': None,
        'Top-k Mean Daily Return': market_portfolio_metrics['Mean Daily Return'],
        'Top-k Mean Daily Excess Return': market_portfolio_metrics['Mean Daily Excess Return'],
        'Top-k Annualized Excess Return': market_portfolio_metrics['Annualized Excess Return'],
        'Top-k Annualized Return': market_portfolio_metrics['Annualized Return'],
        'Top-k Annualized Sharpe': market_portfolio_metrics['Annualized Sharpe'],
        'Top-k Annualized Sortino': market_portfolio_metrics['Annualized Sortino'],
        'Mean Daily Return (Short)': None,
        'Mean Daily Return (Long)': None,
        'Top-k Mean Daily Return_atc': market_portfolio_metrics['Mean Daily Return_atc'],
        'Top-k Annualized Return_atc': market_portfolio_metrics['Annualized Return_atc'],
        'Top-k Mean Daily Excess Return_atc': market_portfolio_metrics['Mean Daily Excess Return_atc'],
        'Top-k Annualized Excess Return_atc': market_portfolio_metrics['Annualized Excess Return_atc'],
        'Top-k Annualized Sharpe_atc': market_portfolio_metrics['Annualized Sharpe_atc'],
        'Top-k Annualized Sortino_atc': market_portfolio_metrics['Annualized Sortino_atc'],
        'Top-k Mean Daily Return (Short)_atc': None,
        'Top-k Mean Daily Return (Long)_atc': None,
        'Model Configs': None,
        'Total Epochs': None,
        'Return Series': excess_return_series['daily_excess_return'].to_dict(),
        'Prediction Error': None,
    }
    if market_logs:
        write_to_logs(data_record)

    return market_portfolio_metrics, excess_return_series, cumulative_excess_return
f5c77114c79ef901683ffdd4495a8a1022e42dc9
22,742
# Note: SEED_COLS, CATEGORICAL_COLS and CALCULATED_COLS are module-level lists.

def get_puf_columns(seed=True, categorical=True, calculated=True):
    """Get a list of columns.

    Args:
        seed: Whether to include standard seed columns: ['MARS', 'XTOT', 'S006'].
        categorical: Whether to include categorical columns: ['F6251', 'MIDR', 'FDED', 'DSI'].
        calculated: Whether to include calculated columns: ['E00100', 'E09600'].

    Returns:
        List of columns.
    """
    res = []
    if seed:
        res += SEED_COLS
    if categorical:
        res += CATEGORICAL_COLS
    if calculated:
        res += CALCULATED_COLS
    return res
af7d6799b7f17b2b05b62a7ed88d0695784cde59
22,743
# Assumes the OpenStack novaclient package; this import matches the legacy
# `novaclient.Client(...)` call style used below.
from novaclient import client as novaclient


def list_all_vms(osvars):
    """Returns a listing of all VM objects as reported by Nova."""
    novac = novaclient.Client('2',
                              osvars['OS_USERNAME'],
                              osvars['OS_PASSWORD'],
                              osvars['OS_TENANT_NAME'],
                              osvars['OS_AUTH_URL'],
                              service_type="compute")
    return novac.servers.list(True, {'all_tenants': '1'})
b03bc9c5e458d62403b86346bdb4f9a2c3081909
22,744
import subprocess
import sys


def exec_cmd(cmd, path):
    """Execute the specified command and return the result."""
    out = ''
    err = ''
    sys.stdout.write("-------- Running \"%s\" in \"%s\"...\n" % (cmd, path))
    parts = cmd.split()
    try:
        process = subprocess.Popen(parts, cwd=path,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   shell=(sys.platform == 'win32'))
        out, err = process.communicate()
    except IOError:
        # The original Python 2 code unpacked (errno, strerror); re-raise.
        raise
    except Exception:
        raise
    return {'out': out, 'err': err}
ea75de47eebe526188533539a4d33e07d5a5bed3
22,745
from numbers import Number


def contains_numbers(iterable):
    """Check if the first item of the iterable is a number."""
    return isinstance(iterable[0], Number)
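Note that only the first element is inspected, so the argument must support indexing; two illustrative calls:

print(contains_numbers([1, 2, 3]))    # True
print(contains_numbers(['a', 'b']))   # False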
0c6dc3031087e14ea50cb7d228da50b19a55a013
22,746
import tempfile
from pathlib import Path

import requests

# Note: PILImage and PathStr come from the surrounding package.

def get_image(img: PathStr) -> PILImage:
    """Get a picture from either a path or a URL."""
    if str(img).startswith("http"):
        with tempfile.TemporaryDirectory() as tmpdirname:
            dest = Path(tmpdirname) / str(img).split("?")[0].rpartition("/")[-1]
            # NOTE: to be replaced by download(url, dest=dest) [from unpackai.utils]
            with requests.get(str(img)) as resp:
                resp.raise_for_status()
                dest.write_bytes(resp.content)
            return PILImage.create(dest)
    else:
        return PILImage.create(img)
374e2ff8f97c4d63ceb3d621ced25451d10b6793
22,747
from sklearn.model_selection import GridSearchCV
from tensorflow import keras

# Note: create_model and format_summary are assumed to be defined in the
# surrounding module.

def FAIMSNETNN_model(train_df, train_y, val_df, val_y, model_args, cv=3):
    """Fit a neural-network model via grid search."""
    input_dim = train_df.shape[1]
    if model_args["grid"] == "tiny":
        param_grid = {"n1": [100], "d1": [0.3, 0.1], "lr": [0.001, 0.01],
                      "epochs": [50], "batch_size": [32, 128],
                      "input_dim": [input_dim]}
    else:
        param_grid = {"n1": [100, 200, 500], "d1": [0.5, 0.3, 0.1],
                      "lr": [0.0001, 0.001, 0.01], "epochs": [50],
                      "batch_size": [32, 64, 128], "input_dim": [input_dim]}
    model = keras.wrappers.scikit_learn.KerasRegressor(build_fn=create_model, verbose=0)
    gs = GridSearchCV(estimator=model, param_grid=param_grid,
                      n_jobs=model_args["jobs"], cv=cv,
                      return_train_score=True, verbose=2)
    gsresults = gs.fit(train_df, train_y)
    print(gs.best_params_)
    # Refit the best configuration for longer.
    gs.best_params_["epochs"] = 100
    model = create_model(**gs.best_params_)
    history = model.fit(train_df, train_y, validation_split=0.1,
                        epochs=gs.best_params_["epochs"],
                        batch_size=gs.best_params_["batch_size"])
    df_results, cv_res = format_summary(train_df, val_df, train_y, val_y,
                                        model, "FNN", gsresults)
    cv_res["params"] = str(gs.best_params_)
    return df_results, cv_res, gs, model
6f6520a3f9a746e6e1241f638358f00ce4f20ede
22,748
import numpy as np

# Note: count_conflicts and do_swapping are assumed to be defined in the
# same module.

def assign_exam_blocks(data, departments, splitted_departments, number_exam_days):
    """Assign departments to exam blocks and optimize this schedule to reduce conflicts.

    data (pandas.DataFrame): Course enrollments data
    departments (dict): Departments (str key) and courses in departments (list value)
    splitted_departments (iterable): Departments split into "-1"/"-2" sections
    number_exam_days (int): The number of days for exams

    returns (list): Departments for each exam block
    """
    # Create two exam blocks per day.
    exam_blocks = [[] for i in range(2 * number_exam_days)]
    # Sequentially fill exam_blocks with departments in random order.
    i = 0
    department_list = list(departments)
    index = np.random.permutation(np.arange(len(department_list)))
    for j in range(len(department_list)):
        department = department_list[index[j]]
        exam_blocks[i % (2 * number_exam_days)].append(department)
        i += 1
    # Swap exam blocks until this swap method can no longer reduce conflicts.
    total_conflicts = sum([count_conflicts(data, departments, block) for block in exam_blocks])
    conflicts_reduced = True
    while conflicts_reduced:
        conflicts_reduced = False
        # Do swapping between departments in exam block i and exam block j.
        for i in range(len(exam_blocks) - 1):
            for j in range(i + 1, len(exam_blocks)):
                do_swapping(data, departments, exam_blocks[i], exam_blocks[j])
        # Do swapping between the two blocks of split departments to try to
        # minimize conflicts on a course basis.
        course_conflicts_reduced = True
        while course_conflicts_reduced:
            current_conflicts = sum([count_conflicts(data, departments, block) for block in exam_blocks])
            course_conflicts_reduced = False
            # Do this for every split department.
            for department in splitted_departments:
                courses = [departments[department + "-1"], departments[department + "-2"]]
                # This allows us to swap from the first department exam block
                # to the other department exam block. (~course_index maps
                # 0 -> -1 and 1 -> -2, i.e. the other element of the pair.)
                for course_index in (0, 1):
                    # Swap any course from one department section to the
                    # other if it reduces conflicts.
                    i = 0
                    while i < len(courses[course_index]):
                        courses[~course_index].append(courses[course_index].pop(i))
                        tmp_conflicts = sum([count_conflicts(data, departments, block) for block in exam_blocks])
                        if tmp_conflicts >= current_conflicts:
                            courses[course_index].insert(i, courses[~course_index].pop())
                        else:
                            course_conflicts_reduced = True
                        i += 1
                    current_conflicts = sum([count_conflicts(data, departments, block) for block in exam_blocks])
        if current_conflicts < total_conflicts:
            total_conflicts = current_conflicts
            conflicts_reduced = True
    return exam_blocks
ae272ea00a277960497d96b2593371bc35a9c3cb
22,749
import os
from ftplib import FTP
from zipfile import ZipFile

import pandas as pd
from dbfread import DBF


def download_table_dbf(file_name, cache=True):
    """Downloads an auxiliary SINAN data file in "dbf" format, or the "zip"
    folder that contains it (if the "zip" folder has not already been
    downloaded), then reads it as a pandas DataFrame and finally deletes it.

    Parameters
    ----------
    file_name: str
        Name of the "dbf" file.

    Returns
    -------
    df: pandas DataFrame
        DataFrame containing the data of an auxiliary data file originally
        in "dbf" format.
    """
    ftp = FTP('ftp.datasus.gov.br')
    ftp.login()
    fname = file_name + '.DBF'
    try:
        if file_name == 'CADMUN':
            ftp.cwd('/dissemin/publicos/SIM/CID10/TABELAS/')
            ftp.retrbinary(f'RETR {fname}', open(fname, 'wb').write)
        elif file_name == 'rl_municip_regsaud':
            folder = 'base_territorial.zip'
            ftp.cwd('/territorio/tabelas/')
            ftp.retrbinary(f'RETR {folder}', open(folder, 'wb').write)
            zf = ZipFile(folder, 'r')  # renamed from `zip` to avoid shadowing the builtin
            fname = file_name + '.dbf'
            zf.extract(fname)
    except Exception:
        raise Exception('Could not download {}'.format(fname))
    dbf = DBF(fname)
    df = pd.DataFrame(iter(dbf))
    os.unlink(fname)
    return df
90992f009a5a9d6f27c60b2dd68a83ec68f8540a
22,750
def aq_name(path_to_shp_file):
    """Computes the name of a given aquifer from its shape file.

    :param path_to_shp_file: path to the .shp file for the given aquifer
    :return: a string (name of the aquifer)
    """
    str_ags = path_to_shp_file.split('/')
    str_aq = ""
    if len(str_ags) >= 2:
        str_aq = str(str_ags[1])
    print(str_aq)
    return str_aq
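An illustrative call with a hypothetical path; the function simply returns the second '/'-separated component:

print(aq_name('shapefiles/Ogallala/Ogallala.shp'))  # prints and returns 'Ogallala'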
1cb6f9881383b4627ea4f78bf2f6fd9cdf97dbc4
22,751
import numpy as np

# Note: _gini is assumed to be a module-local helper computing the Gini
# coefficient of a 1D array.

def gini(arr, mode='all'):
    r"""Calculate the Gini coefficient(s) of a matrix or vector.

    Parameters
    ----------
    arr : array-like
        Array or matrix on which to compute the Gini coefficient(s).
    mode : string, optional
        One of ['row-wise', 'col-wise', 'all']. Default is 'all'.

    Returns
    -------
    coeffs : array-like
        Array of Gini coefficients.

    Note
    ----
    If arr is a transition matrix A, such that Aij = P(S_k=j|S_{k-1}=i),
    then 'row-wise' is equivalent to 'tmat_departure' and 'col-wise' is
    equivalent to 'tmat_arrival'. Similarly, if arr is the observation
    (lambda) matrix of an HMM such that
    lambda \in \mathcal{C}^{n_states \times n_units}, then 'row-wise' is
    equivalent to 'lambda_across_units' and 'col-wise' is equivalent to
    'lambda_across_units'.

    If mode = 'all', then the matrix is unwrapped into a numel-dimensional
    array before computing the Gini coefficient.
    """
    if mode is None:
        mode = 'row-wise'
    if mode not in ['row-wise', 'col-wise', 'all']:
        raise ValueError("mode '{}' not supported!".format(mode))
    gini_coeffs = None
    if mode == 'all':
        arr = np.atleast_1d(arr).astype(float)
        gini_coeffs = _gini(arr)
    elif mode == 'row-wise':
        arr = np.atleast_2d(arr).astype(float)
        gini_coeffs = []
        for row in arr:
            gini_coeffs.append(_gini(row))
    elif mode == 'col-wise':
        arr = np.atleast_2d(arr).astype(float)
        gini_coeffs = []
        for row in arr.T:
            gini_coeffs.append(_gini(row))
    return gini_coeffs
9fb3116506db949d273000510724bcce0ed165e2
22,752
import requests

# Note: APIUrls is assumed to be a module-level holder of API endpoint strings.

def getIndex():
    """Retrieves the index value."""
    headers = {
        'accept': 'application/json',
    }
    indexData = requests.get(
        APIUrls.lnapi + APIUrls.indexUrl,
        headers=headers,
    )
    if indexData.status_code == 200:
        return indexData.json()
    else:
        raise RuntimeError(
            'Unable to fetch index data:\n'
            f'{indexData.text}'
        )
ba35e1573a62e76d1f413036761b8a9054a3a878
22,753
import os

# Note: getDataPath is assumed to be defined in the same module.

def getDataFile(fname):
    """Return the complete path to datafile fname.

    Data files are in the directory skeleton/skeleton/data.
    """
    return os.path.join(getDataPath(), fname)
8b359910ceac5216e5a968e7a07da47d558a20c1
22,754
def input_as_string(filename: str) -> str:
    """Returns the content of the input file as a string."""
    with open(filename, encoding="utf-8") as file:
        return file.read().rstrip("\n")
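An illustrative call; 'input.txt' is a hypothetical file for this demo:

text = input_as_string('input.txt')
print(repr(text))  # file contents with trailing newline(s) stripped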
0343de48580a71a62895aa093af1213c3f0c0b84
22,755
# Note: _expression_to_python_format_string and IllegalExpressionException
# are assumed to be defined in the same module.

def channame_to_python_format_string(node, succgen=None):
    """See channame_str_to_python_format_string.

    @succgen is optional; if given, will check that identifiers can be found.
    """
    if not node:  # empty AST
        return (True, "")
    if node.type == 'Identifier':
        # If no succgen, assume it's a channel.
        if not succgen or node.children[0] in succgen.channel_identifiers:
            # Of the form "channame[x][x]...[x]"
            static = True
            if node.leaf:  # Have IndexList?
                idxs = []
                for c in node.leaf.children:
                    assert c.type == 'Index'
                    (exprstatic, expr) = _expression_to_python_format_string(c.leaf, succgen)
                    static = static and exprstatic
                    idxs += [expr]
                idxs = "".join(["[" + x + "]" for x in idxs])
                return (static, node.children[0] + idxs)
            else:
                return (True, node.children[0])
        else:
            print(node, succgen.channel_identifiers)
            raise IllegalExpressionException('Unknown channel ' + node.children[0])
    else:
        raise IllegalExpressionException('Illegal expression type for channame: ' + node.type)
f7e71bd49624657e98e6f2c172e6f02d8bfc7307
22,756
import numpy import pandas def is_bad(x): """ for numeric vector x, return logical vector of positions that are null, NaN, infinite""" if can_convert_v_to_numeric(x): x = safe_to_numeric_array(x) return numpy.logical_or( pandas.isnull(x), numpy.logical_or(numpy.isnan(x), numpy.isinf(x)) ) return pandas.isnull(x)
b4cf9de18cd8e52ff90a801f1eccf6a4ee2500db
22,757
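A hedged usage sketch for is_bad; it relies on the external helpers can_convert_v_to_numeric and safe_to_numeric_array, so the expected output below assumes those behave as their names suggest:

x = [1.0, None, float('nan'), float('inf'), 2.0]
is_bad(x)  # expected: array([False, True, True, True, False])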
import matplotlib.pyplot as plt
import cv2
import psf
from tifffile import imsave


def psf_generator(cmap='hot', savebin=False, savetif=False, savevol=False,
                  plot=False, display=False, psfvol=False, psftype=0,
                  expsf=False, empsf=False, realshape=(0, 0), **kwargs):
    """Calculate and save point spread functions."""
    args = {
        'shape': (50, 50),  # number of samples in z and r direction
        'dims': (5.0, 5.0),  # size in z and r direction in micrometers
        'ex_wavelen': 488.0,  # excitation wavelength in nanometers
        'em_wavelen': 520.0,  # emission wavelength in nanometers
        'num_aperture': 1.2,
        'refr_index': 1.333,
        'magnification': 1.0,
        'pinhole_radius': 0.05,  # in micrometers
        'pinhole_shape': 'round',
    }
    args.update(kwargs)

    # map the psftype index to its psf flag combination and a label
    psf_types = {
        0: ('psf.ISOTROPIC | psf.EXCITATION', psf.ISOTROPIC | psf.EXCITATION),
        1: ('psf.ISOTROPIC | psf.EMISSION', psf.ISOTROPIC | psf.EMISSION),
        2: ('psf.ISOTROPIC | psf.WIDEFIELD', psf.ISOTROPIC | psf.WIDEFIELD),
        3: ('psf.ISOTROPIC | psf.CONFOCAL', psf.ISOTROPIC | psf.CONFOCAL),
        4: ('psf.ISOTROPIC | psf.TWOPHOTON', psf.ISOTROPIC | psf.TWOPHOTON),
        5: ('psf.GAUSSIAN | psf.EXCITATION', psf.GAUSSIAN | psf.EXCITATION),
        6: ('psf.GAUSSIAN | psf.EMISSION', psf.GAUSSIAN | psf.EMISSION),
        7: ('psf.GAUSSIAN | psf.WIDEFIELD', psf.GAUSSIAN | psf.WIDEFIELD),
        8: ('psf.GAUSSIAN | psf.CONFOCAL', psf.GAUSSIAN | psf.CONFOCAL),
        9: ('psf.GAUSSIAN | psf.TWOPHOTON', psf.GAUSSIAN | psf.TWOPHOTON),
        10: ('psf.GAUSSIAN | psf.EXCITATION | psf.PARAXIAL',
             psf.GAUSSIAN | psf.EXCITATION | psf.PARAXIAL),
        11: ('psf.GAUSSIAN | psf.EMISSION | psf.PARAXIAL',
             psf.GAUSSIAN | psf.EMISSION | psf.PARAXIAL),
        12: ('psf.GAUSSIAN | psf.WIDEFIELD | psf.PARAXIAL',
             psf.GAUSSIAN | psf.WIDEFIELD | psf.PARAXIAL),
        13: ('psf.GAUSSIAN | psf.CONFOCAL | psf.PARAXIAL',
             psf.GAUSSIAN | psf.CONFOCAL | psf.PARAXIAL),
        14: ('psf.GAUSSIAN | psf.TWOPHOTON | psf.PARAXIAL',
             psf.GAUSSIAN | psf.TWOPHOTON | psf.PARAXIAL),
    }
    name, flags = psf_types[psftype]
    psf_matrix = psf.PSF(flags, **args)
    print('%s generated' % name)

    # note: the original swapped these attribute lookups; expsf selects the
    # excitation PSF and empsf the emission PSF
    if expsf:
        psf_matrix = psf_matrix.expsf
    if empsf:
        psf_matrix = psf_matrix.empsf

    if psfvol:
        # psf_matrix = normalize_matrix(psf_matrix.volume())
        psf_matrix = psf_matrix.volume()
        psf_matrix = psf_matrix[:realshape[0], :, :]
        psf_matrix = psf_matrix[:, :realshape[1], :realshape[1]]
    else:
        # psf_matrix = normalize_matrix(psf.mirror_symmetry(psf_matrix.data))
        psf_matrix = psf.mirror_symmetry(psf_matrix.data)
        psf_matrix = psf_matrix[:realshape[1], :realshape[1]]

    if plot:
        plt.imshow(psf_matrix, cmap=cmap)
        plt.show()

    if display:
        cv2.imshow('PSF', psf_matrix)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    if savetif:
        # save zr slices to TIFF files
        imsave('psf_matrix.tif', psf_matrix,
               metadata={'axes': 'TZCYX'}, imagej=True)

    if savevol:
        # save xyz volumes to files
        imsave('psf_matrix_vol.tif', psf_matrix,
               metadata={'axes': 'TZCYX'}, imagej=True)

    print('PSF shape: ', psf_matrix.shape)
    return psf_matrix
3921a39839ece8be2aa762f27371707ff2c79914
22,758
def correct_format(): """ This method will be called by iolite when the user selects a file to import. Typically, it uses the provided name (stored in plugin.fileName) and parses as much of it as necessary to determine if this importer is appropriate to import the data. For example, although X Series II and Agilent data are both comma separated value files, they can be distinguished by the characteristic formatting in each. In our implementation, distinguishing the two is done with 'regular expressions' (QRegularExpression) parsing of the first several lines of the file. Keep in mind that there is nothing stopping you from just returning True (thus supporting all files!) or simply checking the file extension, but such generic checks can yield unexpected results. You cannot be sure which order the various importer plugins will be checked for compatibility. This method must return either True or False. """ IoLog.debug("correct_format called on file = %s"%(importer.fileName)) if importer.fileName.endswith('ioe'): return True return False
043238e6e26fb2844266b0b7462fe17c1b310d35
22,759
def T0_T0star(M, gamma): """Total temperature ratio for flow with heat addition (eq. 3.89) :param <float> M: Initial Mach # :param <float> gamma: Specific heat ratio :return <float> Total temperature ratio T0/T0star """ t1 = (gamma + 1) * M ** 2 t2 = (1.0 + gamma * M ** 2) ** 2 t3 = 2.0 + (gamma - 1.0) * M ** 2 return t1 / t2 * t3
2e5c8ec2ab24dd0d4dfa2feddd0053f277665b33
22,760
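A quick numerical check of the Rayleigh-flow relation above: for M = 0.5 and gamma = 1.4, t1 = 2.4 * 0.25 = 0.6, t2 = (1 + 0.35)^2 = 1.8225 and t3 = 2.1, giving T0/T0* ≈ 0.6914, which matches tabulated Rayleigh flow values:

ratio = T0_T0star(0.5, 1.4)
print(round(ratio, 4))  # 0.6914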
from typing import Optional


def remount_as(
    ip: Optional[str] = None, writeable: bool = False, folder: str = "/system"
) -> bool:
    """
    Mount/Remount file-system. Requires root
    :param folder: folder to mount
    :param writeable: mount as writeable or readable-only
    :param ip: device ip
    :rtype: true on success
    """
    mode = "rw" if writeable else "ro"
    return (
        shell(f"mount -o {mode},remount {folder}", ip=ip).code
        == ADBCommandResult.RESULT_OK
    )
8f343f96d066543359bdfcea3c42f41f40dcaf4d
22,761
def flip_channels(img):
    """Flips the order of channels in an image; eg, BGR <-> RGB.

    This function assumes the image is a numpy.array (what's returned by cv2
    function calls) and uses the numpy re-ordering methods. The number of
    channels does not matter.

    If the image array is strictly 2D, no re-ordering is possible and the
    original data is returned untouched.
    """
    if len(img.shape) == 2:
        return img
    return img[:, :, ::-1]
7aab0222f6fd66c06f8464cd042f30c6eac01c72
22,762
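A small sanity check for flip_channels: reversing the channel axis moves a value stored in the first (blue, in BGR) channel to the last position:

import numpy as np

bgr = np.zeros((2, 2, 3), dtype=np.uint8)
bgr[..., 0] = 255            # blue plane in BGR ordering
rgb = flip_channels(bgr)
assert rgb[0, 0, 2] == 255   # now sits in the blue slot of RGB ordering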
import os


def autodiscover_datafiles(varmap):
    """Return list of (dist directory, data file list) 2-tuples.

    The ``data_dirs`` setup var is used to give a list of subdirectories in
    your source distro that contain data files. It is assumed that all such
    files will go in the ``share`` subdirectory of the prefix where distutils
    is installing your distro (see the distutils docs); within that directory,
    a subdirectory with the same name as your program (i.e., the ``name``
    setup var) will be created, and each directory in ``data_dirs`` will be a
    subdirectory of that. So, for example, if you have example programs using
    your distro in the ``"examples"`` directory in your distro, you would
    declare ``data_dirs = "examples"`` in your setup vars, and everything
    under that source directory would be installed into
    ``share/myprog/examples``.
    """
    result = []
    try:
        datadirs = varmap['data_dirs']
    except KeyError:
        pass
    else:
        pathprefix = "share/{}".format(varmap['name'])
        for dirname_root in datadirs:
            for dirname, subdirs, filenames in os.walk(dirname_root):
                if filenames and ("." not in dirname):
                    distdir = dirname.replace(os.sep, '/')
                    distfiles = [
                        "{}/{}".format(distdir, filename)
                        for filename in filenames
                        if not filename.startswith(".")
                    ]
                    if distfiles:
                        result.append(
                            ("{}/{}".format(pathprefix, distdir), distfiles)
                        )
    return result
412e8beda31e19a4003b499e2e3596eb8e600424
22,763
def parse_main(index): """Parse a main function containing block items. Ex: int main() { return 4; } """ err = "expected main function starting" index = match_token(index, token_kinds.int_kw, ParserError.AT, err) index = match_token(index, token_kinds.main, ParserError.AT, err) index = match_token(index, token_kinds.open_paren, ParserError.AT, err) index = match_token(index, token_kinds.close_paren, ParserError.AT, err) node, index = parse_compound_statement(index) return nodes.Main(node), index
ab932cf3d99340b97ec7d32fa668c4e00e16a3d1
22,764
def elist2tensor(elist, idtype): """Function to convert an edge list to edge tensors. Parameters ---------- elist : iterable of int pairs List of (src, dst) node ID pairs. idtype : int32, int64, optional Integer ID type. Must be int32 or int64. Returns ------- (Tensor, Tensor) Edge tensors. """ if len(elist) == 0: u, v = [], [] else: u, v = zip(*elist) u = list(u) v = list(v) return F.tensor(u, idtype), F.tensor(v, idtype)
a38c26a13b2fc7f111e3ec2c036e592b5b4c3c70
22,765
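A hedged usage sketch for elist2tensor; this helper comes from a DGL-style codebase where F is the framework-agnostic backend module, so the import alias below is an assumption about that context:

import torch
from dgl import backend as F  # assumed backend alias used by the snippet

u, v = elist2tensor([(0, 1), (1, 2), (2, 0)], torch.int64)
# u -> tensor([0, 1, 2]), v -> tensor([1, 2, 0])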
import datetime

import xapian


def _term_to_xapian_value(term, field_type):
    """
    Converts a term to a serialized Xapian value based on the field_type.
    """
    assert field_type in FIELD_TYPES

    def strf(dt):
        """
        Equivalent to dt.strftime(DATETIME_FORMAT) but accepts years below
        1900 (see http://stackoverflow.com/q/10263956/931303)
        """
        return '%04d%02d%02d%02d%02d%02d' % (
            dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)

    if field_type == 'boolean':
        assert isinstance(term, bool)
        if term:
            value = 't'
        else:
            value = 'f'
    elif field_type == 'integer':
        value = INTEGER_FORMAT % term
    elif field_type == 'float':
        value = xapian.sortable_serialise(term)
    elif field_type == 'date' or field_type == 'datetime':
        if field_type == 'date':
            # http://stackoverflow.com/a/1937636/931303 and comments
            term = datetime.datetime.combine(term, datetime.time())
        value = strf(term)
    else:  # field_type == 'text'
        value = _to_xapian_term(term)
    return value
8fe2926a7093ff9a7b22cc222c4a3c5c8f6bc155
22,766
def pop_stl1(osurls, radiourls, splitos): """ Replace STL100-1 links in 10.3.3+. :param osurls: List of OS platforms. :type osurls: list(str) :param radiourls: List of radio platforms. :type radiourls: list(str) :param splitos: OS version, split and cast to int: [10, 3, 3, 2205] :type splitos: list(int) """ if newer_103(splitos, 3): osurls = osurls[1:] radiourls = radiourls[1:] return osurls, radiourls
d88576028bbfbf61ab6fec517e7a66d731b4ebf3
22,767
from flask import jsonify


def empty_search():
    """
    :return: json response of empty list, meaning empty search result
    """
    return jsonify(results=[])
59ac0a6d3b9a3d17f7a80e633ea4bb5b2d07ca33
22,768
import numpy as np
import rasterio
import rasterio.mask


def clip_raster_mean(raster_path, feature, var_nam):
    """
    Opens a raster file from raster_path, applies a mask based on a polygon
    (feature) and returns the mean of the pixel values contained in the mask,
    ignoring NaNs.
    :param raster_path: raster path (raster must contain classes)
    :param feature: polygon feature (extracted from a shapefile or geojson)
    :param var_nam: variable name; for 'PTED', negative values are treated as
        nodata before averaging
    :return: mean of the pixel values contained in the mask
    """
    with rasterio.open(raster_path) as src:
        # Apply mask to raster and crop
        out_image, out_transform = rasterio.mask.mask(src, [feature["geometry"]], crop=True)
        if var_nam == 'PTED':
            out_image[out_image < 0] = np.nan
        return np.nanmean(out_image)
82244272c2da713f679d0f56f5736810fcf8649c
22,769
import json def load_data(in_file): """load json file from seqcluster cluster""" with open(in_file) as in_handle: return json.load(in_handle)
93c1766cb1e36410a8c67e2291b93aa7280abd63
22,770
import os def applyPatch(sourceDir, f, patchLevel='0'): """apply single patch""" if os.path.isdir(f): # apply a whole dir of patches out = True with os.scandir(f) as scan: for patch in scan: if patch.is_file() and not patch.name.startswith("."): out = applyPatch(sourceDir, os.path.join(f, patch), patchLevel) and out return out cmd = ["patch", "--ignore-whitespace", "-d", sourceDir, "-p", str(patchLevel), "-i", f] result = system(cmd) if not result: CraftCore.log.warning(f"applying {f} failed!") return result
031ed88aa8407debacbf4c76f343ed8cd5646d2a
22,771
import re


def expand_at_linestart(P, tablen):
    """Expand tab characters only at the start of each line."""
    def exp(m):
        return m.group().expandtabs(tablen)
    return ''.join([re.sub(r'^\s+', exp, s) for s in P.splitlines(True)])
2b8310e89efdba54b121667e11454281e2c214e3
22,772
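A short example of the line-start tab expansion; tabs that appear after the first non-whitespace character are left untouched:

text = "\tA\n\t\tB\nC\tD"
print(expand_at_linestart(text, 4))
# '    A\n        B\nC\tD'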
def configs(): """Create a mock Configuration object with sentinel values Eg. Configuration( base_jar=sentinel.base_jar, config_file=sentinel.config_file, ... ) """ return Configuration(**dict( (k, getattr(sentinel, k)) for k in DEFAULTS._asdict().keys() ))
c8aa44e1c9695a8fe0188d739c87be07ab06bdb0
22,773
def svn_fs_revision_root_revision(root): """svn_fs_revision_root_revision(svn_fs_root_t * root) -> svn_revnum_t""" return _fs.svn_fs_revision_root_revision(root)
ce153da9527fb8b1235f5591dbd68e2f1c1ecab2
22,774
def config_ospf_interface( tgen, topo=None, input_dict=None, build=False, load_config=True ): """ API to configure ospf on router. Parameters ---------- * `tgen` : Topogen object * `topo` : json file data * `input_dict` : Input dict data, required when configuring from testcase * `build` : Only for initial setup phase this is set as True. * `load_config` : Loading the config to router this is set as True. Usage ----- r1_ospf_auth = { "r1": { "links": { "r2": { "ospf": { "authentication": "message-digest", "authentication-key": "ospf", "message-digest-key": "10" } } } } } result = config_ospf_interface(tgen, topo, r1_ospf_auth) Returns ------- True or False """ logger.debug("Enter lib config_ospf_interface") result = False if topo is None: topo = tgen.json_topo if not input_dict: input_dict = deepcopy(topo) else: input_dict = deepcopy(input_dict) config_data_dict = {} for router in input_dict.keys(): config_data = [] for lnk in input_dict[router]["links"].keys(): if "ospf" not in input_dict[router]["links"][lnk]: logger.debug( "Router %s: ospf config is not present in" "input_dict", router ) continue ospf_data = input_dict[router]["links"][lnk]["ospf"] data_ospf_area = ospf_data.setdefault("area", None) data_ospf_auth = ospf_data.setdefault("authentication", None) data_ospf_dr_priority = ospf_data.setdefault("priority", None) data_ospf_cost = ospf_data.setdefault("cost", None) data_ospf_mtu = ospf_data.setdefault("mtu_ignore", None) try: intf = topo["routers"][router]["links"][lnk]["interface"] except KeyError: intf = topo["switches"][router]["links"][lnk]["interface"] # interface cmd = "interface {}".format(intf) config_data.append(cmd) # interface area config if data_ospf_area: cmd = "ip ospf area {}".format(data_ospf_area) config_data.append(cmd) # interface ospf auth if data_ospf_auth: if data_ospf_auth == "null": cmd = "ip ospf authentication null" elif data_ospf_auth == "message-digest": cmd = "ip ospf authentication message-digest" else: cmd = "ip ospf authentication" if "del_action" in ospf_data: cmd = "no {}".format(cmd) config_data.append(cmd) if "message-digest-key" in ospf_data: cmd = "ip ospf message-digest-key {} md5 {}".format( ospf_data["message-digest-key"], ospf_data["authentication-key"] ) if "del_action" in ospf_data: cmd = "no {}".format(cmd) config_data.append(cmd) if ( "authentication-key" in ospf_data and "message-digest-key" not in ospf_data ): cmd = "ip ospf authentication-key {}".format( ospf_data["authentication-key"] ) if "del_action" in ospf_data: cmd = "no {}".format(cmd) config_data.append(cmd) # interface ospf dr priority if data_ospf_dr_priority: cmd = "ip ospf priority {}".format(ospf_data["priority"]) if "del_action" in ospf_data: cmd = "no {}".format(cmd) config_data.append(cmd) # interface ospf cost if data_ospf_cost: cmd = "ip ospf cost {}".format(ospf_data["cost"]) if "del_action" in ospf_data: cmd = "no {}".format(cmd) config_data.append(cmd) # interface ospf mtu if data_ospf_mtu: cmd = "ip ospf mtu-ignore" if "del_action" in ospf_data: cmd = "no {}".format(cmd) config_data.append(cmd) if build: return config_data if config_data: config_data_dict[router] = config_data result = create_common_configurations( tgen, config_data_dict, "interface_config", build=build ) logger.debug("Exiting lib API: config_ospf_interface()") return result
9d5f91bd9d3e2ede8e83a538cc7c1e36a8cf9596
22,775
from typing import Any

import numpy as np


def is_floatscalar(x: Any) -> bool:
    """Check whether `x` is a float scalar.

    Parameters:
    ----------
    x: A python object to check.

    Returns:
    ----------
    `True` iff `x` is a float scalar (built-in or Numpy float).
    """
    return isinstance(x, (
        float,
        np.float16,
        np.float32,
        np.float64,
    ))
2a93524290eaa4b4e1f0b0cc7a8a0dcb2a46f9d3
22,776
def http_header_control_cache(request):
    """Return the cache-control type of an HTTP response.
    request: response object whose headers are inspected"""
    print("--------------- Getting cache-control -------------------")
    try:
        cabecera = request.headers
        cache_control = cabecera.get("cache-control")
    except Exception:
        cache_control = "NA"
        print("Unexpected error: no cache-control header was found")
    return cache_control
976adffa3c3601c6f0fd49617e15e25aa9cb2c9b
22,777
def summation(limit):
    """
    Returns the summation of all natural numbers from 0 to limit.

    Uses the closed-form formula for the sum of the first n natural numbers,
    n * (n + 1) / 2.

    :param limit: {int}
    :return: {int}
    """
    return (limit * (limit + 1)) // 2 if limit >= 0 else 0
1ff16c7c4131458e50c9c9bd5c0f20895d8ab121
22,778
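A quick check of the closed-form sum above against a direct loop:

assert summation(100) == sum(range(101)) == 5050
assert summation(-5) == 0  # negative limits clamp to zero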
import tensorflow as tf
import tensorflow_hub as hub


def load_and_initialize_hub_module(module_path, signature='default'):
    """Loads graph of a TF-Hub module and initializes it into a session.

    Args:
      module_path: string Path to TF-Hub module.
      signature: string Signature to use when creating the apply graph.

    Return:
      graph: tf.Graph Graph of the module.
      session: tf.Session Session with initialized variables and tables.
      inputs: dict Dictionary of input tensors.
      outputs: dict Dictionary of output tensors.

    Raises:
      ValueError: If signature contains a SparseTensor on input or output.
    """
    graph = tf.Graph()
    with graph.as_default():
        tf.compat.v1.logging.info('Importing %s', module_path)
        module = hub.Module(module_path)

        signature_inputs = module.get_input_info_dict(signature)
        signature_outputs = module.get_output_info_dict(signature)
        # First check there are no SparseTensors in input or output.
        for key, info in list(signature_inputs.items()) + list(
                signature_outputs.items()):
            if info.is_sparse:
                raise ValueError(
                    'Signature "%s" has a SparseTensor on input/output "%s".'
                    ' SparseTensors are not supported.' % (signature, key))

        # Create placeholders to represent the input of the provided signature.
        inputs = {}
        for input_key, input_info in signature_inputs.items():
            inputs[input_key] = tf.compat.v1.placeholder(
                shape=input_info.get_shape(), dtype=input_info.dtype,
                name=input_key)

        outputs = module(inputs=inputs, signature=signature, as_dict=True)

    session = tf.compat.v1.Session(graph=graph)
    session.run(tf.compat.v1.global_variables_initializer())
    session.run(tf.compat.v1.tables_initializer())
    return graph, session, inputs, outputs
b04b5f77c7e0207d314ebb5910ec1c5e61f4755c
22,779
import numpy as np


def j_index(true_labels, predicts):
    """ j_index

    Computes the Jaccard Index of the given set, which is also called the
    'intersection over union' in multi-label settings. It's defined as the
    intersection between the true label's set and the prediction's set,
    divided by the sum, or union, of those two sets.

    Parameters
    ----------
    true_labels: numpy.ndarray of shape (n_samples, n_target_tasks)
        A matrix with the true labels for all the classification tasks and for
        n_samples.

    predicts: numpy.ndarray of shape (n_samples, n_target_tasks)
        A matrix with the predictions for all the classification tasks and for
        n_samples.

    Returns
    -------
    float
        The J-index, or 'intersection over union', for the given sets.

    Examples
    --------
    >>> from skmultiflow.evaluation.metrics.metrics import j_index
    >>> true_labels = [[0,1,0,1],[0,0,0,1],[1,1,0,1],[1,1,1,1]]
    >>> predictions = [[0,1,0,1],[0,1,1,0],[0,1,0,1],[1,1,1,1]]
    >>> j_index(true_labels, predictions)
    0.66666666666666663

    """
    if not hasattr(true_labels, 'shape'):
        true_labels = np.asarray(true_labels)
    if not hasattr(predicts, 'shape'):
        predicts = np.asarray(predicts)
    N, L = true_labels.shape
    s = 0.0
    for i in range(N):
        inter = sum((true_labels[i, :] * predicts[i, :]) > 0) * 1.
        union = sum((true_labels[i, :] + predicts[i, :]) > 0) * 1.
        if union > 0:
            s += inter / union
        elif np.sum(true_labels[i, :]) == 0:
            s += 1.
    return s * 1. / N
33bef64196acf441c299a4a90da64b2bb866e364
22,780
import torch
from torch.nn.functional import pad


def odefun(x, t, net, alph=[1.0, 1.0, 1.0]):
    """
    neural ODE combining the characteristics and log-determinant (see Eq. (2)),
    the transport costs (see Eq. (5)), and the HJB regularizer (see Eq. (7)).

    d_t  [x ; l ; v ; r] = odefun( [x ; l ; v ; r] , t )

    x - particle position
    l - log determinant
    v - accumulated transport costs (Lagrangian)
    r - accumulates violation of HJB condition along trajectory
    """
    nex, d_extra = x.shape
    d = d_extra - 3

    z = pad(x[:, :d], (0, 1, 0, 0), value=t)  # concatenate with the time t

    gradPhi, trH = net.trHess(z)

    dx = -(1.0 / alph[0]) * gradPhi[:, 0:d]
    dl = -(1.0 / alph[0]) * trH.unsqueeze(1)
    dv = 0.5 * torch.sum(torch.pow(dx, 2), 1, keepdim=True)
    dr = torch.abs(-gradPhi[:, -1].unsqueeze(1) + alph[0] * dv)

    return torch.cat((dx, dl, dv, dr), 1)
1523b28f1568bcd668a3f8cc8ce39dfb7d8096fe
22,781
def create_transform(num_flow_steps, param_dim, context_dim, base_transform_kwargs): """Build a sequence of NSF transforms, which maps parameters x into the base distribution u (noise). Transforms are conditioned on strain data y. Note that the forward map is f^{-1}(x, y). Each step in the sequence consists of * A linear transform of x, which in particular permutes components * A NSF transform of x, conditioned on y. There is one final linear transform at the end. This function was adapted from the uci.py example in https://github.com/bayesiains/nsf Arguments: num_flow_steps {int} -- number of transforms in sequence param_dim {int} -- dimensionality of x context_dim {int} -- dimensionality of y base_transform_kwargs {dict} -- hyperparameters for NSF step Returns: Transform -- the constructed transform """ transform = transforms.CompositeTransform([ transforms.CompositeTransform([ create_linear_transform(param_dim), create_base_transform(i, param_dim, context_dim=context_dim, **base_transform_kwargs) ]) for i in range(num_flow_steps) ] + [ create_linear_transform(param_dim) ]) return transform
d4d556163af777f50aed2f4d86b1ae9c1de81047
22,782
import sys
import tarfile

from lxml import etree


def filterUniques(tar, to_filter, score, ns):
    """ Filters unique psms/peptides/proteins from (multiple) Percolator output
    XML files. Takes a tarred set of XML files, a filtering query (e.g. psms),
    a score to filter on and a namespace. Outputs an ElementTree.
    """
    for tf in to_filter:
        assert tf in ['psms', 'peptides', 'proteins'], \
            'filterUnique function needs a specified to_filter list of psms, peptides, proteins.'
    assert score in ['q', 'pep', 'p'], \
        'filterUnique function needs a specified score to filter on of q, pep or p.'

    try:
        with tarfile.open(tar, 'r') as f:
            members = f.getmembers()
            f.extractall()
    except Exception:
        sys.stderr.write('Could not extract Percolator files from dataset: %s \n' % tar)
        return 1

    docs = []
    for fn in members:
        docs.append(etree.parse(fn.name))

    # lookup dicts
    scores = {'q': 'q_value', 'pep': 'pep', 'p': 'p_value'}
    filt_el_dict = {'psms': 'xmlns:peptide_seq',
                    'peptides': '@xmlns:peptide_id'}
    # result dict: maps each sequence to its (best score, element) pair
    filtered = {'psms': {}, 'peptides': {}, 'proteins': {}}

    for doc in docs:
        for filt_el in to_filter:
            feattree = doc.xpath('//xmlns:%s' % filt_el, namespaces=ns)
            if feattree == []:
                sys.stdout.write('%s not found in (one of the) Percolator output documents. Continuing...\n' % filt_el)
                continue
            for feat in feattree[0]:
                # It's actually faster to loop through the feat's children,
                # but this is 2-line code and still readable.
                featscore = float(feat.xpath('xmlns:%s' % scores[score], namespaces=ns)[0].text)
                seq = feat.xpath('%s' % filt_el_dict[filt_el], namespaces=ns)
                try:
                    # psm seqs are parsed here
                    seq = seq[0].attrib['seq']
                except Exception:
                    # caught when parsing peptide seqs (different format)
                    seq = str(seq[0])

                # FIXME this only works for lower-is-better scores (eg q-vals,
                # pep, but not for scores that are better when higher)
                if seq not in filtered[filt_el] or featscore < filtered[filt_el][seq][0]:
                    filtered[filt_el][seq] = (featscore, feat)

    # make element lists from the filtered dicts
    for filt_el in filtered:
        filtered[filt_el] = [feat for (featscore, feat) in filtered[filt_el].values()]

    outdoc = refillTree(docs[0], ['psms', 'peptides', 'proteins'], filtered, ns)
    return outdoc
0fdfb1084d803afab2b13241f5c0b75726fd10e0
22,783
def for_in_pyiter(it): """ >>> for_in_pyiter(Iterable(5)) [0, 1, 2, 3, 4] """ l = [] for item in it: l.append(item) return l
7d5c44ce771ea9847d57749235a31f200a01b67f
22,784
from sklearn.model_selection import train_test_split


def train_test_split_with_none(X, y=None, sample_weight=None, random_state=0):
    """
    Splits into train and test data even if they are None.

    @param      X               X
    @param      y               y
    @param      sample_weight   sample weight
    @param      random_state    random state
    @return                     similar to :epkg:`scikit-learn:model_selection:train_test_split`.
    """
    not_none = [_ for _ in [X, y, sample_weight] if _ is not None]
    res = train_test_split(*not_none, random_state=random_state)
    inc = len(not_none)
    trains = []
    tests = []
    for i in range(inc):
        trains.append(res[i * 2])
        tests.append(res[i * 2 + 1])
    while len(trains) < 3:
        trains.append(None)
        tests.append(None)
    X_train, y_train, w_train = trains
    X_test, y_test, w_test = tests
    return X_train, y_train, w_train, X_test, y_test, w_test
8a789d6001a56096eba556301e130c57edd8cf87
22,785
from timeit import default_timer as timer


def measure_time(func, repeat=1000):
    """ Repeatedly executes a function and records the lowest time. """
    def wrapper(*args, **kwargs):
        min_time = float("inf")
        for _ in range(repeat):
            start = timer()
            result = func(*args, **kwargs)
            curr_time = timer() - start
            if curr_time < min_time:
                min_time = curr_time
        return [min_time, result]
    return wrapper
0515eca9cfa96a7395b3461bd3302a9780d05366
22,786
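A minimal usage sketch for the measure_time wrapper above; note it returns a [best_time, result] pair rather than the bare result:

timed_sqrt = measure_time(lambda x: x ** 0.5, repeat=100)
best, value = timed_sqrt(2.0)
print(best, value)  # e.g. 1.2e-07 1.4142135623730951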
import numpy as np


def initialise_players(frame_data, params):
    """
    initialise_players(frame_data, params)

    create a list of player objects that holds their positions and velocities
    from the tracking data dataframe

    Parameters
    -----------
        frame_data: row (i.e. instant) of either the home or away team tracking Dataframe
        params: Dictionary of model parameters (default model parameters can be
            generated using default_model_params())

    Returns
    -----------
        team_players: list of player objects for the team at a given instant
    """
    # get player ids
    player_ids = np.unique([x.split("_")[0] for x in frame_data.keys()])
    # create list
    team_players = []
    for p in player_ids:
        # create a player object for player_id 'p'
        team_player = player(p, frame_data, params)
        if team_player.inframe:
            team_players.append(team_player)
    return team_players
4126ba5cf1cdcd61017692260026dbdd03523874
22,787
import gc

import numpy as np
import pandas as pd

# maximum values representable by the unsigned integer dtypes
UINT16_MAX = np.iinfo(np.uint16).max
UINT32_MAX = np.iinfo(np.uint32).max


def read_edgelist(f, directed=True, sep=r"\s+", header=None,
                  keep_default_na=False, **readcsvkwargs):
    """
    Creates a csrgraph from an edgelist.

    The edgelist should be in the form
       [source destination]
    or [source destination edge_weight]

    The first column needs to be the source, the second the destination.
    If there is a third column it's assumed to be edge weights.

    Otherwise, all arguments from pandas.read_csv can be used to read the file.

    f : str
        Filename to read
    directed : bool
        Whether the graph is directed or undirected.
        All csrgraphs are directed, undirected graphs simply add "return edges"
    sep : str
        CSV-style separator. Eg. Use "," if comma separated
    header : int or None
        pandas read_csv parameter. Use if column names are present
    keep_default_na: bool
        pandas read_csv argument to prevent casting any value to NaN
    read_csv_kwargs : keyword arguments for pd.read_csv
        Pass these kwargs as you would normally to pd.read_csv.

    Returns : csrgraph
    """
    # Read in csv correctly to each column
    elist = pd.read_csv(
        f, sep=sep, header=header,
        keep_default_na=keep_default_na,
        **readcsvkwargs
    )
    if len(elist.columns) == 2:
        elist.columns = ['src', 'dst']
        elist['weight'] = np.ones(elist.shape[0])
    elif len(elist.columns) == 3:
        elist.columns = ['src', 'dst', 'weight']
    else:
        raise ValueError(f"""
            Invalid columns: {elist.columns}
            Expected 2 (source, destination)
            or 3 (source, destination, weight)
            Read File: \n{elist.head(5)}
        """)
    # Create name mapping to normalize node IDs
    # Somehow this is 1.5x faster than np.union1d. Shame on numpy.
    allnodes = list(
        set(elist.src.unique())
        .union(set(elist.dst.unique())))
    # Factor all nodes to unique IDs
    names = (
        pd.Series(allnodes).astype('category')
        .cat.categories
    )
    nnodes = names.shape[0]
    # Pick the smallest unsigned integer dtype that can hold all node IDs
    dtype = np.uint16
    if nnodes > UINT16_MAX:
        dtype = np.uint32
    if nnodes > UINT32_MAX:
        dtype = np.uint64
    name_dict = dict(zip(names,
                         np.arange(names.shape[0], dtype=dtype)))
    elist.src = elist.src.map(name_dict).astype(dtype)
    elist.dst = elist.dst.map(name_dict).astype(dtype)
    # clean up temp data
    allnodes = None
    name_dict = None
    gc.collect()
    # If undirected graph, append edgelist to reversed self
    if not directed:
        other_df = elist.copy()
        other_df.columns = ['dst', 'src', 'weight']
        elist = pd.concat([elist, other_df])
        other_df = None
        gc.collect()
    # Need to sort by src for _edgelist_to_graph
    elist = elist.sort_values(by='src')
    # extract numpy arrays and clear memory
    src = elist.src.to_numpy()
    dst = elist.dst.to_numpy()
    weight = elist.weight.to_numpy()
    elist = None
    gc.collect()
    G = methods._edgelist_to_graph(
        src, dst, weight,
        nnodes, nodenames=names
    )
    return G
dd4110700857c3deb86c53a176ab93b0366cd900
22,788
def is_comment(txt_row): """ Tries to determine if the current line of text is a comment line. Args: txt_row (string): text line to check. Returns: True when the text line is considered a comment line, False if not. """ if (len(txt_row) < 1): return True if ((txt_row[0] == '(') and (txt_row[len(txt_row) - 1] == ')')): return True else: return False
db54b90053244b17ec209ed1edb1905b62151165
22,789
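A few examples of the comment heuristic above, which treats blank lines and fully parenthesised lines (G-code style) as comments:

is_comment("(tool change)")  # True
is_comment("G1 X10 Y10")     # False
is_comment("")               # True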
import json


def updateBillingPlanPaymentDefinition(pk, paypal_payment_definition):
    """Update an existing payment definition of a billing plan

    :param pk: the primary key of the payment definition (associated with a billing plan)
    :type pk: integer
    :param paypal_payment_definition: Paypal billing plan payment definition
    :type paypal_payment_definition: object
    :returns: True for successful update or False in any other case
    :rtype: bool
    """
    try:
        try:
            frequency_interval = paypal_payment_definition['frequency_interval']
        except Exception:
            frequency_interval = None
        try:
            cycles = paypal_payment_definition['cycles']
        except Exception:
            cycles = None
        try:
            charge_models = paypal_payment_definition['charge_models']
        except Exception:
            charge_models = dict()
        try:
            amount_value = paypal_payment_definition['amount']['value']
        except Exception:
            amount_value = None
        try:
            amount_currency = paypal_payment_definition['amount']['currency']
        except Exception:
            amount_currency = None

        BillingPlanPaymentDefinition.objects.filter(pk=pk).update(
            name=paypal_payment_definition['name'],
            type=paypal_payment_definition['type'],
            frequency=paypal_payment_definition['frequency'],
            frequency_interval=frequency_interval,
            cycles=cycles,
            charge_models=json.dumps(utilities.object2dict(charge_models, False)),
            amount_value=amount_value,
            amount_currency=amount_currency,
            json=json.dumps(utilities.object2dict(paypal_payment_definition, False))
        )
        return True
    except Exception as ex:
        log.error("Error in billing plan's payment definition modification (pk:=%d): %s" % (pk, str(ex)))
        return False
b4bf58088c8e501ccf380dda98587467a8683ff9
22,790
from typing import List def format_float_list(array: List[float], precision: int = 4) -> List[str]: """ Formats a list of float values to a specific precision. :param array: A list of float values to format. :param precision: The number of decimal places to use. :return: A list of strings containing the formatted floats. """ return [format_float(f, precision) for f in array]
b790379327acc5ebdf54f99621a06edd6228941d
22,791
import html def counts_card() -> html.Div: """Return the div that contains the overall count of patients/studies/images.""" return html.Div( className="row", children=[ html.Div( className="four columns", children=[ html.Div( className="card gold-left-border", children=html.Div( className="container", children=[ html.H4(id="patient-count", children=""), html.P(children="patients"), ], ), ) ], ), html.Div( className="four columns", children=[ html.Div( className="card green-left-border", children=html.Div( className="container", children=[ html.H4(id="study-count", children=""), html.P(children="studies"), ], ), ) ], ), html.Div( className="four columns", children=[ html.Div( className="card purple-left-border", children=html.Div( className="container", children=[ html.H4(id="image-count", children=""), html.P(children="images"), ], ), ) ], ), ], )
f80ba28b7ef1b2407d6a8b3e8eaccf26c734566a
22,792
import logging


def validate_est(est: EstData, include_elster_responses: bool = False):
    """
    Data for an ESt is validated using ERiC. If the validation is successful
    then this should return a 200 HTTP response with
    {'success': bool, 'est': est}. Otherwise this should return a 400 response
    if the validation failed with
    {'code': int, 'message': str, 'description': str,
     'validation_problems': [{'code': int, 'message': str}]}
    or a 400 response for other client errors and a 500 response for server
    errors with {'code': int, 'message': str, 'description': str}

    :param est: the JSON input data for the ESt
    :param include_elster_responses: query parameter which indicates whether
        the ERiC/Server responses are returned
    """
    try:
        request = EstValidationRequestController(est, include_elster_responses)
        result = request.process()
        if "transferticket" in result:
            result["transfer_ticket"] = result.pop("transferticket")
        return result
    except EricProcessNotSuccessful as e:
        logging.getLogger().info("Could not validate est", exc_info=True)
        raise HTTPException(status_code=422,
                            detail=e.generate_error_response(include_elster_responses))
2565572efd9b1ee52fabb98473b7934e13b691ca
22,793
from typing import Callable from typing import List async def list_solver_releases( solver_key: SolverKeyId, user_id: int = Depends(get_current_user_id), catalog_client: CatalogApi = Depends(get_api_client(CatalogApi)), url_for: Callable = Depends(get_reverse_url_mapper), ): """ Lists all releases of a given solver """ releases: List[Solver] = await catalog_client.list_solver_releases( user_id, solver_key ) for solver in releases: solver.url = url_for( "get_solver_release", solver_key=solver.id, version=solver.version ) return sorted(releases, key=attrgetter("pep404_version"))
0461e6ba72c01e789af8571d75b7da21d2f17801
22,794
def read_silicon_data(tool, target: Target): """ Reads silicon data from device :param tool: Programming/debugging tool used for communication :param target: The target object. :return: Device response """ logger.debug('Read silicon data') tool.reset(ResetType.HW) passed, response = provision_keys_and_policies(tool, None, target.register_map) return response
3b94234218f0a5573438851ec97d49856356fc7b
22,795
import copy

import matplotlib.pyplot as plt
from shapely.geometry import LinearRing


def offset_perimeter(geometry, offset, side='left', plot_offset=False):
    """Offsets the perimeter of a geometry of a
    :class:`~sectionproperties.pre.sections.Geometry` object by a certain
    distance. Note that the perimeter facet list must be entered in a
    consecutive order.

    :param geometry: Cross-section geometry object
    :type geometry: :class:`~sectionproperties.pre.sections.Geometry`
    :param float offset: Offset distance for the perimeter
    :param string side: Side of the perimeter offset, either 'left' or
        'right'. E.g. 'left' for a counter-clockwise offsets the perimeter
        inwards.
    :param bool plot_offset: If set to True, generates a plot comparing the
        old and new geometry

    The following example 'corrodes' a 200UB25 I-section by 1.5 mm and
    compares a few of the section properties::

        import sectionproperties.pre.sections as sections
        from sectionproperties.pre.offset import offset_perimeter
        from sectionproperties.analysis.cross_section import CrossSection

        # calculate original section properties
        original_geometry = sections.ISection(d=203, b=133, t_f=7.8, t_w=5.8,
                                              r=8.9, n_r=16)
        original_mesh = original_geometry.create_mesh(mesh_sizes=[3.0])
        original_section = CrossSection(original_geometry, original_mesh)
        original_section.calculate_geometric_properties()
        original_area = original_section.get_area()
        (original_ixx, _, _) = original_section.get_ic()

        # calculate corroded section properties
        corroded_geometry = offset_perimeter(original_geometry, 1.5,
                                             plot_offset=True)
        corroded_mesh = corroded_geometry.create_mesh(mesh_sizes=[3.0])
        corroded_section = CrossSection(corroded_geometry, corroded_mesh)
        corroded_section.calculate_geometric_properties()
        corroded_area = corroded_section.get_area()
        (corroded_ixx, _, _) = corroded_section.get_ic()

        # compare section properties
        print("Area reduction = {0:.2f}%".format(
            100 * (original_area - corroded_area) / original_area))
        print("Ixx reduction = {0:.2f}%".format(
            100 * (original_ixx - corroded_ixx) / original_ixx))

    The following plot is generated by the above example:

    ..  figure:: ../images/offset_example.png
        :align: center
        :scale: 75 %

        200UB25 with 1.5 mm corrosion.

    The following is printed to the terminal:

    ..  code-block:: text

        Area reduction = 41.97%
        Ixx reduction = 39.20%
    """
    # initialise perimeter points list
    perimeter_points = []

    # add perimeter points to the list
    for facet_idx in geometry.perimeter:
        # get the facet
        facet = geometry.facets[facet_idx]

        # get the first point on the facet
        point = geometry.points[facet[0]]

        # add the (x,y) tuple to the list
        perimeter_points.append((point[0], point[1]))

    # create LinearRing object
    perimeter = LinearRing(perimeter_points)

    # offset perimeter
    new_perimeter = perimeter.parallel_offset(
        distance=offset, side=side, resolution=0, join_style=2
    )
    (new_xcoords, new_ycoords) = new_perimeter.xy

    # create deep copy of original geometry object
    new_geometry = copy.deepcopy(geometry)

    # replace offset points in new geometry
    for (i, facet_idx) in enumerate(new_geometry.perimeter):
        # get the facet
        facet = new_geometry.facets[facet_idx]

        # get the first point on the facet
        point = new_geometry.points[facet[0]]

        # replace the point location with the offset location
        point[0] = new_xcoords[i]
        point[1] = new_ycoords[i]

    if plot_offset:
        (fig, ax) = plt.subplots()

        # plot new geometry
        for (i, f) in enumerate(new_geometry.facets):
            if i == 0:
                ax.plot([new_geometry.points[f[0]][0], new_geometry.points[f[1]][0]],
                        [new_geometry.points[f[0]][1], new_geometry.points[f[1]][1]],
                        'ko-', markersize=2, label='Offset Geometry')
            else:
                ax.plot([new_geometry.points[f[0]][0], new_geometry.points[f[1]][0]],
                        [new_geometry.points[f[0]][1], new_geometry.points[f[1]][1]],
                        'ko-', markersize=2)

        # plot the original perimeter
        for (i, facet_idx) in enumerate(geometry.perimeter):
            f = geometry.facets[facet_idx]

            if i == 0:
                ax.plot([geometry.points[f[0]][0], geometry.points[f[1]][0]],
                        [geometry.points[f[0]][1], geometry.points[f[1]][1]],
                        'r--', markersize=2, label='Original Perimeter')
            else:
                ax.plot([geometry.points[f[0]][0], geometry.points[f[1]][0]],
                        [geometry.points[f[0]][1], geometry.points[f[1]][1]],
                        'r--', markersize=2)

        ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
        ax.set_title('Offset Geometry')
        ax.set_aspect('equal', anchor='C')
        plt.tight_layout()
        plt.show()

    return new_geometry
e8b2851ea5ffd17faeb62bd3ca094e4cb6dd162a
22,796
import numpy as np
import pandas as pd


def WalterComposition(F, P):
    """
    Calculates the melt composition generated as a function of F and P, using
    the parameterisation of Duncan et al. (2017).

    Parameters
    -----
    F: float
        Melt fraction
    P: float
        Pressure in GPa

    Returns
    -----
    MeltComposition: series
        Major elements in wt%
    T: float
        Temperatures associated with the melt in C
    """
    if isinstance(F, float):
        F = np.array([F])
        P = np.array([P])
    if isinstance(F, list):
        F = np.array(F)
        P = np.array(P)

    comp = pd.DataFrame(np.array([P, F]).T, columns=['P', 'X'])

    F = F * 100
    if F[F.argmin()] == 0:
        F[F.argmin()] = F[F.argmin() + 1]

    comp['SiO2'] = ((-2.137e-5*P - 9.83e-4)*F**2 + (5.975e-3*P + 8.513e-2)*F
                    + (-4.236e-1*P + 4.638e1))
    comp['Al2O3'] = ((-1.908e-4*P - 1.366e-4)*F**2 + (4.589e-2*P - 1.525e-1)*F
                     + (-2.685*P + 2.087e1))
    comp['FeO'] = ((2.365e-4*P - 8.492e-4)*F**2 + (-3.723e-2*P + 1.1e-1)*F
                   + (1.366*P + 5.488))
    comp['MgO'] = ((-8.068e-5*P + 1.747e-3)*F**2 + (-1.268e-2*P + 9.761e-2)*F
                   + (2.12*P + 9.914))
    comp['CaO'] = ((4.305e-5*P - 4.513e-4)*F**2 + (1.104e-3*P - 4.948e-2)*F
                   + (-5.564e-1*P + 1.294e1))
    comp['TiO2'] = 12.370 * F**-0.917
    comp['Na2O'] = 5.339 * F**-0.654
    comp['K2O'] = 6.612 * F**-0.923

    # cap the incompatible elements at their F = 3% values
    limTiO2 = 12.370 * 3**-0.917
    limNa2O = 5.339 * 3**-0.654
    limK2O = 6.612 * 3**-0.923
    comp.TiO2[comp.TiO2 > limTiO2] = limTiO2
    comp.Na2O[comp.Na2O > limNa2O] = limNa2O
    comp.K2O[comp.K2O > limK2O] = limK2O

    comp['Cr2O3'] = -7.86e-5*F**2 + 9.705e-3*F + 2.201e-1
    comp['MnO'] = -6.757e-6*F**2 + -2.04e-5*F + 2.014e-1

    T = ((8.597e-3*P - 1.963e-2)*F**2 + (-1.374*P + 7.383)*F
         + 9.369e1*P + 1.177e3)

    return comp, T
54b2b5d6e6f4500da1c17e54bcb9b8804a65e9b0
22,797
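A hedged usage sketch for WalterComposition (assuming numpy and pandas are available as imported above); F is a melt fraction between 0 and 1 and is rescaled to percent internally:

comp, T = WalterComposition(0.1, 3.0)  # 10% melting at 3 GPa
print(comp[['SiO2', 'MgO', 'FeO']])
print(T)  # melt temperature in degrees C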
from pathlib import Path

import pandas as pd


def fetch_study_metadata(
    data_dir: Path, version: int = 7, verbose: int = 1
) -> pd.DataFrame:
    """
    Download if needed the `metadata.tsv.gz` file from Neurosynth and load it
    into a pandas DataFrame.

    The metadata table contains the metadata for each study. Each study (ID)
    is stored on its own line. These IDs are in the same order as the id
    column of the associated `coordinates.tsv.gz` file, but the rows will
    differ because the coordinates file will contain multiple rows per study.
    They are also in the same order as the rows in the `features.npz` files
    for the same version.

    The metadata will therefore have N rows, N being the number of studies in
    the Neurosynth dataset. The columns (for version 7) are:
    - id
    - doi
    - space
    - title
    - authors
    - year
    - journal

    Parameters
    ----------
    data_dir : Path
        the path for the directory where downloaded data should be saved.
    version : int, optional
        the neurosynth data version, by default 7
    verbose : int, optional
        verbose param for nilearn's `_fetch_files`, by default 1

    Returns
    -------
    pd.DataFrame
        the study metadata dataframe
    """
    metadata_filename = f"data-neurosynth_version-{version}_metadata.tsv.gz"
    metadata_file = _fetch_files(
        data_dir,
        [
            (
                metadata_filename,
                NS_DATA_URL + metadata_filename,
                {},
            ),
        ],
        verbose=verbose,
    )[0]
    metadata = pd.read_table(metadata_file)
    return metadata
e04a0dd631f8b8a53708118134b8d5039e83bcdf
22,798
from typing import Union def proveFormula(formula: str) -> Union[int, str]: """ Implements proveFormula according to grader.py >>> proveFormula('p') 1 >>> proveFormula('(NOT (NOT (NOT (NOT not)) )\t)') 1 >>> proveFormula('(NOT (NOT (NOT (NOT not)) )') 'E' >>> proveFormula('(IF p p)') 'T' >>> proveFormula('(AND p (NOT p))') 'U' >>> proveFormula('(OR p (NOT q))') 3 """ ast = parse(formula) if ast is None: return 'E' result = determine_satisfiability(ast) if result is True: return 'T' if result is False: return 'U' return result
4c078bdfa586b9807b6265db43ee7187e6aef349
22,799