Dataset columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
def get_service(hass, config, discovery_info=None):
    """Get the ClickSend notification service."""
    if not _authenticate(config):
        _LOGGER.error("You are not authorized to access ClickSend")
        return None

    return ClicksendNotificationService(config)
4b3dd52d7ebcb37012bc847288d0ea38c4ca91f6
23,800
from faker import Faker


def mock_features_dtypes(num_rows=100):
    """Internal function that returns the default full dataset.

    :param num_rows: The number of observations in the final dataset. Defaults to 100.
    :type num_rows: int, optional
    :return: The dataset with all columns included.
    :rtype tuple: (str, str)
    """
    fake = Faker()

    def _remove_x_from_number(phone):
        if "x" in phone:
            phone = phone[: phone.find("x")]
        return phone

    phone_numbers = pd.Series([fake.phone_number() for _ in range(num_rows)])
    phone_numbers = phone_numbers.apply(_remove_x_from_number)

    def _remove_newline_from_address(address):
        address = address.replace("\n", ", ")
        return address

    addresses = pd.Series([fake.address() for _ in range(num_rows)])
    addresses = addresses.apply(_remove_newline_from_address)

    dtypes_dict = {
        "ints": [i for i in range(-num_rows // 2, num_rows // 2)],
        "rand_ints": np.random.choice([i for i in range(-5, 5)], num_rows),
        "floats": [float(i) for i in range(-num_rows // 2, num_rows // 2)],
        "rand_floats": np.random.uniform(low=-5.0, high=5.0, size=num_rows),
        "booleans": np.random.choice([True, False], num_rows),
        "categoricals": np.random.choice(
            ["First", "Second", "Third", "Fourth"], num_rows
        ),
        "dates": pd.date_range("1/1/2001", periods=num_rows),
        "texts": [
            f"My children are miserable failures, all {i} of them!"
            for i in range(num_rows)
        ],
        "ints_nullable": np.random.choice(
            [i for i in range(-10 // 2, 10 // 2)] + [pd.NA], num_rows
        ),
        "floats_nullable": np.random.choice(
            np.append([float(i) for i in range(-5, 5)], pd.NA), num_rows
        ),
        "booleans_nullable": np.random.choice([True, False, None], num_rows),
        "full_names": pd.Series([fake.name() for _ in range(num_rows)]),
        "phone_numbers": phone_numbers,
        "addresses": addresses,
        "countries": pd.Series([fake.country() for _ in range(num_rows)]),
        "email_addresses": pd.Series(
            [fake.ascii_free_email() for _ in range(num_rows)]
        ),
        "urls": pd.Series([fake.url() for _ in range(num_rows)]),
        "currencies": pd.Series([fake.pricetag() for _ in range(num_rows)]),
        "file_paths": pd.Series([fake.file_path(depth=3) for _ in range(num_rows)]),
        "ipv4": pd.Series([fake.ipv4() for _ in range(num_rows)]),
        "ipv6": pd.Series([fake.ipv6() for _ in range(num_rows)]),
        "lat_longs": pd.Series([fake.latlng() for _ in range(num_rows)]),
    }
    return dtypes_dict
c9d9bef26d908b2e47d4bc2e013f0c29e328b2b3
23,801
def random_bitstring(n, p, failcount=0):
    """
    Constructs a random bitstring of length n with parity p

    Parameters
    ----------
    n : int
        Number of bits.

    p : int
        Parity.

    failcount : int, optional
        Internal use only.

    Returns
    -------
    numpy.ndarray
    """
    bitstring = _np.random.randint(0, 2, size=n)
    if _np.mod(sum(bitstring), 2) == p:
        return bitstring
    elif failcount < 100:
        return _np.array(random_bitstring(n, p, failcount + 1), dtype='int')
07637061e50bc1fe853aeb2eef19505ee1a6b612
23,802
import json


def serializer(message):
    """serializes the message as JSON"""
    return json.dumps(message).encode('utf-8')
7e8d9ae8e31653aad594a81e9f45170a915e291d
23,803
def send_mail(request, format=None):
    """
    Send mail to admin
    """
    # serialize request data
    serializer = MailSerializer(data=request.data)
    if serializer.is_valid():
        try:
            # create data for mail
            subject = settings.EMAIL_SUBJECT.format(
                first_name=request.data["first_name"],
                last_name=request.data["last_name"],
            )
            msg = request.data["message"]
            email_from = request.data["email_from"]
            # send mail
            EmailMessage(subject, msg, email_from, [settings.EMAIL_TO]).send()
            # save mail instance
            serializer.save(
                owner=request.user,
                email_to=settings.EMAIL_TO,
                host_ip=request.META["REMOTE_ADDR"],
            )
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        except Exception:
            pass
    return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
02ac2d0f7bf76fcea0afcff0a2a690cf72836e08
23,804
def correct_name(name):
    """
    Ensures that the name of an object used to create paths in the file system
    does not contain characters that would be handled erroneously (e.g. \ or /
    that normally separate file directories).

    Parameters
    ----------
    name : str
        Name of object (course, file, folder, etc.) to correct

    Returns
    -------
    corrected_name
        Corrected name
    """
    corrected_name = name.replace(" ", "_")
    corrected_name = corrected_name.replace("\\", "_")
    corrected_name = corrected_name.replace("/", "_")
    corrected_name = corrected_name.replace(":", "_")
    return corrected_name
b1df7a503324009a15f4f08e7641722d15a826b7
23,805
def runner(parallel, config):
    """Run functions, provided by string name, on multiple cores on the current machine.
    """
    def run_parallel(fn_name, items):
        items = [x for x in items if x is not None]
        if len(items) == 0:
            return []
        items = diagnostics.track_parallel(items, fn_name)
        logger.info("multiprocessing: %s" % fn_name)
        fn = get_fn(fn_name, parallel)
        if "wrapper" in parallel:
            wrap_parallel = {k: v for k, v in parallel.items() if k in set(["fresources"])}
            items = [[fn_name] + parallel.get("wrapper_args", []) + [wrap_parallel] + list(x)
                     for x in items]
        return run_multicore(fn, items, config, parallel=parallel)
    return run_parallel
66ba2c5cd57d4d2738d9dd3d57ca9d5daca2ec8d
23,806
def hsv_to_hsl(hsv):
    """
    HSV to HSL.

    https://en.wikipedia.org/wiki/HSL_and_HSV#Interconversion
    """
    h, s, v = hsv
    s /= 100.0
    v /= 100.0
    l = v * (1.0 - s / 2.0)

    return [
        HSV._constrain_hue(h),
        0.0 if (l == 0.0 or l == 1.0) else ((v - l) / min(l, 1.0 - l)) * 100,
        l * 100
    ]
4fee4508f6db770265ef46e928cfed6dee094892
23,807
import os


def is_ci() -> bool:
    """Returns if current execution is running on CI

    Returns:
        `True` if current execution is on CI
    """
    return os.getenv('CI', 'false') == 'true'
84e746a9adec77139c88c4042fd31b08f2c6098a
23,808
from .._mesh import Mesh


def reindex_faces(mesh, ordering):
    """
    Reorder the faces of the given mesh, returning a new mesh.

    Args:
        mesh (lacecore.Mesh): The mesh on which to operate.
        ordering (np.arraylike): An array specifying the order in which
            the original faces should be arranged.

    Returns:
        lacecore.Mesh: The reindexed mesh.
    """
    vg.shape.check(locals(), "ordering", (mesh.num_f,))
    unique_values = np.unique(ordering)
    if not np.array_equal(unique_values, np.arange(mesh.num_f)):
        raise ValueError(
            "Expected new face indices to be unique, and range from 0 to {}".format(
                mesh.num_f - 1
            )
        )
    return Mesh(
        v=mesh.v,
        f=mesh.f[ordering],
        face_groups=None
        if mesh.face_groups is None
        else mesh.face_groups.reindexed(ordering),
    )
254cf3a036fa92253b105f3acd93dfe26d33a61c
23,809
import re


def check_exact_match(line, expected_line):
    """
    Uses regular expressions to find an exact (not partial) match for 'expected_line' in 'line',
    i.e. in the example below it matches 'foo' and succeeds:
        line value: '66118.999958 - INFO - [MainThread] - ly_test_tools.o3de.asset_processor - foo'
        expected_line: 'foo'

    :param line: The log line string to search,
        i.e. '9189.9998188 - INFO - [MainThread] - example.tests.test_system_example - Log Monitoring test 1'
    :param expected_line: The exact string to match when searching the line param, i.e. 'Log Monitoring test 1'
    :return: An exact match for the string if one is found, None otherwise.
    """
    # Look for either start of line or whitespace, then the expected_line, then either end of the line or whitespace.
    # This way we don't partial match inside of a string. So for example, 'foo' matches 'foo bar' but not 'foobar'
    regex_pattern = re.compile("(^|\\s){}($|\\s)".format(re.escape(expected_line)), re.UNICODE)
    if regex_pattern.search(line) is not None:
        return expected_line

    return None
d01eaa13c40d66999e870d3b287ac869f64ae314
23,810
def rounding_filters(filters, w_multiplier):
    """ Calculate and round number of filters based on width multiplier. """
    if not w_multiplier:
        return filters
    divisor = 8
    filters *= w_multiplier
    new_filters = max(divisor, int(filters + divisor / 2) // divisor * divisor)
    if new_filters < 0.9 * filters:  # prevent rounding by more than 10%
        new_filters += divisor
    return int(new_filters)
eb2938732792564fd324602fd74be41e6f88b265
23,811
def process(request, service, identifier):
    """
    View that displays a detailed description for a WPS process.
    """
    wps = get_wps_service_engine(service)
    wps_process = wps.describeprocess(identifier)

    context = {'process': wps_process,
               'service': service,
               'is_link': abstract_is_link(wps_process)}

    return render(request, 'tethys_wps/process.html', context)
0d9f5a0cdf7c15470547ff82ff3534cf6f624960
23,812
def update(pipeline_id, name, description):
    """Submits a request to CARROT's pipelines update mapping"""
    # Create parameter list
    params = [
        ("name", name),
        ("description", description),
    ]
    return request_handler.update("pipelines", pipeline_id, params)
e808fb0fc313e8a5e51bb448d0aca68e389bfc30
23,813
from sympy import S


def symmetric_poly(n, *gens, **args):
    """Generates symmetric polynomial of order `n`. """
    gens = _analyze_gens(gens)

    if n < 0 or n > len(gens) or not gens:
        raise ValueError("can't generate symmetric polynomial of order %s for %s" % (n, gens))
    elif not n:
        poly = S.One
    else:
        poly = Add(*[Mul(*s) for s in subsets(gens, int(n))])

    if not args.get('polys', False):
        return poly
    else:
        return Poly(poly, *gens)
51177adf8873628669c1b75267c3c65821920044
23,814
from typing import Any


def load(name: str, *args, **kwargs) -> Any:
    """
    Loads the unit specified by `name`, initialized with the given arguments and keyword arguments.
    """
    entry = get_entry_point(name)
    return entry.assemble(*args, **kwargs)
e924dcebf082443fb9f36cd81302cb62ac53775d
23,815
from typing import List


def get_gate_names_2qubit() -> List[str]:
    """Return the list of valid gate names of 2-qubit gates."""
    names = []
    names.append("cx")
    names.append("cz")
    names.append("swap")
    names.append("zx90")
    names.append("zz90")

    return names
d3d7f20263805a186d9142ec087039eb53076346
23,816
def positive_leading_quat(quat):
    """Returns the positive leading version of the quaternion.

    This function supports inputs with or without leading batch dimensions.

    Args:
        quat: A quaternion [w, i, j, k].

    Returns:
        The equivalent quaternion [w, i, j, k] with w > 0.
    """
    # Ensure quat is an np.array in case a tuple or a list is passed
    quat = np.asarray(quat)
    quat = np.where(np.tile(quat[..., 0:1] < 0, quat.shape[-1]), -quat, quat)
    return quat
e0dce2e8fce42a15abdeeccc4bbf63c9e9241cf1
23,817
def comp_avg_silh_metric(data_input, cluster_indices, silh_max_samples, silh_distance): """ Given a input data matrix and an array of cluster indices, returns the average silhouette metric for that clustering result (computed across all clusters). Parameters ---------- data_input : ndarray Data to be clustered (each row contains a n-dimensional data sample) cluster_indices : list List containing for each data point (each row in data input) its cluster id silh_max_samples: int Maximum number of samples to compute the silhouette metric (higher for more exact values at higher computing costs) silh_distance: string Metric to use when calculating distance between instances e.g. 'euclidean', 'manhattan', 'cosine' Returns ------- avg_silhouette : float Silhouette metric averaged across all clusters """ # Sample data for computing the silhouette metric input_data_x_sample = None cluster_indices_sample = None for curr_cluster_id in set(cluster_indices): list_occurrences = [ i for i, x in enumerate(cluster_indices) if x == curr_cluster_id ] if input_data_x_sample is None: input_data_x_sample = data_input[list_occurrences[0:silh_max_samples]] else: input_data_x_sample = np.vstack( ( input_data_x_sample, data_input[list_occurrences[0:silh_max_samples]], ) ) if cluster_indices_sample is None: cluster_indices_sample = np.array(cluster_indices)[ list_occurrences[0:silh_max_samples] ] else: cluster_indices_sample = np.hstack( ( cluster_indices_sample, np.array(cluster_indices)[list_occurrences[0:silh_max_samples]], ) ) # Compute mean silhouette for each class and the average across all classes try: silh_array = metrics.silhouette_samples( input_data_x_sample, np.asarray(cluster_indices_sample), metric=silh_distance, ) np_silh_samples = np.column_stack((cluster_indices_sample, silh_array.tolist())) df_silh_samples = pd.DataFrame( data=np_silh_samples[0:, 0:], columns=["cluster_id", "silhouette"] ) df_silh_mean_per_class = df_silh_samples.groupby( ["cluster_id"] ).mean() # .sort_values(by='cluster_id') df_silh_mean_per_class.reset_index(level=0, inplace=True) df_silh_mean_per_class.sort_values(by="cluster_id") avg_silhouette = df_silh_mean_per_class["silhouette"].mean() except ValueError: avg_silhouette = np.nan return avg_silhouette
0d2cb42b1f0b354f4776c9c2064d23ed4b8f0b75
23,818
def prepare_url(base_url, path, url_params=None):
    """Prepare url from path and params"""
    if url_params is None:
        url_params = {}
    url = '{0}{1}'.format(base_url, path)
    if not url.endswith('/'):
        url += '/'
    url_params_str = urlencode(url_params)
    if url_params_str:
        url += '?' + url_params_str
    return url
0a447d9f340a4ea9c99b98ca1e6f778f907d8a3d
23,819
def extract_at_interval(da: xr.DataArray, interval) -> xr.DataArray:
    """Reduce size of an Error Grid by selecting data at a fixed interval
    along both the number of high- and low-fidelity samples.
    """
    return da.where(
        da.n_high.isin(da.n_high[slice(None, None, interval)])
        * da.n_low.isin(da.n_low[slice(None, None, interval)])
    )
cad3dce9850edbad9decadbc37e0372001b8ecc9
23,820
def compute_log_zT_var(log_rho_var, log_seebeck_sqr_var, log_kappa_var):
    """Compute the variance of the logarithmic thermoelectric figure of merit zT.
    """
    return log_rho_var + log_seebeck_sqr_var + log_kappa_var
3528181796aeafb3df5eac09b06852afe028cb13
23,821
def main(global_config, **settings):
    """ Very basic pyramid app """
    config = Configurator(settings=settings)

    config.include('pyramid_swagger')

    config.add_route(
        'sample_nonstring',
        '/sample/nonstring/{int_arg}/{float_arg}/{boolean_arg}',
    )
    config.add_route('standard', '/sample/{path_arg}/resource')
    config.add_route('get_with_non_string_query_args', '/get_with_non_string_query_args')
    config.add_route('post_with_primitive_body', '/post_with_primitive_body')
    config.add_route('post_with_form_params', '/post_with_form_params')
    config.add_route('post_with_file_upload', '/post_with_file_upload')
    config.add_route('sample_post', '/sample')
    config.include(include_samples, route_prefix='/sample')
    config.add_route('throw_400', '/throw_400')
    config.add_route('swagger_undefined', '/undefined/path')
    config.add_route('echo_date', '/echo_date')
    config.add_route('echo_date_json_renderer', '/echo_date_json_renderer')
    config.add_route('post_endpoint_with_optional_body', '/post_endpoint_with_optional_body')

    config.scan()
    return config.make_wsgi_app()
677187e63b6b885f5dc27850039a54b7510ed9cf
23,822
def get_name():
    """MUST HAVE FUNCTION! Returns plugin name."""
    return "ASP.NET MVC"
08a8b413ad1c86c270c79da245f0718aa22883a8
23,823
import os def tag_images( x_test, test_case_images, tag_list_file, image_source, clean=False ): """ Performs the tagging of abnormal images using RTEX@R :param test_case_ids: :param x_test: :param x_test: :param tag_list_file: :param clean: if True the prediction is performed in any case if False a dump file is loaded if it exists :return: a dict containing the abnormal image paths :param num: number of images that should be returned :param abnormal: whether to return abnormal cases or not """ rtex_t_model = load_model("data/models/rtex_t/iu_xray_tag_cxn.hdf5") dump_file_name = f"data/{image_source}_rtex_t_tags_pre_calc.txt" if not clean and os.path.isfile(dump_file_name): print("Using pre-stored RTEX@T results from dump file!") predicted_tags_file = open(dump_file_name, "r") test_tag_probs = np.loadtxt(predicted_tags_file) else: print("Performing RTEX@T predictions!") test_tag_probs = rtex_t_model.predict(x_test, batch_size=16, verbose=1) predicted_tags_file = open(dump_file_name, "w") np.savetxt(predicted_tags_file, test_tag_probs) tag_df = pd.read_csv(tag_list_file, header=None) tag_list = tag_df[0].to_list() best_threshold = 0.097 tagging_results = {} # for each exam, assign all tags above threshold for i in range(len(test_tag_probs)): predicted_tags = [] for j in range(len(tag_list)): if test_tag_probs[i, j] >= best_threshold: predicted_tags.append(tag_list[j]) tagging_results[list(test_case_images.keys())[i]] = ";".join(predicted_tags) results = tagging_results return results, tag_list, rtex_t_model
d8e3f0bc98c3b7fefb27b6d75cf650f0dddb3f47
23,824
def load_CIFAR(model_mode):
    """
    Loads CIFAR-100 or CIFAR-10 dataset and maps it to Target Model and Shadow Model.

    :param model_mode: one of "TargetModel" and "ShadowModel".
    :param num_classes: one of 10 and 100 and the default value is 100
    :return: Tuple of numpy arrays: '(x_train, y_train), (x_test, y_test), member'.
    :raise: ValueError: in case of invalid `model_mode`.
    """
    if model_mode not in ['TargetModel', 'ShadowModel']:
        raise ValueError('model_mode must be one of TargetModel, ShadowModel.')

    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar100.load_data(label_mode='fine')

    if model_mode == "TargetModel":
        (x_train, y_train), (x_test, y_test) = (x_train[40000:50000], y_train[40000:50000]), \
                                               (x_test, y_test)
    elif model_mode == "ShadowModel":
        (x_train, y_train), (x_test, y_test) = (x_train[:10000], y_train[:10000]), \
                                               (x_train[10000:20000], y_train[10000:20000])

    y_train = tf.keras.utils.to_categorical(y_train, num_classes=100)
    m_train = np.ones(y_train.shape[0])
    y_test = tf.keras.utils.to_categorical(y_test, num_classes=100)
    m_test = np.zeros(y_test.shape[0])
    member = np.r_[m_train, m_test]
    return (x_train, y_train), (x_test, y_test), member
d47d5546c26b1a776b84acb407782837170da43d
23,825
def _jbackslashreplace_error_handler(err):
    """ Encoding error handler which replaces invalid characters with Java-compliant Unicode escape sequences.

    :param err: An `:exc:UnicodeEncodeError` instance.
    :return: See https://docs.python.org/2/library/codecs.html?highlight=codecs#codecs.register_error
    """
    if not isinstance(err, UnicodeEncodeError):
        raise err

    return _escape_non_ascii(err.object[err.start:err.end]), err.end
2bec9e9563a7f4a4d206f630f7d8372fa7c56d89
23,826
import math import sys def calculate_spark_settings(instance_type, num_slaves, max_executor=192, memory_overhead_coefficient=0.15, num_partitions_factor=3): """ More info: http://c2fo.io/c2fo/spark/aws/emr/2016/07/06/apache-spark-config-cheatsheet/ """ all_instances = load_emr_instance() inst = all_instances[instance_type] executor_memory_upper_bound = 64 executor_core_upper_bound = 5 available_memory = inst["memory"] - 1 available_cpu = inst["cpu"] - 1 parallelism_per_core = 2 ret = [] for x in range(1, max_executor): total_memory_per_executor = math.floor(available_memory / x) unused_memory_per_node = available_memory - ( x * total_memory_per_executor) total_core_per_executor = available_cpu / x unused_cores_per_node = available_cpu - ( x * math.floor(total_core_per_executor)) overhead_mem = total_memory_per_executor * memory_overhead_coefficient mem_executer = total_memory_per_executor - overhead_mem cores_executer = math.floor(available_cpu / x) if (total_memory_per_executor < executor_memory_upper_bound and total_core_per_executor < executor_core_upper_bound): ret.append({ "executor_per_node": x, "overhead_mem": max(overhead_mem * 1024, 384), "unused_cores": unused_cores_per_node, "mem_executer": mem_executer, "cores_executer": cores_executer, "unused_mem": unused_memory_per_node, "sum": unused_cores_per_node + unused_memory_per_node }) val, idx = min( (val, idx) for (idx, val) in enumerate([x["sum"] for x in ret])) # plot(max_executor, ret) opt = ret[idx] executer_instances = (opt["executor_per_node"] * num_slaves) - 1 parallelism = ( executer_instances * opt["cores_executer"] * parallelism_per_core) num_partitions = int(opt["cores_executer"] * executer_instances * num_partitions_factor) print("Optimal numPartitions: %s " % num_partitions, file=sys.stderr) ret = { "spark.executor.instances": str(executer_instances), "spark.executor.memoryOverhead": "%sm" % int(opt["overhead_mem"]), "spark.executor.memory": "%sm" % int(opt["mem_executer"] * 1024), "spark.driver.memoryOverhead": "%sm" % int(opt["overhead_mem"]), "spark.driver.memory": "%sm" % int(opt["mem_executer"] * 1024), "spark.driver.maxResultSize": "%sm" % int(opt["mem_executer"] * 1024), "spark.executor.cores": str(int(opt["cores_executer"])), "spark.driver.cores": str(int(opt["cores_executer"])), "spark.default.parallelism": str(int(parallelism)), } return ret, opt
618c038c969ce3d414063d07b6b6e11c403306dc
23,827
from pathlib import Path def run_on_host(con_info, command): """ Runs a command on a target pool of host defined in a hosts.yaml file. """ # Paramiko client configuration paramiko.util.log_to_file(base + "prt_paramiko.log") UseGSSAPI = (paramiko.GSS_AUTH_AVAILABLE) DoGSSAPIKeyExchange = (paramiko.GSS_AUTH_AVAILABLE) try: client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) client.load_system_host_keys() if not UseGSSAPI and not DoGSSAPIKeyExchange: client.connect( con_info[1]['IP'], port=con_info[1]['PORT'], username=con_info[1]['USER'], key_filename=str(base + 'prt_rsa.key') ) else: client.connect( con_info[1]['IP'], port=con_info[1]['PORT'], username=con_info['USER'], key_filename=str(Path.home()) + 'prt_rsa.key', gss_auth=UseGSSAPI, gss_kex=DoGSSAPIKeyExchange, ) con_status = str('Connection Succeeded') stdin, stdout, stderr = client.exec_command(command) results_dict = { 'name': con_info[0], 'uname': con_info[1]['NAME'], 'status': con_status, 'stdout': [x.replace('\n', '') for x in stdout.readlines()], 'stderr': [x.replace('\n', '') for x in stderr.readlines()] } client.close() except Exception as error: con_status = str("Connection Failed : PRT Caught exception(%s: %s" % (error.__class__, error) + ')') results_dict = { 'name': con_info[0], 'uname': con_info[1]['NAME'], 'status': con_status, 'stdout': [], 'stderr': [] } try: client.close() except Exception: pass return results_dict
e3747daa1ea6e68ae4900bd596458bf756017c69
23,828
import colorsys
import hashlib


def uniqueColor(string):
    """
    Returns a color from the string.
    Same strings will return same colors, different strings will return different colors ('randomly' different)
    Internal:
        string =md5(x)=> hex =x/maxhex=> float [0-1] =hsv_to_rgb(x,1,1)=> rgb =rgb_to_int=> int

    :param string: input string
    :return: int color
    """
    return sum(
        round(c * 255) << d
        for c, d in zip(
            colorsys.hsv_to_rgb(int(hashlib.md5(string.encode('utf-8')).hexdigest(), 16) / 2 ** 128, 1, 1),
            [16, 8, 0],
        )
    )
0c895612c3bf2dd5f594a15daf6f2aa5d778eeb0
23,829
def _quoteattr(data, entities={}):
    """
    Escape and quote an attribute value.

    Escape &, <, and > in a string of data, then quote it for use as
    an attribute value. The \" character will be escaped as well, if
    necessary.

    You can escape other strings of data by passing a dictionary as
    the optional entities parameter. The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    entities['\n'] = '&#10;'
    entities['\r'] = '&#12;'
    data = _escape(data, entities)
    if '"' in data:
        if "'" in data:
            data = '"%s"' % data.replace('"', "&quot;")
        else:
            data = "'%s'" % data
    else:
        data = '"%s"' % data
    return data
1f03a09e19d349458ec48b6041159e48ef93d97e
23,830
import http import urllib def external_login_confirm_email_get(auth, uid, token): """ View for email confirmation links when user first login through external identity provider. HTTP Method: GET When users click the confirm link, they are expected not to be logged in. If not, they will be logged out first and redirected back to this view. After OSF verifies the link and performs all actions, they will be automatically logged in through CAS and redirected back to this view again being authenticated. :param auth: the auth context :param uid: the user's primary key :param token: the verification token """ user = User.load(uid) if not user: raise HTTPError(http.BAD_REQUEST) destination = request.args.get('destination') if not destination: raise HTTPError(http.BAD_REQUEST) # if user is already logged in if auth and auth.user: # if it is a wrong user if auth.user._id != user._id: return auth_logout(redirect_url=request.url) # if it is the expected user new = request.args.get('new', None) if destination in campaigns.get_campaigns(): # external domain takes priority campaign_url = campaigns.external_campaign_url_for(destination) if not campaign_url: campaign_url = campaigns.campaign_url_for(destination) return redirect(campaign_url) if new: status.push_status_message(language.WELCOME_MESSAGE, kind='default', jumbotron=True, trust=True) return redirect(web_url_for('dashboard')) # token is invalid if token not in user.email_verifications: raise HTTPError(http.BAD_REQUEST) verification = user.email_verifications[token] email = verification['email'] provider = verification['external_identity'].keys()[0] provider_id = verification['external_identity'][provider].keys()[0] # wrong provider if provider not in user.external_identity: raise HTTPError(http.BAD_REQUEST) external_status = user.external_identity[provider][provider_id] try: ensure_external_identity_uniqueness(provider, provider_id, user) except ValidationError as e: raise HTTPError(http.FORBIDDEN, e.message) if not user.is_registered: user.register(email) if email.lower() not in user.emails: user.emails.append(email.lower()) user.date_last_logged_in = timezone.now() user.external_identity[provider][provider_id] = 'VERIFIED' user.social[provider.lower()] = provider_id del user.email_verifications[token] user.verification_key = generate_verification_key() user.save() service_url = request.url if external_status == 'CREATE': mails.send_mail( to_addr=user.username, mail=mails.WELCOME, mimetype='html', user=user ) service_url += '&{}'.format(urllib.urlencode({'new': 'true'})) elif external_status == 'LINK': mails.send_mail( user=user, to_addr=user.username, mail=mails.EXTERNAL_LOGIN_LINK_SUCCESS, external_id_provider=provider, ) # redirect to CAS and authenticate the user with the verification key return redirect(cas.get_login_url( service_url, username=user.username, verification_key=user.verification_key ))
18a92d289e63224b245e4e958efd6d5924495ce1
23,831
def date_since_epoch(date, unit='day'):
    """ Get the date for the specified date in unit

    :param date: the date in the specified unit
    :type date: int
    :param unit: one of 'year', 'month' 'week', 'day', 'hour', 'minute', or 'second'
    :return: the corresponding date
    :rtype: ee.Date
    """
    epoch = ee.Date(EE_EPOCH.isoformat())
    return epoch.advance(date, unit)
f787170869ba081a2d321d0198d27948dc44ffa6
23,832
def eval_assoc(param_list, meta):
    """
    Evaluate the association score between a given text and a list of categories or statements.

    Param 1 - string, the text in question
    Param 2 - list of strings, the list of categories to associate Param 1 to
    """
    data = {
        'op': 'eval_assoc',
        'text': param_list[0],
        'cats': param_list[1]
    }
    return BART_API.post(data)['sorted_associations']
f49d5080d0b5f6a526be11b487bbbf17782d7197
23,833
def quiver3d(*args, **kwargs):
    """Wraps `mayavi.mlab.quiver3d`

    Args:
        *args: passed to `mayavi.mlab.quiver3d`
        **kwargs: Other Arguments are popped, then kwargs is passed to `mayavi.mlab.quiver3d`

    Keyword Arguments:
        cmap (str, None, False): see :py:func:`apply_cmap`
        alpha (number, sequence): see :py:func:`apply_cmap`
        clim (sequence): see :py:func:`apply_cmap`
        symmetric (bool): see :py:func:`apply_cmap`
        logscale (bool): see :py:func:`apply_cmap`

    Returns:
        TYPE: Description
    """
    kwargs, cmap_kwargs = _extract_cmap_kwargs(kwargs)
    quivers = mlab.quiver3d(*args, **kwargs)
    apply_cmap(quivers, mode='scalar', **cmap_kwargs)
    apply_cmap(quivers, mode='vector', **cmap_kwargs)
    return quivers
8cfd494f0b801490372d94f7ab842c0b5cd19099
23,834
def get_instance_types(self):
    """
    Documentation:

        ---
        Description:
            Return a sorted list of the EC2 instance types available to the client.
    """
    instance_types = sorted(
        [instance_type["InstanceType"]
         for instance_type in self.ec2_client.describe_instance_types()["InstanceTypes"]]
    )
    return instance_types
583311de8b2f23a967e40c8be5d140f6ab28244c
23,835
def ret_digraph_points(sed, digraph):
    """Finds the digraph points of the subject extracted data.

    Parameters
    ----------
    `sed` (object) "_subject","_track_code", "data": [{"digraph","points"}]

    Returns
    ---------
    (list) The points of the particular digraph found
    """
    ret = [d['points'] for d in sed['data'] if d['digraph'] == digraph]
    if ret == []:
        pynocomm.send_to_node(
            '**Warning: No digraph points found in ret_digraph_points, digraph:' + digraph)
        _foo = 1
    else:
        ret = ret[0]
    return ret
853dac3afdb542dbc2878340c8d75b8b4544c531
23,836
def _sizeof_fmt(num):
    """Format byte size to human-readable format.

    https://web.archive.org/web/20111010015624/http://blogmag.net/blog/read/38/Print_human_readable_file_size

    Args:
        num (float): Number of bytes
    """
    for x in ["bytes", "KB", "MB", "GB", "TB", "PB"]:
        if num < 1024.0:
            return f"{num:3.1f} {x}"
        num /= 1024.0
97c700954248a455592b3da9b274bfda69a7370f
23,837
from .. import sim from typing import Dict def gatherData(gatherLFP = True): """ Function for/to <short description of `netpyne.sim.gather.gatherData`> Parameters ---------- gatherLFP : bool <Short description of gatherLFP> **Default:** ``True`` **Options:** ``<option>`` <description of option> """ sim.timing('start', 'gatherTime') ## Pack data from all hosts if sim.rank==0: print('\nGathering data...') # flag to avoid saving sections data for each cell (saves gather time and space; cannot inspect cell secs or re-simulate) if not sim.cfg.saveCellSecs: for cell in sim.net.cells: cell.secs = None cell.secLists = None # flag to avoid saving conns data for each cell (saves gather time and space; cannot inspect cell conns or re-simulate) if not sim.cfg.saveCellConns: for cell in sim.net.cells: cell.conns = [] # Store conns in a compact list format instead of a long dict format (cfg.compactConnFormat contains list of keys to include) elif sim.cfg.compactConnFormat: sim.compactConnFormat() # remove data structures used to calculate LFP if gatherLFP and sim.cfg.recordLFP and hasattr(sim.net, 'compartCells') and sim.cfg.createNEURONObj: for cell in sim.net.compartCells: try: del cell.imembVec del cell.imembPtr del cell._segCoords except: pass for pop in list(sim.net.pops.values()): try: del pop._morphSegCoords except: pass simDataVecs = ['spkt', 'spkid', 'stims', 'dipole'] + list(sim.cfg.recordTraces.keys()) if sim.cfg.recordDipoles: _aggregateDipoles() simDataVecs.append('dipole') singleNodeVecs = ['t'] if sim.nhosts > 1: # only gather if >1 nodes netPopsCellGids = {popLabel: list(pop.cellGids) for popLabel,pop in sim.net.pops.items()} # gather only sim data if getattr(sim.cfg, 'gatherOnlySimData', False): nodeData = {'simData': sim.simData} data = [None]*sim.nhosts data[0] = {} for k,v in nodeData.items(): data[0][k] = v gather = sim.pc.py_alltoall(data) sim.pc.barrier() if sim.rank == 0: # simData print(' Gathering only sim data...') sim.allSimData = Dict() for k in list(gather[0]['simData'].keys()): # initialize all keys of allSimData dict if gatherLFP and k == 'LFP': sim.allSimData[k] = np.zeros((gather[0]['simData']['LFP'].shape)) elif sim.cfg.recordDipoles and k == 'dipole': for dk in sim.cfg.recordDipoles: sim.allSimData[k][dk] = np.zeros(len(gather[0]['simData']['dipole'][dk])) else: sim.allSimData[k] = {} for key in singleNodeVecs: # store single node vectors (eg. 't') sim.allSimData[key] = list(nodeData['simData'][key]) # fill in allSimData taking into account if data is dict of h.Vector (code needs improvement to be more generic) for node in gather: # concatenate data from each node for key,val in node['simData'].items(): # update simData dics of dics of h.Vector if key in simDataVecs: # simData dicts that contain Vectors if isinstance(val, dict): for key2,val2 in val.items(): if isinstance(val2,dict): sim.allSimData[key].update(Dict({key2:Dict()})) for stim,val3 in val2.items(): sim.allSimData[key][key2].update({stim:list(val3)}) # udpate simData dicts which are dicts of dicts of Vectors (eg. ['stim']['cell_1']['backgrounsd']=h.Vector) #elif key == 'dipole': # sim.allSimData[key][key2] = np.add(sim.allSimData[key][key2],val2.as_numpy()) # add together dipole values from each node else: sim.allSimData[key].update({key2:list(val2)}) # udpate simData dicts which are dicts of Vectors (eg. 
['v']['cell_1']=h.Vector) else: sim.allSimData[key] = list(sim.allSimData[key])+list(val) # udpate simData dicts which are Vectors elif gatherLFP and key == 'LFP': sim.allSimData[key] += np.array(val) elif key not in singleNodeVecs: sim.allSimData[key].update(val) # update simData dicts which are not Vectors if len(sim.allSimData['spkt']) > 0: sim.allSimData['spkt'], sim.allSimData['spkid'] = zip(*sorted(zip(sim.allSimData['spkt'], sim.allSimData['spkid']))) # sort spks sim.allSimData['spkt'], sim.allSimData['spkid'] = list(sim.allSimData['spkt']), list(sim.allSimData['spkid']) sim.net.allPops = ODict() # pops for popLabel,pop in sim.net.pops.items(): sim.net.allPops[popLabel] = pop.__getstate__() # can't use dict comprehension for OrderedDict sim.net.allCells = [c.__dict__ for c in sim.net.cells] # gather cells, pops and sim data else: nodeData = {'netCells': [c.__getstate__() for c in sim.net.cells], 'netPopsCellGids': netPopsCellGids, 'simData': sim.simData} data = [None]*sim.nhosts data[0] = {} for k,v in nodeData.items(): data[0][k] = v #print data gather = sim.pc.py_alltoall(data) sim.pc.barrier() if sim.rank == 0: allCells = [] allPops = ODict() for popLabel,pop in sim.net.pops.items(): allPops[popLabel] = pop.__getstate__() # can't use dict comprehension for OrderedDict allPopsCellGids = {popLabel: [] for popLabel in netPopsCellGids} sim.allSimData = Dict() for k in list(gather[0]['simData'].keys()): # initialize all keys of allSimData dict if gatherLFP and k == 'LFP': sim.allSimData[k] = np.zeros((gather[0]['simData']['LFP'].shape)) elif sim.cfg.recordDipoles and k == 'dipole': for dk in sim.cfg.recordDipoles: sim.allSimData[k][dk] = np.zeros(len(gather[0]['simData']['dipole'][dk])) else: sim.allSimData[k] = {} for key in singleNodeVecs: # store single node vectors (eg. 't') sim.allSimData[key] = list(nodeData['simData'][key]) # fill in allSimData taking into account if data is dict of h.Vector (code needs improvement to be more generic) for node in gather: # concatenate data from each node allCells.extend(node['netCells']) # extend allCells list for popLabel,popCellGids in node['netPopsCellGids'].items(): allPopsCellGids[popLabel].extend(popCellGids) for key,val in node['simData'].items(): # update simData dics of dics of h.Vector if key in simDataVecs: # simData dicts that contain Vectors if isinstance(val,dict): for key2,val2 in val.items(): if isinstance(val2,dict): sim.allSimData[key].update(Dict({key2:Dict()})) for stim,val3 in val2.items(): sim.allSimData[key][key2].update({stim:list(val3)}) # udpate simData dicts which are dicts of dicts of Vectors (eg. ['stim']['cell_1']['backgrounsd']=h.Vector) #elif key == 'dipole': # sim.allSimData[key][key2] = np.add(sim.allSimData[key][key2],val2.as_numpy()) # add together dipole values from each node else: sim.allSimData[key].update({key2:list(val2)}) # udpate simData dicts which are dicts of Vectors (eg. 
['v']['cell_1']=h.Vector) else: sim.allSimData[key] = list(sim.allSimData[key])+list(val) # udpate simData dicts which are Vectors elif gatherLFP and key == 'LFP': sim.allSimData[key] += np.array(val) elif key not in singleNodeVecs: sim.allSimData[key].update(val) # update simData dicts which are not Vectors if len(sim.allSimData['spkt']) > 0: sim.allSimData['spkt'], sim.allSimData['spkid'] = zip(*sorted(zip(sim.allSimData['spkt'], sim.allSimData['spkid']))) # sort spks sim.allSimData['spkt'], sim.allSimData['spkid'] = list(sim.allSimData['spkt']), list(sim.allSimData['spkid']) sim.net.allCells = sorted(allCells, key=lambda k: k['gid']) for popLabel,pop in allPops.items(): pop['cellGids'] = sorted(allPopsCellGids[popLabel]) sim.net.allPops = allPops # clean to avoid mem leaks for node in gather: if node: node.clear() del node for item in data: if item: item.clear() del item else: # if single node, save data in same format as for multiple nodes for consistency if sim.cfg.createNEURONObj: sim.net.allCells = [Dict(c.__getstate__()) for c in sim.net.cells] else: sim.net.allCells = [c.__dict__ for c in sim.net.cells] sim.net.allPops = ODict() for popLabel,pop in sim.net.pops.items(): sim.net.allPops[popLabel] = pop.__getstate__() # can't use dict comprehension for OrderedDict sim.allSimData = Dict() for k in list(sim.simData.keys()): # initialize all keys of allSimData dict sim.allSimData[k] = Dict() for key,val in sim.simData.items(): # update simData dics of dics of h.Vector if key in simDataVecs+singleNodeVecs: # simData dicts that contain Vectors if isinstance(val,dict): for cell,val2 in val.items(): if isinstance(val2,dict): sim.allSimData[key].update(Dict({cell:Dict()})) for stim,val3 in val2.items(): sim.allSimData[key][cell].update({stim:list(val3)}) # udpate simData dicts which are dicts of dicts of Vectors (eg. ['stim']['cell_1']['backgrounsd']=h.Vector) else: sim.allSimData[key].update({cell:list(val2)}) # udpate simData dicts which are dicts of Vectors (eg. ['v']['cell_1']=h.Vector) else: sim.allSimData[key] = list(sim.allSimData[key])+list(val) # udpate simData dicts which are Vectors else: sim.allSimData[key] = val # update simData dicts which are not Vectors ## Print statistics sim.pc.barrier() if sim.rank == 0: sim.timing('stop', 'gatherTime') if sim.cfg.timing: print((' Done; gather time = %0.2f s.' 
% sim.timingData['gatherTime'])) print('\nAnalyzing...') sim.totalSpikes = len(sim.allSimData['spkt']) sim.totalSynapses = sum([len(cell['conns']) for cell in sim.net.allCells]) if sim.cfg.createPyStruct: if sim.cfg.compactConnFormat: preGidIndex = sim.cfg.compactConnFormat.index('preGid') if 'preGid' in sim.cfg.compactConnFormat else 0 sim.totalConnections = sum([len(set([conn[preGidIndex] for conn in cell['conns']])) for cell in sim.net.allCells]) else: sim.totalConnections = sum([len(set([conn['preGid'] for conn in cell['conns']])) for cell in sim.net.allCells]) else: sim.totalConnections = sim.totalSynapses sim.numCells = len(sim.net.allCells) if sim.totalSpikes > 0: sim.firingRate = float(sim.totalSpikes)/sim.numCells/sim.cfg.duration*1e3 # Calculate firing rate else: sim.firingRate = 0 if sim.numCells > 0: sim.connsPerCell = sim.totalConnections/float(sim.numCells) # Calculate the number of connections per cell sim.synsPerCell = sim.totalSynapses/float(sim.numCells) # Calculate the number of connections per cell else: sim.connsPerCell = 0 sim.synsPerCell = 0 print((' Cells: %i' % (sim.numCells) )) print((' Connections: %i (%0.2f per cell)' % (sim.totalConnections, sim.connsPerCell))) if sim.totalSynapses != sim.totalConnections: print((' Synaptic contacts: %i (%0.2f per cell)' % (sim.totalSynapses, sim.synsPerCell))) if 'runTime' in sim.timingData: print((' Spikes: %i (%0.2f Hz)' % (sim.totalSpikes, sim.firingRate))) print((' Simulated time: %0.1f s; %i workers' % (sim.cfg.duration/1e3, sim.nhosts))) print((' Run time: %0.2f s' % (sim.timingData['runTime']))) if sim.cfg.printPopAvgRates and not sim.cfg.gatherOnlySimData: trange = sim.cfg.printPopAvgRates if isinstance(sim.cfg.printPopAvgRates, list) else None sim.allSimData['popRates'] = sim.analysis.popAvgRates(tranges=trange) if 'plotfI' in sim.cfg.analysis: sim.analysis.calculatefI() # need to call here so data is saved to file sim.allSimData['avgRate'] = sim.firingRate # save firing rate return sim.allSimData
a40b61088aaedbb8f866014f933671b2264a2031
23,838
def stations_by_distance(stations, p):
    """For a list of stations (MonitoringStation object) and coordinate p (latitude, longitude),
    returns list of tuples (station, distance) sorted by the distance from the given coordinate p"""

    # Create the list of (stations, distance) tuples
    station_dist = []

    # Append data to the list
    for station in stations:
        station_dist.append((station, haversine(p, station.coord)))

    # Return station_dist list sorted by the distance from p
    return sorted_by_key(station_dist, 1)
098e692c2ec18b7c15cebf84043eaca768566075
23,839
from typing import TextIO
from typing import Optional
from typing import Dict
import csv


def process(fh: TextIO, headers: Optional[Dict[str, str]], writer: csv.DictWriter, args: Args) -> int:
    """
    Process the file into Mongo (client)

    First 5 columns are: STREAM, DATE, STATION, REP, #GRIDS
    Columns after that are the measurements
    """
    reader = csv.DictReader(fh, delimiter=',')
    flds = reader.fieldnames
    values = defaultdict(list)  # to average replicates

    # Parse file into values for each variable, station, and date
    for i, row in enumerate(reader, start=1):
        # Base record has station/date
        station = get_station(row.get('STATION', ''))
        date = get_date(row.get('DATE', ''))

        if not all([date, station]):
            continue

        for fld in filter(lambda f: f != '', flds[5:]):
            raw_val = row[fld].strip()
            if raw_val == '':
                continue

            # Remove leading "="?
            if raw_val.startswith('='):
                raw_val = raw_val[1:]

            # Try to convert value to float
            val = None
            try:
                val = float(raw_val)
            except Exception:
                continue

            if val is not None:
                values[(fld, station, date)].append(val)

    # Write the averages for each variable, station, and date
    num_written = 0
    for key, replicates in values.items():
        fld, station, date = key

        # Maybe convert "ACENTR" -> "Ephemeroptera Baetidae Acentrella spp."
        variable = headers.get(fld.upper(), fld) if headers else fld

        # Take the average of the values
        val = mean(replicates)

        print(f'{fld} {station} {date} => {val}')

        writer.writerow({
            'source': args.source,
            'unit': '',
            'location_name': station,
            'location_type': 'station',
            'variable_name': fld,
            'variable_desc': variable,
            'collected_on': date,
            'value': val,
            'medium': args.medium
        })
        num_written += 1

    return num_written
40a68091d65e1f9a56ca150703aaaa207ef438a2
23,840
def dipole_moment_programs():
    """ Constructs a list of program modules implementing
        static dipole moment output readers.
    """
    return pm.program_modules_with_function(pm.Job.DIP_MOM)
a225997a445451411819ebfb8c7bf14629ee3742
23,841
def kappa(a, b, c, d):
    """
                       GO term 2
                  |  yes  |  no   |
        -------------------------------
        GO    yes |   a   |   b   |
        term1  no |   c   |   d   |

        kappa(GO_1, GO_2) = 1 - (1 - po) / (1 - pe)

        po = (a + d) / (a + b + c + d)

        marginal_a = ( (a + b) * ( a + c )) / (a + b + c + d)
        marginal_b = ( (c + d) * ( b + d )) / (a + b + c + d)
        pe = (marginal_a + marginal_b) / (a + b + c + d)
    """
    a = float(len(a))
    b = float(len(b))
    c = float(len(c))
    d = float(len(d))

    po = (a + d) / (a + b + c + d)
    marginal_a = ((a + b) * (a + c)) / (a + b + c + d)
    marginal_b = ((c + d) * (b + d)) / (a + b + c + d)
    pe = (marginal_a + marginal_b) / (a + b + c + d)

    # print(f" {a} | {b}\n {c} | {d}")
    return 1 - (1 - po) / (1 - pe)
5884a6745f6a93b044eabb1bfe38834cb59366d4
23,842
import functools import torch def test_CreativeProject_integration_ask_tell_ask_works(covars, model_type, train_X, train_Y, covars_proposed_iter, covars_sampled_iter, response_sampled_iter, monkeypatch): """ test that both surrogate model and acquisition functions are added and updated following two rounds of ask-tell. Monkeypatch "_read_covars_manual_input" and "_read_response_manual_input" from ._observe.py to circumvent manual input via builtins.input. This automatically tests the new functionality of random start by starting from no data (train_X, train_Y) """ # initialize the class # random_start = True is default, so this tests random start cc = TuneSession(covars=covars, model=model_type) # set attributes on class (to simulate previous iterations of ask/tell functionality) cc.train_X = train_X cc.proposed_X = train_X cc.train_Y = train_Y cc.model["covars_proposed_iter"] = covars_proposed_iter cc.model["covars_sampled_iter"] = covars_sampled_iter cc.model["response_sampled_iter"] = response_sampled_iter # define decorator to add 1.0 to all entries in monkeypatched returned data. This to be able to tell that the last # entry (from second "tell") is different than the first, and know that it has been overwritten def add_one(func): @functools.wraps(func) def wrapper_add_one(*args, **kwargs): wrapper_add_one.num_calls += 1 output = func(*args, **kwargs) return output + wrapper_add_one.num_calls wrapper_add_one.num_calls = 0 return wrapper_add_one # monkeypatch "_read_covars_manual_input" candidate_tensor = torch.tensor([[tmp[0] for tmp in covars]], dtype=torch.double) @add_one def mock_read_covars_manual_input(additional_text): return candidate_tensor monkeypatch.setattr(cc, "_read_covars_manual_input", mock_read_covars_manual_input) # monkeypatch "_read_response_manual_input" resp_tensor = torch.tensor([[12]], dtype=torch.double) @add_one def mock_read_response_manual_input(additional_text): return resp_tensor monkeypatch.setattr(cc, "_read_response_manual_input", mock_read_response_manual_input) # run the ask method cc.ask() # run the tell method cc.tell() # test that data is added to pretty formats assert cc.x_data.shape[0] == 1 for i in range(candidate_tensor.size()[1]): col = cc.x_data.columns[i] assert cc.x_data[col].iloc[-1] == candidate_tensor[0, i].item() + 1 assert cc.y_data.shape[0] == 1 assert cc.y_data["Response"].iloc[-1] == resp_tensor[0, 0].item() + 1 # grab the model state surrogate_model1 = cc.model["model"] # run the ask method AGAIN cc.ask() # grab the acquisition function acq_func1 = cc.acq_func["object"] # run the tell method AGAIN cc.tell() # test that new rows are added to pretty format data print(candidate_tensor) print(cc.x_data) assert cc.x_data.shape[0] == 2 for i in range(candidate_tensor.size()[1]): col = cc.x_data.columns[i] assert cc.x_data[col].iloc[-1] == candidate_tensor[0, i].item() + 2 assert cc.y_data.shape[0] == 2 assert cc.y_data["Response"].iloc[-1] == resp_tensor[0, 0].item() + 2 # grab the model state surrogate_model2 = cc.model["model"] # run the ask method a THIRD TIME cc.ask() # grab the acquisition function acq_func2 = cc.acq_func["object"] # assert that both model and acquisition functions exist assert cc.model["model"] is not None assert cc.acq_func["object"] is not None # assert that surrogate model has updated assert surrogate_model1 != surrogate_model2 # assert that acquisition function has updated assert acq_func1 != acq_func2
98d665e85b19acf956026848614d9b49e204afb8
23,843
def ferret_init(id):
    """
    Initialization for the stats_chisquare Ferret PyEF
    """
    axes_values = [ pyferret.AXIS_DOES_NOT_EXIST ] * pyferret.MAX_FERRET_NDIM
    axes_values[0] = pyferret.AXIS_CUSTOM
    false_influences = [ False ] * pyferret.MAX_FERRET_NDIM
    retdict = {
        "numargs": 3,
        "descript": "Returns chi-square test stat. and prob. (and num. good categories, N) " \
                    "that sample counts of cat. data matches pop. expected counts. ",
        "axes": axes_values,
        "argnames": ( "SAMPLE_CNTS",
                      "EXPECT_CNTS",
                      "DELTA_DEGFREE", ),
        "argdescripts": ( "Sample counts of categorical data",
                          "Expected counts or relative frequencies (will be adjusted)",
                          "Difference from standard (N-1) degrees of freedom (num. computed parameters)", ),
        "argtypes": ( pyferret.FLOAT_ARRAY,
                      pyferret.FLOAT_ARRAY,
                      pyferret.FLOAT_ONEVAL, ),
        "influences": ( false_influences,
                        false_influences,
                        false_influences, ),
    }
    return retdict
5231fec470d8c968334d6f72766cc1d0ad9ae61c
23,844
def has(key):
    """Checks if the current context contains the given key."""
    return not not (key in Context.currentContext.values)
0c6c46812e97c9d38d101dcc06346b329a3cd81a
23,845
def VGG_16(weights_path=None): """ Creates a convolutional keras neural network, training it with data from ct scans from both datasets. Using the VGG-16 architecture. ---- Returns the model """ X_train, Y_train = loadfromh5(1, 2, 19) X_train1, Y_train1 = loadfromh5(2, 2, 19) X_train.extend(X_train1) Y_train.extend(Y_train1) X_train = np.asarray(X_train).reshape(np.asarray(X_train).shape[0], 64, 64, 1) # X_train = np.transpose(X_train, (0,3,1,2)) print(X_train.shape) model = Sequential() model.add(ZeroPadding2D((1, 1), input_shape=(64, 64, 1))) model.add(Convolution2D(64, 3, 3, activation="relu")) model.add(ZeroPadding2D((1, 1))) model.add(Convolution2D(64, 3, 3, activation="relu")) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(ZeroPadding2D((1, 1))) model.add(Convolution2D(128, 3, 3, activation="relu")) model.add(ZeroPadding2D((1, 1))) model.add(Convolution2D(128, 3, 3, activation="relu")) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(ZeroPadding2D((1, 1))) model.add(Convolution2D(256, 3, 3, activation="relu")) model.add(ZeroPadding2D((1, 1))) model.add(Convolution2D(256, 3, 3, activation="relu")) model.add(ZeroPadding2D((1, 1))) model.add(Convolution2D(256, 3, 3, activation="relu")) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(ZeroPadding2D((1, 1))) model.add(Convolution2D(512, 3, 3, activation="relu")) model.add(ZeroPadding2D((1, 1))) model.add(Convolution2D(512, 3, 3, activation="relu")) model.add(ZeroPadding2D((1, 1))) model.add(Convolution2D(512, 3, 3, activation="relu")) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(ZeroPadding2D((1, 1))) model.add(Convolution2D(512, 3, 3, activation="relu")) model.add(ZeroPadding2D((1, 1))) model.add(Convolution2D(512, 3, 3, activation="relu")) model.add(ZeroPadding2D((1, 1))) model.add(Convolution2D(512, 3, 3, activation="relu")) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Flatten()) model.add(Dense(4096, activation="relu")) model.add(Dropout(0.5)) model.add(Dense(4096, activation="relu")) model.add(Dropout(0.5)) model.add(Dense(1)) model.compile(loss="mean_squared_error", optimizer="sgd", metrics=["mse"]) K.set_value(model.optimizer.learning_rate, 0.001) model.fit(X_train, Y_train, batch_size=32, epochs=10, verbose=1) if weights_path: model.load_weights(weights_path) return model
640aa7480afac0c8c4b71f3045f702888672172f
23,846
from scipy import optimize


def inv_fap_davies(p, fmax, t, y, dy, normalization='standard'):
    """Inverse of the davies upper-bound"""
    args = (fmax, t, y, dy, normalization)
    z0 = inv_fap_naive(p, *args)
    func = lambda z, *args: fap_davies(z, *args) - p
    res = optimize.root(func, z0, args=args, method='lm')
    if not res.success:
        raise ValueError('inv_fap_baluev did not converge for p={0}'.format(p))
    return res.x
9b6f8a82ca25235785d5fe83a5c671130ddd509b
23,847
def public(request):
    """browse public repos. Login not required"""
    username = request.user.get_username()

    public_repos = DataHubManager.list_public_repos()

    # This should really go through the api... like everything else
    # in this file.
    public_repos = serializers.serialize('json', public_repos)

    return render_to_response("public-browse.html", {
        'login': username,
        'repo_base': 'repo_base',
        'repos': [],
        'public_repos': public_repos})
0d44053c6db872032b65b4786c5771dbecad946a
23,848
def get_boundaries_old(im,su=5,sl=5,valley=5,cutoff_max=1.,plt_val=False): """Bintu et al 2018 candidate boundary calling""" im_=np.array(im) ratio,ration,center,centern=[],[],[],[] for i in range(len(im)): x_im_l,y_im_l = [],[] x_im_r,y_im_r = [],[] xn_im_l,yn_im_l = [],[] xn_im_r,yn_im_r = [],[] for j in range(sl): xn_im_l.extend(i+j-np.arange(su)-1) yn_im_l.extend([i+j]*su) xn_im_r.extend(i+j+sl-np.arange(su)-1) yn_im_r.extend([i+j+sl]*su) x_im_l.extend(i+j+np.arange(su)+1) y_im_l.extend([i+j]*su) x_im_r.extend(i+j+sl+np.arange(su)+1) y_im_r.extend([i+j+sl]*su) x_im_l,y_im_l,x_im_r,y_im_r = list(map(np.array,[x_im_l,y_im_l,x_im_r,y_im_r])) xn_im_l,yn_im_l,xn_im_r,yn_im_r = list(map(np.array,[xn_im_l,yn_im_l,xn_im_r,yn_im_r])) in_image = np.all(x_im_l>=0) and np.all(x_im_r>=0) and np.all(y_im_l>=0) and np.all(y_im_r>=0) in_image = in_image and np.all(x_im_l<len(im)) and np.all(x_im_r<len(im)) and np.all(y_im_l<len(im)) and np.all(y_im_r<len(im)) in_imagen = np.all(xn_im_l>=0) and np.all(xn_im_r>=0) and np.all(yn_im_l>=0) and np.all(yn_im_r>=0) in_imagen = in_imagen and np.all(xn_im_l<len(im)) and np.all(xn_im_r<len(im)) and np.all(yn_im_l<len(im)) and np.all(yn_im_r<len(im)) if in_image: val_l,val_r = np.nanmean(im_[x_im_l,y_im_l]),np.nanmean(im_[x_im_r,y_im_r]) ratio.append(val_l/val_r) center.append(i+sl) if in_imagen: val_l,val_r = np.nanmean(im_[xn_im_l,yn_im_l]),np.nanmean(im_[xn_im_r,yn_im_r]) ration.append(val_r/val_l) centern.append(i+sl) if False:#i==9: plt.figure(figsize=(20,20)) plt.plot(xn_im_l,yn_im_l,'mo') plt.plot(xn_im_r,yn_im_r,'go') plt.plot(x_im_l,y_im_l,'ro') plt.plot(x_im_r,y_im_r,'bo') plt.imshow(im,interpolation='nearest',cmap='seismic_r',vmax=1000) plt.show() print(x_im_l,y_im_l,x_im_r,y_im_r) center,ratio=np.array(center),np.array(ratio) centern,ration=np.array(centern),np.array(ration) max_ratio = np.zeros(len(im))+np.nan max_ratio[center]=ratio max_ratio[centern]=np.nanmax([max_ratio[centern],ration],axis=0) local_max_good = get_ind_loc_max(max_ratio,cutoff_max=cutoff_max,valley=valley) #local_max_goodn = get_ind_loc_max(ration,cutoff_max=cutoff_max,valley=valley) ###Plotting if plt_val: #plt.close('all') plt.figure(figsize=(12,7)) #print local_max_good,local_max_goodn plt.plot(center,np.log(ratio),'o-') plt.plot(centern,np.log(ration),'o-') plt.plot(np.log(max_ratio),'k-') if len(local_max_good)>0: plt.plot(local_max_good,np.log(max_ratio[local_max_good]),'o') plt.show() fig, ax = plt.subplots(figsize=(12,7)) if len(local_max_good)>0: ax.plot(local_max_good[:],local_max_good[:],'go',ms=10,mec='k',mew=2) #cax = ax.imshow(set_diag(img,np.nanmax(img)),interpolation='nearest',cmap='bwr')#,vmax=1000,vmin=0) cax = ax.imshow(im,interpolation='nearest',cmap='seismic_r',vmax=1000,vmin=0) cbar = fig.colorbar(cax) plt.show() return local_max_good,max_ratio[local_max_good]
620fa37306f85a06d88b4f08fd84e9873866e011
23,849
def zfsr32(val, n):
    """zero fill shift right for 32 bit integers"""
    return (val >> n) if val >= 0 else ((val + 4294967296) >> n)
4b890caa0b7b086e923e7b229e5551fd66d24016
23,850
def n_optimize_fn(step: int) -> int:
    """`n_optimize` scheduling function."""
    if step <= FLAGS.change_n_optimize_at:
        return FLAGS.n_optimize_1
    else:
        return FLAGS.n_optimize_2
2d8b05a19c05a662119e51ace97817246c38ebe3
23,851
import torch


def compute_receptive_field(model, img_size=(1, 3, 3)):
    """Computes the receptive field for a model.

    The receptive field is computed using the magnitude of the gradient of
    the model's output with respect to the input.

    Args:
        model: Model for which to compute the receptive field. Assumes NCHW input.
        img_size: The (channels, height, width) of the input to the model.
    """
    c, h, w = img_size
    img = torch.randn((1, c, h, w), requires_grad=True)
    model(img)[0, 0, h // 2, w // 2].mean().backward()
    grad = img.grad.abs()[0, 0, :, :]
    return torch.where(grad > 0, torch.ones_like(grad), torch.zeros_like(grad))
bdc3065e696bf221698d1abdb0717b7da957ca84
23,852
def flippv(pv, n):
    """Flips the meaning of an index partition vector.

    Parameters
    ----------
    pv : ndarray
        The index partition to flip.
    n : integer
        The length of the dimension to partition.

    Returns
    -------
    notpv : ndarray
        The complement of pv.

    Example:

    >>> import numpy as np
    >>> import locate
    >>> pv = np.array([0,3,5])
    >>> locate.flippv(pv,8)
    array([1, 2, 4, 6, 7])
    """
    tf = np.ones(n, dtype=bool)
    tf[pv] = False
    return tf.nonzero()[0]
6c063169100eba098460cd12339f7a6266ea01f0
23,853
from typing import Union
from pathlib import Path


def is_dir(path: Union[str, Path]) -> bool:
    """Check if the given path is a directory

    :param path: path to be checked
    """
    if isinstance(path, str):
        path = Path(path)
    if path.exists():
        return path.is_dir()
    else:
        return str(path).endswith("/")
540cce7f5c6a25186427ba71b94aa090c2ab90a7
23,854
import os import sys def work(out_dir: str, in_coord: str, in_imd_path: str, in_topo_path: str, in_perttopo_path: str, in_disres_path: str, nmpi: int = 1, nomp: int = 1, out_trg: bool = False, gromos_bin: str = None, work_dir: str = None): """ Executed by repex_EDS_long_production_run as worker_scripts #TODO: This was used to do TIs, it will go in future to the pygromos package Parameters ---------- out_dir : str final output dir in_coord : str input coordinates in_imd_path : str input imd-parameter file in_topo_path : str input topology in_perttopo_path : str input pertubation in_disres_path : str input disres nmpi : int, optional number of mpi cores (default: 1) nomp : int, optional number of omp cores (default: 1) out_trg : str, optional gromos_bin : str, optional path to gromos binary (default: None) work_dir : str, optional work directory (default: None) Returns ------- int 0 if code was passed through. """ # WORKDIR SetUP if ((isinstance(work_dir, type(None)) or work_dir == "None") and "TMPDIR" in os.environ): work_dir = os.environ["TMPDIR"] print("using TmpDir") elif (isinstance(work_dir, type(None)) and work_dir == "None"): print("Could not find TMPDIR!\n Switched to outdir for work") work_dir = out_dir print("workDIR: " + work_dir) if (not os.path.isdir(work_dir)): bash.make_folder(work_dir) os.chdir(work_dir) print("workDIR: " + work_dir) md = mdGromosXX.GromosXX(bin=gromos_bin) # RUN try: print(spacer + "\n start MD " + str(os.path.basename(imd_path)) + "\n") out_prefix = os.path.splitext(os.path.basename(imd_path))[0] md_failed = False try: md_run = md.md_mpi_run(in_topo_path=in_topo_path, in_coord_path=in_coord, in_imd_path=in_imd_path, nmpi=nmpi, in_pert_topo_path=in_perttopo_path, out_trg=out_trg, in_disres_path=in_disres_path, out_prefix=out_prefix, nomp=nomp, verbose=True) except Exception as err: print("Failed! process returned: \n Err: \n" + "\n".join(err.args)) md_failed = True if (out_dir != work_dir): os.system("mv " + work_dir + "/* " + out_dir) # post simulation cleanup if not (isinstance(work_dir, type(None)) and work_dir == "None" and "TMPDIR" in os.environ): bash.remove_folder(work_dir, verbose=True) # bash.move_file(work_dir + "/*", out_dir) # bash.remove_file(out_dir + "/slave*.out") # os.system("rmdir "+work_dir) except Exception as err: print("\nFailed during simulations: ", file=sys.stderr) print(type(err), file=sys.stderr) print(err.args, file=sys.stderr) exit(1) return 0
c2607e37fedd7e8a121d20585f23b1ea793fc9aa
23,855
from itertools import cycle
def _replace_dendro_colours(
    colours, above_threshold_colour="C0", non_cluster_colour="black", colorscale=None
):
    """
    Returns colorscale used for dendrogram tree clusters.

    Keyword arguments:
        colorscale -- Colors to use for the plot in rgb format.
            Should have 8 colours.
    """
    if isinstance(colorscale, str):
        colorscale = _mpl_cmap_to_str(colorscale)
    elif colorscale is None:
        colorscale = [
            'rgb(0,116,217)',   # instead of blue
            'rgb(35,205,205)',  # cyan
            'rgb(61,153,112)',  # green
            'rgb(40,35,35)',    # black
            'rgb(133,20,75)',   # magenta
            'rgb(255,65,54)',   # red
            'rgb(255,255,255)', # white
            'rgb(255,220,0)',   # yellow
        ]
    else:
        assert isinstance(colorscale, (list, tuple)), \
            "colorscale must be a list or tuple of strings"
        assert all(isinstance(c, str) for c in colorscale), \
            "colorscale must be a list or tuple of strings"

    original_colours = set(colours)
    original_colours.remove(above_threshold_colour)
    colour_map = dict(zip(original_colours, cycle(colorscale)))
    colour_map[above_threshold_colour] = non_cluster_colour
    return [colour_map[c] for c in colours]
ce50b8c061bb908d8670b059261cde83e7dd62b1
23,856
import re
def document_to_vector(lemmatized_document, uniques):
    """ Converts a lemmatized document to a bow vector representation.
        1/0 for word exists/doesn't exist
    """
    #print(uniques)
    # tokenize
    words = re.findall(r'\w+', lemmatized_document.lower())

    # vector = {}
    vector = [0]*len(uniques)
    # list of the words is accessible via vector.keys()
    # list of 0/1 is accessible via vector.values()
    # seen = []
    for i in range(len(uniques)):
        for j in range(len(words)):
            if uniques[i] == words[j]:
                vector[i] = 1
                continue
    return vector
e4b108b8e99a827788d7eff5d4eabf71021d6e21
23,857
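A small illustrative call of document_to_vector; the vocabulary list below is made up for the example.
uniques = ["cat", "dog", "fish"]  # hypothetical vocabulary
print(document_to_vector("The cat chased the dog", uniques))  # [1, 1, 0]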
def ubcOcTree(FileName_Mesh, FileName_Model, pdo=None):
    """
    Description
    -----------
    Wrapper to Read UBC GIF OcTree mesh and model file pairs. UBC OcTree
    models are defined using a 2-file format. The "mesh" file describes how
    the data is discretized. The "model" file lists the physical property
    values for all cells in a mesh. A model file is meaningless without an
    associated mesh file. This only handles OcTree formats.

    Parameters
    ----------
    `FileName_Mesh` : str
        The OcTree Mesh filename as an absolute path for the input mesh file
        in UBC OcTree Mesh Format
    `FileName_Model` : str
        The model filename as an absolute path for the input model file in
        UBC OcTree Model Format.
    `pdo` : vtk.vtkUnstructuredGrid, optional
        The output data object

    Returns
    -------
    Returns a vtkUnstructuredGrid generated from the UBC 2D/3D Mesh grid.
    Mesh is defined by the input mesh file. Cell data is defined by the input
    model file.
    """
    # Construct/read the mesh
    mesh = ubcOcTreeMesh(FileName_Mesh, pdo=pdo)
    # Read the model data
    # - read model file for OcTree format
    if FileName_Model is not None:
        model = ubcModel3D(FileName_Model)
        # Place the model data onto the mesh
        mesh = placeModelOnOcTreeMesh(mesh, model)
    return mesh
f9d71c7beebc8ca5f3c45fc24a4fd6ccae607634
23,858
def load_colormaps():
    """Return the provided colormaps."""
    return load_builtin_data('colormaps')
00b0d73e127cbbf11b76d5a2281493af95337008
23,859
def discriminator(hr_images, scope, dim):
    """
    Discriminator
    """
    conv_lrelu = partial(conv, activation_fn=lrelu)

    def _combine(x, newdim, name, z=None):
        x = conv_lrelu(x, newdim, 1, 1, name)
        y = x if z is None else tf.concat([x, z], axis=-1)
        return minibatch_stddev_layer(y)

    def _conv_downsample(x, dim, ksize, name):
        y = conv2d_downscale2d(x, dim, ksize, name=name)
        y = lrelu(y)
        return y

    with tf.compat.v1.variable_scope(scope, reuse=tf.compat.v1.AUTO_REUSE):
        with tf.compat.v1.variable_scope("res_4x"):
            net = _combine(hr_images[1], newdim=dim, name="from_input")
            net = conv_lrelu(net, dim, 3, 1, "conv1")
            net = conv_lrelu(net, dim, 3, 1, "conv2")
            net = conv_lrelu(net, dim, 3, 1, "conv3")
            net = _conv_downsample(net, dim, 3, "conv_down")

        with tf.compat.v1.variable_scope("res_2x"):
            net = _combine(hr_images[2], newdim=dim, name="from_input", z=net)
            dim *= 2
            net = conv_lrelu(net, dim, 3, 1, "conv1")
            net = conv_lrelu(net, dim, 3, 1, "conv2")
            net = conv_lrelu(net, dim, 3, 1, "conv3")
            net = _conv_downsample(net, dim, 3, "conv_down")

        with tf.compat.v1.variable_scope("res_1x"):
            net = _combine(hr_images[4], newdim=dim, name="from_input", z=net)
            dim *= 2
            net = conv_lrelu(net, dim, 3, 1, "conv")
            net = _conv_downsample(net, dim, 3, "conv_down")

        with tf.compat.v1.variable_scope("bn"):
            dim *= 2
            net = conv_lrelu(net, dim, 3, 1, "conv1")
            net = _conv_downsample(net, dim, 3, "conv_down1")
            net = minibatch_stddev_layer(net)

            # dense
            dim *= 2
            net = conv_lrelu(net, dim, 1, 1, "dense1")
            net = conv(net, 1, 1, 1, "dense2")
            net = tf.reduce_mean(net, axis=[1, 2])

        return net
9af149750aed5febd17ab37b1e816356d2e27a40
23,860
def addchallenges(request):
    """
    Admin adds a new challenge.
    """
    if request.user.is_superuser:
        if request.method == 'POST':
            success = 0
            form = forms.AddChallengeForm(request.POST, request.FILES)
            if form.is_valid():
                success = 1
                print(request.FILES)
                if request.FILES:
                    i = models.Challenges(file=request.FILES['file'],
                                          name=request.POST['name'],
                                          category=request.POST['category'],
                                          description=request.POST['description'],
                                          points=request.POST['points'],
                                          challenge_id=assignID(request.POST['name']),
                                          flag=request.POST['flag'],
                                          author=request.POST['author'])
                    i.save()
                else:
                    i = models.Challenges(name=request.POST['name'],
                                          category=request.POST['category'],
                                          description=request.POST['description'],
                                          points=request.POST['points'],
                                          challenge_id=assignID(request.POST['name']),
                                          flag=request.POST['flag'],
                                          author=request.POST['author'])
                    i.save()
            return render(request, 'addchallenges.html', {'form': form, 'success': success})
        else:
            form = forms.AddChallengeForm()
            return render(request, 'addchallenges.html', {'form': form})
    else:
        return redirect("/")
e7aaa3a8418f66322f050dee74d74fd1d71bc0c9
23,861
def _decompose_ridge(Xtrain, alphas, n_alphas_batch=None, method="svd",
                     negative_eigenvalues="zeros"):
    """Precompute resolution matrices for ridge predictions.

    To compute the prediction::

        Ytest_hat = Xtest @ (XTX + alphas * Id)^-1 @ Xtrain^T @ Ytrain

    where XTX = Xtrain^T @ Xtrain, this function precomputes::

        matrices = (XTX + alphas * Id)^-1 @ Xtrain^T.

    Parameters
    ----------
    Xtrain : array of shape (n_samples_train, n_features)
        Concatenated input features.
    alphas : float, or array of shape (n_alphas, )
        Range of ridge regularization parameter.
    n_alphas_batch : int or None
        If not None, returns a generator over batches of alphas.
    method : str in {"svd"}
        Method used to diagonalize the kernel.
    negative_eigenvalues : str in {"nan", "error", "zeros"}
        If the decomposition leads to negative eigenvalues (wrongly emerging
        from float32 errors):
            - "error" raises an error.
            - "zeros" replaces them with zeros.
            - "nan" returns nans if the regularization does not compensate
              twice the smallest negative value, else it ignores the problem.

    Returns
    -------
    matrices : array of shape (n_alphas, n_samples_test, n_samples_train) or \
        (n_alphas, n_features, n_samples_train) if test is not None
        Precomputed resolution matrices.
    alpha_batch : slice
        Slice of the batch of alphas.
    """
    backend = get_backend()

    use_alpha_batch = n_alphas_batch is not None
    if n_alphas_batch is None:
        n_alphas_batch = len(alphas)

    if method == "svd":
        # SVD: X = U @ np.diag(eigenvalues) @ Vt
        U, eigenvalues, Vt = backend.svd(Xtrain, full_matrices=False)
    else:
        raise ValueError("Unknown method=%r." % (method, ))

    for start in range(0, len(alphas), n_alphas_batch):
        batch = slice(start, start + n_alphas_batch)

        ev_weighting = eigenvalues / (alphas[batch, None] + eigenvalues ** 2)

        # negative eigenvalues can emerge from incorrect kernels,
        # or from float32
        if eigenvalues[0] < 0:
            if negative_eigenvalues == "nan":
                ev_weighting[alphas[batch] < -eigenvalues[0] * 2, :] = \
                    backend.asarray(backend.nan, type=ev_weighting.dtype)
            elif negative_eigenvalues == "zeros":
                eigenvalues[eigenvalues < 0] = 0
            elif negative_eigenvalues == "error":
                raise RuntimeError(
                    "Negative eigenvalues. Make sure the kernel is positive "
                    "semi-definite, increase the regularization alpha, or use "
                    "another solver.")
            else:
                raise ValueError("Unknown negative_eigenvalues=%r." %
                                 (negative_eigenvalues, ))

        matrices = backend.matmul(Vt.T, ev_weighting[:, :, None] * U.T)

        if use_alpha_batch:
            yield matrices, batch
        else:
            return matrices, batch

    del matrices
ba7d466546f4d417f9f455aee5fa0cccdaba968c
23,862
import requests
def get_new_listing(old_listing):
    """Get the new listing."""
    try:
        fetched_listing = requests.get(cfg['api_url']).json()['product']
    except requests.exceptions.RequestException:
        return old_listing
    else:
        old_item_ids = {old_item['productId'] for old_item in old_listing}
        new_listing = [fetched_item for fetched_item in fetched_listing
                       if fetched_item['productId'] not in old_item_ids]
        if new_listing:
            save_listing(new_listing)
        return new_listing
f8aa02d1a804ef5cfbb1192da15091d8e8816d16
23,863
from typing import Union
import logging
def create_user(engine: create_engine, data: dict) -> Union[User, None]:
    """
    Function for creating a row in the database

    :param engine: sqlmodel's engine
    :param data: dictionary with data that represents user
    :return: Created user instance or nothing
    """
    logging.info('Creating a user')
    user = User(**data)
    with Session(engine) as session:
        try:
            session.add(user)
            session.commit()
            session.refresh(user)
            logging.info('User was created')
        except exc.CompileError:
            logging.warning('User was not created')
            return None
    return user
bb1dff7aca37a8a1eab9104d0b6cd27cb55f78da
23,864
import numpy as np
def _densify_2D(a, fact=2):
    """Densify a 2D array using np.interp.

    :fact - the factor to densify the line segments by
    :Notes
    :-----
    :original construction of c rather than the zero's approach
    :  c0 = c0.reshape(n, -1)
    :  c1 = c1.reshape(n, -1)
    :  c = np.concatenate((c0, c1), 1)
    """
    # Y = a changed all the y's to a
    a = np.squeeze(a)
    n_fact = len(a) * fact
    b = np.arange(0, n_fact, fact)
    b_new = np.arange(n_fact - 1)  # Where you want to interpolate
    c0 = np.interp(b_new, b, a[:, 0])
    c1 = np.interp(b_new, b, a[:, 1])
    n = c0.shape[0]
    c = np.zeros((n, 2))
    c[:, 0] = c0
    c[:, 1] = c1
    return c
e9a881f014c9ebcae6f3550c3b0c4d7beb576203
23,865
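An illustrative call of _densify_2D on an assumed three-point polyline; with fact=2 one interpolated point is inserted between each pair of input vertices.
import numpy as np
line = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 0.0]])  # made-up polyline
print(_densify_2D(line, fact=2))  # 5 points: the original vertices plus the midpoints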
import os
def conf_paths(filename) -> list:
    """Get config paths"""
    home = os.path.expanduser('~')
    paths = [path.format(home=home, filename=filename) for path in PATH_TEMPLATES]
    return paths
a4864b0c3bf8f3a0fed1ec4d459852dea7aea577
23,866
from typing import Union
def get_neighbor_edge(
    graph: srf.Alignment,
    edge: tuple[int, int],
    column: str = 'z',
    direction: str = 'up',
    window: Union[None, int] = None,
    statistic: str = 'min'
) -> Union[None, tuple[int, int]]:
    """Return the neighboring edge having the lowest minimum value

    Parameters:
        graph: directed network graph
        edge: edge for which to determine a neighbor

    Other Parameters:
        column: column to test in vertices
        direction: 'up' tests predecessor edges; 'down' tests successors
        window: number of neighbor vertices to test
        statistic: test statistic

    Returns:
        edge meeting the criteria
    """
    vertices = graph.vertices
    result = None
    val = None

    if direction == 'up':
        neighbors = [(i, edge[0]) for i in graph.predecessors(edge[0])]
    else:
        neighbors = [(edge[1], i) for i in graph.successors(edge[1])]

    if len(neighbors) > 0:
        for neighbor in neighbors:
            if window:
                test_verts = vertices[vertices['edge'] == neighbor].tail(window)
            else:
                test_verts = vertices[vertices['edge'] == neighbor]
            if statistic == 'min':
                test_val = test_verts[column].min()
                if val:
                    if test_val < val:
                        result = neighbor
                        val = test_val
                else:
                    result = neighbor
                    val = test_val

    return result
df81a5480a673efbf66bda7951b634f99996ffcf
23,867
def show_comparison(x_coordinates: np.ndarray, analytic_expression: callable,
                    numeric_solution: [dict, np.ndarray],
                    numeric_label: str = "Numeric Solution",
                    analytic_label: str = "Analytic Solution",
                    title: str = None, x_label: str = None, y_label: str = None,
                    save_file_as: str = None):
    """
    Method that shows the comparison between the analytic and numeric solutions.

    :param x_coordinates: Array of input values for function.
    :param numeric_solution: Array of values for the numeric solution.
    :param analytic_expression: Function that describes the analytic solution.
    :param numeric_label: Label for numeric solution on graph.
    :param analytic_label: Label for analytic solution on graph.
    :param title: Title of plot figure.
    :param x_label: Label for the x axis.
    :param y_label: Label for the y axis.
    :param save_file_as: Filename used to save generated figure. If not defined figure is not saved.
    :return: Displays the graphical comparison.
    """
    check_method_call(x_coordinates)
    check_method_call(analytic_expression)
    check_method_call(numeric_solution)

    analytic_solution = analytic_expression(x_coordinates)

    default_cycler = cycler('color', ['b', 'g', 'k']) * cycler('linestyle', ['--', '-.', ':'])
    plt.rc('axes', prop_cycle=default_cycler)

    plt.plot(x_coordinates, analytic_solution, "r-", label=analytic_label)
    if isinstance(numeric_solution, dict):
        [plt.plot(x_coordinates, numeric_solution[key],
                  label=("{:.4f}s".format(key) if isinstance(key, (float, int)) else key))
         for key in sorted(numeric_solution)]
    else:
        plt.plot(x_coordinates, numeric_solution, "b--", label=numeric_label)

    axes = plt.gca()
    if x_label:
        axes.set_xlabel(x_label)
    if y_label:
        axes.set_ylabel(y_label)
    if title:
        axes.set_title(title)
    plt.grid()
    plt.legend()

    # Calculate errors
    numeric_solution = np.array(numeric_solution if not isinstance(numeric_solution, dict)
                                else numeric_solution[max(numeric_solution.keys())])
    error_array = np.nan_to_num(abs(numeric_solution - analytic_solution) / analytic_solution)
    print("Mean Error: {0}\nStandard Error: {1}".format(np.mean(error_array), np.std(error_array)))

    if save_file_as is not None and isinstance(save_file_as, str):
        plt.savefig("{0}".format(save_file_as))

    return plt.show()
10727c4db401469f88fa93db31a13e21037f8e63
23,868
def has_master(mc: MasterCoordinator) -> bool:
    """
    True if `mc` has a master.
    """
    return bool(mc.sc and not mc.sc.master and mc.sc.master_url)
314fc4a2aa4deed7291d4676230bc9dafbb142d8
23,869
import time
def sz_margin_details(date='', retry_count=3, pause=0.001):
    """
    Get the Shenzhen market margin trading (financing and securities lending) details.

    Parameters
    --------
    date : string
        date of the detail data, format: YYYY-MM-DD, defaults to ''
    retry_count : int, default 3
        number of retries in case of network or similar problems
    pause : int, default 0.001
        seconds to pause between repeated requests, to avoid problems caused
        by too-short request intervals

    Return
    ------
    DataFrame
        opDate: margin trading date
        stockCode: underlying security code
        securityAbbr: underlying security abbreviation
        rzmre: financing purchase amount (CNY)
        rzye: financing balance (CNY)
        rqmcl: securities lending sell volume
        rqyl: securities lending remaining volume
        rqye: securities lending remaining balance (CNY)
        rzrqye: total margin trading balance (CNY)
    """
    for _ in range(retry_count):
        time.sleep(pause)
        try:
            request = Request(rv.MAR_SZ_MX_URL % (ct.P_TYPE['http'], ct.DOMAINS['szse'],
                                                  ct.PAGES['szsefc'], date))
            lines = urlopen(request, timeout=10).read()
            if len(lines) <= 200:
                return pd.DataFrame()
            df = pd.read_html(lines, skiprows=[0])[0]
            df.columns = rv.MAR_SZ_MX_COLS
            df['stockCode'] = df['stockCode'].map(lambda x: str(x).zfill(6))
            df['opDate'] = date
        except Exception as e:
            print(e)
        else:
            return df
    raise IOError(ct.NETWORK_URL_ERROR_MSG)
115ef7ec68a08de065f086b53835c7bd05a34ff8
23,870
def delay_slot_insn(*args):
    """
    delay_slot_insn(ea, bexec, fexec) -> bool

    Helper function to get the delay slot instruction.

    @param ea (C++: ea_t *)
    @param bexec (C++: bool *)
    @param fexec (C++: bool *)
    """
    return _ida_idp.delay_slot_insn(*args)
6b0316845c4cefa2a33a9e9f5e0c24c1f2920a52
23,871
def decode_argument(params, word_embeddings, argument_extractors):
    """
    :type params: dict
    :type word_embeddings: nlplingo.embeddings.WordEmbedding
    :type argument_extractors: list[nlplingo.nn.extractor.Extractor]  # argument extractors
    """
    if len(argument_extractors) == 0:
        raise RuntimeError('At least one argument extractor must be specified to decode over arguments.')

    test_docs = prepare_docs(params['data']['test']['filelist'], word_embeddings, params)

    # TODO CYS: this is currently a hack. This needs to be better factorized and
    # integrated more generically with the existing decode code
    if argument_extractors[0].engine == 'transformers':
        return decode_event_transformer_using_gold_trigger(params, argument_extractors[0], test_docs)
d0f4c819e0f8b29827bee28778a76ff4b0b2d8e7
23,872
import numpy
from scipy.spatial.distance import cdist
def pairwise_radial_basis(K: numpy.ndarray, B: numpy.ndarray) -> numpy.ndarray:
    """Compute the TPS radial basis function phi(r) between every row-pair of
    K and B where r is the Euclidean distance.

    Arguments
    ---------
    K : numpy.array
        n by d vector containing n d-dimensional points.
    B : numpy.array
        m by d vector containing m d-dimensional points.

    Return
    ------
    P : numpy.array
        n by m matrix where
        P(i, j) = phi( norm( K(i,:) - B(j,:) ) ),
        where phi(r) = r^2*log(r), if r >= 1
                       r*log(r^r), if r < 1
    """
    # r_mat(i, j) is the Euclidean distance between K(i, :) and B(j, :).
    r_mat = cdist(K, B)

    pwise_cond_ind1 = r_mat >= 1
    pwise_cond_ind2 = r_mat < 1
    r_mat_p1 = r_mat[pwise_cond_ind1]
    r_mat_p2 = r_mat[pwise_cond_ind2]

    # P corresponds to the matrix K from [1].
    P = numpy.empty(r_mat.shape)
    P[pwise_cond_ind1] = (r_mat_p1**2) * numpy.log(r_mat_p1)
    P[pwise_cond_ind2] = r_mat_p2 * numpy.log(numpy.power(r_mat_p2, r_mat_p2))

    return P
5ce4ff200bf953d80b7aff30b84b4ae0a62a0445
23,873
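A one-pair sanity check for pairwise_radial_basis (illustrative): for a distance of exactly 2, phi(r) = r^2 * log(r) = 4 * log(2), roughly 2.77.
import numpy
K = numpy.array([[0.0, 0.0]])
B = numpy.array([[2.0, 0.0]])
print(pairwise_radial_basis(K, B))  # approximately [[2.7726]]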
def low_index_subgroups(G, N, Y=[]):
    """
    Implements the Low Index Subgroups algorithm, i.e. find all subgroups of
    ``G`` up to a given index ``N``. This implements the method described in
    [Sim94]. This procedure involves a backtrack search over incomplete Coset
    Tables, rather than over forced coincidences.

    Parameters
    ==========

    G: An FpGroup < X|R >
    N: positive integer, representing the maximum index value for subgroups
    Y: (an optional argument) specifying a list of subgroup generators, such
    that each of the resulting subgroups contains the subgroup generated by Y.

    Examples
    ========

    >>> from sympy.combinatorics.free_groups import free_group
    >>> from sympy.combinatorics.fp_groups import FpGroup, low_index_subgroups
    >>> F, x, y = free_group("x, y")
    >>> f = FpGroup(F, [x**2, y**3, (x*y)**4])
    >>> L = low_index_subgroups(f, 4)
    >>> for coset_table in L:
    ...     print(coset_table.table)
    [[0, 0, 0, 0]]
    [[0, 0, 1, 2], [1, 1, 2, 0], [3, 3, 0, 1], [2, 2, 3, 3]]
    [[0, 0, 1, 2], [2, 2, 2, 0], [1, 1, 0, 1]]
    [[1, 1, 0, 0], [0, 0, 1, 1]]

    References
    ==========

    .. [1] Holt, D., Eick, B., O'Brien, E.
           "Handbook of Computational Group Theory"
           Section 5.4

    .. [2] Marston Conder and Peter Dobcsanyi
           "Applications and Adaptions of the Low Index Subgroups Procedure"

    """
    C = CosetTable(G, [])
    R = G.relators
    # length chosen for the length of the short relators
    len_short_rel = 5
    # elements of R2 only checked at the last step for complete
    # coset tables
    R2 = {rel for rel in R if len(rel) > len_short_rel}
    # elements of R1 are used in inner parts of the process to prune
    # branches of the search tree,
    R1 = {rel.identity_cyclic_reduction() for rel in set(R) - R2}
    R1_c_list = C.conjugates(R1)
    S = []
    descendant_subgroups(S, C, R1_c_list, C.A[0], R2, N, Y)
    return S
03dc48ada37302ca6d4bc5054660aae60bca2ca5
23,874
import numpy as np
def ifft_complex(fft_sig_complex) -> np.ndarray:
    """
    Compute the one-dimensional inverse discrete Fourier Transform.

    :param fft_sig_complex: input array, can be complex.
    :return: the truncated or zero-padded input, transformed along the axis
    """
    ifft_sig = np.fft.ifft(fft_sig_complex)
    fft_points = len(ifft_sig)
    ifft_sig *= fft_points
    return ifft_sig
101189d4116b0d968ee015534ab8b8fc0020a769
23,875
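A hedged round-trip sketch for ifft_complex: because the helper rescales by the number of FFT points, pairing it with numpy's forward FFT recovers the input only after dividing by the length again.
import numpy as np
sig = np.array([1.0, 2.0, 3.0, 4.0])
spectrum = np.fft.fft(sig)
print(ifft_complex(spectrum) / len(sig))  # ~[1.+0.j, 2.+0.j, 3.+0.j, 4.+0.j]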
def merge_eopatches(*eopatches, features=..., time_dependent_op=None, timeless_op=None):
    """ Merge features of given EOPatches into a new EOPatch

    :param eopatches: Any number of EOPatches to be merged together
    :type eopatches: EOPatch
    :param features: A collection of features to be merged together. By default all features will be merged.
    :type features: object
    :param time_dependent_op: An operation to be used to join data for any time-dependent raster feature. Before
        joining time slices of all arrays will be sorted. Supported options are:
        - None (default): If time slices with matching timestamps have the same values, take one. Raise an error
          otherwise.
        - 'concatenate': Keep all time slices, even the ones with matching timestamps
        - 'min': Join time slices with matching timestamps by taking minimum values. Ignore NaN values.
        - 'max': Join time slices with matching timestamps by taking maximum values. Ignore NaN values.
        - 'mean': Join time slices with matching timestamps by taking mean values. Ignore NaN values.
        - 'median': Join time slices with matching timestamps by taking median values. Ignore NaN values.
    :type time_dependent_op: str or Callable or None
    :param timeless_op: An operation to be used to join data for any timeless raster feature. Supported options are:
        - None (default): If arrays are the same, take one. Raise an error otherwise.
        - 'concatenate': Join arrays over the last (i.e. bands) dimension
        - 'min': Join arrays by taking minimum values. Ignore NaN values.
        - 'max': Join arrays by taking maximum values. Ignore NaN values.
        - 'mean': Join arrays by taking mean values. Ignore NaN values.
        - 'median': Join arrays by taking median values. Ignore NaN values.
    :type timeless_op: str or Callable or None
    :return: A dictionary with EOPatch features and values
    :rtype: Dict[(FeatureType, str), object]
    """
    reduce_timestamps = time_dependent_op != 'concatenate'
    time_dependent_op = _parse_operation(time_dependent_op, is_timeless=False)
    timeless_op = _parse_operation(timeless_op, is_timeless=True)

    all_features = {feature for eopatch in eopatches for feature in FeatureParser(features)(eopatch)}
    eopatch_content = {}

    timestamps, sort_mask, split_mask = _merge_timestamps(eopatches, reduce_timestamps)
    eopatch_content[FeatureType.TIMESTAMP] = timestamps

    for feature in all_features:
        feature_type, feature_name = feature

        if feature_type.is_raster():
            if feature_type.is_time_dependent():
                eopatch_content[feature] = _merge_time_dependent_raster_feature(
                    eopatches, feature, time_dependent_op, sort_mask, split_mask
                )
            else:
                eopatch_content[feature] = _merge_timeless_raster_feature(eopatches, feature, timeless_op)

        if feature_type.is_vector():
            eopatch_content[feature] = _merge_vector_feature(eopatches, feature)

        if feature_type is FeatureType.META_INFO:
            eopatch_content[feature] = _select_meta_info_feature(eopatches, feature_name)

        if feature_type is FeatureType.BBOX:
            eopatch_content[feature] = _get_common_bbox(eopatches)

    return eopatch_content
6328528c6b6fc3013db6aa17f0153354230efd4b
23,876
import argparse
def parse_args():
    """Parse cli arguments."""
    parser = argparse.ArgumentParser(description="Java project package name changer.")
    parser.add_argument("--directory", default=".", type=str, help="Working directory.")
    parser.add_argument(
        "--current",
        required=True,
        type=str,
        help='Current package name. For example: "com.example".',
    )
    parser.add_argument(
        "--target",
        required=True,
        type=str,
        help='Target package name. For example: "org.another".',
    )
    parser.add_argument(
        "--protected_dirs",
        default=[],
        type=str,
        nargs="+",
        help="List of directories protected from any changes",
    )
    parser.add_argument(
        "--protected_files",
        default=[],
        type=str,
        nargs="+",
        help="List of files protected from any changes",
    )
    return parser.parse_args()
ceddd5028cd7aea1625e31f4c08bb22d112ea71a
23,877
import math
def dispos(dra0, decd0, dra, decd):
    """
    Source/credit: Skycat
    dispos computes distance and position angle solving a spherical
    triangle (no approximations)
    INPUT        :coords in decimal degrees
    OUTPUT       :dist in arcmin, returns phi in degrees (East of North)
    AUTHOR       :a.p.martinez

    Parameters:
        dra0: center RA
        decd0: center DEC
        dra: point RA
        decd: point DEC

    Returns:
        (phi, dist): position angle in degrees East of North and distance in arcmin
    """
    radian = 180.0 / math.pi

    # coord transformed in radians
    alf = dra / radian
    alf0 = dra0 / radian
    del_ = decd / radian
    del0 = decd0 / radian

    sd0 = math.sin(del0)
    sd = math.sin(del_)
    cd0 = math.cos(del0)
    cd = math.cos(del_)
    cosda = math.cos(alf - alf0)
    cosd = sd0*sd + cd0*cd*cosda
    dist = math.acos(cosd)
    phi = 0.0
    if dist > 0.0000004:
        sind = math.sin(dist)
        cospa = (sd*cd0 - cd*sd0*cosda) / sind
        #if cospa > 1.0:
        #    cospa = 1.0
        if math.fabs(cospa) > 1.0:
            # 2005-06-02: fix from awicenec@eso.org
            cospa = cospa / math.fabs(cospa)
        sinpa = cd*math.sin(alf - alf0) / sind
        phi = math.acos(cospa)*radian
        if sinpa < 0.0:
            phi = 360.0 - phi
    dist *= radian
    dist *= 60.0
    if decd0 == 90.0:
        phi = 180.0
    if decd0 == -90.0:
        phi = 0.0

    return (phi, dist)
5c1b7c79a82f59764fd43ba0d89a763955b09a04
23,878
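A quick illustrative check of dispos: a point one degree north of the centre at the same RA should come back with position angle ~0 and a distance of ~60 arcmin (coordinates below are made up for the example).
phi, dist = dispos(10.0, 20.0, 10.0, 21.0)
print(round(phi, 3), round(dist, 3))  # roughly 0.0 and 60.0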
import socket
def bind_port(sock, host=HOST):
    """Bind the socket to a free port and return the port number.  Relies on
    ephemeral ports in order to ensure we are using an unbound port.  This is
    important as many tests may be running simultaneously, especially in a
    buildbot environment.  This method raises an exception if the sock.family
    is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
    or SO_REUSEPORT set on it.  Tests should *never* set these socket options
    for TCP/IP sockets.  The only case for setting these options is testing
    multicasting via multiple UDP sockets.

    Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
    on Windows), it will be set on the socket.  This will prevent anyone else
    from bind()'ing to our host/port for the duration of the test.
    """
    if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
        if hasattr(socket, 'SO_REUSEADDR'):
            if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
                raise TestFailed("tests should never set the SO_REUSEADDR "
                                 "socket option on TCP/IP sockets!")
        if hasattr(socket, 'SO_REUSEPORT'):
            try:
                if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
                    raise TestFailed("tests should never set the SO_REUSEPORT "
                                     "socket option on TCP/IP sockets!")
            except OSError:
                # Python's socket module was compiled using modern headers
                # thus defining SO_REUSEPORT but this process is running
                # under an older kernel that does not support SO_REUSEPORT.
                pass
        if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)

    sock.bind((host, 0))
    port = sock.getsockname()[1]
    return port
a326581bea0f0873292028a5e39a710ea89fde4b
23,879
from typing import Union
def node_to_html(node: Union[str, NodeElement, list]) -> str:
    """
    Convert Nodes to HTML

    :param node:
    :return:
    """
    if isinstance(node, str):
        # Text
        return escape(node)
    elif isinstance(node, list):
        # List of nodes
        result = ''
        for child_node in node:
            result += node_to_html(child_node)
        return result
    elif not isinstance(node, NodeElement):
        raise TypeError(f"Node must be instance of str or NodeElement, not {type(node)}")

    # NodeElement
    # Open
    result = "<" + node.tag
    if node.attrs:
        result += ' ' + ' '.join(f"{k}=\"{v}\"" for k, v in node.attrs.items())
    if node.tag in VOID_ELEMENTS:
        # Close void element
        result += '/>'
    else:
        result += '>'
        for child_node in node.children:
            # Container body
            result += node_to_html(child_node)
        # Close tag
        result += '</' + node.tag + '>'
    return result
0366dffc181f27ac10cbae8d9eae65b6822371c6
23,880
def cuda_reshape(a, shape):
    """ Reshape a GPUArray.

    Parameters:
        a (gpu): GPUArray.
        shape (tuple): Dimension of new reshaped GPUArray.

    Returns:
        gpu: Reshaped GPUArray.

    Examples:
        >>> a = cuda_reshape(cuda_give([[1, 2], [3, 4]]), (4, 1))
        array([[ 1.],
               [ 2.],
               [ 3.],
               [ 4.]])

        >>> type(a)
        <class 'pycuda.gpuarray.GPUArray'>
    """
    return a.reshape(shape)
966cae8aeb88aeaeada28a11c284920746771f00
23,881
def test_ep_basic_equivalence(stateful, state_tuple, limits):
    """
    Test that EpisodeRoller is equivalent to a BasicRoller when run on a
    single environment.
    """
    def env_fn():
        return SimpleEnv(3, (4, 5), 'uint8')
    env = env_fn()
    model = SimpleModel(env.action_space.low.shape,
                        stateful=stateful,
                        state_tuple=state_tuple)
    basic_roller = BasicRoller(env, model, **limits)
    expected = basic_roller.rollouts()

    batched_env = batched_gym_env([env_fn], sync=True)
    ep_roller = EpisodeRoller(batched_env, model, **limits)
    actual = ep_roller.rollouts()
    _compare_rollout_batch(actual, expected)
4f9632bd088a0be806ca9c4f51e3c5bc55431513
23,882
def _find_crate_root_src(srcs, file_names=["lib.rs"]):
    """Finds the source file for the crate root."""
    if len(srcs) == 1:
        return srcs[0]
    for src in srcs:
        if src.basename in file_names:
            return src
    fail("No %s source file found." % " or ".join(file_names), "srcs")
dd3488b49dc6c315c3d35ead75a83008e7bcd962
23,883
def decode_check(string):
    """Returns the base58 decoded value, verifying the checksum.

    :param string: The data to decode, as a string.
    """
    number = b58decode(string)
    # Converting to bytes in order to verify the checksum
    payload = number.to_bytes(sizeof(number), 'big')
    if payload and sha256d(payload[:-4])[:4] == payload[-4:]:
        return payload[:-4]
    else:
        return None
716c0a92be68feb2a97cacfd57940e9e43fa07d9
23,884
import math
def rotate(origin, point, angle):
    """
    Rotate a point counterclockwise by a given angle around a given origin.

    The angle should be given in radians.
    """
    ox, oy = origin
    px, py = point
    qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
    qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
    return qx, qy
9c542fd45b8b53bad61121429377298bd9d7fd08
23,885
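A tiny illustrative check of rotate: rotating (1, 0) by 90 degrees about the origin lands at (0, 1), up to floating-point noise.
print(rotate((0.0, 0.0), (1.0, 0.0), math.pi / 2))  # approximately (0.0, 1.0)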
def get_users_info(token, ids):
    """Return a response from vk api users.get

    :param token: access token
    :param ids: users ids
    :return: dict with users info
    """
    args = {
        'user_ids': ids,
        'fields': 'city,bdate,connections,photo_200',
        'access_token': token,
        'v': settings.api_v
    }
    return send_vk_request('users.get', **args)[0]
02246232df0f3eebc530e05e1734e10f6be5ed9e
23,886
def create_consistencygroup(ctxt,
                            host='test_host@fakedrv#fakepool',
                            name='test_cg',
                            description='this is a test cg',
                            status='available',
                            availability_zone='fake_az',
                            volume_type_id=None,
                            cgsnapshot_id=None,
                            source_cgid=None,
                            **kwargs):
    """Create a consistencygroup object in the DB."""
    cg = objects.ConsistencyGroup(ctxt)
    cg.host = host
    cg.user_id = ctxt.user_id or 'fake_user_id'
    cg.project_id = ctxt.project_id or 'fake_project_id'
    cg.status = status
    cg.name = name
    cg.description = description
    cg.availability_zone = availability_zone
    if volume_type_id:
        cg.volume_type_id = volume_type_id
    cg.cgsnapshot_id = cgsnapshot_id
    cg.source_cgid = source_cgid
    for key in kwargs:
        setattr(cg, key, kwargs[key])
    cg.create()
    return cg
a7548774690eccdc0c44231ae10dd345c9e84eb8
23,887
def ndigit(num):
    """Returns the number of digits in non-negative number num"""
    with nowarn():
        # Guard num == 0 before taking log10 so single-digit inputs
        # (including 0) report one digit.
        return np.int32(np.floor(np.log10(np.maximum(num, 1)))) + 1
ac83e0b31ce9213646e856aa47fa8bfba7d74a86
23,888
def data_store_folder_unzip_public(request, pk, pathname):
    """
    Public version of data_store_folder_unzip, incorporating path variables

    :param request:
    :param pk:
    :param pathname:
    :return HttpResponse:
    """
    return data_store_folder_unzip(request, res_id=pk, zip_with_rel_path=pathname)
7866ed0539a00e16cbe0cbb2ab6902faebfd4434
23,889
def privateDataOffsetLengthTest10():
    """
    Offset doesn't begin immediately after last table.

    >>> doctestFunction1(testPrivateDataOffsetAndLength, privateDataOffsetLengthTest10())
    (None, 'ERROR')
    """
    header = defaultTestData(header=True)
    header["privOffset"] = header["length"] + 4
    header["privLength"] = 1
    header["length"] += 2
    return packTestHeader(header)
8744b97da80151a24c480ad29d2b1625f0c687f0
23,890
def meanncov(x, y=[], p=0, norm=True):
    """
    Wrapper to multichannel case of new covariance *ncov*.

    Args:
      *x* : numpy.array
          multidimensional data (channels, data points, trials).
      *y* = [] : numpy.array
          multidimensional data. If not given the autocovariance of *x*
          will be calculated.
      *p* = 0: int
          window shift of input data. It can be negative as well.
      *norm* = True: bool
          normalization - if True the result is divided by length of *x*,
          otherwise it is not.

    Returns:
      *mcov* : np.array
          covariance matrix
    """
    chn, N, trls = x.shape
    for tr in range(trls):
        if tr == 0:
            if not len(y):
                mcov = ncov(x[:, :, tr], p=p, norm=norm)
            else:
                mcov = ncov(x[:, :, tr], y[:, :, tr], p=p, norm=norm)
            continue
        if not len(y):
            mcov += ncov(x[:, :, tr], p=p, norm=norm)
        else:
            mcov += ncov(x[:, :, tr], y[:, :, tr], p=p, norm=norm)
    return mcov / trls
926bc64a0b7e15822f40f705ac3e770a57bb2e11
23,891
def nasnet_6a4032(**kwargs):
    """
    NASNet-A 6@4032 (NASNet-A-Large) model from 'Learning Transferable Architectures for Scalable Image Recognition,'
    https://arxiv.org/abs/1707.07012.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_nasnet(repeat=6, penultimate_filters=4032, init_block_channels=96,
                      final_pool_size=11, extra_padding=False,
                      skip_reduction_layer_input=True, in_size=(331, 331),
                      model_name="nasnet_6a4032", **kwargs)
78f8862a69c12e8de85dc441e6b45e364d9b3385
23,892
def get_character_card(character_id, preston, access_token):
    """Get all the info for the character card.

    Args:
        character_id (int): ID of the character.
        preston (preston): Preston object to make scope-required ESI calls.
        access_token (str): Access token for the scope-required ESI calls.

    Returns:
        json: Character card information.
    """
    # Get character.
    characterPayload = SharedInfo['util'].make_esi_request(
        "https://esi.tech.ccp.is/latest/characters/{}/?datasource=tranquility".format(str(character_id)))
    if characterPayload.status_code != 200:
        flash('There was an error ({}) when trying to retrieve character with ID {}'.format(
            str(characterPayload.status_code), str(character_id)), 'danger')
        return None
    characterJSON = characterPayload.json()
    characterJSON['portrait'] = SharedInfo['util'].make_esi_request(
        "https://esi.tech.ccp.is/latest/characters/{}/portrait/?datasource=tranquility".format(
            str(character_id))).json()

    # Get corporation.
    corporationPayload = SharedInfo['util'].make_esi_request(
        "https://esi.tech.ccp.is/latest/corporations/{}/?datasource=tranquility".format(
            str(characterJSON['corporation_id'])))
    if corporationPayload.status_code != 200:
        flash('There was an error ({}) when trying to retrieve corporation with ID {}'.format(
            str(corporationPayload.status_code), str(characterJSON['corporation_id'])), 'danger')
        return None
    characterJSON['corporation'] = corporationPayload.json()
    characterJSON['corporation']['logo'] = SharedInfo['util'].make_esi_request(
        "https://esi.tech.ccp.is/latest/corporations/{}/icons/?datasource=tranquility".format(
            str(characterJSON['corporation_id']))).json()

    # Get alliance.
    if 'alliance_id' in characterJSON:
        alliancePayload = SharedInfo['util'].make_esi_request(
            "https://esi.tech.ccp.is/latest/alliances/{}/?datasource=tranquility".format(
                str(characterJSON['alliance_id'])))
        if alliancePayload.status_code != 200:
            flash('There was an error ({}) when trying to retrieve alliance with ID {}'.format(
                str(alliancePayload.status_code), str(characterJSON['alliance_id'])), 'danger')
            return None
        characterJSON['alliance'] = alliancePayload.json()
        characterJSON['alliance']['logo'] = SharedInfo['util'].make_esi_request(
            "https://esi.tech.ccp.is/latest/alliances/{}/icons/?datasource=tranquility".format(
                str(characterJSON['alliance_id']))).json()

    # Get wallet.
    walletIsk = SharedInfo['util'].make_esi_request_with_scope(
        preston, ['esi-wallet.read_character_wallet.v1'],
        "https://esi.tech.ccp.is/latest/characters/{}/wallet/?datasource=tranquility&token={}".format(
            str(character_id), access_token))
    walletIskJSON = None
    if walletIsk is not None:
        walletIskJSON = walletIsk.json()
    if walletIskJSON is not None and type(walletIskJSON) is not float:
        flash('There was an error ({}) when trying to retrieve wallet for character.'.format(
            str(walletIsk.status_code)), 'danger')
        return None
    else:
        characterJSON['wallet_isk'] = walletIskJSON

    # Get skillpoints
    characterSkills = SharedInfo['util'].make_esi_request_with_scope(
        preston, ['esi-skills.read_skills.v1'],
        "https://esi.tech.ccp.is/latest/characters/{}/skills/?datasource=tranquility&token={}".format(
            str(character_id), access_token))
    characterSkillsJSON = None
    if characterSkills is not None:
        characterSkillsJSON = characterSkills.json()
    if characterSkillsJSON is not None and 'error' in characterSkillsJSON:
        flash('There was an error ({}) when trying to retrieve skills.'.format(
            str(characterSkills.status_code)), 'danger')
        return None
    else:
        characterJSON['skills'] = characterSkillsJSON

    return characterJSON
23f201833537a8a596e42acc41061a583ebead38
23,893
def expand_ALL_constant(model, fieldnames):
    """Replaces the constant ``__all__`` with all concrete fields of the model"""
    if "__all__" in fieldnames:
        concrete_fields = []
        for f in model._meta.get_fields():
            if f.concrete:
                if f.one_to_one or f.many_to_many:
                    concrete_fields.append(f.name)
                else:
                    concrete_fields.append(f.name)
        i = fieldnames.index("__all__")
        return fieldnames[:i] + concrete_fields + fieldnames[i + 1:]
    return fieldnames
8c44c9b16fd93ca1c9a4efddd1ea85b44d34dba3
23,894
def server(user, password):
    """A shortcut to use MailServer.

    SMTP:
        server.send_mail([recipient,], mail)
    POP3:
        server.get_mail(which)
        server.get_mails(subject, sender, after, before)
        server.get_latest()
        server.get_info()
        server.stat()
    Parse mail:
        server.show(mail)
        server.get_attachment(mail)
    """
    return MailServer(user, password)
acd2e2b69b6fe22ac8ae40cbe1b9d51a750e4e46
23,895
import requests
import sys
def get_rackspace_token(username, apikey):
    """Get Rackspace Identity token.

    Login to Rackspace with cloud account and api key from environment vars.
    Returns a tuple of (token id, tenant id).
    """
    auth_params = {
        "auth": {
            "RAX-KSKEY:apiKeyCredentials": {
                "username": username,
                "apiKey": apikey,
            }
        }
    }
    response = requests.post(RS_IDENTITY_URL, json=auth_params)
    if not response.ok:
        sys.exit(RS_AUTH_ERROR.format(response.status_code, response.text))
    identity = response.json()
    return (identity['access']['token']['id'],
            identity['access']['token']['tenant']['id'])
35809def34199245e603ea9766dd03d9f7ff6578
23,896
def calculate_hessian(model, data, step_size):
    """
    Computes the mixed derivative using the finite differences method

    :param model: The imported model module
    :param data: The sampled data in structured form
    :param step_size: The dx time step taken between each
    :returns: mixed derivative
    """
    hessian = pd.DataFrame(
        0, index=np.arange(data.shape[0]),
        columns=pd.MultiIndex.from_product(
            [model.output_names, model.perturbation_feature_pairs + model.feature_names],
            names=['model.output_names', 'model.feature_pairs']))
    for output_name in model.output_names:
        hessian_calculation_helpers = create_hessian_calculation_columns(model, output_name)
        mixed_derivative = (data.loc[:, hessian_calculation_helpers[0]].values
                            - data.loc[:, hessian_calculation_helpers[1]].values
                            - data.loc[:, hessian_calculation_helpers[2]].values
                            + data.loc[:, hessian_calculation_helpers[3]].values) / (step_size * step_size)
        mixed_derivative *= np.sign(data.loc[:, hessian_calculation_helpers[1]].values
                                    + data.loc[:, hessian_calculation_helpers[2]].values
                                    - 2 * data.loc[:, hessian_calculation_helpers[0]].values)
        hessian.loc[:, zip([output_name] * len(model.perturbation_feature_pairs),
                           model.perturbation_feature_pairs)] = mixed_derivative
        hessian.loc[:, zip([output_name] * len(model.feature_names), model.feature_names)] = \
            np.array([(data.loc[:, (output_name, f)] - data.loc[:, (output_name, 'core')]) / step_size
                      for f in model.feature_names]).T
    return hessian
44ebed355e5db7991080e55f786850fb6b0e8908
23,897
def get_quarterly_income_statements(symbol):
    """
    Returns quarterly income statements for the past 5 years.
    """
    df = query_av(function="INCOME_STATEMENT", symbol=symbol, datatype='quarterlyReports')
    return df
2b9094da22782d02ff5c6e4f32930c0816d213a9
23,898
def BRepBlend_HCurve2dTool_IsPeriodic(*args):
    """
    :param C:
    :type C: Handle_Adaptor2d_HCurve2d &
    :rtype: bool
    """
    return _BRepBlend.BRepBlend_HCurve2dTool_IsPeriodic(*args)
707895ece8aa032bcd6b747c5be0313102758957
23,899