content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
import tqdm


def get_good_start(system, numdistricts):
    """Find a decent initial solution for simulated annealing.

    Generates 100 random candidate solutions and keeps the best-scoring
    one, so the annealer starts from a reasonable point instead of a
    single poor random draw.

    :param system: the districting system to solve
    :param numdistricts: number of districts for each candidate solution
    :return: the highest-value Solution found
    """
    print('Acquiring a good initial solution')
    solution = Solution(system, numdistricts)
    solution.generate_random_solution()  # start with random solution
    # BUG FIX: with `import tqdm`, the name `tqdm` is the module and is
    # not callable; the progress-bar class is `tqdm.tqdm`.
    for _ in tqdm.tqdm(range(100)):
        new_solution = Solution(system, numdistricts)
        new_solution.generate_random_solution()
        if new_solution.value > solution.value:
            solution = new_solution
    print('Starting with Solution[{}]'.format(solution.value))
    return solution
2c557c0505d0c442890c96ebb6186e866b5a7726
3,638,700
def search_up(word_list, matrix):
    """Search words from word_list in matrix, scanning in the up direction.

    Thin wrapper that delegates to ``straight_search`` with the flag
    combination selecting the "up" direction (the exact flag semantics
    live in ``straight_search``).

    :param word_list - list of strings
    :param matrix - list of lists
    :return list of lists"""
    up_direction_flags = (True, False)
    return straight_search(word_list, matrix, *up_direction_flags)
e806af0505a1cb78bf1bc181a89ccee81407c0b7
3,638,701
def int_validator(inp, ifallowed):
    """Validate that a widget only ever holds a short non-negative integer.

    Intended as a Tk entry validator with call signature ``%S %P``.

    :param inp: the text being inserted (``%S``)
    :param ifallowed: the would-be content of the widget if the edit is
        allowed (``%P``)
    :return: True if the edit should be accepted, False otherwise
    """
    # Cap the total length of the entry at 10 characters.
    if len(ifallowed) > 10:
        return False
    try:
        # Accept only non-negative integer text (letters or a bare "-"
        # raise ValueError and are rejected).
        return int(inp) >= 0
    except ValueError:
        return False
    # NOTE: the original ended with `return True` here, which was dead
    # code (both paths above always return) and has been removed.
ee433a6365a0aad58cab0cd59fa05e132b669053
3,638,702
import logging

import requests


def scrape_page(url):
    """Fetch ``url`` and return its HTML body.

    Logs an error and returns None (implicitly) on a non-200 status or a
    request-level failure.

    :param url: page url
    :return: html of page, or None on failure
    """
    logging.info('scraping %s...', url)
    try:
        response = requests.get(url)
    except requests.RequestException:
        logging.error('error occurred while scraping %s', url, exc_info=True)
        return None
    if response.status_code == 200:
        return response.text
    logging.error('get invalid status code %s while scraping %s',
                  response.status_code, url)
a09eb79ce6abe25e4eb740dcfeb7a4debfca0b88
3,638,703
def getProductionUrl(code,d0):
    """Build the outage-data image URL for date ``d0``.

    NOTE(review): the original docstring said "from d0 to d1" but there
    is no ``d1`` parameter — only a single date is substituted.

    :param code: site/region code forwarded to ``getUrl``
    :param d0: date substituted into the URL's ``__datehere__`` placeholder
    :return: the finished URL string
    """
    url = getUrl('png',code,2018,opts=[[None]])
    # eomf.m2s presumably formats the date for the URL — TODO confirm.
    url = url.replace('__datehere__',eomf.m2s(d0),)
    return url
d7e13671494c719ecc41058d481bd8d4cef5a3ff
3,638,704
def has_gaps_in_region(read, region):
    """
    Return True when the given pysam read's alignment has a gap inside the
    given pybedtools.Interval ``region``.

    A read whose alignment contributes more than one aligned block
    overlapping the interval has a gap (unaligned stretch) inside it.
    """
    tree = intervaltree.IntervalTree()
    for start, end in read.get_blocks():
        tree[start:end] = (start, end)
    overlapping = tree[region.start:region.end]
    return len(overlapping) > 1
df1e272044d47bb610a59e80f21ad0fcca484231
3,638,705
def zha_device_joined(opp, setup_zha):
    """Return a factory coroutine that joins a zigpy device to ZHA."""

    async def _zha_device(zigpy_dev):
        # Bring up the ZHA integration, then hand it the new device.
        await setup_zha()
        gateway = get_zha_gateway(opp)
        await gateway.async_device_initialized(zigpy_dev)
        await opp.async_block_till_done()
        return gateway.get_device(zigpy_dev.ieee)

    return _zha_device
e8d8ac320414762416e2a105583001ac452df6b6
3,638,706
def get_all_list_data(request_context, function, *args, **kwargs):
    """Call ``function`` and follow its "next" links, merging all pages.

    The initial call is made with ``args``/``kwargs``; each follow-up
    response produced by the ``get_next`` generator is expected to carry
    list data, which is appended to the initial response's data.  If there
    are no paged results, the initial JSON payload is returned unchanged.
    Any exception raised while calling or paging propagates to the caller
    and intermediate results are discarded.  Worst case complexity O(n).

    :param RequestContext request_context: The context required to make an
        API call
    :param function function: The API function to call
    :return: the combined list of JSON records, or the initial json
        function response if there are no paged results
    :rtype: list of json data or json
    """
    initial = function(request_context, *args, **kwargs)
    data = initial.json()
    for page in get_next(request_context, initial):
        data.extend(page.json())
    return data
dd9aea10691a553c4b36009d733c11d39ada970e
3,638,707
def is_outlier(x, check_finite=False, confidence=3):
    """Boolean mask flagging the outliers of ``x``.

    :param x: vector
    :param check_finite: forwarded to ``is_not_outlier``
    :param confidence: confidence level: 1, 2, 3 or 4, which correspond to
        90%, 95%, 99% and 99.9% two-tailed confidence respectively
        (normal distribution). Default: 3 (99%)
    :type x: numpy.ndarray
    :type check_finite: bool
    :type confidence: int
    :return: vector with condition "is `x` outlier?"
    """
    inlier_mask = is_not_outlier(x, check_finite=check_finite,
                                 confidence=confidence)
    return np.logical_not(inlier_mask)
6fb1b9f157c3bf720a615892524d90df2f717096
3,638,708
def stateless_shuffle(value, seed):
    """Deterministically shuffle a tensor using a stateless RNG seed."""
    flat = tf.reshape(value, [-1])
    # Sorting stateless-random keys yields a seed-determined permutation.
    random_keys = tf.random.stateless_uniform(tf.shape(flat), seed=seed)
    permutation = tf.argsort(random_keys)
    shuffled = tf.gather(flat, permutation)
    return tf.reshape(shuffled, tf.shape(value))
4fa1ab8538ab5ab7f356c68d75c9fa61395a6e75
3,638,709
def is_one_line_function_declaration_line(line: str) -> bool:  # pylint:disable=invalid-name
    """
    Check if line contains a one-line function declaration.

    A declaration line must contain ``def `` and an opening parenthesis,
    and close either with ``):`` or with a ``) ->`` return annotation.
    """
    # BUG FIX: `and` binds tighter than `or`, so the original returned
    # True for ANY line containing ') ->' even without 'def '.  The
    # parentheses make 'def ' and '(' required in both cases.
    return 'def ' in line and '(' in line and ('):' in line or ') ->' in line)
e402cbbedc587ab0d572dfe6c074aadef6980658
3,638,710
def check_if_ended(id):
    """
    Check if the course has already ended.

    :param id: Id of the course that needs to be checked.
    :type id: int
    :return: If a course has ended
    :rtype: bool
    """
    course = moodle_api.get_course_by_id_field(id)
    # `enddate` is an epoch timestamp in the Moodle response payload.
    end_date = course['courses'][0]['enddate']
    # Return the comparison directly instead of an if/else that returns
    # True/False (same behavior, less code).
    return dt.datetime.fromtimestamp(end_date) < dt.datetime.today()
0e67c58d2b107597f068a34c08289ce43f4d1beb
3,638,711
def get_all_applications(user, timeslot):
    """
    Get a user's applications for the given timeslot.

    :param user: user to get applications for
    :param timeslot: timeslot to filter the applications on
    :return: queryset of the user's applications in that timeslot
    """
    applications = user.applications
    return applications.filter(Proposal__TimeSlot=timeslot)
40aec747174fa4a3ce81fe2a3a5eee599c81643a
3,638,712
import requests
import webbrowser


def get_console_url(args):
    """
    Get a console login URL
    """
    # Resolve credentials (possibly assuming a role), then exchange them
    # for a federation sign-in token.
    session_creds = get_credentials(args)
    url = request_signin_token(args, session_creds)
    r = requests.get(url,timeout=200.0)
    if r.status_code != 200:
        vprint('Error: Getting SigninToken', r.url)
        vprint(r.content)
        raise Exception(f'Bad response requesting signin token {r.reason}')
    sin_token = r.json()['SigninToken']
    # Build the console sign-in URL from the token.
    sin_url = request_console_login(sin_token)
    if args.output:
        return sin_url
    vprint(f'Opening webbrowser for {sin_url}')
    webbrowser.open(sin_url)
    return None
0c12730f4e7f0367832f8cbe1e7b549b2582f2c6
3,638,713
def input_layer_from_space(space):
    """
    create tensorlayer input layers from env.space input

    :param space: env.space (gym ``Box`` or ``Discrete``)
    :return: tensorlayer input layer
    :raises NotImplementedError: for any other space type
    """
    if isinstance(space, Box):
        # NOTE(review): `input_layer` is defined elsewhere; presumably it
        # wraps tl.layers.Input for a float Box shape — confirm.
        return input_layer(space.shape)
    elif isinstance(space, Discrete):
        # Discrete observations arrive as one integer per step.
        return tl.layers.Input(dtype=tf.int32, shape=(None, ))
    raise NotImplementedError
491c6d03d717bd33aa26264cd0296799f7fd242b
3,638,714
import os


def searchable_paths(env_vars=PATH_VARS):
    """
    Return the tuple of existing directories listed "in the PATH" by the
    PATH-like environment variables named in ``env_vars``.
    """
    candidates = []
    for var_name in env_vars:
        raw = os.environ.get(var_name, '') or ''
        candidates.extend(raw.split(os.pathsep))
    # Drop empty entries, normalise via realpath, keep real directories.
    cleaned = [os.path.realpath(c.strip()) for c in candidates if c.strip()]
    return tuple(path for path in cleaned if os.path.isdir(path))
9b79580fd82b16b076dbfd0529efeed232cbd5cd
3,638,715
import os


def _init_app():
    """ Intializes the dash app."""
    here = os.path.dirname(os.path.abspath(__file__))
    stylesheet = os.path.join(here, "stylesheet.css")
    return dash.Dash(
        __name__,
        external_stylesheets=[stylesheet],
        suppress_callback_exceptions=True,
    )
2c351ede7153dc92c546cdbd3b980f073fc6abbb
3,638,716
def get_application_registry():
    """Return the application registry.

    If :func:`set_application_registry` was never invoked, this is a
    registry built using :file:`defaults_en.txt` embedded in the pint
    package.
    """
    return _APP_REGISTRY
64b0eeb19933cc674d4e61c27432f02d12340d6d
3,638,717
import importlib


def get_dataset(cfg, designation):
    """
    Return a Dataset for the given designation ('train', 'valid', 'test').
    """
    # Resolve the dataset module named in the config, relative to this
    # package, then delegate construction to its create() factory.
    module_name = '.' + cfg['dataset']
    dataset_module = importlib.import_module(module_name, __package__)
    return dataset_module.create(cfg, designation)
3f872d6407110cf735968ad6d4939b40fec9167d
3,638,718
import uuid


def invite(email, inviter, user=None, sendfn=send_invite, resend=True,
           **kwargs):
    """
    Invite a given email address. Returns a ``(User, sent)`` tuple similar
    to the Django :meth:`django.db.models.Manager.get_or_create` method.

    If a user is passed in, reinvite the user. For projects that support
    multiple users with the same email address, it is necessary to pass in
    the user to avoid throwing a MultipleObjectsReturned error.

    If a user with ``email`` address does not exist:

    * Creates a user object
    * Set ``user.email = email``
    * Set ``user.is_active = False``
    * Set a random password
    * Send the invitation email
    * Return ``(user, True)``

    If a user with ``email`` address exists and ``user.is_active == False``:

    * Re-send the invitation
    * Return ``(user, True)``

    If a user with ``email`` address exists:

    * Don't send the invitation
    * Return ``(user, False)``

    If the email address is blocked:

    * Don't send the invitation
    * Return ``(None, False)``

    To customize sending, pass in a new ``sendfn`` function as documented
    by :attr:`inviter2.utils.send_invite`: ::

        sendfn = lambda invitee, inviter, **kwargs: 1
        invite("foo@bar.com", request.user, sendfn = sendfn)

    :param email: The email address
    :param inviter: The user inviting the email address
    :param pk: The pk of an existing user to be reinvited.
    :param sendfn: An email sending function.
        Defaults to :attr:`inviter2.utils.send_invite`
    :param resend: Resend email to users that are not registered yet
    """
    if OptOut.objects.is_blocked(email):
        return None, False

    try:
        if not user:
            user = User.objects.get(email=email)
        if user.is_active:
            return user, False
        if not resend:
            return user, False
    except User.DoesNotExist:
        username_field = getattr(User, 'USERNAME_FIELD', 'username')
        if username_field == 'username':
            # BUG FIX: with `import uuid`, `uuid()` calls the module and
            # raises TypeError; generate the random placeholder username
            # with uuid.uuid4().
            user = create_inactive_user(email=email,
                                        username=str(uuid.uuid4()))
        else:
            user = create_inactive_user(email=email)

    url_parts = int_to_base36(user.id), token_generator.make_token(user)
    url = reverse('{}:register'.format(NAMESPACE), args=url_parts)
    opt_out_url = reverse('{}:opt-out'.format(NAMESPACE), args=url_parts)
    kwargs.update(opt_out_url=opt_out_url)
    sendfn(user, inviter, url=url, **kwargs)
    return user, True
46bc4d45b42d0cc2dbd80b2928f44e4926d24b77
3,638,719
def RestrictDictValues( aDict, restrictSet ):
    """Return a copy of ``aDict`` keeping only the entries whose value is
    a member of ``restrictSet``."""
    return {key: val for key, val in aDict.items() if val in restrictSet}
4333c40a38ad3bce326f94c27b4ffd7dc24ae19c
3,638,720
import os


def pump_impact(request):
    """
    Ajax controller that prepares and submits the new pump impact jobs and workflow.

    Reads from POST: resource_id, data (pump geojson), tool, cancel,
    package, layer, stress_period_output and input.  Either cancels a
    running job (when `cancel` is set) or writes the pump geojson to the
    model file database and submits one of the Drawdown / StreamDepletion /
    RunAllTools condor workflows.  Always returns a JsonResponse.
    """
    session = None
    try:
        session_id = request.session.session_key
        resource_id = request.POST.get('resource_id')
        pumps = request.POST.get('data')
        tool = request.POST.get('tool')
        cancel_status = request.POST.get('cancel', '')
        stream_package = request.POST.get('package', '')
        # "_" acts as the sentinel for "no stream package supplied".
        if stream_package == "":
            stream_package = "_"
        layer = parseIntSet(request.POST.get('layer', '0'))
        stress_period = parseIntSet(request.POST.get('stress_period_output', '0'))
        data_input = request.POST.get('input')
        if not resource_id:
            return JsonResponse({'success': False,
                                 'message': 'No resource ID given. Check URL for ID.'})
        Session = app.get_persistent_store_database('primary_db', as_sessionmaker=True)
        session = Session()
        resource = session.query(ModflowModelResource).get(resource_id)
        if cancel_status:
            # Cancellation path: find the running job and stop it.
            max_wait_time = 20
            job_id = get_job_id(resource_id, session, max_wait_time)
            job_manager = app.get_job_manager()
            running_job = job_manager.get_job(job_id)
            # Stop running job
            running_job.stop()
            # Clear job_id
            resource.set_attribute('job_id', '')
            session.commit()
            return JsonResponse({'success': True,
                                 'message': 'Well Influence Tool has been cancelled.'})
        else:
            # Submission path: collect the model's georeference metadata.
            xll = resource.get_attribute('xll')
            yll = resource.get_attribute('yll')
            rotation = resource.get_attribute('rotation')
            model_units = resource.get_attribute('model_units')
            model_version = resource.get_attribute('model_version')
            srid = resource.get_attribute('srid')
            database_id = resource.get_attribute('database_id')
            # Writing geojson file for pumps to be passed to condor worker
            model_db = ModelFileDatabase(app=app, database_id=database_id)
            pump_json_file = os.path.join(model_db.directory, "well_impact.json")
            geojson = open(pump_json_file, "w")
            geojson.write(pumps + "\n")
            geojson.close()
            # setting up spatial manager to get model file list, and modflow executables
            gs_engine = app.get_spatial_dataset_service(app.GEOSERVER_NAME, as_engine=True)
            spatial_manager = ModflowSpatialManager(
                geoserver_engine=gs_engine,
                model_file_db_connection=model_db.model_db_connection,
                modflow_version=model_version)
            modflow_exe = os.path.join(spatial_manager.EXE_PATH, model_version)
            model_file_list = spatial_manager.model_file_db.list()
            # Loop through model file database for needed files
            wel_file = '_'
            hds_file = '_'
            # cbb_file = '_'
            nam_file = ''
            for file in model_file_list:
                # TODO: figure out .mfn problems
                if file.split(".")[-1] == 'nam':
                    nam_file = file
                if file.split(".")[-1] == 'hds':
                    hds_file = file
                if file.split(".")[-1] == 'wel':
                    wel_file = file
                # if file.split(".")[-1] == 'cbb':
                #     cbb_file = file
            user_workspace = app.get_user_workspace(request.user)
            user_workspace_path = user_workspace.path
            # Django validation. After Django2.0, is_authenticated is a property
            try:
                check_user = request.user.is_authenticated()
            except:  # noqa: E722
                check_user = request.user.is_authenticated
            # Used to get a valid Django anonymous user if not signed in
            if check_user:
                user = request.user
            else:
                user = get_anonymous_user()
            try:
                if tool == 'drawdown':
                    job = DrawdownWorkflow(
                        user=user, workspace=user_workspace_path,
                        session_id=session_id, xll=xll, yll=yll,
                        rotation=rotation, db_dir=model_db.directory,
                        model_units=model_units, model_version=model_version,
                        modflow_exe=modflow_exe, nam_file=nam_file,
                        hds_file=hds_file, wel_file=wel_file, srid=srid,
                        resource_id=resource.id, database_id=database_id,
                        app_package=app.package, contour_levels=data_input,
                        export_layer_string=layer,
                        export_sp_string=stress_period,
                    )
                elif tool == 'stream_depletion':
                    job = StreamDepletionWorkflow(
                        user=user, workspace=user_workspace_path,
                        session_id=session_id, xll=xll, yll=yll,
                        rotation=rotation, db_dir=model_db.directory,
                        model_units=model_units, model_version=model_version,
                        modflow_exe=modflow_exe, nam_file=nam_file,
                        wel_file=wel_file, srid=srid,
                        resource_id=resource.id, database_id=database_id,
                        app_package=app.package, stream_package=stream_package,
                        std_minimum_change=data_input,
                        export_layer_string=layer,
                        export_sp_string=stress_period,
                    )
                else:
                    # "run all tools": `input` carries both settings,
                    # ';'-separated (contour levels; minimum change).
                    data_input = data_input.split(";")
                    contour_levels = data_input[0]
                    std_minimum_change = data_input[1]
                    job = RunAllToolsWorkflow(
                        user=user, workspace=user_workspace_path,
                        session_id=session_id, xll=xll, yll=yll,
                        rotation=rotation, db_dir=model_db.directory,
                        model_units=model_units, model_version=model_version,
                        modflow_exe=modflow_exe, nam_file=nam_file,
                        wel_file=wel_file, srid=srid,
                        resource_id=resource.id, database_id=database_id,
                        app_package=app.package, stream_package=stream_package,
                        std_minimum_change=std_minimum_change,
                        export_layer_string=layer,
                        export_sp_string=stress_period,
                        contour_levels=contour_levels,
                    )
                job.run_job()
                workflow_id = job.workflow.id
                remote_id = job.workflow.remote_id
                # Save job_id in resource so we can cancel it if necessary.
                resource.set_attribute('job_id', workflow_id)
                session.commit()
                return JsonResponse({'success': True,
                                     'resource_id': resource_id,
                                     'workflow_id': workflow_id,
                                     'remote_id': remote_id})
            except Exception as e:
                log.exception(str(e))
                return JsonResponse({'success': False,
                                     'message': 'An unexpected error has occurred.'
                                                ' Please contact Aquaveo and try again later.'
                                     })
    finally:
        # Close the SQLAlchemy session if one was opened.
        session and session.close()
b18d057cd3475de3d1d3c1375d7adf66ca4fc4b6
3,638,721
def classify_images(images_dir, petlabel_dic, model):
    """
    Creates classifier labels with classifier function, compares labels,
    and creates a dictionary containing both labels and comparison of them
    to be returned.

    PLEASE NOTE: This function uses the classifier() function defined in
    classifier.py; see test_classifier.py for its proper use.

    Parameters:
      images_dir - The (full) path to the folder of images that are to be
                   classified by pretrained CNN models (string)
      petlabel_dic - Dictionary mapping pet image filename -> pet image
                     label (lowercase with spaces between words)
      model - pretrained CNN architecture to use, one of: resnet, alexnet,
              vgg (string)
    Returns:
      results_dic - Dictionary with key as image filename and value as a
                    List:
                    idx 0 = pet image label (string)
                    idx 1 = classifier label (string)
                    idx 2 = 1/0 (int), 1 = labels match, 0 = no match
    """
    results = {}
    for image in listdir(images_dir):
        # True label derived from the filename (see get_pet_labels).
        pet_label = petlabel_dic[image]
        # BUG FIX: the original referenced the undefined name `image_dir`
        # (NameError at runtime); the parameter is `images_dir`.
        image_path = images_dir + image
        classified_label = classifier(image_path, model)
        # The classifier may return several comma-separated synonyms; a
        # case-insensitive match on any one of them counts as a match.
        classified_labels = classified_label.split(",")
        match = 0
        for label in classified_labels:
            if pet_label.lower() == label.lower():
                match = 1
                break
        results[image] = [pet_label, classified_label, match]
    return results
7d6679de95da2f7a526ae18a27d9602218f05153
3,638,722
def create_redis_fixture(scope="function"):
    """Produce a Redis fixture.

    Any number of fixture functions can be created. Under the hood they
    will all share the same database server.

    Args:
        scope (str): The scope of the fixture can be specified by the user,
            defaults to "function".

    Raises:
        KeyError: If any additional arguments are provided to the function
            than what is necessary.
    """

    @pytest.fixture(scope=scope)
    def _(_redis_container, pmr_redis_config):
        # Fresh client per fixture use, against the shared server, wiped
        # clean so tests don't see each other's keys.
        db = redis.Redis(host=pmr_redis_config.host, port=pmr_redis_config.port)
        db.flushall()
        # Attach connection metadata so tests can introspect credentials.
        assign_fixture_credentials(
            db,
            drivername="redis",
            host=pmr_redis_config.host,
            port=pmr_redis_config.port,
            database=None,
            username=None,
            password=None,
        )
        return db

    return _
0e2e79c34feeb805c8300145f3a04314069b873f
3,638,723
def get_heroes(**kwargs):
    """ Get a list of hero identifiers """
    dota2_econ_base = "http://api.steampowered.com/IEconDOTA2_570/"
    return make_request("GetHeroes", base=dota2_econ_base, **kwargs)
b512377952c0c1415eb45cc05918e7d152516f83
3,638,724
from typing import Union
from typing import Optional


def gt_strategy(
    pandera_dtype: Union[numpy_engine.DataType, pandas_engine.DataType],
    strategy: Optional[SearchStrategy] = None,
    *,
    min_value: Union[int, float],
) -> SearchStrategy:
    """Strategy to generate values greater than a minimum value.

    :param pandera_dtype: :class:`pandera.dtypes.DataType` instance.
    :param strategy: an optional hypothesis strategy. If specified, the
        pandas dtype strategy will be chained onto this strategy.
    :param min_value: generate values larger than this.
    :returns: ``hypothesis`` strategy
    """
    if strategy is None:
        # Float dtypes can exclude min_value itself at generation time so
        # every drawn value is strictly greater.
        exclude = True if is_float(pandera_dtype) else None
        strategy = pandas_dtype_strategy(
            pandera_dtype,
            min_value=min_value,
            exclude_min=exclude,
        )
    # The filter enforces strictness for pre-supplied strategies too.
    return strategy.filter(lambda value: value > min_value)
751ee69c3cf396d0d2ca043bad17c6ed80b8d46d
3,638,725
def get_smoker_status(observation):
    """Does `observation` represent a survey response indicating that the
    patient is or was a smoker."""
    # SNOMED codes: 8517006 = former smoker, 449868002 = every day smoker.
    smoker_codes = ('8517006', '449868002')
    try:
        codings = observation['valueCodeableConcept']['coding']
    except KeyError:
        return False
    for coding in codings:
        if ('system' in coding and 'code' in coding
                and coding['system'] == utils.SNOMED_SYSTEM
                and coding['code'] in smoker_codes):
            return True
    return False
e63f4ffc09af3af19fd493c4fbc2824ffa136a64
3,638,726
def model_input_data_api():
    """Returns records of the data used for the model."""
    # Parse inputs: clamp the `hours` query parameter to
    # [1, API_MAX_HOURS].
    hours = request.args.get('hours', default=24, type=int)
    upper = current_app.config['API_MAX_HOURS']
    hours = max(1, min(hours, upper))

    df = execute_sql('''SELECT * FROM processed_data ORDER BY time''')
    records = df.tail(n=hours).to_dict(orient='records')
    return jsonify(model_input_data=records)
7f5d014dda1f4e778cf8df5aac453c8a19748465
3,638,727
def GetIAP(args, messages, existing_iap_settings=None):
    """Returns IAP settings from arguments."""
    enabled_flag = 'enabled' in args.iap
    disabled_flag = 'disabled' in args.iap
    if enabled_flag and disabled_flag:
        raise exceptions.InvalidArgumentException(
            '--iap', 'Must specify only one of [enabled] or [disabled]')

    iap_settings = messages.BackendServiceIAP()
    if enabled_flag:
        iap_settings.enabled = True
    elif disabled_flag:
        iap_settings.enabled = False
    elif existing_iap_settings is not None:
        # Neither flag given: carry over the current enabled state.
        iap_settings.enabled = existing_iap_settings.enabled

    if iap_settings.enabled:
        # If either oauth2-client-id or oauth2-client-secret is specified,
        # then the other should also be specified.
        has_id = 'oauth2-client-id' in args.iap
        has_secret = 'oauth2-client-secret' in args.iap
        if has_id or has_secret:
            iap_settings.oauth2ClientId = args.iap.get('oauth2-client-id')
            iap_settings.oauth2ClientSecret = args.iap.get('oauth2-client-secret')
            if not iap_settings.oauth2ClientId or not iap_settings.oauth2ClientSecret:
                raise exceptions.InvalidArgumentException(
                    '--iap',
                    'Both [oauth2-client-id] and [oauth2-client-secret] must be '
                    'specified together')
    return iap_settings
bf17a15ebcaab42a930928e3a949c4357cdf718f
3,638,728
def get_default_instance():
    """Return the default VLC.Instance, creating it lazily on first use."""
    global _default_instance
    if _default_instance is None:
        # First call: build and cache the singleton.
        _default_instance = Instance()
    return _default_instance
f45a6eca003bc1b52b1c0bca1d15643898189899
3,638,729
def dirty_multi_node_expand(node, precision, mem_map=None, fma=True):
    """ Dirty expand node into Hi and Lo part, storing already processed
        temporary values in mem_map

    :param node: operation-graph node (Constant, Addition or Multiplication)
    :param precision: target scalar format for each half of the expansion
    :param mem_map: memoization cache node -> (hi, lo), shared down the
        recursion so common sub-expressions are expanded once
    :param fma: forwarded to the Mul metamacros (use fused multiply-add)
    :return: (hi, lo) node pair; lo is None when the low part is zero
    """
    mem_map = mem_map or {}
    if node in mem_map:
        # Already expanded via another path in the expression DAG.
        return mem_map[node]
    elif isinstance(node, Constant):
        # Split the constant into a rounded hi part and the rounding
        # remainder lo, each representable in `precision`.
        value = node.get_value()
        value_hi = sollya.round(value, precision.sollya_object, sollya.RN)
        value_lo = sollya.round(value - value_hi, precision.sollya_object, sollya.RN)
        ch = Constant(value_hi, tag=node.get_tag() + "hi", precision=precision)
        # lo part is dropped entirely when the constant is exactly
        # representable (remainder == 0).
        cl = Constant(value_lo, tag=node.get_tag() + "lo", precision=precision) if value_lo != 0 else None
        if cl is None:
            Log.report(Log.Info, "simplified constant")
        result = ch, cl
        mem_map[node] = result
        return result
    else:
        # Case of Addition or Multiplication nodes:
        # 1. retrieve inputs
        # 2. dirty convert inputs recursively
        # 3. forward to the right metamacro
        assert isinstance(node, Addition) or isinstance(node, Multiplication)
        lhs = node.get_input(0)
        rhs = node.get_input(1)
        op1h, op1l = dirty_multi_node_expand(lhs, precision, mem_map, fma)
        op2h, op2l = dirty_multi_node_expand(rhs, precision, mem_map, fma)
        if isinstance(node, Addition):
            # Select the Add metamacro variant matching which operands
            # actually carry a low part (None means single-word operand).
            result = Add222(op1h, op1l, op2h, op2l) \
                     if op1l is not None and op2l is not None \
                     else Add212(op1h, op2h, op2l) \
                     if op1l is None and op2l is not None \
                     else Add212(op2h, op1h, op1l) \
                     if op2l is None and op1l is not None \
                     else Add211(op1h, op2h)
            mem_map[node] = result
            return result
        elif isinstance(node, Multiplication):
            # Same dispatch for multiplication, threading the fma flag.
            result = Mul222(op1h, op1l, op2h, op2l, fma=fma) \
                     if op1l is not None and op2l is not None \
                     else Mul212(op1h, op2h, op2l, fma=fma) \
                     if op1l is None and op2l is not None \
                     else Mul212(op2h, op1h, op1l, fma=fma) \
                     if op2l is None and op1l is not None \
                     else Mul211(op1h, op2h, fma=fma)
            mem_map[node] = result
            return result
f36b783041f7f6d2b7577db9160d702aa81461bd
3,638,730
import json


def create_princess_df(spark_session) -> DataFrame:
    """Return a valid DF of disney princesses."""
    created = "2020-10-14"
    rows = [
        ("Cinderella", 16, False, "thorns"),
        ("Snow white", 17, True, "apple"),
        ("Belle", 18, False, "roses"),
        ("Jasmine", 19, True, "jafar"),
    ]
    princesses = [
        {
            "name": name,
            "age": age,
            "happy": happy,
            "items": {"weakness": weakness, "created": created},
        }
        for name, age, happy, weakness in rows
    ]
    # Feed the JSON through an RDD so Spark infers the nested schema.
    rdd = spark_session.sparkContext.parallelize([json.dumps(princesses)])
    return (
        spark_session.read.option("multiline", "true")
        .json(rdd)
        .select("name", "age", "happy", "items")
    )
cded63b3882adbb48f7e39f37169f46b55a99ae3
3,638,731
from pathlib import Path


def get_sheet_names(file_path):
    """
    Return the sheet names of an Excel file, or the bare file name for a
    CSV (which has no sheets).

    :param file_path: path to a .csv or Excel file
    :return: list of sheet names; for a CSV, a one-element list holding
        the file's name
    """
    # Compare the suffix directly; the original's
    # `True if ... else False` was redundant.
    is_csv = Path(file_path).suffix.lower() == ".csv"
    if is_csv:
        return [Path(file_path).name]
    xl = pd.ExcelFile(file_path)
    return xl.sheet_names
826ffb19f21ef117124d747c812a773f1422a10b
3,638,732
def count_votes(votation_id):
    """
    Count the number of distinct vote_key values cast in a votation.
    Its purpose is to be compared with the number of voters.
    """
    query = db.session.query(Vote.vote_key)
    query = query.filter(Vote.votation_id == votation_id)
    return query.distinct().count()
81bd75c7185f9e46e0f2c2d2ed23d5717de98cbe
3,638,733
def get_eng_cv_rate(low_prob):
    """Return 'low'/'high' probabilities for student-to-English-I conversion.

    Simulated data for class enrollment; the RNG is seeded so the result
    is reproducible.  Also stores the dict in the module-level
    ``eng_cv_rate_dict`` global, as callers of the original expect.

    Args:
        low_prob (float): low end of probability

    Returns:
        dict
    """
    np.random.seed(123)
    global eng_cv_rate_dict
    # 'high' is drawn uniformly between low_prob and 1.25 * low_prob.
    high_prob = np.random.uniform(low=low_prob, high=1.25 * low_prob)
    eng_cv_rate_dict = {'low': low_prob, 'high': high_prob}
    return eng_cv_rate_dict
841653ab1d5938d1fd8e7a170fc8d6b4f0326248
3,638,734
from typing import Type


def dev_unify_nest(args: MultiDev, kwargs: MultiDev, dev, mode: str, axis: int = 0, max_depth: int = 1):
    """ Unify the input nested arguments, which consist of sub-arrays spread
    across arbitrary devices, to unified arrays on the single target device.

    NOTE(review): the annotations originally read ``Type[MultiDev]``,
    i.e. "the MultiDev class itself"; the docstring and the isinstance
    checks below show instances are expected, so they were corrected to
    plain ``MultiDev``.

    :param args: The nested positional arguments to unify.
    :type args: MultiDev
    :param kwargs: The nested keyword arguments to unify.
    :type kwargs: MultiDev
    :param dev: The device to unify the nested arguments to.
    :type dev: Device
    :param mode: The mode by which to unify, must be one of [ concat | mean | sum ]
    :type mode: str
    :param axis: The axis along which to concattenate the sub-arrays. Default is 0.
    :type axis: int, optional
    :param max_depth: The maximum nested depth to reach. Default is 1.
                      Increase this if the nest is deeper.
    :type max_depth: int, optional
    :return: nested arguments unified to the target device
    """
    # Unwrap iterator containers down to their raw nested data.
    args = args._data if isinstance(args, MultiDevIter) else args
    kwargs = kwargs._data if isinstance(kwargs, MultiDevIter) else kwargs
    # Unify every leaf array in both nests onto the target device.
    args_uni = ivy.nested_map(args, lambda x: dev_unify(x, dev, mode, axis), max_depth=max_depth)
    kwargs_uni = ivy.nested_map(kwargs, lambda x: dev_unify(x, dev, mode, axis), max_depth=max_depth)
    return args_uni, kwargs_uni
2c78c6fb3eb365c7742a134c33fa8f3bf2622bb2
3,638,735
def make_preprocessor(transforms=None, device_put=False):
    """Build an observation-preprocessing function.

    :param transforms: a single callable or a list/tuple of callables,
        applied to the observation in order before conversion.
    :param device_put: when True, place the resulting array on a JAX
        device with ``jax.device_put``.
    :return: ``preprocess(obs)`` returning an array of rank >= 1.
    :raises ValueError: if any element of ``transforms`` is not callable.
    """
    # verify input
    if transforms is not None:
        if not isinstance(transforms, (list, tuple)):
            # BUG FIX: `(transforms)` is just a parenthesized expression,
            # not a tuple, so passing a single bare callable made the
            # validation loop below iterate over the callable itself and
            # crash; `(transforms,)` actually wraps it.
            transforms = (transforms,)
        for fn in transforms:
            if not callable(fn):
                raise ValueError("Each element of custom_fns must be callabe")

    def preprocess(obs):
        # apply custom transforms first
        if transforms:
            for fn in transforms:
                obs = fn(obs)
        # Promote python scalars to a shape-(1,) array.
        if isinstance(obs, (int, float)):
            return jnp.array(obs).reshape((1,))
        # Promote 0-d arrays to shape (1,).
        if not obs.shape:
            return obs.reshape((1,))
        # put array to device if flag is set
        if device_put:
            obs = jax.device_put(obs)
        return obs

    return preprocess
09c1b9dc027457a26f7d4b73e3a5572c206fc343
3,638,736
def timeRangeContainsRange(event1Start, event2Start, event1End, event2End):
    """
    Returns true if either event's time span fully contains the other's

    @param event1Start: datetime
    @param event2Start: datetime
    @param event1End: datetime
    @param event2End: datetime
    @return: boolean
    """
    one_inside_two = event2Start <= event1Start and event1End <= event2End
    two_inside_one = event1Start <= event2Start and event2End <= event1End
    return one_inside_two or two_inside_one
05d25969b1f97f2f7015c9ce9bafbffcb931cb9b
3,638,737
def compute_confidence_intervals(x: np.array, z: float = 1.96) -> float:
    """
    Return the half-width (margin of error) of the confidence interval
    around the mean of sample ``x``.

    Hazra, Avijit. "Using the confidence interval confidently."
    Journal of thoracic disease 9.10 (2017): 4125.

    Formula: CI = x̅ ± z × (std/√n) — this function returns the
    z × (std/√n) term, where
        x̅:   Sample Mean
        z:   Z Statistic for desired confidence interval
        std: Sample Standard Deviation
        n:   Sample Size
    """
    sample_size = len(x)
    standard_error = x.std() / sample_size ** 0.5
    return z * standard_error
cd394ec2f4343ac82b16cc18a4ba280d2f57d1ad
3,638,738
import socket


def SendCommands(cmds, key):
    """Send commands to the running instance of Editra
    @param cmds: List of command strings
    @param key: Server session authentication key
    @return: bool

    """
    if not len(cmds):
        # Nothing to send: report failure explicitly.  The original fell
        # through returning None here, although the documented return
        # type is bool (None is still falsy, so callers are unaffected).
        return False

    # Add the authentication key
    cmds.insert(0, key)
    # Append the message end clause
    cmds.append(MSGEND)
    try:
        # Setup the client socket
        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client.connect(('127.0.0.1', EDPORT))
        # Server expects commands delimited by ;
        client.send(u";".join(cmds))
        client.shutdown(socket.SHUT_RDWR)
        client.close()
    except socket.error:
        # Narrowed from a bare `except:` so that programming errors are
        # no longer silently swallowed; only socket failures mean "the
        # running instance could not be reached".
        return False
    else:
        return True
3db90749c3a88fd9341f6fbb9c4088d000d2d5ef
3,638,739
import urllib


def esv(value, args=''):
    """
    Use ESV API to get a Bible Passage

    http://www.esvapi.org/v2/rest/passageQuery?key=IP&passage=Gen+1:5-10&output-format=plain-text

    Looking for [[bible PASSAGE]]

    Usage::

        {{ text|esv}}
        {{ text|esv:"option1:value,option2:value"}}

    NOTE(review): Python 2 only — ``dict.has_key`` and ``urllib.quote``
    were removed in Python 3.
    """
    # No [[bible ...]] marker in the text: return it untouched.
    if BIBLE_RE.search(value) is None:
        return value
    # Start from the default query options and overlay any "key:value"
    # pairs passed through the filter argument.
    esv_dict = ESV_DICT.copy()
    esv_args = args.split(',')
    if len(esv_args) > 0:
        for arg in esv_args:
            try:
                key, val = arg.split(':')
                # Only known option keys may be overridden.
                if esv_dict.has_key(key):
                    esv_dict[key] = val
            except ValueError:
                # Malformed option (not exactly one ':'): ignore it.
                pass
    # Build the query URL once and stash it in a module global for the
    # regex-substitution callback (_get_esv_txt) to read.
    global ESV_QUERY_URL
    ESV_QUERY_URL = ESV_API_URL+'&'.join([k+'='+urllib.quote(str(v)) for (k,v) in esv_dict.items()])
    return BIBLE_RE.sub(_get_esv_txt, value)
5947071d3363f8308ddeb98a6f862488b942f89b
3,638,740
import torch


def box_nms(bboxes, scores, labels, threshold=0.5, mode='union'):
    """Non maximum suppression.
    source: https://github.com/kuangliu/pytorch-retinanet

    Suppression is per-class: a box only suppresses lower-scoring boxes
    carrying the same label (see the ``labels`` comparison below).

    Args:
      bboxes: (tensor) bounding boxes, sized [N,4].
      scores: (tensor) bbox scores, sized [N,].
      labels: (tensor) bbox class labels, sized [N,].
      threshold: (float) overlap threshold.
      mode: (str) 'union' or 'min'.

    Returns:
      keep: (tensor) selected indices.

    Reference:
      https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/nms/py_cpu_nms.py
    """
    x1 = bboxes[:, 0]
    y1 = bboxes[:, 1]
    x2 = bboxes[:, 2]
    y2 = bboxes[:, 3]
    # The +1 treats coordinates as inclusive pixel indices.
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    # Visit boxes from highest to lowest score.
    _, order = scores.sort(0, descending=True)

    keep = []
    while order.numel() > 0:
        i = order[0]
        keep.append(i)
        if order.numel() == 1:
            break
        label = labels[i]
        # Intersection of the current box with every remaining candidate.
        xx1 = x1[order[1:]].clamp(min=x1[i])
        yy1 = y1[order[1:]].clamp(min=y1[i])
        xx2 = x2[order[1:]].clamp(max=x2[i])
        yy2 = y2[order[1:]].clamp(max=y2[i])
        w = (xx2 - xx1 + 1).clamp(min=0)
        h = (yy2 - yy1 + 1).clamp(min=0)
        inter = w * h
        if mode == 'union':
            # Intersection over union.
            ovr = inter / (areas[i] + areas[order[1:]] - inter)
        elif mode == 'min':
            # Intersection over the smaller of the two areas.
            ovr = inter / areas[order[1:]].clamp(max=areas[i])
        else:
            raise TypeError('Unknown nms mode: %s.' % mode)
        # Keep candidates that either overlap little enough, or belong to a
        # different class than the current box.
        ids = ((ovr <= threshold) | (labels[order[1:]] != label)).nonzero().squeeze()
        if ids.numel() == 0:
            break
        # +1 because ids indexes into order[1:].
        order = order[ids + 1]
    return torch.LongTensor(keep)
355262b9af52d0455089a05d90bfa7e21d5d52de
3,638,741
def abs_length_diff(trg, pred):
    """Compute the absolute difference in whitespace-token counts.

    Args:
    - trg (str): reference
    - pred (str): generated output

    Returns:
    - absolute length difference (int)
    """
    return abs(len(trg.split(' ')) - len(pred.split(' ')))
b5baf53609b65aa1ef3b1f142e965fa0606b3136
3,638,742
def CDLEVENINGSTAR(data: xr.DataArray, penetration: float = 0.3) -> xr.DataArray:
    """
    Evening Star (Pattern Recognition)

    Thin per-series wrapper around ``talib.CDLEVENINGSTAR``.

    Inputs:
        data:['open', 'high', 'low', 'close']
        penetration: candle penetration ratio forwarded to talib
    Outputs:
        double series (values are -1, 0 or 1)
    """
    # result_divider=100 rescales talib's output into the -1/0/1 range
    # documented above.
    return multiple_series_call(talib.CDLEVENINGSTAR, data, ds.TIME, ds.FIELD,
                                [f.OPEN, f.HIGH, f.LOW, f.CLOSE], [penetration],
                                result_divider=100)
6da8ed1782cea60c626829fc7a33210ddd58754e
3,638,743
def cern_authorized_signup_handler(resp, remote, *args, **kwargs): """Handle sign-in/up functionality. :param remote: The remote application. :param resp: The response. :returns: Redirect response. """ # Remove any previously stored auto register session key session.pop(token_session_key(remote.name) + "_autoregister", None) # Store token in session # ---------------------- # Set token in session - token object only returned if # current_user.is_authenticated(). token = response_token_setter(remote, resp) handlers = current_oauthclient.signup_handlers[remote.name] # Sign-in/up user # --------------- if not current_user.is_authenticated: account_info = handlers["info"](resp) account_info_received.send(remote, token=token, response=resp, account_info=account_info) user = oauth_get_user( remote.consumer_key, account_info=account_info, access_token=token_getter(remote)[0], ) if user is None: # Auto sign-up if user not found form = create_csrf_disabled_registrationform() form = fill_form(form, account_info["user"]) user = oauth_register(form) # if registration fails ... if user is None: # requires extra information session[token_session_key(remote.name) + "_autoregister"] = True session[token_session_key(remote.name) + "_account_info"] = account_info session[token_session_key(remote.name) + "_response"] = resp db.session.commit() return redirect( url_for( ".signup", remote_app=remote.name, ) ) # Authenticate user if not oauth_authenticate(remote.consumer_key, user, require_existing_link=False): return current_app.login_manager.unauthorized() # Link account # ------------ # Need to store token in database instead of only the session when # called first time. 
token = response_token_setter(remote, resp) # Setup account # ------------- if not token.remote_account.extra_data: account_setup = handlers["setup"](token, resp) account_setup_received.send(remote, token=token, response=resp, account_setup=account_setup) db.session.commit() account_setup_committed.send(remote, token=token) else: db.session.commit() # Redirect to next if current_user.is_authenticated and not is_egroup_admin(): logout_user() return redirect(get_post_logout_redirect()) next_url = get_session_next_url(remote.name) if next_url: return redirect(next_url) return redirect(url_for("invenio_oauthclient_settings.index"))
df2d8998acb5be4175069507832cd3bf90558824
3,638,744
import subprocess
import os


def cr2_to_pgm(cr2_fname, pgm_fname=None, dcraw='dcraw', clobber=True, **kwargs):  # pragma: no cover
    """ Convert CR2 file to PGM

    Converts a raw Canon CR2 file to a netpbm PGM file via `dcraw`. Assumes
    `dcraw` is installed on the system

    Note:
        This is a blocking call

    Arguments:
        cr2_fname {str} -- Name of CR2 file to convert
        **kwargs {dict} -- Additional keywords to pass to script

    Keyword Arguments:
        pgm_fname {str} -- Name of PGM file to output, if None (default) then
            use same name as CR2 (default: {None})
        dcraw {str} -- Path to installed `dcraw` (default: {'dcraw'})
        clobber {bool} -- A bool indicating if existing PGM should be clobbered
            (default: {True})

    Returns:
        str -- Filename of PGM that was created

    NOTE(review): the two asserts below are stripped under ``python -O`` and
    so are unsuitable for input validation; the first one also hardcodes
    'dcraw' instead of using the ``dcraw`` parameter, and passes only when
    ``subprocess.call`` returns a *non-zero* (truthy) exit status.
    """
    assert subprocess.call('dcraw', stdout=subprocess.PIPE),\
        "could not execute dcraw in path: {}".format(dcraw)
    assert os.path.exists(cr2_fname), "cr2 file does not exist at {}".format(
        cr2_fname)

    verbose = kwargs.get('verbose', False)

    # Default output name: same basename with a .pgm extension.
    if pgm_fname is None:
        pgm_fname = cr2_fname.replace('.cr2', '.pgm')

    if os.path.exists(pgm_fname) and not clobber:
        if verbose:
            print("PGM file exists, returning existing file: {}".format(
                pgm_fname))
    else:
        try:
            # Build the command for this file
            # -t 0: no rotation; -D: document mode (raw values); -4: 16-bit output.
            command = '{} -t 0 -D -4 {}'.format(dcraw, cr2_fname)
            cmd_list = command.split()
            if verbose:
                print("PGM Conversion command: \n {}".format(cmd_list))

            # Run the command
            if subprocess.check_call(cmd_list) == 0:
                if verbose:
                    print("PGM Conversion command successful")

        except subprocess.CalledProcessError as err:
            raise error.InvalidSystemCommand(msg="File: {} \n err: {}".format(
                cr2_fname, err))

    return pgm_fname
c7332c73d64fad52a2c98025621c6f076b977c5f
3,638,745
def asset_from_iconomi(symbol: str) -> Asset:
    """Translate an ICONOMI ticker symbol into an Asset.

    May raise:
    - DeserializationError
    - UnsupportedAsset
    - UnknownAsset
    """
    if not isinstance(symbol, str):
        raise DeserializationError(f'Got non-string type {type(symbol)} for iconomi asset')

    # ICONOMI symbols are matched case-insensitively.
    normalized = symbol.upper()
    if normalized in UNSUPPORTED_ICONOMI_ASSETS:
        raise UnsupportedAsset(normalized)

    # Map ICONOMI-specific symbols onto their global names where needed.
    return symbol_to_asset_or_token(ICONOMI_TO_WORLD.get(normalized, normalized))
ae9463d1234cf409eaa52d5fbbdd158444b50b16
3,638,746
def ConvertHashType(value):
    """
    Attempt to convert a space separated series of key=value pairs into a
    dictionary of pairs. If any option lacks an '=' separator an error will
    be raised.

    :param value: Space delimited string of key-value pairs
    :return: Dictionary of key-value pairs.
    :raises ConversionFailure: when an option contains no '=' at all
    """
    collection = dict()
    for option in value.split():
        try:
            # Split on the first '=' only, so values may themselves
            # contain '=' (e.g. URLs with query strings); previously any
            # extra '=' raised ConversionFailure.
            k, v = option.split('=', 1)
        except ValueError:
            raise ConversionFailure("Invalid option '{}' for key-value pair: {}"
                                    .format(option, value))
        collection[k] = v.strip()
    return collection
91cc47a855958a3ae352cd35a1eea84c5d8e45b4
3,638,747
def ts_dct_from_estsks(pes_idx, es_tsk_lst, rxn_lst, thy_dct, spc_dct,
                       run_prefix, save_prefix):
    """ build a ts queue

    Scans the electronic-structure task list for theory levels, determines
    whether transition states must be re-identified, and assembles one
    combined TS dict over all reaction channels.

    NOTE(review): ``thy_info``/``ini_thy_info`` are only bound when a task
    with obj 'ts' or 'all' exists -- otherwise the channel loop below raises
    NameError.  Confirm callers guarantee such a task.
    """
    print('\nTasks for transition states requested...')
    print('Identifying reaction classes for transition states...')

    # Build the ts_dct
    ts_dct = {}
    # Grab run/input theory levels from the first ts/all task.
    for tsk_lst in es_tsk_lst:
        obj, es_keyword_dct = tsk_lst[0], tsk_lst[-1]
        if obj in ('ts', 'all'):
            # want print for task list
            method_dct = thy_dct.get(es_keyword_dct['runlvl'])
            ini_method_dct = thy_dct.get(es_keyword_dct['inplvl'])
            thy_info = tinfo.from_dct(method_dct)
            ini_thy_info = tinfo.from_dct(ini_method_dct)
            break

    # Discern if TS should be reidentified
    re_id = False
    for tsk_lst in es_tsk_lst:
        obj, es_keyword_dct = tsk_lst[:-1], tsk_lst[-1]
        if 'find_ts' in obj:
            re_id = es_keyword_dct.get('re_id', False)

    # Build the per-channel TS dicts and merge them.
    ts_dct = {}
    for rxn in rxn_lst:
        ts_dct.update(
            ts_dct_sing_chnl(
                pes_idx, rxn,
                spc_dct, run_prefix, save_prefix,
                thy_info=thy_info, ini_thy_info=ini_thy_info,
                re_id=re_id)
        )

    # Build the queue
    # ts_queue = tuple(sadpt for sadpt in ts_dct) if ts_dct else ()

    return ts_dct
    # return ts_dct, ts_queue
30d07aac9fb2c03b87017878cdb96ed750538c9d
3,638,748
import re


def parse_msig_storage(storage: str):
    """Parse the storage of a multisig contract to get its counter (as a
    number), threshold (as a number), and the keys of the signers (as
    Micheline sequence in a string)."""
    # Collapse the storage onto a single line before matching.
    flattened = ' '.join(storage.split('\n'))
    match = re.search(r'Pair\s+?([0-9]+)\s+?([0-9]+)\s+?(.*)\s*', flattened)
    assert match is not None
    counter, threshold, keys = match.groups()
    return {
        'counter': int(counter),
        'threshold': int(threshold),
        'keys': keys,
    }
6e04091721177cdd3d40b86717eb86ebbb92a8ff
3,638,749
def doTestMethods(method, term, parm, options):
    """ update SF using passed data

    Dispatches one of several Salesforce API smoke tests
    (update/create/query/search/retrieve/delete) based on ``method`` and
    ``parm``.  Python 2 only (``file``, ``print`` statement).

    :param method: API operation name to exercise
    :param term: free-form seed/search term used by several tests
    :param parm: sub-test selector ('test1', 'test2', or a pass-through value)
    :param options: object with ``trace`` (log path) and ``debug`` attributes
    :return: the result of the exercised API call, or a one-element
        "No test found" message list when nothing matched
    """
    # Trace log for the API session.
    tf = file(options.trace, 'w+')
    sfb = sForceApi3(dlog=tf, debug=options.debug)
    ret = ['No test found for %s %s' %(method,parm)]
    dtup = time.strptime('2004-04-21T12:30:59', ISO_8601_DATETIME)
    dtsec = time.mktime(dtup)
    # Sample record used by the update/create contact tests.  The hard-coded
    # ids below are presumably valid in the target test org -- verify.
    taskdata = {'Description':'This was updated from api3 %s' %term
            ,'Dont_Send_Empty_Report__c':True
            ,'Frequency__c':'weekly'
            ,'Last_Report__c':dtsec	#dataTime
            ,'My_CRs_in_Development__c':True
            # ref must be valid ID
            ,'id':'00330000001Ud4D'
            ,'LastName':'Vanek'
            }
    if method == 'update':
        if parm == 'test1':
            ret = sfb.updateTest1(entity='Task', data={}, nullEmpty=False, seed=term)
        elif parm == 'test2':
            ret = sfb.updateTest1(entity='Contact', data=taskdata, nullEmpty=False, seed=term)
    elif method == 'create':
        if parm == 'test1':
            ret = sfb.createTest1(entity='Task', data={}, nullEmpty=False, seed=term)
        elif parm == 'test2':
            ret = sfb.createTest1(entity='Contact', data=taskdata, nullEmpty=False, seed=term)
    elif method == 'query':
        if parm == 'test1':
            soql = "Select Business_Unit__c, Department, Dept__c, Email, EmployeeNumber__c, FirstName, Id, LastName from Contact where AccountId = '00130000000DWRJ' and EmployeeNumber__c <'10015' and EmployeeNumber__c != '' "
            ret = sfb.queryBase(entity='Contact', soql=soql)
        elif parm == 'test2':
            # should be same as above
            ret = sfb.queryBase(entity='Contact', where='t1', sc='contacts')
        else:
            ret = sfb.queryBase(entity='task', where=term, sc=parm)
            # sc = mod, case, contacts,
    elif method == 'search':
        # Terms containing '@' are searched as email addresses.
        if term.find('@') != -1:
            scope = 'EMAIL FIELDS'
        else:
            scope = 'ALL FIELDS'
        if parm == 'test1':
            retsc = 'case'
            ret = sfb.searchBase(term, retsc=retsc, scope=scope)
        elif parm == 'test2':
            retsc = 'people'
            ret = sfb.searchBase(term, retsc=retsc, scope=scope)
        else:
            ret = sfb.searchBase(term, retsc=parm, scope=scope)
    elif method == 'retrieve':
        if parm == 'test1':
            ids = ['00T3000000321P8','00T3000000321Nn']
            fieldList = []
            ret = sfb.retrieveBase(ids=ids, entity='Task', fieldList=fieldList )
        elif parm == 'test2':
            ids = ['00330000001Jsyq','00330000001Ud4D']
            fieldList = ['Id','FirstName', 'LastName','Email']
            ret = sfb.retrieveBase(ids=ids, entity='Contact' )
    elif method == 'delete':
        if parm == 'test1':
            ids = ['00T300000033cw3']
            ret = sfb.deleteBase(ids=ids)
        else:
            print 'Only a parm of test1 is supported right now'
    return ret
800e2bcf065e6c2b47f527439f41b58c2f961aed
3,638,750
def do_index(request):
    """Render the index page.

    Lists only the configured projects the requesting user is allowed to
    open, then renders the index template.

    :param request: request object exposing ``user.can_open``
    :return: (status, headers, body) response tuple
    """
    # Filter the configured projects down to those this user may open.
    projects = [
        (name, path)
        for name, path in config.all_projects()
        if request.user.can_open(path)
    ]
    login_block = render_login_block(request)
    # NOTE: locals() hands all locals (projects, login_block, request) to the
    # template as its context.
    html = util.render_template(
        config.get_path('www.index_template', 'gbd/core/index.html.tpl'),
        locals())
    return 200, [('Content-Type', 'text/html; charset=utf-8')], html
f5ba6274ebe2e3fce875a44f727d258b536e2e46
3,638,751
from random import sample

import requests


def sample_coll(word, urns=[], after=5, before=5, sample_size = 300, limit=1000):
    """Find collocations for word in a sample of set of book URNs.

    :param word: the target word to collocate
    :param urns: book URNs, either ``[u1, u2, ...]`` or ``[[u1, ...], ...]``
    :param after: context window size after the word
    :param before: context window size before the word
    :param sample_size: maximum number of URNs sampled for the query
    :param limit: result limit forwarded to the ngram API
    :return: pandas DataFrame of collocation counts, sorted descending

    NOTE(review): the mutable default ``urns=[]`` is an anti-pattern, and the
    default immediately fails at ``urns[0]`` below -- callers are expected to
    pass a non-empty list.
    """
    # check if urns is a list of lists, [[s1, ...],[s2, ...]...] then urn serial first element
    # else the list is assumed to be on the form [s1, s2, ....]
    if isinstance(urns[0], list):
        urns = [u[0] for u in urns]

    # Keep only the books that actually contain the word.
    newurns = [x[0] for x in nb.refine_book_urn(words=[word], urns = urns)]

    # Take a sample
    sampleurns = sample(newurns, min(len(newurns), sample_size))

    # run collocation as normal
    r = requests.post("https://api.nb.no/ngram/urncoll", json = {
        'word':word,
        'urns':sampleurns,
        'after':after,
        'before':before,
        'limit':limit
    }
    )

    res = pd.DataFrame.from_dict(r.json(), orient='index')
    # sort values of resultant set
    if not res.empty:
        res = res.sort_values(by=res.columns[0], ascending = False)
    return res
37b343a82a9424fc408b9545a5875ee8bb0f4d9a
3,638,752
def roll_array(arr: npt.ArrayLike, shift: int, axis: int = 0) -> np.ndarray:
    """Roll the elements in the array by `shift` positions along the given axis.

    Elements shifted past position 0 wrap around to the end of the axis.

    Parameters
    ----------
    arr : :py:obj:`~numpy.typing.ArrayLike`
        input array to roll
    shift : int
        number of bins to shift by
    axis : int
        axis to roll along, by default 0

    Returns
    -------
    :py:obj:`~numpy.ndarray`
        shifted numpy array
    """
    arr = np.asanyarray(arr)
    # Normalize the shift into [0, axis_length); this also preserves the
    # original ZeroDivisionError behaviour for an empty axis.
    shift %= arr.shape[axis]
    # A negative np.roll moves element `shift` to position 0, matching the
    # original take-and-wrap implementation.
    return np.roll(arr, -shift, axis=axis)
79ad44163eb33408021879a0d64f3d0541e97410
3,638,753
def remove(favourites_list, ctype, pk, **options):
    """Remove a line from the favourites_list.

    :param favourites_list: the favourites list to remove from
    :param ctype: content-type part of the instance key
    :param pk: primary-key part of the instance key
    :param options: forwarded to ``favourites_list.remove``
    :return: whatever ``favourites_list.remove`` returns
    """
    # unpack_instance_key presumably resolves (ctype, pk) back into a model
    # instance -- confirm against its definition.
    instance = unpack_instance_key(favourites_list, ctype, pk)
    return favourites_list.remove(instance, options=options)
701c153d2f846c8431fae41b06ab5c28617845a3
3,638,754
def eHealthClass_airFlowWave(*args):
    """eHealthClass_airFlowWave(int air)

    SWIG-generated wrapper: forwards directly to the native _ehealth module.
    """
    return _ehealth.eHealthClass_airFlowWave(*args)
8af646f62c13f783c4b38524bf727fca038b0b7b
3,638,755
def bp_symm_func(tensors, sf_spec, rc, cutoff_type):
    """ Wrapper for building Behler-style symmetry functions

    :param tensors: input tensors forwarded to each symmetry-function builder
    :param sf_spec: list of spec dicts; each must have a 'type' key
        ('G2', 'G3' or 'G4'), remaining keys are builder options
    :param rc: cutoff radius (injected for G3 only)
    :param cutoff_type: cutoff function type (injected for G3 only)
    :return: dict with 'fp_<i>', 'jacob_<i>' and 'jacob_ind_<i>' entries,
        one triple per spec entry
    """
    # Map each spec 'type' to its implementation.
    sf_func = {'G2': G2_SF, 'G3': G3_SF, 'G4': G4_SF}
    fps = {}
    for i, sf in enumerate(sf_spec):
        # All keys except "type" are forwarded as keyword options.
        options = {k: v for k, v in sf.items() if k != "type"}
        if sf['type'] == 'G3':  # Workaround for G3 only
            options.update({'rc': rc, 'cutoff_type': cutoff_type})
        fp, jacob, jacob_ind = sf_func[sf['type']](
            tensors, **options)
        fps['fp_{}'.format(i)] = fp
        fps['jacob_{}'.format(i)] = jacob
        fps['jacob_ind_{}'.format(i)] = jacob_ind
    return fps
be882f791f4d4b2c9c575d836e9122604b89effa
3,638,756
def create_dummy_window(show_all=True, should_quit=False, fullscreen=False):
    """
    Function to create dummy window which does nothing.

    :param show_all: True if window should be shown immediately
    :param should_quit: True if the Gtk main loop should quit when the user
        closes the window
    :param fullscreen: True if window should be in full screen mode by default
    :return: the newly created Gtk.Window
    """
    window = Gtk.Window()
    if show_all:
        window.show_all()
    if should_quit:
        # Closing the window ends the Gtk main loop.
        window.connect("delete-event", Gtk.main_quit)
    if fullscreen:
        window.fullscreen()
    return window
b8f7a2bb4bc2531bc9c49ccf8f89f0a23cd93667
3,638,757
def color_RGB_to_hs(iR: float, iG: float, iB: float) -> tuple[float, float]:
    """Convert an rgb color to its hs representation.

    Delegates to ``color_RGB_to_hsv`` and drops the value (v) component.
    """
    return color_RGB_to_hsv(iR, iG, iB)[:2]
14ae1cd29aca8de29bd3b776fe9a5e752015203d
3,638,758
def select_thread(*args):
    """
    select_thread(tid) -> bool

    Select the given thread as the current debugged thread. All thread
    related execution functions will work on this thread. The process must
    be suspended to select a new thread. \sq{Type, Synchronous function -
    available as request, Notification, none (synchronous function)}

    @param tid: ID of the thread to select (C++: thid_t)
    @return: false if the thread doesn't exist.
    """
    # SWIG-generated wrapper: forwards directly to the native IDA debugger API.
    return _ida_dbg.select_thread(*args)
2aefb9cb11c202e811e22d8cdb5e1a414f213bc4
3,638,759
def simulation_aggreation_merge(rankings, baseline, method='od'):
    """Merge rankings by running simulation of existing rankings.

    This would first extract relative position of different ranking results,
    and relative position are considered as simulated games. The game results
    are sent to another ranker that gives merged ranking result.

    Parameters
    ----------
    rankings: list of rankings returned by rank of rankers.
    baseline: (0, +Inf)
        Since we are using relative position of each game player, one should
        provide a baseline as the least score a team should obtain in the
        simulated match.
    method: {'massey', 'colley', 'keener', 'markov', 'od', 'difference'}
        The final ranker applied on simulated games.

    Returns
    -------
    pandas.DataFrame: ['name', 'rating', 'rank']
    """
    if not isinstance(rankings, list):
        raise ValueError('rankings should be a list of ranker result.')
    if not all([isinstance(x, pd.DataFrame) for x in rankings]):
        raise ValueError('all items in rankings list should be pandas dataframe.')

    # Accumulators for one simulated match per (i, j) pair per ranking.
    vhost = []
    vvisit = []
    vhscore = []
    vvscore = []
    for it in rankings:
        # Every pair of rows becomes one simulated game; the rank difference
        # becomes the winning margin on top of the baseline score.
        for i in range(it.shape[0]):
            for j in range(i+1, it.shape[0]):
                host = it.loc[i, 'name']
                visit = it.loc[j, 'name']
                delta = it.loc[j, 'rank'] - it.loc[i, 'rank']
                # host wins delta score over visit
                hscore = baseline if delta<0 else baseline+delta
                vscore = baseline-delta if delta<0 else baseline
                vhost.append(host)
                vvisit.append(visit)
                vhscore.append(hscore)
                vvscore.append(vscore)
    sim = pd.DataFrame(data={
        'host': vhost,
        'visit': vvisit,
        'hscore': vhscore,
        'vscore': vvscore
    }, columns=['host', 'visit', 'hscore', 'vscore'])
    data = Table(data=sim, col = [0, 1, 2, 3])

    # Dispatch the simulated games to the requested final ranker.
    if method=='massey':
        ranker = MasseyRanker(table=data)
        return ranker.rank(ascending=False)
    elif method=='colley':
        ranker = ColleyRanker(table=data)
        return ranker.rank(ascending=False)
    elif method=='keener':
        ranker = KeenerRanker(table=data)
        return ranker.rank(ascending=False)
    elif method=='markov':
        ranker = MarkovRanker(table=data)
        return ranker.rank(ascending=False)
    elif method=='od':
        ranker = ODRanker(table=data)
        return ranker.rank(output='summary', ascending=False)
    elif method=='difference':
        ranker = DifferenceRanker(table=data)
        return ranker.rank(ascending=False)
    else:
        raise ValueError('method not available. Available methods are: massey, colley, keener, markov, od and difference.')
56de703d3dc0bee0b8e77c0ac7e21e9f97a8d485
3,638,760
def maxOverTime(field,makeTimes=0):
    """Take the max of the values in each time step

    If makeTimes is true (1) then we return a field mapping all of the times
    to the max.  Else we just return the max
    """
    # Delegates to the Java GridMath helper (Jython bridge).
    return GridMath.maxOverTime(field,makeTimes);
53281490d0f42538564aeb701fb312bf2d22d509
3,638,761
from functools import reduce
from operator import add


def assemble_docstring(parsed, sig=None):
    """
    Assemble a docstring from an OrderedDict as returned by
    :meth:`nd.utils.parse_docstring()`

    Parameters
    ----------
    parsed : OrderedDict
        A parsed docstring as obtained by ``nd.utils.parse_docstring()``.
    sig : function signature, optional
        If provided, the parameters in the docstring will be ordered
        according to the parameter order in the function signature.

    Returns
    -------
    str
        The assembled docstring.
    """
    parsed = parsed.copy()
    pad = ' ' * parsed.pop('indent')

    # Reorder the 'Parameters' section to match the signature, when given.
    if sig is not None and 'Parameters' in parsed:
        param_order = tuple(sig.parameters.keys())

        def sort_index(p):
            key = p[0].split(':')[0].strip(' *')
            if key == '':
                # Nameless entries sort last.
                return 9999
            return param_order.index(key)

        parsed['Parameters'] = sorted(parsed['Parameters'], key=sort_index)

    out_lines = []
    for section, body in parsed.items():
        # Sections may hold a flat list of lines or a list of line-lists.
        flat_body = reduce(add, body) if isinstance(body[0], list) else body
        if section is not None:
            # Named section: blank line, title, dashed underline.
            out_lines += ['', pad + section, pad + '-' * len(section)]
        out_lines += [(pad + l).rstrip() for l in flat_body]

    return '\n'.join(out_lines)
90553c468a2b113d3f26720128e384b0444d5c93
3,638,762
import requests


def retrieve_unscoped_token(os_auth_url, access_token, protocol="openid"):
    """Request an unscoped token from Keystone via OS-FEDERATION.

    :param os_auth_url: base Keystone auth URL
    :param access_token: bearer token used to authenticate the request
    :param protocol: federation protocol name (default "openid")
    :return: the unscoped token from the ``X-Subject-Token`` response header
    :raises RuntimeError: when Keystone does not answer 201 Created
    """
    url = get_keystone_url(
        os_auth_url,
        "/v3/OS-FEDERATION/identity_providers/egi.eu/protocols/%s/auth" % protocol,
    )
    r = requests.post(url, headers={"Authorization": "Bearer %s" % access_token})
    # Keystone answers 201 Created on success.
    if r.status_code != requests.codes.created:
        # Include the status code so failures are diagnosable.
        raise RuntimeError(
            "Unable to get an unscoped token (HTTP %s)" % r.status_code)
    return r.headers["X-Subject-Token"]
46d7eb0e057e2e8726effeeac45898c284bb2a4d
3,638,763
import csv
# Fixed: the module was imported as ``import tqdm`` but used as ``tqdm(...)``,
# which calls the module object; import the callable instead.
from tqdm import tqdm


def _append_with_flip(images, measurements, image, measurement):
    """Append one sample plus its horizontally-flipped augmentation
    (the measurement is negated for the mirrored image)."""
    images.append(image)
    measurements.append(measurement)
    images.append(np.fliplr(image))
    measurements.append(-1 * measurement)


def load_dataset(csv_path, relative_path):
    """
    Inputs
    ---
        csv_path: path to training data csv
        relative_path: relative path to training data

    Outputs
    ---
        X: Training data numpy array
        y: Training labels numpy array
    """
    # Read CSV lines
    lines = []
    with open(csv_path) as csvfile:
        reader = csv.reader(csvfile)
        print("Loading CSV File ...")
        for line in tqdm(reader):
            lines.append(line)

    images = []
    measurements = []
    print("Loading Data ...")
    # Read from CSV lines
    for line in tqdm(lines):
        # Camera index 0 = center, 1 = left, 2 = right; each image is also
        # mirrored to double the dataset (was three copy-pasted blocks).
        for camera_index in range(3):
            image, measurement = _load_image(line, camera_index, relative_path)
            _append_with_flip(images, measurements, image, measurement)

    return np.array(images), np.array(measurements)
01f6c639a41628ceca0a854c3096ca795a2da972
3,638,764
from typing import Dict
from typing import List


def randomly_replace_a_zone() -> rd.RouteDict:
    """
    Load the historical route data and, for every "High" quality route,
    replace one randomly chosen drop-off zone with a new synthetic zone id.

    :return: rd.RouteDict: the modified routes
    """
    routeDict = rd.loadOrCreate()
    # Fixed seed so the "random" zone choice is reproducible.
    rng.seed(3)
    new_zone_id = 'zub_fy'
    i: int
    rid: str
    route: rd.Route
    for (i, (rid, route)) in enumerate(routeDict.items()):
        # Only high-quality routes are modified.
        if route.route_score != "High":
            continue
        # if i < 55:
        #     continue
        # Collect drop-off stops per zone, tracking first-seen zone order.
        zone_id2stops: Dict[str, List[rd.Stop]] = {}
        unique_zone_id_list: List[str] = []
        for s in route.stops:
            if s.isDropoff() and s.zone_id is not None:
                # if s.zone_id == 'C-13.1J':
                #     print(f'**** {s.idx=}, {s.zone_id=}')
                if s.zone_id in zone_id2stops:
                    zone_id2stops[s.zone_id].append(s)
                else:
                    zone_id2stops[s.zone_id] = [s]
                    unique_zone_id_list.append(s.zone_id)
        # Pick one zone uniformly at random and relabel all its stops.
        zone_idx = rng.randrange(0, len(unique_zone_id_list))
        selected_zone_id = unique_zone_id_list[zone_idx]
        print(f'route_idx {i}: change zone {selected_zone_id} to {new_zone_id}')
        for s in zone_id2stops[selected_zone_id]:
            # print(f'---- {s.idx}, zone {s.zone_id} ==> {new_zone_id}')
            s.zone_id = new_zone_id
        # Sanity checks: zones must not have been computed/cached yet.
        if hasattr(route, 'zones'):
            raise RuntimeError
        if hasattr(route, 'zones_filled'):
            raise RuntimeError
        zones, zone_id2zones = route.computeZones()
        # Using fill_missing_zone would also pull in nearby stops that have
        # no zone_id, so plain computeZones() is used here.
        if selected_zone_id in zone_id2zones:
            raise RuntimeError
        if not (new_zone_id in zone_id2zones):
            raise RuntimeError
        if len(zone_id2zones[new_zone_id].stops) != len(zone_id2stops[selected_zone_id]):
            # print(f' new_zone stops: {[s.idx for s in zone_id2zones[new_zone_id].stops]}')
            raise RuntimeError(f'{len(zone_id2zones[new_zone_id].stops)=} != {len(zone_id2stops[selected_zone_id])=}')
    return routeDict
9bd3de2f35b6a356a610f091391726749a87cc56
3,638,765
import types
import itertools


def import_loop(schema, mutable, raw_data=None, field_converter=None,
                trusted_data=None, mapping=None, partial=False, strict=False,
                init_values=False, apply_defaults=False, convert=True,
                validate=False, new=False, oo=False, recursive=False,
                app_data=None, context=None):
    """
    The import loop is designed to take untrusted data and convert it into the
    native types, as described in ``schema``. It does this by calling
    ``field_converter`` on every field.

    Errors are aggregated and returned by throwing a ``ModelConversionError``.

    :param schema:
        The Schema to use as source for validation.
    :param mutable:
        A mapping or instance that can be changed during validation by Schema
        functions.
    :param raw_data:
        A mapping to be converted into types according to ``schema``.
    :param field_converter:
        This function is applied to every field found in ``instance_or_dict``.
    :param trusted_data:
        A ``dict``-like structure that may contain already validated data.
    :param partial:
        Allow partial data to validate; useful for PATCH requests.
        Essentially drops the ``required=True`` arguments from field
        definitions. Default: False
    :param strict:
        Complain about unrecognized keys. Default: False
    :param apply_defaults:
        Whether to set fields to their default values when not present in
        input data.
    :param app_data:
        An arbitrary container for application-specific data that needs to
        be available during the conversion.
    :param context:
        A ``Context`` object that encapsulates configuration options and
        ``app_data``. The context object is created upon the initial
        invocation of ``import_loop`` and is then propagated through the
        entire process.
    """
    if raw_data is None:
        raw_data = mutable
    got_data = raw_data is not None

    context = Context._make(context)
    try:
        # Already-initialized context: we are inside a recursive call.
        context.initialized
    except:
        # NOTE(review): bare except is used here as a "not initialized yet"
        # probe; first invocation sets up the shared context defaults.
        if type(field_converter) is types.FunctionType:
            field_converter = BasicConverter(field_converter)
        context._setdefaults({
            'initialized': True,
            'field_converter': field_converter,
            'trusted_data': trusted_data or {},
            'mapping': mapping or {},
            'partial': partial,
            'strict': strict,
            'init_values': init_values,
            'apply_defaults': apply_defaults,
            'convert': convert,
            'validate': validate,
            'new': new,
            'oo': oo,
            'recursive': recursive,
            'app_data': app_data if app_data is not None else {}
        })

    # Give the converter a chance to pre-process the whole input mapping.
    raw_data = context.field_converter.pre(schema, raw_data, context)

    _field_converter = context.field_converter
    _model_mapping = context.mapping.get('model_mapping')

    # Seed the output with any already-trusted data.
    data = dict(context.trusted_data) if context.trusted_data else {}
    errors = {}

    if got_data and context.validate:
        errors = _mutate(schema, mutable, raw_data, context)

    if got_data:
        # Determine all acceptable field input names
        all_fields = schema._valid_input_keys
        if context.mapping:
            mapped_keys = (set(itertools.chain(*(
                listify(input_keys)
                for target_key, input_keys in context.mapping.items()
                if target_key != 'model_mapping'))))
            all_fields = all_fields | mapped_keys
        if context.strict:
            # Check for rogues if strict is set
            rogue_fields = set(raw_data) - all_fields
            if rogue_fields:
                for field in rogue_fields:
                    errors[field] = 'Rogue field'

    atoms_filter = None
    if not context.validate:
        # optimization: convert without validate doesn't require to touch setters
        atoms_filter = atom_filter.not_setter

    for field_name, field, value in atoms(schema, raw_data, filter=atoms_filter):
        serialized_field_name = field.serialized_name or field_name

        # Resolve alternative input keys for this field from the mapping.
        if got_data and value is Undefined:
            for key in field.get_input_keys(context.mapping):
                if key and key != field_name and key in raw_data:
                    value = raw_data[key]
                    break

        if value is Undefined:
            if field_name in data:
                # Already supplied via trusted data; leave it alone.
                continue
            if context.apply_defaults:
                value = field.default
            if value is Undefined and context.init_values:
                value = None

        if got_data:
            if field.is_compound:
                # Compound (model/list/dict) fields recurse with a branched
                # context carrying their slice of trusted data and mapping.
                if context.trusted_data and context.recursive:
                    td = context.trusted_data.get(field_name)
                    if not all(hasattr(td, attr) for attr in ('keys', '__getitem__')):
                        td = {field_name: td}
                else:
                    td = {}
                if _model_mapping:
                    submap = _model_mapping.get(field_name)
                else:
                    submap = {}
                field_context = context._branch(trusted_data=td, mapping=submap)
            else:
                field_context = context
            try:
                value = _field_converter(field, value, field_context)
            except (FieldError, CompoundError) as exc:
                # Record the error but keep going so all errors aggregate.
                errors[serialized_field_name] = exc
                if context.apply_defaults:
                    value = field.default
                if value is not Undefined:
                    data[field_name] = value
                if isinstance(exc, DataError):
                    # Preserve whatever partial conversion succeeded.
                    data[field_name] = exc.partial_data
                continue

        if value is Undefined:
            continue

        data[field_name] = value

    if not context.validate:
        # Setter-backed fields were skipped above; copy their raw values in.
        for field_name, field, value in atoms(schema, raw_data, filter=atom_filter.has_setter):
            data[field_name] = value

    if errors:
        raise DataError(errors, data)

    # Converter post-processing of the assembled output mapping.
    data = context.field_converter.post(schema, data, context)

    return data
b3849cd3e4b5cf338a3999954820c5f7d474e406
3,638,766
def resnet_50(inputs, block_fn=bottleneck_block, is_training_bn=False):
    """ResNetv50 model with classification layers removed.

    Returns the C3, C4 and C5 feature maps (the outputs of block groups
    2, 3 and 4), as consumed by FPN-style detectors.

    :param inputs: input image tensor (channels_last)
    :param block_fn: residual block constructor (default: bottleneck_block)
    :param is_training_bn: whether batch norm runs in training mode
    :return: (c3, c4, c5) feature-map tensors
    """
    # Number of residual blocks per block group for ResNet-50.
    layers = [3, 4, 6, 3]

    data_format = 'channels_last'

    # Stem: 7x7 stride-2 conv, BN+ReLU, then 3x3 stride-2 max pool.
    inputs = conv2d_fixed_padding(
        inputs=inputs, filters=64, kernel_size=7, strides=2,
        data_format=data_format)
    inputs = tf.identity(inputs, 'initial_conv')
    inputs = batch_norm_relu(inputs, is_training_bn, data_format=data_format)

    inputs = tf.layers.max_pooling2d(
        inputs=inputs, pool_size=3, strides=2, padding='SAME',
        data_format=data_format)
    inputs = tf.identity(inputs, 'initial_max_pool')

    # Block group 1 (stride 1) feeds the later groups; its output is not
    # returned directly.
    inputs = block_group(
        inputs=inputs, filters=64, blocks=layers[0], strides=1,
        block_fn=block_fn, is_training_bn=is_training_bn, name='block_group1',
        data_format=data_format)
    c3 = block_group(
        inputs=inputs, filters=128, blocks=layers[1], strides=2,
        block_fn=block_fn, is_training_bn=is_training_bn, name='block_group2',
        data_format=data_format)
    c4 = block_group(
        inputs=c3, filters=256, blocks=layers[2], strides=2,
        block_fn=block_fn, is_training_bn=is_training_bn, name='block_group3',
        data_format=data_format)
    c5 = block_group(
        inputs=c4, filters=512, blocks=layers[3], strides=2,
        block_fn=block_fn, is_training_bn=is_training_bn, name='block_group4',
        data_format=data_format)

    return c3, c4, c5
901954713b6c77334fc9050ab0c2758d9cad90dd
3,638,767
def _sanitize_index_element(ind): """Sanitize a one-element index.""" if isinstance(ind, Number): ind2 = int(ind) if ind2 != ind: raise IndexError(f"Bad index. Must be integer-like: {ind}") else: return ind2 elif ind is None: return None else: raise TypeError("Invalid index type", type(ind), ind)
7d006d6ab0081fef01e162df63f76bcff691bbe3
3,638,768
def overlap_click(original, click_position, sr=44100, click_freq=2000, click_duration=0.5):
    """Overlay click sounds on top of a waveform.

    :param original: input waveform to overlay the clicks onto
    :param click_position: click positions; notice that positions should be
        given in seconds
    :param sr: sample rate forwarded to librosa.clicks
    :param click_freq: click tone frequency in Hz
    :param click_duration: click length in seconds
    :return: standardized sum of the two (length-matched) waveforms

    Bug fix: the keyword arguments were previously ignored -- the internal
    call hard-coded sr=44100, click_freq=4000 and click_duration=0.05.  They
    are now honoured; callers relying on the old hard-coded values should
    pass them explicitly.
    """
    # Halve the click amplitude so it does not dominate the mix.
    cwave = librosa.clicks(np.array(click_position), sr=sr,
                           click_freq=click_freq,
                           click_duration=click_duration) / 2
    original, wave = mono_pad_or_truncate(original, cwave)
    return standardize(original + wave)
23a1668648c4b79b82088d149c6bc0b92ecd0326
3,638,769
import itertools


def vigenere(plaintext: str, *, key: str) -> str:
    """Vigenère cipher (page 48)

    Encrypts ``plaintext`` with a repeating sequence of Caesar ciphers,
    one shifted alphabet per character of ``key``.

    - `plaintext` is the message to be encrypted
    - `key` defines the series of interwoven Caesar ciphers to be used
    """
    plaintext = validate_plaintext(plaintext)
    key = validate_key(key)
    # One shifted alphabet per key letter, cycled across the whole message.
    rows = itertools.cycle(_shifted_alphabet(ord(k) - 65) for k in key)
    # zip draws one alphabet from the cycle per plaintext character.
    return "".join(row[ord(ch) - 97] for row, ch in zip(rows, plaintext))
c2b0428a86673770f299842b459a44d64423ca52
3,638,770
def generateVtTick(row, symbol):
    """Build a VtTickData object from one row of market data.

    :param row: data row keyed by field name; ``row.name`` holds the timestamp
    :param symbol: instrument symbol, used for both symbol and vtSymbol
    :return: populated VtTickData instance
    """
    tick = VtTickData()
    tick.symbol = symbol
    tick.vtSymbol = symbol

    tick.lastPrice = row['last']
    tick.volume = row['volume']
    tick.openInterest = row['open_interest']
    tick.datetime = row.name

    tick.openPrice = row['open']
    tick.highPrice = row['high']
    tick.lowPrice = row['low']
    tick.preClosePrice = row['prev_close']

    tick.upperLimit = row['limit_up']
    tick.lowerLimit = row['limit_down']

    # Five depth levels of bid/ask price and volume.
    for level in range(1, 6):
        setattr(tick, 'bidPrice%d' % level, row['b%d' % level])
        setattr(tick, 'bidVolume%d' % level, row['b%d_v' % level])
        setattr(tick, 'askPrice%d' % level, row['a%d' % level])
        setattr(tick, 'askVolume%d' % level, row['a%d_v' % level])

    return tick
108fbbf82eac228772cea90669fe14d90cbe8ccc
3,638,771
import dataclasses


def hartigan_map_mutations(tree, genotypes, alleles, ancestral_state=None):
    """
    Returns a Hartigan parsimony reconstruction for the specified set of
    genotypes. The reconstruction is specified by returning the ancestral
    state and a list of mutations on the tree. Each mutation is a
    (node, parent, state) triple, where node is the node over which the
    transition occurs, the parent is the index of the parent transition
    above it on the tree (or -1 if there is none) and state is the new
    state.
    """
    # The python version of map_mutations allows the ancestral_state to be a string
    # from the alleles list, so we implement this at the top of this function although
    # it doesn't need to be in the C equivalent of this function
    if isinstance(ancestral_state, str):
        ancestral_state = alleles.index(ancestral_state)
    # equivalent C implementation can start here
    genotypes = np.array(genotypes)
    not_missing = genotypes != -1
    if np.sum(not_missing) == 0:
        raise ValueError("Must have at least one non-missing genotype")
    num_alleles = np.max(genotypes[not_missing]) + 1
    if ancestral_state is not None:
        if ancestral_state < 0 or ancestral_state >= len(alleles):
            raise ValueError("ancestral_state must be a number from 0..(num_alleles-1)")
        # An ancestral state beyond the observed alleles widens the state space.
        if ancestral_state >= num_alleles:
            num_alleles = ancestral_state + 1
    num_nodes = tree.tree_sequence.num_nodes
    # use a numpy array of 0/1 values to represent the set of states
    # to make the code as similar as possible to the C implementation.
    optimal_set = np.zeros((num_nodes + 1, num_alleles), dtype=np.int8)
    # Samples with an observed allele get that single state; missing data
    # (-1) means any state is optimal for that sample.
    for allele, u in zip(genotypes, tree.tree_sequence.samples()):
        if allele != -1:
            optimal_set[u, allele] = 1
        else:
            optimal_set[u] = 1
    # Bottom-up (postorder) pass: each internal node's optimal set is the
    # alleles achieving the maximum count over its children's optimal sets.
    allele_count = np.zeros(num_alleles, dtype=int)
    for u in tree.nodes(tree.virtual_root, order="postorder"):
        allele_count[:] = 0
        for v in tree.children(u):
            for j in range(num_alleles):
                allele_count[j] += optimal_set[v, j]
        if not tree.is_sample(u):
            max_allele_count = np.max(allele_count)
            optimal_set[u, allele_count == max_allele_count] = 1
    if ancestral_state is None:
        ancestral_state = np.argmax(optimal_set[tree.virtual_root])
    else:
        # A fixed ancestral state makes every state optimal at the root so the
        # top-down pass never places a mutation above the real roots.
        optimal_set[tree.virtual_root] = 1

    @dataclasses.dataclass
    class StackElement:
        node: int            # tree node being visited
        state: int           # state inherited from the parent
        mutation_parent: int  # index of the mutation above, or -1

    # Top-down (preorder) pass: keep the parent's state where possible,
    # otherwise emit a mutation to an optimal state for this node.
    mutations = []
    stack = [StackElement(tree.virtual_root, ancestral_state, -1)]
    while len(stack) > 0:
        s = stack.pop()
        if optimal_set[s.node, s.state] == 0:
            s.state = np.argmax(optimal_set[s.node])
            mutation = tskit.Mutation(
                node=s.node,
                derived_state=alleles[s.state],
                parent=s.mutation_parent,
            )
            s.mutation_parent = len(mutations)
            mutations.append(mutation)
        for v in tree.children(s.node):
            stack.append(StackElement(v, s.state, s.mutation_parent))
    return alleles[ancestral_state], mutations
cbdeb0bc2e23f5a0e2ca1d420a3e78c7476cabc9
3,638,772
import numpy


def padArray(ori_array, pad_size):
    """
    Pads out an array to a larger size by mirroring its edges.

    The padding is "symmetric": each edge is reflected outward including
    the boundary element itself, first along the rows and then along the
    columns (so corners are reflections of reflections).

    ori_array - A 2D numpy array.
    pad_size - The number of elements to add to each of the "sides" of
               the array.

    Returns the padded 2D numpy array (float64), or the original array
    unchanged when pad_size <= 0.
    """
    if pad_size <= 0:
        return ori_array
    # numpy.pad with mode="symmetric" reproduces the previous hand-rolled
    # flipud/fliplr edge mirroring in one call, including the corner
    # handling (rows are padded first, then columns, like the old code).
    # It also behaves sensibly when pad_size exceeds an array dimension,
    # where the old code leaked uninitialized filler values.
    return numpy.pad(ori_array.astype(numpy.float64), pad_size, mode="symmetric")
28fac7ccb8fc08c3ac7cf3104fed558128003750
3,638,773
def finish_subprocess(proc, cmdline, cmd_input=None, ok_exit_codes=None):
    """Ensure that the process returned a zero exit code indicating success"""
    if ok_exit_codes is None:
        ok_exit_codes = [0]
    out, err = proc.communicate(cmd_input)
    ret = proc.returncode
    if ret in ok_exit_codes:
        return out
    # Unexpected exit code: log full context, then raise for the caller.
    LOG.error("Command '%(cmdline)s' with process id '%(pid)s' expected "
              "return code in '%(ok)s' but got '%(rc)s': %(err)s",
              {'cmdline': cmdline, 'pid': proc.pid, 'ok': ok_exit_codes,
               'rc': ret, 'err': err})
    raise SubprocessException(' '.join(cmdline), ret, out, err)
c8aa0f63f019b92b799cafd931f782a6855709ba
3,638,774
def browse():
    """Render a paginated view of all hosts, optionally including history."""
    page = int(request.args.get("page", 1))
    include_history = request.args.get("includeHistory", False)
    results_per_page, search_offset = results_offset(page)
    index_name = "history" if include_history else "latest"
    count, host_records = current_app.elastic.search(
        results_per_page, search_offset, searchIndex=index_name
    )
    total = current_app.elastic.total_hosts()
    # Only propagate includeHistory into the pagination links when it was
    # actually requested, keeping the default URLs clean.
    extra_args = {"includeHistory": include_history} if include_history else {}
    next_url, prev_url = build_pagination_urls("main.browse", page, count, **extra_args)
    return render_template(
        "main/browse.html",
        numresults=count,
        totalHosts=total,
        page=page,
        hosts=host_records,
        next_url=next_url,
        prev_url=prev_url,
    )
b579bf402d0034d950dcf2f87c08072d5a2959f9
3,638,775
from typing import cast


def argmax(pda: pdarray) -> np.int64:
    """
    Return the index of the first occurrence of the array max value.

    Parameters
    ----------
    pda : pdarray
        Values for which to calculate the argmax

    Returns
    -------
    np.int64
        The index of the argmax calculated from the pda

    Raises
    ------
    TypeError
        Raised if pda is not a pdarray instance
    RuntimeError
        Raised if there's a server-side error thrown
    """
    # Ask the server to run the "argmax" reduction over this array.
    rep_msg = generic_msg(cmd="reduction", args="argmax {}".format(pda.name))
    return parse_single_value(cast(str, rep_msg))
9fbe515db4e40bf56373a1046a034f15f28d1849
3,638,776
import torch


def log_cumsum(probs, dim=1, eps=1e-8):
    """Calculate log of inclusive cumsum."""
    # eps keeps the argument of log strictly positive.
    return (probs.cumsum(dim=dim) + eps).log()
7f1ab77fd9909037c7b89600c531173dab80c11e
3,638,777
def iou_coe_Slice_by_Slice(output, target, threshold=0.5, axis=(2, 3, 4), smooth=1e-5):
    """Non-differentiable Intersection over Union (IoU) for comparing the similarity
    of two binarized volumes, reduced over `axis` and averaged over the batch.
    """
    prediction = tf.cast(output > threshold, dtype=tf.float32)
    ground_truth = tf.cast(target > threshold, dtype=tf.float32)
    # AND: voxels positive in both masks.
    intersection = tf.reduce_sum(tf.multiply(prediction, ground_truth), axis=axis)
    # OR: voxels positive in at least one mask.
    union = tf.reduce_sum(
        tf.cast(tf.add(prediction, ground_truth) >= 1, dtype=tf.float32), axis=axis)
    # Smoothing keeps the ratio finite when both masks are empty.
    per_sample_iou = (intersection + smooth) / (union + smooth)
    return tf.reduce_mean(per_sample_iou, axis=0, name='iou_coe')
6292c25b8ca9c2fd78dd810703795bfa97877261
3,638,778
def splitTrainTestDataList(list_data, test_fraction=0.2, sample_size=None, replace=False, seed=None):
    """
    Split a list of data arrays into train and test partitions.

    Every array in the list must index samples along its first axis; all
    other axes are left untouched.

    :param list_data: List of arrays to sample and split. A single array
        (not wrapped in a list) is also accepted.
    :type list_data: list/tuple(np.ndarray[n_samples, _]) or np.ndarray[n_samples, _]
    :param test_fraction: Fraction of the (sampled) data reserved for test.
    :type test_fraction: float [0-1], optional (default=0.2)
    :param sample_size: Number of samples to draw; None means all samples.
    :type sample_size: int or None, optional (default=None)
    :param replace: Whether to draw samples with replacement.
    :type replace: bool, optional (default=False)
    :param seed: Seed for reproducibility; None leaves the RNG unseeded.
    :type seed: int or None, optional (default=None)
    :return: List of train arrays and list of test arrays.
    :rtype: list(np.ndarray[n_samples_train, _]), list(np.ndarray[n_samples_test, _])
    """
    # Normalize the input to a plain list of arrays.
    if isinstance(list_data, np.ndarray):
        list_data = [list_data]
    elif isinstance(list_data, tuple):
        list_data = list(list_data)
    n_total = len(list_data[0])
    # No sampling requested -> keep every sample; otherwise cap at n_total.
    n_sample = n_total if sample_size is None else int(min(sample_size, n_total))
    # Draw a randomized index order over all samples.
    np.random.seed(seed)
    shuffled_indices = np.random.choice(np.arange(n_total), n_total, replace=replace)
    # Partition the (possibly subsampled) index order into train and test.
    n_train = int(n_sample * (1. - test_fraction))
    train_indices = shuffled_indices[:n_train]
    test_indices = shuffled_indices[n_train:n_sample]
    train_list = [data[train_indices] for data in list_data]
    test_list = [data[test_indices] for data in list_data]
    return train_list, test_list
ae14275da02025f6c3c894c33da64d336cd0f5d1
3,638,779
def create_hyperbounds(hyperparameters):
    """
    Gets the bounds of each hyperspace for sampling.

    Parameters
    ----------
    * `hyperparameters` [list, shape=(n_hyperparameters,)]

    Returns
    -------
    * `hyperspace_bounds` [list of lists, shape(n_spaces, n_hyperparameters)]
        - All combinations of hyperspace bounds.
        - Matches the bounds in hyperspaces from create_hyperspace.
    """
    # Validate each hyperparameter and collect its (low, high) bounds.
    bound_pairs = [check_hyperbounds(hparam) for hparam in hyperparameters]
    lows = [low for low, _ in bound_pairs]
    highs = [high for _, high in bound_pairs]
    # fold_spaces combines the per-parameter bounds into all hyperspaces.
    return [space for space in fold_spaces(lows, highs)]
4160e9084cd3f835f327661f4dacad80a348bd11
3,638,780
def race_from_string(value):
    """Convert a free-text race label to 'white', 'black', 'other' or None.

    Unknown or empty labels map to None; any other unrecognized label maps
    to 'other'.

    :param value: race label string (renamed from ``str``, which shadowed
        the builtin).
    :return: 'white', 'black', 'other', or None
    """
    race_dict = {
        "White/Caucasian": 'white',
        "Black/African American": 'black',
        "Unknown": None,
        "": None
    }
    return race_dict.get(value, 'other')
1d38469537c3f5f6a4a42712f5ec1dbd26a471bd
3,638,781
def test_wrap_coordinates(coords, origin, wgs84):
    """
    Test whether coordinates wrap around the antimeridian in wgs84

    Returns True only when, after reprojecting every point of coords[0]
    from `origin` to `wgs84`, at least one longitude lies below -170 and
    at least one lies above +170.

    NOTE(review): as soon as any transformed longitude falls inside
    [-170, 170], the function returns False without checking the
    remaining points — presumably intentional (such a ring cannot hug the
    antimeridian), but worth confirming.
    """
    lon_under_minus_170 = False
    lon_over_plus_170 = False
    if isinstance(coords[0], list):
        for c in coords[0]:
            # transform() is assumed to yield (lon, lat) — TODO confirm axis order.
            c = list(transform(origin, wgs84, *c))
            if c[0] < -170:
                lon_under_minus_170 = True
            elif c[0] > 170:
                lon_over_plus_170 = True
            else:
                return False
    return lon_under_minus_170 and lon_over_plus_170
bd95c3bc1fd4f500e3af7a68af9d5e327656165e
3,638,782
def filter_dfg_contain_activity(dfg0, start_activities0, end_activities0, activities_count0, activity, parameters=None):
    """
    Filters the DFG keeping only nodes that can reach / are reachable from activity

    Parameters
    ---------------
    dfg0
        Directly-follows graph
    start_activities0
        Start activities
    end_activities0
        End activities
    activities_count0
        Activities count
    activity
        Activity that should be reachable / should reach all the nodes of the filtered graph
    parameters
        Parameters

    Returns
    ---------------
    dfg
        Filtered DFG
    start_activities
        Filtered start activities
    end_activities
        Filtered end activities
    activities_count
        Filtered activities count
    """
    if parameters is None:
        parameters = {}
    # since the dictionaries/sets are modified, a deepcopy is the best option to ensure data integrity
    dfg = deepcopy(dfg0)
    start_activities = deepcopy(start_activities0)
    end_activities = deepcopy(end_activities0)
    activities_count = deepcopy(activities_count0)
    changed = True
    # Fixpoint iteration: keep pruning until a full pass removes nothing.
    while changed:
        changed = False
        predecessors = dfg_utils.get_predecessors(dfg, activities_count)
        successors = dfg_utils.get_successors(dfg, activities_count)
        # Nodes that can reach `activity` (plus the activity itself) ...
        predecessors_act = predecessors[activity].union({activity})
        # ... and nodes reachable from it.
        successors_act = successors[activity].union({activity})
        # Start activities must reach `activity`; end activities must be
        # reachable from it.
        start_activities1 = {x: y for x, y in start_activities.items() if x in predecessors_act}
        end_activities1 = {x: y for x, y in end_activities.items() if x in successors_act}
        if start_activities != start_activities1 or end_activities != end_activities1:
            changed = True
            start_activities = start_activities1
            end_activities = end_activities1
        reachable_nodes = predecessors_act.union(successors_act)
        if reachable_nodes != set(activities_count.keys()):
            changed = True
            activities_count = {x: y for x, y in activities_count.items() if x in reachable_nodes}
        # Keep only edges whose both endpoints survived the pruning.
        dfg = {x: y for x, y in dfg.items() if x[0] in reachable_nodes and x[1] in reachable_nodes}
    return dfg, start_activities, end_activities, activities_count
3969de1413e70234f76ab8f9e7a52126615d12c3
3,638,783
def generate_secret_key(length=16):
    """
    Generates a key of the given length.

    :param length: Length of the key to generate, in bytes.
    :type length: :class:`int`
    :returns: :class:`str` -- The generated key, in byte string.
    """
    # Delegates to get_random_bytes — presumably a CSPRNG such as
    # Crypto.Random.get_random_bytes; confirm before relying on it for
    # security-sensitive tokens.
    return get_random_bytes(length)
76fd617e3b316321d2efc09e9f9253970e5b2d2d
3,638,784
import io


def nouveau_flux(title: str, link: str, description: str) -> parse:
    """Create a new RSS feed.

    Parameters
    ----------
    title : str
        Title of the RSS feed.
    link : str
        Link to the RSS feed.
    description : str
        General description of the content.

    Returns
    -------
    parse
        XML tree (ElementTree).
    """
    feed = rss.RSS2(title, link, description, pubDate=Dt.now())
    # rss.RSS2.write_xml returns nothing; it only writes to a file-like
    # object, so capture the XML in an in-memory buffer and parse that.
    buffer = io.StringIO()
    feed.write_xml(buffer)
    buffer.seek(0)
    return parse(buffer)
693f4df7c645fceddc288bd3511ec412cc7e9bd7
3,638,785
import os


def get_open_strain_data(
        name, start_time, end_time, outdir, cache=False, buffer_time=0, **kwargs):
    """ A function which accesses the open strain data

    This uses `gwpy` to download the open data and then saves a cached copy
    for later use.

    Parameters
    ----------
    name: str
        The name of the detector to get data for
    start_time, end_time: float
        The GPS time of the start and end of the data
    outdir: str
        The output directory to place data in
    cache: bool
        If true, cache the data
    buffer_time: float
        Time to add to the begining and end of the segment.
    **kwargs:
        Passed to `gwpy.timeseries.TimeSeries.fetch_open_data`

    Returns
    -------
    strain: gwpy.timeseries.TimeSeries
        The object containing the strain data. If the connection to the
        open-data server fails, this function returns `None`.

    """
    # The cache file name is based on the *unbuffered* segment times.
    filename = '{}/{}_{}_{}.txt'.format(outdir, name, start_time, end_time)
    if buffer_time < 0:
        raise ValueError("buffer_time < 0")
    start_time -= buffer_time
    end_time += buffer_time
    if cache and os.path.isfile(filename):
        logger.info('Using cached data from {}'.format(filename))
        return TimeSeries.read(filename)
    logger.info('Fetching open data from {} to {} with buffer time {}'
                .format(start_time, end_time, buffer_time))
    try:
        strain = TimeSeries.fetch_open_data(name, start_time, end_time, **kwargs)
        logger.info('Saving cache of data to {}'.format(filename))
        strain.write(filename)
        return strain
    except Exception as e:
        logger.info("Unable to fetch open data, see debug for detailed info")
        logger.info("Call to gwpy.timeseries.TimeSeries.fetch_open_data returned {}"
                    .format(e))
        return None
51feee29732bc4188e48a2327dc7b91c0089f416
3,638,786
from tqdm import tqdm
import os
import glob


def extract_text(path):
    """Extract the text of every txt document under *path* into dictionaries.

    Returns (data_dic, data_index) where data_dic maps the base filename
    (without extension and page number) to {'text', 'error', 'id'} and
    data_index maps the numeric id back to the filename.
    """
    pattern = os.path.join(path, '*.txt')
    data_dic = {}    # key: filename, value: text / error / id
    data_index = {}  # key: id, value: filename
    # Files are named with a one-digit page number appended, e.g. file.1.txt;
    # stripping the extensions collapses each multi-page document to one key.
    documents = set(remove_extensions(match) for match in glob.glob(pattern))
    # progress bar to display
    progress = tqdm(total=len(documents))
    for doc_id, document in enumerate(documents):
        progress.update(1)
        page_count = find_nb_of_pages(document)
        full_text, error_code = singlepdf_extract_text(document, page_count)
        _, base_name = os.path.split(document)
        data_dic[base_name] = {
            'text': full_text,
            'error': error_code,
            'id': doc_id,
        }
        data_index[doc_id] = base_name
    progress.close()
    return data_dic, data_index
ce6fb1beaee3a6eef2a8c57b191efb2abb40bf8a
3,638,787
def extract_app_name_key():
    """
    Extracts the application name redis key and hash from the request

    The key should be of format:
    <metrics_prefix>:<metrics_application>:<ip>:<rounded_date_time_format>
    ie: "API_METRICS:applications:192.168.0.1:2020/08/04:14"

    The hash should be of format:
    <app_name>
    ie: "audius_dapp"
    """
    app_name = request.args.get(app_name_param, type=str, default=None)
    # Prefer the proxy-forwarded client address when present.
    client_ip = request.headers.get('X-Forwarded-For', request.remote_addr)
    rounded_time = get_rounded_date_time().strftime(datetime_format)
    redis_key = f"{metrics_prefix}:{metrics_application}:{client_ip}:{rounded_time}"
    return (redis_key, app_name)
5ec563855fc417774368bf9027d8b853739fdc27
3,638,788
def load(filename, fs, duration, flipud=True, display=False, **kwargs):
    """
    Load an image from a file or an URL

    Parameters
    ----------
    filename : string
        Image file name, e.g. ``test.jpg`` or URL.

    fs : scalar
        Sampling frequency of the audiogram (in Hz)

    duration : scalar
        Duration of the audiogram (in s)

    flipud : boolean, optional, default is True
        Vertical flip of the matrix (image)

    display : boolean, optional, default is False
        if True, display the image

    **kwargs, optional. This parameter is used by plt.plot
        figsize : tuple of integers, optional, default: (4,10)
            width, height in inches.
        title : string, optional, default : 'Spectrogram'
            title of the figure
        xlabel : string, optional, default : 'Time [s]'
            label of the horizontal axis
        ylabel : string, optional, default : 'Amplitude [AU]'
            label of the vertical axis
        cmap : string or Colormap object, optional, default is 'gray'
            See https://matplotlib.org/examples/color/colormaps_reference.html
            in order to get all the existing colormaps
            examples: 'hsv', 'hot', 'bone', 'tab20c', 'jet', 'seismic',
            'viridis'...
        vmin, vmax : scalar, optional, default: None
            `vmin` and `vmax` are used in conjunction with norm to normalize
            luminance data. Note if you pass a `norm` instance, your settings
            for `vmin` and `vmax` will be ignored.
        ext : list of scalars [left, right, bottom, top], optional, default: None
            The location, in data-coordinates, of the lower-left and
            upper-right corners. If `None`, the image is positioned such that
            the pixel centers fall on zero-based (row, column) indices.
        dpi : integer, optional, default is 96
            Dot per inch.
            For printed version, choose high dpi (i.e. dpi=300) => slow
            For screen version, choose low dpi (i.e. dpi=96) => fast
        format : string, optional, default is 'png'
            Format to save the figure
        ... and more, see matplotlib

    Returns
    -------
    im : ndarray
        The different color bands/channels are stored in the third dimension,
        such that a gray-image is MxN, an RGB-image MxNx3 and an RGBA-image
        MxNx4.
    ext : list of scalars [left, right, bottom, top]
        The location, in data-coordinates, of the lower-left and upper-right
        corners.
    dt : scalar
        Time resolution of the spectrogram (horizontal x-axis)
    df : scalar
        Frequency resolution of the spectrogram (vertical y-axis)
    """
    print(72 * '_')
    print("loading %s..." % filename)

    # Load image
    im = imread(filename, as_gray=True)

    # if 3D, convert into 2D
    if len(im.shape) == 3:
        im = im[:, :, 0]

    # Rescale the image between 0 to 1
    im = linear_scale(im, minval=0.0, maxval=1.0)

    # Get the resolution (time per column, frequency per row)
    df = fs / (im.shape[0] - 1)
    dt = duration / (im.shape[1] - 1)

    # Extent of the data in plot coordinates: full duration x Nyquist band.
    ext = [0, duration, 0, fs / 2]

    # flip the image vertically so low frequencies sit at the bottom row
    if flipud:
        im = np.flip(im, 0)

    # Display
    if display:
        # Pop display-only kwargs so the remainder can be forwarded to plot2D.
        ylabel = kwargs.pop('ylabel', 'Frequency [Hz]')
        xlabel = kwargs.pop('xlabel', 'Time [sec]')
        title = kwargs.pop('title', 'loaded spectrogram')
        cmap = kwargs.pop('cmap', 'gray')
        figsize = kwargs.pop('figsize', (4, 13))
        vmin = kwargs.pop('vmin', 0)
        vmax = kwargs.pop('vmax', 1)
        _, fig = plot2D(im, extent=ext, figsize=figsize, title=title,
                        ylabel=ylabel, xlabel=xlabel, vmin=vmin, vmax=vmax,
                        cmap=cmap, **kwargs)
    return im, ext, dt, df
820be7b1cac4dd6e1b2e06cd28b01f47536930ad
3,638,789
def get_status(lib, device_id):
    """
    Read the current status structure from the device.

    :param lib: structure for accessing the functionality of the libximc library.
    :param device_id: device id.
    :return: a filled status_t on success, otherwise None.
    """
    device_status = status_t()
    # The C call fills the structure in place and reports success via Result.
    if lib.get_status(device_id, byref(device_status)) == Result.Ok:
        return device_status
    return None
2321c1d61ec2566c4480c55c477c0fb598855df6
3,638,790
import json
from pathlib import Path
import heapq


def query_by_image_objects(image_path, weights_path, cfg_path, names_path,
                           confidence_threshold=0.5, save=False):
    """Processes user-uploaded image to retrieve similar images from database.

    First, all the objects in the image are detected using the
    :method: ``rubrix.images.detect.detect_objects``. Next, the image
    descriptor array for the user-uploaded image is compared with that of
    all pruned images so as to retrieve the top-5 results.

    Arguments:
    ----------
    image_path (numpy.ndarray):
        Path for user-uploaded image, for reverse-image search.
    weights_path (pathlib.Path):
        Path to YOLOv4 pretrained weights file.
    cfg_path (pathlib.Path):
        Path to darknet configuration file.
    names_path (pathlib.Path):
        Path to darknet names file.
    confidence_threshold (float):
        Minimum confidence for a detection to be kept.
    save (bool):
        If True, save predictions to /assets/predictions.

    Returns:
    --------
    results (list of pathlib.Path objects):
        List of paths to images retrieved for user query.
    """
    # Retrieve image descriptor vector for user-uploaded image.
    array = extract_image_descriptors(image_path, 'inception', TARGET_SIZE)
    array = array.reshape(-1)
    # Retrieve YOLOv4 model related variables to detect objects in an image.
    net = get_yolo_net(cfg_path, weights_path)
    labels = get_labels(names_path)
    image = cv2.imread(str(image_path))
    objects = detect_objects(net, labels, image, confidence_threshold)
    # The index maps each object label to the database images containing it.
    index_path = pathfinder.get('assets', 'index.json')
    with open(index_path, 'r') as json_file:
        index = json.load(json_file)
    # Candidate set: union of all images containing any detected object.
    paths_to_images = set([])
    for object in objects:
        paths_to_images |= set(index[object])
    descriptors_path = pathfinder.get('assets', 'data', 'descriptors')
    # Score each candidate by descriptor similarity to the query image.
    results = []
    for path in paths_to_images:
        path = Path(path)
        other_array = np.load(descriptors_path / f'{path.stem}.npy')
        score = dot_product(array, other_array)
        results.append(ReverseSearchResultObject(
            name=path.name,
            path_to_image=path,
            score=score,
            )
        )
    # Using heaps to extract N largest results from a list of n elements
    # is recommended, as the time complexity to do so is O(n * logN), which
    # is approximately O(n) if N is relatively small.
    results = heapq.nlargest(5, results)
    results = [result.path_to_image for result in results]
    if save:
        # Save predictions to /assets/predictions.
        save_predictions(results)
    return results
dc718b3b5fe5f513b9281c5db30516ed32c8246f
3,638,791
def suspend_supplier_services(client, logger, framework_slug, supplier_id, framework_info, dry_run):
    """
    Suspend all of a supplier's published services on a framework.

    The supplier ID list should have been flagged by CCS as requiring action, but
    double check that the supplier:
     - has some services on the framework
     - has `agreementReturned: false`
     - has not `agreementReturned: on-hold`

    :param client: API client instance
    :param framework_info: JSON
    :param dry_run: don't suspend if True
    :return: suspended_service_count
    :rtype: int
    """
    suspended_service_count = 0
    # Ignore any 'private' services that the suppliers have removed themselves
    new_service_status, old_service_status = 'disabled', 'published'

    # Guard clauses: bail out (returning 0) for suppliers that must not be
    # suspended.
    if not framework_info['frameworkInterest']['onFramework']:
        logger.error(f'Supplier {supplier_id} is not on the framework.')
        return suspended_service_count
    if framework_info['frameworkInterest']['agreementReturned']:
        logger.error(f'Supplier {supplier_id} has returned their framework agreement.')
        return suspended_service_count
    if framework_info['frameworkInterest']['agreementStatus'] == 'on-hold':
        logger.error(f"Supplier {supplier_id}'s framework agreement is on hold.")
        return suspended_service_count

    # Find the supplier's non-private services on this framework
    services = client.find_services(
        supplier_id=supplier_id,
        framework=framework_slug,
        status=old_service_status
    )
    if not services['services']:
        logger.error(f'Supplier {supplier_id} has no {old_service_status} services on the framework.')
        return suspended_service_count

    # Suspend all services for each supplier (the API will de-index the services from search results)
    logger.info(
        f"Setting {services['meta']['total']} services to '{new_service_status}' for supplier {supplier_id}."
    )
    for service in services['services']:
        if dry_run:
            logger.info(f"[DRY RUN] Would suspend service {service['id']} for supplier {supplier_id}")
        else:
            client.update_service_status(service['id'], new_service_status, "Suspend services script")
            suspended_service_count += 1

    # Return suspended service count (i.e. if > 0, some emails need to be sent)
    return suspended_service_count
6442bf7f287126c6e1fe445fa9bca1ccde4d142f
3,638,792
import os


def get_events():
    """Get events from meetup website, parse them and return a list of dicts"""
    group = os.environ['MEETUP_GROUP']
    url = f'https://www.meetup.com/{group}/events/'
    response = HTMLSession().get(url, timeout=10)
    if not response.ok:
        raise ConnectionError(
            f"Received http {response.status_code} when connecting to {url}"
        )
    # Each event card on the page matches this flex-container selector.
    event_nodes = response.html.find('.flex.flex--column.flex--spaceBetween')
    return [parse_event(node) for node in event_nodes]
1618808820bd95c34a3ae39c2ff6157f00474e24
3,638,793
def get_campaigns_with_goal_id(campaigns, goal_identifer):
    """Partition campaigns by whether they define the given goal identifier.

    Args:
        campaigns (list): List of campaign objects
        goal_identifer (str): Global goal identifier

    Returns:
        tuple (campaign_goal_list, campaigns_without_goal): campaign_goal_list
        is a list of (campaign, campaign_goal) tuples; campaigns_without_goal
        lists the campaigns lacking the goal.
    """
    campaign_goal_list = []
    campaigns_without_goal = []
    for campaign in campaigns:
        goal = get_campaign_goal(campaign, goal_identifer)
        # A falsy goal means the campaign does not define this identifier.
        if not goal:
            campaigns_without_goal.append(campaign)
            continue
        campaign_goal_list.append((campaign, goal))
    return campaign_goal_list, campaigns_without_goal
83d77ee2e6c1b9b5025a24e996e573fe816dd4b7
3,638,794
def tautologically_define_state_machine_transitions(state_machine):
    """Create a mapping of all transitions in ``state_machine``

    Parameters
    ----------
    state_machine : super_state_machine.machines.StateMachine
        The state machine you want a complete map of

    Returns
    -------
    dict
        Maps each target state name to a list of ``(from_state, allowed)``
        pairs covering every state of the machine, where ``allowed`` is a
        boolean saying whether that transition is permitted.
    """
    raw_transitions = state_machine.__class__._meta['transitions']
    # Convert the enum-keyed transition table into plain state-name strings.
    allowed_sources = {
        target.value: {source.value for source in sources}
        for target, sources in raw_transitions.items()}
    every_state = set(state_machine.States.states())
    transition_map = defaultdict(list)
    for target, sources in allowed_sources.items():
        for state in every_state:
            transition_map[target].append((state, state in sources))
    return transition_map
0e7300a616811e26481b9a66ee8f22336fbd5943
3,638,795
from typing import List


def topk_errors(preds: Tensor, labels: Tensor, ks: List[int]):
    """
    Computes the top-k error for each k.
    Args:
        preds (array): array of predictions. Dimension is N.
        labels (array): array of labels. Dimension is N.
        ks (list): list of ks to calculate the top errors for.
    """
    batch_size = preds.size(0)
    correct_counts = topks_correct(preds, labels, ks)
    # error = 1 - accuracy for each requested k
    return [1.0 - count / batch_size for count in correct_counts]
39a69f745eb789df4a47a776f7c84fa0b7a8b25a
3,638,796
def extract_data_from_inspect(network_name, network_data):
    """
    Extract the container's IPv4/IPv6 addresses from docker inspect output.

    :param network_name: str
    :param network_data: dict
    :return: dict:
        {
            "ip_address4": "12.34.56.78"
            "ip_address6": "ff:fa:..."
        }
    """
    a4 = None
    if network_name == "host":
        # The host network has no per-container address; default to loopback.
        a4 = "127.0.0.1"
    n = {}
    a4 = graceful_chain_get(network_data, "IPAddress") or a4
    if a4:
        n["ip_address4"] = a4
    a6 = graceful_chain_get(network_data, "GlobalIPv6Address")
    if a6:
        # Bug fix: the IPv6 address was previously stored under the
        # "ip_address4" key, clobbering the IPv4 entry.
        n["ip_address6"] = a6
    return n
431eb3f7bda8c5e4d5580bc4be19185223d39c4d
3,638,797
import time


def solveSudoku(fileName="", showResults=False, showTime=False, matrix=[]):
    """
    Solves a Sudoku by prompting the sudoku or reading a text file containing
    the sudoku or by directly taking the matrix as a variable and either shows
    the solution or returns it. Can also tell the execution time.
    (Any one of the arguments 'fileName' or 'matrix' should be given. Else
    rises ValueError)

    args:
        -fileName - Name of the text file in which sudoku is present (optional)
        -showResults - Prints the solution if set true. Else returns the
                       solution (optional)
        -showTime - Calculates and shows the execution time only if set true
                    (optional)
        -matrix - 9x9 sudoku matrix (optional)

    returns:
        If 'showResults' parameter is given true, the function prints every
        solution it finds and implicitly returns None.
        Else it returns the first solved 9x9 sudoku list (with the time-taken
        string appended when showTime is true).

    NOTE(review): `matrix=[]` is a mutable default argument; it is only read
    here, but confirm the helpers never mutate it.
    """
    # Choose the sudoku source: prompt, file, or matrix — but not both.
    if fileName == "" and matrix == []:
        rows = prompt_sudoku()
    elif fileName != "" and matrix == []:
        rows = get_sudoku(fileName)
    elif fileName == "" and matrix != []:
        rows = matrix
    elif fileName != "" and matrix != []:
        raise ValueError("Please give any of the arguments, 'fileName' or 'matrix' (Both are given)")
    st = time.perf_counter()
    all_combo = []
    vert = vertical(rows)
    blocks = blockify(rows)
    # Per-row candidate completions, constrained by columns and blocks.
    # NOTE(review): rows.index(i) returns the FIRST matching row — wrong index
    # if two rows are identical; confirm inputs can never contain duplicates.
    for i in rows:
        all_combo.append(insert_combos(i, vert, blocks, rows.index(i)))
    a = all_combo.copy()
    # Brute-force search over the cartesian product of row candidates,
    # pruning on column duplicates after each row and on block duplicates
    # after every completed band of three rows.
    for r1 in a[0]:
        for r2 in a[1]:
            if vertically_has_duplicates(r1, r2):
                continue
            for r3 in a[2]:
                if vertically_has_duplicates(r1, r2, r3) or blocks_has_duplicates([r1, r2, r3]):
                    continue
                for r4 in a[3]:
                    if vertically_has_duplicates(r1, r2, r3, r4):
                        continue
                    for r5 in a[4]:
                        if vertically_has_duplicates(r1, r2, r3, r4, r5):
                            continue
                        for r6 in a[5]:
                            if vertically_has_duplicates(r1, r2, r3, r4, r5, r6) or blocks_has_duplicates([r4, r5, r6]):
                                continue
                            for r7 in a[6]:
                                if vertically_has_duplicates(r1, r2, r3, r4, r5, r6, r7):
                                    continue
                                for r8 in a[7]:
                                    if vertically_has_duplicates(r1, r2, r3, r4, r5, r6, r7, r8):
                                        continue
                                    for r9 in a[8]:
                                        try_sol = [r1, r2, r3, r4, r5, r6, r7, r8, r9]
                                        if vertically_has_duplicates(r1, r2, r3, r4, r5, r6, r7, r8, r9) or blocks_has_duplicates([r7, r8, r9]):
                                            continue
                                        time_taken = 'Time Taken: ' + str(round(time.perf_counter() - st, 4)) + 's'
                                        if showResults:
                                            # Print this solution and keep searching for more;
                                            # no return happens in this branch.
                                            for row in try_sol:
                                                print(row)
                                            if showTime:
                                                print(time_taken)
                                        else:
                                            # Return immediately on the first solution found.
                                            if showTime:
                                                try_sol.append(time_taken)
                                            return try_sol
a54cd39111638b9a02845781e7b731ca32d11089
3,638,798
def vertexval(val, size):
    """Convert a GTP vertex string to (row, col), or None for 'pass'.

    Raises GTPError for malformed letters/numbers or off-board points.
    NOTE(review): the letter 'i' is accepted here and maps to the same
    column as 'h'; standard GTP coordinates skip 'i' entirely — confirm
    whether it should be rejected instead.
    """
    val = val.lower()
    if val == 'pass':
        return None
    letter = str(val[0])
    number = int(val[1:], 10)
    if not 'a' <= letter <= 'z':
        raise GTPError('invalid vertex letter: {!r}'.format(val))
    if number < 1:
        raise GTPError('invalid vertex number: {!r}'.format(val))
    # Column letters restart after 'i' in GTP, hence the shifted base for j..z.
    base = ord('a') if letter < 'i' else ord('b')
    row, col = size - number, ord(letter) - base
    if not (0 <= row < size and 0 <= col < size):
        raise GTPError('off board')
    return row, col
610441915c1d46baf9157a849140d1db7bf6f3d5
3,638,799