| column | dtype | stats |
|---|---|---|
| blob_id | string | lengths 40-40 |
| directory_id | string | lengths 40-40 |
| path | string | lengths 2-616 |
| content_id | string | lengths 40-40 |
| detected_licenses | list | lengths 0-69 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5-118 |
| snapshot_id | string | lengths 40-40 |
| revision_id | string | lengths 40-40 |
| branch_name | string | lengths 4-63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k-686M, nullable (⌀) |
| star_events_count | int64 | 0-209k |
| fork_events_count | int64 | 0-110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2-10.3M |
| extension | string | 246 classes |
| content | string | lengths 2-10.3M |
| authors | list | lengths 1-1 |
| author_id | string | lengths 0-212 |

Each record below is shown as a single pipe-delimited row in the column order above, with the `content` cell replaced by a placeholder and the file contents rendered as a fenced code block after the row.

| 1cb6331749f4108d71c21c5faf4213b75388ea24 | f20dca20021d34da8e14638321ae7869e32da949 | /misc/construct_subs_all_rois_fc_5D_mats.py | ad44fec329de21e688c77b2a6c0a6ad633bd169f | [] | no_license | R-Gaurav/Autism-Group-Level-Analysis | 14ba67c61a538e5522719059c41ffe7c3167afcc | a80b9d6f54c00bdbe8528faeed69409824aec3c0 | refs/heads/master | 2020-04-24T13:34:24.254343 | 2019-10-01T17:03:46 | 2020-01-09T19:18:26 | 171,992,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,950 | py | (code below) | ["ramashish.gaurav@gmail.com"] | ramashish.gaurav@gmail.com |

```python
#
# Author: Ramashish Gaurav
#
# This file removes the 3D FC matrix which has all 0's for a particular ROI.
# That particular ROI is the 254th ROI (0 based indexing). All subjects are
# expected to have their 254th ROI FC matrix as an all-zeros matrix. In case
# there are some other FC matrices which are all zeros, the subject ID and the
# ROI ID are printed.
#

from collections import defaultdict

import nibabel as nib
import numpy as np
import pandas as pd
import pickle
import sys

ROI_WITH_ZERO_FC_MAT = 254  # 0 based indexing.
NUM_VALID_ROIS = 273


def construct_subs_all_rois_fc_5D_mats(sub_list, file_path):
  """
  Constructs the complete 5D matrix of all_subs x ROIs x 3D brains and removes
  the 3D FC matrix (254th ROI) of each subject, which is expected to be all
  zeros, from the group of all 274 3D matrices of that subject. This is done
  for all the subjects in sub_list. Note that the 5D assembly itself is
  currently disabled (commented out below), so the returned matrix stays empty.

  Args:
    sub_list ([]): A list of all subject IDs.
    file_path (str): A format string (two "{}" slots for the subject ID)
        giving the path of each subject's 4D FC NIfTI file.
  """
  subs_all_rois_fc_mat = []  # To contain all the subjects' all ROIs FC matrix.
  subs_with_no_fc_mats = []  # To contain the IDs of subjects with no FC matrix.
  subs_with_fc_maps = []  # To contain the IDs of subjects with an FC matrix.
  # To contain subject IDs as keys and ROIs with all zero FC matrix as values.
  subs_zero_roi_indices = defaultdict(list)

  for sub_id in sub_list:
    try:
      data = nib.load(file_path.format(sub_id, sub_id)).get_fdata()  # 4D matrix.
      _, _, _, num_rois = data.shape
      #sub_all_rois_fc_mat = []  # To contain all FC mats except 254th of a subject.
      is_254th_roi_mat_zero = False

      for roi in range(num_rois):
        if np.sum(data[:, :, :, roi]) == 0:
          subs_zero_roi_indices[sub_id].append(roi)
          if roi == ROI_WITH_ZERO_FC_MAT:
            is_254th_roi_mat_zero = True
            continue
          else:
            print("Subject with ID {} has an all zero FC matrix at {}th "
                  "ROI.".format(sub_id, roi))
        #sub_all_rois_fc_mat.append(data[:, :, :, roi])

      #sub_all_rois_fc_mat = np.array(sub_all_rois_fc_mat)
      #if sub_all_rois_fc_mat.shape[0] != NUM_VALID_ROIS:
      #  print(("Subject with ID {} does not have {} ROI FC matrices but has "
      #         "{} ROI FC matrices".format(
      #         sub_id, NUM_VALID_ROIS, sub_all_rois_fc_mat.shape[3])))
      #  sys.exit()

      if not is_254th_roi_mat_zero:
        print("Subject with ID {} does not have 254th ROI FC matrix as all "
              "zeros.".format(sub_id))
        #sys.exit()

      subs_with_fc_maps.append(sub_id)
      #subs_all_rois_fc_mat.append(sub_all_rois_fc_mat)
    except Exception as e:
      subs_with_no_fc_mats.append(sub_id)
      print("Error: {}, Subject with ID {} does not have a FC brain map".format(
          e, sub_id))

  #subs_all_rois_fc_mat = np.array(subs_all_rois_fc_mat)
  #num_dim = len(subs_all_rois_fc_mat.shape)
  #if num_dim != 5:
  #  print("subs_all_rois_fc_mat is not 5D matrix but {}D matrix".format(num_dim))
  #  sys.exit()

  return (subs_with_fc_maps, subs_all_rois_fc_mat, subs_with_no_fc_mats,
          subs_zero_roi_indices)


if __name__ == "__main__":
  sub_list = pd.read_csv("/home/others/ramashish/Autism-Group-Level-Analysis/"
                         "ABIDE_1_sub_ids.csv")["SUB_ID"].tolist()
  file_path = sys.argv[1]
  output_dir = sys.argv[2]
  file_path = file_path + "/_subject_id_{}/func2std_xform/00{}_fc_map_flirt.nii.gz"

  (subs_with_fc_maps, subs_all_rois_fc_mat, subs_with_no_fc_mats,
   subs_zero_roi_indices) = construct_subs_all_rois_fc_5D_mats(
       sub_list, file_path)

  np.save(output_dir + "/all_subs_all_rois_fc_5D_mat.npy", subs_all_rois_fc_mat)
  np.save(output_dir + "/all_subs_ids_with_fc_mats_list.npy", subs_with_fc_maps)
  np.save(
      output_dir + "/all_subs_ids_with_no_fc_mats_list.npy", subs_with_no_fc_mats)
  pickle.dump(
      subs_zero_roi_indices,
      open(output_dir + "/all_subs_roi_list_with_zero_val_FC_mats.p", "wb"))
  print("DONE!")
```

| cb93c18de5e1003daafbbee6f8e87c0cdfa5d7d8 | a1e75c7d86facf6963954352e774b638215a3709 | /orderlyjson/jsonschema/tests/test_pattern.py | d064781a9517e29fcd6b7fb0016ef8f232dce0b8 | ["MIT"] | permissive | kroo/py-orderly-json | 335663c0f45444932189f7f6ae7335bf1b1794ea | 91a82193ab33f010107e811d0d718257a7fb3c96 | refs/heads/master | 2020-05-19T16:50:20.636358 | 2010-11-17T19:50:22 | 2010-11-17T19:50:22 | 723,081 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | (code below) | ["elliot.kroo@gmail.com"] | elliot.kroo@gmail.com |

```python
from unittest import TestCase

import jsonschema


class TestPattern(TestCase):
    # Match a simplified regular expression for an e-mail address.
    schema = {"pattern": r"^[A-Za-z0-9][A-Za-z0-9\.]*@([A-Za-z0-9]+\.)+[A-Za-z0-9]+$"}

    def test_pattern_pass(self):
        data = "my.email01@gmail.com"
        try:
            jsonschema.validate(data, self.schema)
        except ValueError as e:
            self.fail("Unexpected failure: %s" % e)

    def test_pattern_pass_nonstring(self):
        # Pattern constraints apply only to strings; non-strings should pass.
        data = 123
        try:
            jsonschema.validate(data, self.schema)
        except ValueError as e:
            self.fail("Unexpected failure: %s" % e)

    def test_pattern_fail(self):
        data = "whatever"
        try:
            jsonschema.validate(data, self.schema)
        except ValueError:
            pass
        else:
            self.fail("Expected failure for %s" % repr(data))
```

| 5e0e5611b9a3bca684272e724af06adaa9eb5a92 | 62fcdb29e0a5ef002902594fd717edba939119ed | /Django/random_word/random_word/settings.py | 23fb2f3c3510ed78dd076480eda9d2cc40d9b98a | [] | no_license | mchorney/Python | cc470e27e6361592cff7b5cb18bcb9f017a96c7e | 2802cf50dc71b54142e8ca16f74b198591d02efe | refs/heads/master | 2021-09-05T20:53:19.705324 | 2018-01-30T23:41:34 | 2018-01-30T23:41:34 | 112,399,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,134 | py | (code below) | ["m.chorney7@gmail.com"] | m.chorney7@gmail.com |
"""
Django settings for random_word project.
Generated by 'django-admin startproject' using Django 1.11.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+gif$wf4%1bd2ga)su1cxbsqmkwh&$bkl229t14slm=zdc&gbq'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'apps.random_gen',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'random_word.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'random_word.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
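
Both `SECURITY WARNING` comments above flag values that should not ship to production as written. A common hardening sketch (not part of this repo; the environment-variable names are hypothetical) reads them from the environment instead:

```python
import os

# Hypothetical env-var names; fall back to insecure dev-only defaults.
SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY", "dev-only-insecure-key")
DEBUG = os.environ.get("DJANGO_DEBUG", "false").lower() == "true"
```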

| 66359b9ed02ada61d81235d9357951f28863408a | 9bc8b7a27bfe29e715de37c0cd628a88c2d89961 | /bin/jsonschema_generator.py | 9428d96252ae0c0ba2b268304c740c568ee27962 | [] | no_license | luhu888/json_schema_generator | a786c48bef3aa5d48c7e544506d91bc0678d714f | 55cd3e476c327397db108227bbbcbaa29c279e02 | refs/heads/master | 2020-03-22T15:40:27.449210 | 2018-07-09T10:18:40 | 2018-07-09T10:18:40 | 140,268,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,183 | py | (code below) | ["liu.ming@xinheyun.com"] | liu.ming@xinheyun.com |

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import argparse
import os
import string
import sys
from urllib.request import urlopen

import json_schema_generator
from json_schema_generator import Recorder, Validator


def record(args):
    if os.path.isfile(args.json_source):
        rec = Recorder.from_file(args.json_source)
    else:
        rec = Recorder.from_url(args.json_source)
    rec.save_json_schema(args.json_schema_file_path, indent=4)


def validate(args):
    json_data = urlopen(args.json_source).read()
    validator = Validator.from_path(args.json_schema_file_path)
    is_valid = validator.assert_json(json_data)
    if is_valid:
        print(" * JSON is valid")
    else:
        print(" ! JSON is broken ")
        print(validator.error_message)


def homologate(args):
    template_file_path = os.path.join(os.path.dirname(json_schema_generator.__file__), 'test_template.py.tmpl')
    json_schemas_dir = os.path.join(args.path, 'json_schemas')
    json_schema_file_name = '%s.json_schema' % args.homologation_name
    json_schema_file_path = os.path.join(json_schemas_dir, json_schema_file_name)
    test_file_path = os.path.join(args.path, 'test_%s_json_schema.py' % args.homologation_name)

    with open(template_file_path) as template_file:
        tmpl = string.Template(template_file.read())

    if not os.path.exists(json_schemas_dir):
        os.mkdir(json_schemas_dir)

    if not os.path.exists(json_schema_file_path):
        rec = Recorder.from_url(args.json_source)
        rec.save_json_schema(json_schema_file_path, indent=4)

    rendered = tmpl.substitute(
        homologation_name=args.homologation_name,
        service_url=args.json_source,
        json_schema_file_name=json_schema_file_name,
        json_schemas_dir=json_schemas_dir
    )

    with open(test_file_path, 'w') as test_file:
        test_file.write(rendered)


def main():
    parser = argparse.ArgumentParser()
    default_parser = argparse.ArgumentParser(add_help=False)
    default_parser.add_argument('json_source', type=str, help='url or file')
    default_parser.add_argument('--path', dest='path', default='', help='set path')

    subparsers = parser.add_subparsers(help='sub-command help')

    parser_record = subparsers.add_parser('record', parents=[default_parser])
    parser_record.add_argument('json_schema_file_path', type=str, help='json schema file path')
    parser_record.set_defaults(func=record)

    parser_validate = subparsers.add_parser('validate', parents=[default_parser])
    parser_validate.add_argument('json_schema_file_path', type=str, help='json schema file path')
    parser_validate.set_defaults(func=validate)

    parser_homologate = subparsers.add_parser('homologate', parents=[default_parser])
    parser_homologate.add_argument('homologation_name', type=str, help='homologation name')
    parser_homologate.set_defaults(func=homologate)

    args = parser.parse_args()
    try:
        args.func
    except AttributeError:
        # No sub-command given; argparse leaves ``func`` unset.
        print("missing 1 or more required arguments (see '%s --help')" % sys.argv[0])
        exit(1)
    else:
        args.func(args)


if __name__ == '__main__':
    main()
```
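
The `record` and `validate` subcommands wrap a small programmatic API. The same flow can be driven from Python using only the `Recorder`/`Validator` calls that already appear in this script (a sketch; the file names are placeholders):

```python
from json_schema_generator import Recorder, Validator

# Mirror of record(): derive a schema from a sample JSON file.
rec = Recorder.from_file("sample.json")
rec.save_json_schema("sample.json_schema", indent=4)

# Mirror of validate(): check raw JSON bytes against the saved schema.
validator = Validator.from_path("sample.json_schema")
if not validator.assert_json(open("sample.json", "rb").read()):
    print(validator.error_message)
```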

| e8d2ac5ca006c732fc43b0997a11598d68c67909 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_roadways.py | 9a1905f1784655d39cfd9d826973aa9efdab7e60 | ["MIT"] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | (code below) | ["xingwang1991@gmail.com"] | xingwang1991@gmail.com |

```python
# class header
class _ROADWAYS():
    def __init__(self):
        self.name = "ROADWAYS"
        self.definitions = "roadway"  # was the bare (undefined) name `roadway`
        self.parents = []
        self.children = []  # was `self.childen`
        self.properties = []
        self.jsondata = {}

        self.basic = ['roadway']
```

| d5cc9cecbd841cbebd0bcf3ab30aed117731f49c | 5b711d9d1c71eb8a7c253a17b2a7f319163d2fdc | /airflow/models/abstractoperator.py | eb1e04f4402e6b180d37c5b9a6f25a7afa4d298b | ["BSD-3-Clause", "MIT", "Apache-2.0"] | permissive | waleedsamy/airflow | 8289465af0ef8199bf82e0696115bb5f83f9b667 | b19ccf8ead027d9eaf53b33305be5873f2711699 | refs/heads/main | 2023-03-17T06:29:20.695168 | 2022-08-29T16:59:13 | 2022-08-29T16:59:13 | 251,581,666 | 0 | 0 | Apache-2.0 | 2020-03-31T11:21:23 | 2020-03-31T11:21:22 | null | UTF-8 | Python | false | false | 20,850 | py | (code below) | ["noreply@github.com"] | waleedsamy.noreply@github.com |

```python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import datetime
import inspect
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    ClassVar,
    Collection,
    Dict,
    FrozenSet,
    Iterable,
    Iterator,
    List,
    Optional,
    Sequence,
    Set,
    Tuple,
    Type,
    Union,
)

from airflow.compat.functools import cached_property
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.models.taskmixin import DAGNode
from airflow.utils.context import Context
from airflow.utils.helpers import render_template_as_native, render_template_to_string
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.trigger_rule import TriggerRule
from airflow.utils.weight_rule import WeightRule

TaskStateChangeCallback = Callable[[Context], None]

if TYPE_CHECKING:
    import jinja2  # Slow import.
    from sqlalchemy.orm import Session

    from airflow.models.baseoperator import BaseOperator, BaseOperatorLink
    from airflow.models.dag import DAG
    from airflow.models.mappedoperator import MappedOperator
    from airflow.models.operator import Operator
    from airflow.models.taskinstance import TaskInstance

DEFAULT_OWNER: str = conf.get_mandatory_value("operators", "default_owner")
DEFAULT_POOL_SLOTS: int = 1
DEFAULT_PRIORITY_WEIGHT: int = 1
DEFAULT_QUEUE: str = conf.get_mandatory_value("operators", "default_queue")
DEFAULT_IGNORE_FIRST_DEPENDS_ON_PAST: bool = conf.getboolean(
    "scheduler", "ignore_first_depends_on_past_by_default"
)
DEFAULT_RETRIES: int = conf.getint("core", "default_task_retries", fallback=0)
DEFAULT_RETRY_DELAY: datetime.timedelta = datetime.timedelta(
    seconds=conf.getint("core", "default_task_retry_delay", fallback=300)
)
DEFAULT_WEIGHT_RULE: WeightRule = WeightRule(
    conf.get("core", "default_task_weight_rule", fallback=WeightRule.DOWNSTREAM)
)
DEFAULT_TRIGGER_RULE: TriggerRule = TriggerRule.ALL_SUCCESS
DEFAULT_TASK_EXECUTION_TIMEOUT: Optional[datetime.timedelta] = conf.gettimedelta(
    "core", "default_task_execution_timeout"
)


class AbstractOperator(LoggingMixin, DAGNode):
    """Common implementation for operators, including unmapped and mapped.

    This base class is more about sharing implementations, not defining a common
    interface. Unfortunately it's difficult to use this as the common base class
    for typing due to BaseOperator carrying too much historical baggage.

    The union type ``from airflow.models.operator import Operator`` is easier
    to use for typing purposes.

    :meta private:
    """

    operator_class: Union[Type["BaseOperator"], Dict[str, Any]]

    weight_rule: str
    priority_weight: int

    # Defines the operator level extra links.
    operator_extra_links: Collection["BaseOperatorLink"]
    # For derived classes to define which fields will get jinjaified.
    template_fields: Collection[str]
    # Defines which file extensions to look for in the templated fields.
    template_ext: Sequence[str]

    owner: str
    task_id: str

    outlets: list
    inlets: list

    HIDE_ATTRS_FROM_UI: ClassVar[FrozenSet[str]] = frozenset(
        (
            'log',
            'dag',  # We show dag_id, don't need to show this too
            'node_id',  # Duplicates task_id
            'task_group',  # Doesn't have a useful repr, no point showing in UI
            'inherits_from_empty_operator',  # impl detail
            # For compatibility with TG, for operators these are just the current task, no point showing
            'roots',
            'leaves',
            # These lists are already shown via *_task_ids
            'upstream_list',
            'downstream_list',
            # Not useful, implementation detail, already shown elsewhere
            'global_operator_extra_link_dict',
            'operator_extra_link_dict',
        )
    )

    def get_dag(self) -> "Optional[DAG]":
        raise NotImplementedError()

    @property
    def task_type(self) -> str:
        raise NotImplementedError()

    @property
    def operator_name(self) -> str:
        raise NotImplementedError()

    @property
    def inherits_from_empty_operator(self) -> bool:
        raise NotImplementedError()

    @property
    def dag_id(self) -> str:
        """Returns dag id if it has one or an adhoc + owner."""
        dag = self.get_dag()
        if dag:
            return dag.dag_id
        return f"adhoc_{self.owner}"

    @property
    def node_id(self) -> str:
        return self.task_id

    def get_template_env(self) -> "jinja2.Environment":
        """Fetch a Jinja template environment from the DAG or instantiate empty environment if no DAG."""
        # This is imported locally since Jinja2 is heavy and we don't need it
        # for most of the functionalities. It is imported by get_template_env()
        # though, so we don't need to put this after the 'if dag' check.
        from airflow.templates import SandboxedEnvironment

        dag = self.get_dag()
        if dag:
            return dag.get_template_env(force_sandboxed=False)
        return SandboxedEnvironment(cache_size=0)

    def prepare_template(self) -> None:
        """Hook triggered after the templated fields get replaced by their content.

        If you need your operator to alter the content of the file before the
        template is rendered, it should override this method to do so.
        """

    def resolve_template_files(self) -> None:
        """Getting the content of files for template_field / template_ext."""
        if self.template_ext:
            for field in self.template_fields:
                content = getattr(self, field, None)
                if content is None:
                    continue
                elif isinstance(content, str) and any(content.endswith(ext) for ext in self.template_ext):
                    env = self.get_template_env()
                    try:
                        setattr(self, field, env.loader.get_source(env, content)[0])  # type: ignore
                    except Exception:
                        self.log.exception("Failed to resolve template field %r", field)
                elif isinstance(content, list):
                    env = self.get_template_env()
                    for i, item in enumerate(content):
                        if isinstance(item, str) and any(item.endswith(ext) for ext in self.template_ext):
                            try:
                                content[i] = env.loader.get_source(env, item)[0]  # type: ignore
                            except Exception:
                                self.log.exception("Failed to get source %s", item)
        self.prepare_template()

    def get_direct_relative_ids(self, upstream: bool = False) -> Set[str]:
        """Get direct relative IDs to the current task, upstream or downstream."""
        if upstream:
            return self.upstream_task_ids
        return self.downstream_task_ids

    def get_flat_relative_ids(
        self,
        upstream: bool = False,
        found_descendants: Optional[Set[str]] = None,
    ) -> Set[str]:
        """Get a flat set of relative IDs, upstream or downstream."""
        dag = self.get_dag()
        if not dag:
            return set()

        if found_descendants is None:
            found_descendants = set()

        task_ids_to_trace = self.get_direct_relative_ids(upstream)
        while task_ids_to_trace:
            task_ids_to_trace_next: Set[str] = set()
            for task_id in task_ids_to_trace:
                if task_id in found_descendants:
                    continue
                task_ids_to_trace_next.update(dag.task_dict[task_id].get_direct_relative_ids(upstream))
                found_descendants.add(task_id)
            task_ids_to_trace = task_ids_to_trace_next

        return found_descendants

    def get_flat_relatives(self, upstream: bool = False) -> Collection["Operator"]:
        """Get a flat list of relatives, either upstream or downstream."""
        dag = self.get_dag()
        if not dag:
            return set()
        return [dag.task_dict[task_id] for task_id in self.get_flat_relative_ids(upstream)]

    def _iter_all_mapped_downstreams(self) -> Iterator["MappedOperator"]:
        """Return mapped nodes that are direct dependencies of the current task.

        For now, this walks the entire DAG to find mapped nodes that have this
        current task as an upstream. We cannot use ``downstream_list`` since it
        only contains operators, not task groups. In the future, we should
        provide a way to record a DAG node's all downstream nodes instead.

        Note that this does not guarantee the returned tasks actually use the
        current task for task mapping, but only checks that those tasks are
        mapped operators, and are downstreams of the current task.

        To get a list of tasks that uses the current task for task mapping, use
        :meth:`iter_mapped_dependants` instead.
        """
        from airflow.models.mappedoperator import MappedOperator
        from airflow.utils.task_group import TaskGroup

        def _walk_group(group: TaskGroup) -> Iterable[Tuple[str, DAGNode]]:
            """Recursively walk children in a task group.

            This yields all direct children (including both tasks and task
            groups), and all children of any task groups.
            """
            for key, child in group.children.items():
                yield key, child
                if isinstance(child, TaskGroup):
                    yield from _walk_group(child)

        dag = self.get_dag()
        if not dag:
            raise RuntimeError("Cannot check for mapped dependants when not attached to a DAG")
        for key, child in _walk_group(dag.task_group):
            if key == self.node_id:
                continue
            if not isinstance(child, MappedOperator):
                continue
            if self.node_id in child.upstream_task_ids:
                yield child

    def iter_mapped_dependants(self) -> Iterator["MappedOperator"]:
        """Return mapped nodes that depend on the current task for expansion.

        For now, this walks the entire DAG to find mapped nodes that have this
        current task as an upstream. We cannot use ``downstream_list`` since it
        only contains operators, not task groups. In the future, we should
        provide a way to record a DAG node's all downstream nodes instead.
        """
        return (
            downstream
            for downstream in self._iter_all_mapped_downstreams()
            if any(p.node_id == self.node_id for p in downstream.iter_mapped_dependencies())
        )

    def unmap(self, resolve: Union[None, Dict[str, Any], Tuple[Context, "Session"]]) -> "BaseOperator":
        """Get the "normal" operator from current abstract operator.

        MappedOperator uses this to unmap itself based on the map index. A non-
        mapped operator (i.e. BaseOperator subclass) simply returns itself.

        :meta private:
        """
        raise NotImplementedError()

    @property
    def priority_weight_total(self) -> int:
        """
        Total priority weight for the task. It might include all upstream or downstream tasks.

        Depending on the weight rule:

        - WeightRule.ABSOLUTE - only own weight
        - WeightRule.DOWNSTREAM - adds priority weight of all downstream tasks
        - WeightRule.UPSTREAM - adds priority weight of all upstream tasks
        """
        if self.weight_rule == WeightRule.ABSOLUTE:
            return self.priority_weight
        elif self.weight_rule == WeightRule.DOWNSTREAM:
            upstream = False
        elif self.weight_rule == WeightRule.UPSTREAM:
            upstream = True
        else:
            upstream = False
        dag = self.get_dag()
        if dag is None:
            return self.priority_weight
        return self.priority_weight + sum(
            dag.task_dict[task_id].priority_weight
            for task_id in self.get_flat_relative_ids(upstream=upstream)
        )
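
    # Worked example (not part of the Airflow source): in a chain t1 >> t2 >> t3
    # where each task has priority_weight=1 and the default WeightRule.DOWNSTREAM,
    # t1.priority_weight_total is 1 + 1 + 1 = 3, t2's is 2, and t3's is 1, so
    # tasks further upstream get a higher effective scheduling priority.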

    @cached_property
    def operator_extra_link_dict(self) -> Dict[str, Any]:
        """Returns dictionary of all extra links for the operator."""
        op_extra_links_from_plugin: Dict[str, Any] = {}
        from airflow import plugins_manager

        plugins_manager.initialize_extra_operators_links_plugins()
        if plugins_manager.operator_extra_links is None:
            raise AirflowException("Can't load operators")
        for ope in plugins_manager.operator_extra_links:
            if ope.operators and self.operator_class in ope.operators:
                op_extra_links_from_plugin.update({ope.name: ope})

        operator_extra_links_all = {link.name: link for link in self.operator_extra_links}
        # Extra links defined in Plugins override operator links defined in the operator.
        operator_extra_links_all.update(op_extra_links_from_plugin)

        return operator_extra_links_all

    @cached_property
    def global_operator_extra_link_dict(self) -> Dict[str, Any]:
        """Returns dictionary of all global extra links."""
        from airflow import plugins_manager

        plugins_manager.initialize_extra_operators_links_plugins()
        if plugins_manager.global_operator_extra_links is None:
            raise AirflowException("Can't load operators")
        return {link.name: link for link in plugins_manager.global_operator_extra_links}

    @cached_property
    def extra_links(self) -> List[str]:
        return list(set(self.operator_extra_link_dict).union(self.global_operator_extra_link_dict))

    def get_extra_links(self, ti: "TaskInstance", link_name: str) -> Optional[str]:
        """For an operator, gets the URLs that the ``extra_links`` entry points to.

        :meta private:

        :raise ValueError: The error message of a ValueError will be passed on through to
            the frontend to show up as a tooltip on the disabled link.
        :param ti: The TaskInstance for the URL being searched for.
        :param link_name: The name of the link we're looking for the URL for. Should be
            one of the options specified in ``extra_links``.
        """
        link: Optional["BaseOperatorLink"] = self.operator_extra_link_dict.get(link_name)
        if not link:
            link = self.global_operator_extra_link_dict.get(link_name)
            if not link:
                return None
        parameters = inspect.signature(link.get_link).parameters
        old_signature = all(name != "ti_key" for name, p in parameters.items() if p.kind != p.VAR_KEYWORD)
        if old_signature:
            return link.get_link(self.unmap(None), ti.dag_run.logical_date)  # type: ignore[misc]
        return link.get_link(self.unmap(None), ti_key=ti.key)

    def render_template_fields(
        self,
        context: Context,
        jinja_env: Optional["jinja2.Environment"] = None,
    ) -> Optional["BaseOperator"]:
        """Template all attributes listed in template_fields.

        If the operator is mapped, this should return the unmapped, fully
        rendered, and map-expanded operator. The mapped operator should not be
        modified.

        If the operator is not mapped, this should modify the operator in-place
        and return either *None* (for backwards compatibility) or *self*.
        """
        raise NotImplementedError()

    @provide_session
    def _do_render_template_fields(
        self,
        parent: Any,
        template_fields: Iterable[str],
        context: Context,
        jinja_env: "jinja2.Environment",
        seen_oids: Set[int],
        *,
        session: "Session" = NEW_SESSION,
    ) -> None:
        for attr_name in template_fields:
            try:
                value = getattr(parent, attr_name)
            except AttributeError:
                raise AttributeError(
                    f"{attr_name!r} is configured as a template field "
                    f"but {parent.task_type} does not have this attribute."
                )
            if not value:
                continue
            try:
                rendered_content = self.render_template(
                    value,
                    context,
                    jinja_env,
                    seen_oids,
                )
            except Exception:
                self.log.exception(
                    "Exception rendering Jinja template for task '%s', field '%s'. Template: %r",
                    self.task_id,
                    attr_name,
                    value,
                )
                raise
            else:
                setattr(parent, attr_name, rendered_content)

    def render_template(
        self,
        content: Any,
        context: Context,
        jinja_env: Optional["jinja2.Environment"] = None,
        seen_oids: Optional[Set[int]] = None,
    ) -> Any:
        """Render a templated string.

        If *content* is a collection holding multiple templated strings, strings
        in the collection will be templated recursively.

        :param content: Content to template. Only strings can be templated (may
            be inside a collection).
        :param context: Dict with values to apply on templated content
        :param jinja_env: Jinja environment. Can be provided to avoid
            re-creating Jinja environments during recursion.
        :param seen_oids: template fields already rendered (to avoid
            *RecursionError* on circular dependencies)
        :return: Templated content
        """
        # "content" is a bad name, but we're stuck to it being public API.
        value = content
        del content

        if seen_oids is not None:
            oids = seen_oids
        else:
            oids = set()

        if id(value) in oids:
            return value

        if not jinja_env:
            jinja_env = self.get_template_env()

        from airflow.models.param import DagParam
        from airflow.models.xcom_arg import XComArg

        if isinstance(value, str):
            if any(value.endswith(ext) for ext in self.template_ext):  # A filepath.
                template = jinja_env.get_template(value)
            else:
                template = jinja_env.from_string(value)
            dag = self.get_dag()
            if dag and dag.render_template_as_native_obj:
                return render_template_as_native(template, context)
            return render_template_to_string(template, context)

        if isinstance(value, (DagParam, XComArg)):
            return value.resolve(context)

        # Fast path for common built-in collections.
        if value.__class__ is tuple:
            return tuple(self.render_template(element, context, jinja_env, oids) for element in value)
        elif isinstance(value, tuple):  # Special case for named tuples.
            return value.__class__(*(self.render_template(el, context, jinja_env, oids) for el in value))
        elif isinstance(value, list):
            return [self.render_template(element, context, jinja_env, oids) for element in value]
        elif isinstance(value, dict):
            return {k: self.render_template(v, context, jinja_env, oids) for k, v in value.items()}
        elif isinstance(value, set):
            return {self.render_template(element, context, jinja_env, oids) for element in value}

        # More complex collections.
        self._render_nested_template_fields(value, context, jinja_env, oids)
        return value

    def _render_nested_template_fields(
        self,
        value: Any,
        context: Context,
        jinja_env: "jinja2.Environment",
        seen_oids: Set[int],
    ) -> None:
        if id(value) in seen_oids:
            return
        seen_oids.add(id(value))
        try:
            nested_template_fields = value.template_fields
        except AttributeError:
            # content has no inner template fields
            return
        self._do_render_template_fields(value, nested_template_fields, context, jinja_env, seen_oids)
```
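
`render_template` above recurses structurally: plain strings go through Jinja2, built-in containers are rebuilt element by element, and other objects are walked for nested template fields. A minimal standalone analogue of that container recursion, using only `jinja2` (a simplification for illustration, not Airflow's implementation):

```python
import jinja2

env = jinja2.Environment()

def render(value, context):
    # Strings are templates; containers are rebuilt recursively.
    if isinstance(value, str):
        return env.from_string(value).render(context)
    if isinstance(value, list):
        return [render(v, context) for v in value]
    if isinstance(value, dict):
        return {k: render(v, context) for k, v in value.items()}
    return value  # non-templated leaf

print(render({"cmd": ["echo", "{{ ds }}"]}, {"ds": "2022-08-29"}))
# {'cmd': ['echo', '2022-08-29']}
```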

| b57f36c05a0c7de1e3886710c10113783c3318df | 5a7fba5001e24524ea10eb3f732c578f03385687 | /MinMaxAlgorithm/implementations/NormalPlayer.py | ddfa7a4c25e375f5ec061444c1c3d4004be6757f | [] | no_license | MIKE432/intro-to-AI-lab | ee87be7fa869c79cfcbee8a596bb035b791f945a | 84e08761c8c8a8b744d955352a2962611e05f77c | refs/heads/master | 2023-06-03T06:44:19.374748 | 2021-06-16T10:25:36 | 2021-06-16T10:25:36 | 345,989,239 | 0 | 0 | null | 2021-04-17T11:51:46 | 2021-03-09T11:53:01 | Python | UTF-8 | Python | false | false | 285 | py | (code below) | ["michal.raszczuk.apusoft@gmail.com"] | michal.raszczuk.apusoft@gmail.com |

```python
from typing import List

from abstracts.Player import Player


class NormalPlayer(Player):
    def __init__(self, number):
        super().__init__(number)

    def move(self, choices: List, board, random_move=False):
        # Prompt the human player; `board` and `random_move` are part of the
        # shared Player interface and unused here.
        return int(input(f"Pick one of given values {choices}: "))
```
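
`NormalPlayer` fills in the `Player` interface visible above: `__init__(number)` plus `move(choices, board, random_move)`. For contrast, a hypothetical sibling under the same assumed interface that picks uniformly at random instead of prompting:

```python
import random
from typing import List

from abstracts.Player import Player  # same assumed abstract base as above


class RandomPlayer(Player):
    def __init__(self, number):
        super().__init__(number)

    def move(self, choices: List, board, random_move=False):
        # Pick a legal move uniformly instead of asking the user.
        return random.choice(choices)
```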

| f855f6cb918cb89594fb953af4a1dd3609e45fb9 | 4809213a0ecef876c9e1bf3669169881766335c8 | /lib/hitbox.py | e1cbc579ffb2f30bc66e5d66abb418d594d57ef9 | [] | no_license | wmaxlloyd/COVID-19-Model | 98a5f697558fb25b0c721a3876727dc091cb145c | 7431a033add5f0eb4acf8a95cff9faebcdcde405 | refs/heads/master | 2023-02-13T05:28:41.001582 | 2021-01-19T05:24:11 | 2021-01-19T05:24:11 | 330,868,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,418 | py | (code below) | ["maxlloyd@Maxs-MacBook-Pro.local"] | maxlloyd@Maxs-MacBook-Pro.local |

```python
from typing import Tuple, TYPE_CHECKING

from pyglet.gl import *

from .vector import Vector

if TYPE_CHECKING:
    from .component import Component


class Hitbox:
    def __init__(self, component: 'Component', width: Tuple[float, float], height: Tuple[float, float]):
        self.__component = component
        self.width_range = tuple(sorted(width))
        self.height_range = tuple(sorted(height))

    def get_coordinates(self) -> Tuple[Vector, Vector, Vector, Vector]:
        return (
            Vector(self.left(), self.top()),
            Vector(self.right(), self.top()),
            Vector(self.right(), self.bottom()),
            Vector(self.left(), self.bottom()),
        )

    def left(self) -> float:
        return self.__component.pos.x() + self.width_range[0]

    def right(self) -> float:
        return self.__component.pos.x() + self.width_range[1]

    def top(self) -> float:
        return self.__component.pos.y() + self.height_range[1]

    def bottom(self) -> float:
        return self.__component.pos.y() + self.height_range[0]

    def contains_point(self, point: Vector):
        point_x, point_y = point.array
        if not self.left() <= point_x <= self.right():
            return False
        if not self.bottom() <= point_y <= self.top():
            return False
        return True

    def intersects(self, hitbox: 'Hitbox') -> bool:
        return not (
            self.bottom() > hitbox.top() or
            self.top() < hitbox.bottom() or
            self.right() < hitbox.left() or
            self.left() > hitbox.right()
        )

    def contains(self, coordinate: Vector) -> bool:
        return (
            self.bottom() <= coordinate.y() <= self.top() and
            self.left() <= coordinate.x() <= self.right()  # was `self.left` (missing call parentheses)
        )

    def draw(self):
        points = self.get_coordinates()
        glBegin(GL_LINES)
        for i in range(len(points)):
            point1 = points[i]
            point2 = points[(i + 1) % len(points)]
            glVertex3f(point1.x(), point1.y(), 0)
            glVertex3f(point2.x(), point2.y(), 0)
        glEnd()

    def add_padding(self, padding: float) -> 'Hitbox':
        self.width_range = (self.width_range[0] - padding, self.width_range[1] + padding)
        self.height_range = (self.height_range[0] - padding, self.height_range[1] + padding)
        return self
```
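
`intersects` above is the classic axis-aligned bounding-box (AABB) test: two boxes overlap exactly when neither lies entirely beyond the other on either axis. The same predicate on bare edge coordinates (a standalone sketch, independent of `Component` and `Vector`):

```python
def aabb_intersects(a_left, a_right, a_bottom, a_top,
                    b_left, b_right, b_bottom, b_top):
    # Overlap holds unless one box is strictly outside the other on some axis.
    return not (a_bottom > b_top or a_top < b_bottom or
                a_right < b_left or a_left > b_right)

print(aabb_intersects(0, 2, 0, 2, 1, 3, 1, 3))  # True: boxes overlap
print(aabb_intersects(0, 1, 0, 1, 2, 3, 2, 3))  # False: disjoint
```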

| 6b2fc19a523d12d6170e86b8b28d7e4d27721009 | 82f449cc405b8379a30b228a15682bbd70d1b09d | /venv/Lib/site-packages/PyInstaller/building/makespec.py | 82c1572c7ea82952298580e8c80c3580211d95f3 | [] | no_license | neo-talen/QuickCmdBtnSet | 82dd18e070e285ba752f9bd3586201cc8c174f78 | 4781a5c44a4022b6f014bd8ca513b89983f6a309 | refs/heads/master | 2022-05-06T08:29:10.993183 | 2022-05-05T11:07:04 | 2022-05-05T11:07:04 | 183,062,524 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 31,940 | py | (code below; record truncated) | … | … |

```python
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2022, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
"""
Automatically build spec files containing a description of the project.
"""

import argparse
import os
import sys

from PyInstaller import DEFAULT_SPECPATH, HOMEPATH
from PyInstaller import log as logging
from PyInstaller.building.templates import (
    bundleexetmplt, bundletmplt, cipher_absent_template, cipher_init_template, onedirtmplt, onefiletmplt, splashtmpl
)
from PyInstaller.compat import expand_path, is_darwin, is_win

logger = logging.getLogger(__name__)

add_command_sep = os.pathsep

# This list gives valid choices for the ``--debug`` command-line option, except for the ``all`` choice.
DEBUG_ARGUMENT_CHOICES = ['imports', 'bootloader', 'noarchive']
# This is the ``all`` choice.
DEBUG_ALL_CHOICE = ['all']


def escape_win_filepath(path):
    # escape all \ with another \ after using normpath to clean up the path
    return os.path.normpath(path).replace('\\', '\\\\')


def make_path_spec_relative(filename, spec_dir):
    """
    Make the filename relative to the directory containing the .spec file if filename is relative and not absolute.
    Otherwise keep filename untouched.
    """
    if os.path.isabs(filename):
        return filename
    else:
        filename = os.path.abspath(filename)
        # Make it relative.
        filename = os.path.relpath(filename, start=spec_dir)
        return filename


# Support for trying to avoid hard-coded paths in the .spec files. Eg, all files rooted in the Installer directory
# tree will be written using "HOMEPATH", thus allowing this spec file to be used with any Installer installation.
# Same thing could be done for other paths too.
path_conversions = ((HOMEPATH, "HOMEPATH"),)


def add_data_or_binary(string):
    try:
        src, dest = string.split(add_command_sep)
    except ValueError as e:
        # Split into SRC and DEST failed, wrong syntax.
        # argparse.ArgumentTypeError is what argparse expects from a ``type=`` callable;
        # the original passed a bare message to argparse.ArgumentError, which needs two arguments.
        raise argparse.ArgumentTypeError("Wrong syntax, should be SRC{}DEST".format(add_command_sep)) from e
    if not src or not dest:
        # Syntax was correct, but one or both of SRC and DEST was not given
        raise argparse.ArgumentTypeError("You have to specify both SRC and DEST")
    # Return tuple containing SRC and DEST
    return src, dest


def make_variable_path(filename, conversions=path_conversions):
    if not os.path.isabs(filename):
        # os.path.commonpath can not compare relative and absolute paths, and if filename is not absolute, none of
        # the paths in conversions will match anyway.
        return None, filename
    for (from_path, to_name) in conversions:
        assert os.path.abspath(from_path) == from_path, ("path '%s' should already be absolute" % from_path)
        try:
            common_path = os.path.commonpath([filename, from_path])
        except ValueError:
            # Per https://docs.python.org/3/library/os.path.html#os.path.commonpath, this raises ValueError in
            # several cases which prevent computing a common path.
            common_path = None
        if common_path == from_path:
            rest = filename[len(from_path):]
            if rest.startswith(('\\', '/')):
                rest = rest[1:]
            return to_name, rest
    return None, filename


# An object used in place of a "path string", which knows how to repr() itself using variable names instead of
# hard-coded paths.
class Path:
    def __init__(self, *parts):
        self.path = os.path.join(*parts)
        self.variable_prefix = self.filename_suffix = None

    def __repr__(self):
        if self.filename_suffix is None:
            self.variable_prefix, self.filename_suffix = make_variable_path(self.path)
        if self.variable_prefix is None:
            return repr(self.path)
        return "os.path.join(" + self.variable_prefix + "," + repr(self.filename_suffix) + ")"


# An object used to construct extra preamble for the spec file, in order to accommodate extra collect_*() calls
# from the command-line.
class Preamble:
    def __init__(
        self, datas, binaries, hiddenimports, collect_data, collect_binaries, collect_submodules, collect_all,
        copy_metadata, recursive_copy_metadata
    ):
        # Initialize with literal values - will be switched to preamble variable name later, if necessary
        self.binaries = binaries or []
        self.hiddenimports = hiddenimports or []
        self.datas = datas or []
        # Preamble content
        self.content = []

        # Import statements
        if collect_data:
            self._add_hookutil_import('collect_data_files')
        if collect_binaries:
            self._add_hookutil_import('collect_dynamic_libs')
        if collect_submodules:
            self._add_hookutil_import('collect_submodules')
        if collect_all:
            self._add_hookutil_import('collect_all')
        if copy_metadata or recursive_copy_metadata:
            self._add_hookutil_import('copy_metadata')
        if self.content:
            self.content += ['']  # empty line to separate the section
        # Variables
        if collect_data or copy_metadata or collect_all or recursive_copy_metadata:
            self._add_var('datas', self.datas)
            self.datas = 'datas'  # switch to variable
        if collect_binaries or collect_all:
            self._add_var('binaries', self.binaries)
            self.binaries = 'binaries'  # switch to variable
        if collect_submodules or collect_all:
            self._add_var('hiddenimports', self.hiddenimports)
            self.hiddenimports = 'hiddenimports'  # switch to variable
        # Content - collect_data_files
        for entry in collect_data:
            self._add_collect_data(entry)
        # Content - copy_metadata
        for entry in copy_metadata:
            self._add_copy_metadata(entry)
        # Content - copy_metadata(..., recursive=True)
        for entry in recursive_copy_metadata:
            self._add_recursive_copy_metadata(entry)
        # Content - collect_binaries
        for entry in collect_binaries:
            self._add_collect_binaries(entry)
        # Content - collect_submodules
        for entry in collect_submodules:
            self._add_collect_submodules(entry)
        # Content - collect_all
        for entry in collect_all:
            self._add_collect_all(entry)
        # Merge
        if self.content and self.content[-1] != '':
            self.content += ['']  # empty line
        self.content = '\n'.join(self.content)

    def _add_hookutil_import(self, name):
        self.content += ['from PyInstaller.utils.hooks import {0}'.format(name)]

    def _add_var(self, name, initial_value):
        self.content += ['{0} = {1}'.format(name, initial_value)]

    def _add_collect_data(self, name):
        self.content += ['datas += collect_data_files(\'{0}\')'.format(name)]

    def _add_copy_metadata(self, name):
        self.content += ['datas += copy_metadata(\'{0}\')'.format(name)]

    def _add_recursive_copy_metadata(self, name):
        self.content += ['datas += copy_metadata(\'{0}\', recursive=True)'.format(name)]

    def _add_collect_binaries(self, name):
        self.content += ['binaries += collect_dynamic_libs(\'{0}\')'.format(name)]

    def _add_collect_submodules(self, name):
        self.content += ['hiddenimports += collect_submodules(\'{0}\')'.format(name)]

    def _add_collect_all(self, name):
        self.content += [
            'tmp_ret = collect_all(\'{0}\')'.format(name),
            'datas += tmp_ret[0]; binaries += tmp_ret[1]; hiddenimports += tmp_ret[2]'
        ]
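
# Worked example (not part of the original source):
# Preamble([], [], [], [], [], [], ['numpy'], [], []).content renders as:
#
#   from PyInstaller.utils.hooks import collect_all
#
#   datas = []
#   binaries = []
#   hiddenimports = []
#   tmp_ret = collect_all('numpy')
#   datas += tmp_ret[0]; binaries += tmp_ret[1]; hiddenimports += tmp_ret[2]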
def __add_options(parser):
"""
Add the `Makespec` options to a option-parser instance or a option group.
"""
g = parser.add_argument_group('What to generate')
g.add_argument(
"-D",
"--onedir",
dest="onefile",
action="store_false",
default=None,
help="Create a one-folder bundle containing an executable (default)",
)
g.add_argument(
"-F",
"--onefile",
dest="onefile",
action="store_true",
default=None,
help="Create a one-file bundled executable.",
)
g.add_argument(
"--specpath",
metavar="DIR",
help="Folder to store the generated spec file (default: current directory)",
)
g.add_argument(
"-n",
"--name",
help="Name to assign to the bundled app and spec file (default: first script's basename)",
)
g = parser.add_argument_group('What to bundle, where to search')
g.add_argument(
'--add-data',
action='append',
default=[],
type=add_data_or_binary,
metavar='<SRC;DEST or SRC:DEST>',
dest='datas',
help='Additional non-binary files or folders to be added to the executable. The path separator is platform '
'specific, ``os.pathsep`` (which is ``;`` on Windows and ``:`` on most unix systems) is used. This option '
'can be used multiple times.',
)
g.add_argument(
'--add-binary',
action='append',
default=[],
type=add_data_or_binary,
metavar='<SRC;DEST or SRC:DEST>',
dest="binaries",
help='Additional binary files to be added to the executable. See the ``--add-data`` option for more details. '
'This option can be used multiple times.',
)
g.add_argument(
"-p",
"--paths",
dest="pathex",
metavar="DIR",
action="append",
default=[],
help="A path to search for imports (like using PYTHONPATH). Multiple paths are allowed, separated by ``%s``, "
"or use this option multiple times. Equivalent to supplying the ``pathex`` argument in the spec file." %
repr(os.pathsep),
)
g.add_argument(
'--hidden-import',
'--hiddenimport',
action='append',
default=[],
metavar="MODULENAME",
dest='hiddenimports',
help='Name an import not visible in the code of the script(s). This option can be used multiple times.',
)
g.add_argument(
'--collect-submodules',
action="append",
default=[],
metavar="MODULENAME",
dest='collect_submodules',
help='Collect all submodules from the specified package or module. This option can be used multiple times.',
)
g.add_argument(
'--collect-data',
'--collect-datas',
action="append",
default=[],
metavar="MODULENAME",
dest='collect_data',
help='Collect all data from the specified package or module. This option can be used multiple times.',
)
g.add_argument(
'--collect-binaries',
action="append",
default=[],
metavar="MODULENAME",
dest='collect_binaries',
help='Collect all binaries from the specified package or module. This option can be used multiple times.',
)
g.add_argument(
'--collect-all',
action="append",
default=[],
metavar="MODULENAME",
dest='collect_all',
help='Collect all submodules, data files, and binaries from the specified package or module. This option can '
'be used multiple times.',
)
g.add_argument(
'--copy-metadata',
action="append",
default=[],
metavar="PACKAGENAME",
dest='copy_metadata',
help='Copy metadata for the specified package. This option can be used multiple times.',
)
g.add_argument(
'--recursive-copy-metadata',
action="append",
default=[],
metavar="PACKAGENAME",
dest='recursive_copy_metadata',
help='Copy metadata for the specified package and all its dependencies. This option can be used multiple '
'times.',
)
g.add_argument(
"--additional-hooks-dir",
action="append",
dest="hookspath",
default=[],
help="An additional path to search for hooks. This option can be used multiple times.",
)
g.add_argument(
'--runtime-hook',
action='append',
dest='runtime_hooks',
default=[],
help='Path to a custom runtime hook file. A runtime hook is code that is bundled with the executable and is '
'executed before any other code or module to set up special features of the runtime environment. This option '
'can be used multiple times.',
)
g.add_argument(
'--exclude-module',
dest='excludes',
action='append',
default=[],
help='Optional module or package (the Python name, not the path name) that will be ignored (as though it was '
'not found). This option can be used multiple times.',
)
g.add_argument(
'--key',
dest='key',
help='The key used to encrypt Python bytecode.',
)
g.add_argument(
'--splash',
dest='splash',
metavar="IMAGE_FILE",
help="(EXPERIMENTAL) Add an splash screen with the image IMAGE_FILE to the application. The splash screen can "
"display progress updates while unpacking.",
)
g = parser.add_argument_group('How to generate')
g.add_argument(
"-d",
"--debug",
# If this option is not specified, then its default value is an empty list (no debug options selected).
default=[],
# Note that ``nargs`` is omitted. This produces a single item not stored in a list, as opposed to a list
# containing one item, as per `nargs <https://docs.python.org/3/library/argparse.html#nargs>`_.
nargs=None,
# The options specified must come from this list.
choices=DEBUG_ALL_CHOICE + DEBUG_ARGUMENT_CHOICES,
# Append choice, rather than storing them (which would overwrite any previous selections).
action='append',
# Allow newlines in the help text; see the ``_SmartFormatter`` in ``__main__.py``.
help=(
"R|Provide assistance with debugging a frozen\n"
"application. This argument may be provided multiple\n"
"times to select several of the following options.\n"
"\n"
"- all: All three of the following options.\n"
"\n"
"- imports: specify the -v option to the underlying\n"
" Python interpreter, causing it to print a message\n"
" each time a module is initialized, showing the\n"
" place (filename or built-in module) from which it\n"
" is loaded. See\n"
" https://docs.python.org/3/using/cmdline.html#id4.\n"
"\n"
"- bootloader: tell the bootloader to issue progress\n"
" messages while initializing and starting the\n"
" bundled app. Used to diagnose problems with\n"
" missing imports.\n"
"\n"
"- noarchive: instead of storing all frozen Python\n"
" source files as an archive inside the resulting\n"
" executable, store them as files in the resulting\n"
" output directory.\n"
"\n"
),
)
g.add_argument(
'--python-option',
dest='python_options',
metavar='PYTHON_OPTION',
action='append',
default=[],
help='Specify a command-line option to pass to the Python interpreter at runtime. Currently supports '
'"v" (equivalent to "--debug imports"), "u", and "W <warning control>".',
)
g.add_argument(
"-s",
"--strip",
action="store_true",
help="Apply a symbol-table strip to the executable and shared libs (not recommended for Windows)",
)
g.add_argument(
"--noupx",
action="store_true",
default=False,
help="Do not use UPX even if it is available (works differently between Windows and *nix)",
)
g.add_argument(
"--upx-exclude",
dest="upx_exclude",
metavar="FILE",
action="append",
help="Prevent a binary from being compressed when using upx. This is typically used if upx corrupts certain "
"binaries during compression. FILE is the filename of the binary without path. This option can be used "
"multiple times.",
)
g = parser.add_argument_group('Windows and Mac OS X specific options')
g.add_argument(
"-c",
"--console",
"--nowindowed",
dest="console",
action="store_true",
default=None,
help="Open a console window for standard i/o (default). On Windows this option has no effect if the first "
"script is a '.pyw' file.",
)
g.add_argument(
"-w",
"--windowed",
"--noconsole",
dest="console",
action="store_false",
default=None,
help="Windows and Mac OS X: do not provide a console window for standard i/o. On Mac OS this also triggers "
"building a Mac OS .app bundle. On Windows this option is automatically set if the first script is a '.pyw' "
"file. This option is ignored on *NIX systems.",
)
g.add_argument(
"-i",
"--icon",
dest="icon_file",
metavar='<FILE.ico or FILE.exe,ID or FILE.icns or Image or "NONE">',
help="FILE.ico: apply the icon to a Windows executable. FILE.exe,ID: extract the icon with ID from an exe. "
"FILE.icns: apply the icon to the .app bundle on Mac OS. If an image file is entered that isn't in the "
"platform format (ico on Windows, icns on Mac), PyInstaller tries to use Pillow to translate the icon into "
"the correct format (if Pillow is installed). Use \"NONE\" to not apply any icon, thereby making the OS show "
"some default (default: apply PyInstaller's icon)",
)
g.add_argument(
"--disable-windowed-traceback",
dest="disable_windowed_traceback",
action="store_true",
default=False,
help="Disable traceback dump of unhandled exception in windowed (noconsole) mode (Windows and macOS only), "
"and instead display a message that this feature is disabled.",
)
g = parser.add_argument_group('Windows specific options')
g.add_argument(
"--version-file",
dest="version_file",
metavar="FILE",
help="Add a version resource from FILE to the exe.",
)
g.add_argument(
"-m",
"--manifest",
metavar="<FILE or XML>",
help="Add manifest FILE or XML to the exe.",
)
g.add_argument(
"--no-embed-manifest",
dest="embed_manifest",
action="store_false",
help="Generate an external .exe.manifest file instead of embedding the manifest into the exe. Applicable only "
"to onedir mode; in onefile mode, the manifest is always embedded, regardless of this option.",
)
g.add_argument(
"-r",
"--resource",
dest="resources",
metavar="RESOURCE",
action="append",
default=[],
help="Add or update a resource to a Windows executable. The RESOURCE is one to four items, "
"FILE[,TYPE[,NAME[,LANGUAGE]]]. FILE can be a data file or an exe/dll. For data files, at least TYPE and NAME "
"must be specified. LANGUAGE defaults to 0 or may be specified as wildcard * to update all resources of the "
"given TYPE and NAME. For exe/dll files, all resources from FILE will be added/updated to the final executable "
"if TYPE, NAME and LANGUAGE are omitted or specified as wildcard *. This option can be used multiple times.",
)
g.add_argument(
'--uac-admin',
dest='uac_admin',
action="store_true",
default=False,
help="Using this option creates a Manifest that will request elevation upon application start.",
)
g.add_argument(
'--uac-uiaccess',
dest='uac_uiaccess',
action="store_true",
default=False,
help="Using this option allows an elevated application to work with Remote Desktop.",
)
g = parser.add_argument_group('Windows Side-by-side Assembly searching options (advanced)')
g.add_argument(
"--win-private-assemblies",
dest="win_private_assemblies",
action="store_true",
help="Any Shared Assemblies bundled into the application will be changed into Private Assemblies. This means "
"the exact versions of these assemblies will always be used, and any newer versions installed on user machines "
"at the system level will be ignored.",
)
g.add_argument(
"--win-no-prefer-redirects",
dest="win_no_prefer_redirects",
action="store_true",
help="While searching for Shared or Private Assemblies to bundle into the application, PyInstaller will "
"prefer not to follow policies that redirect to newer versions, and will try to bundle the exact versions of "
"the assembly.",
)
g = parser.add_argument_group('Mac OS specific options')
g.add_argument(
"--argv-emulation",
dest="argv_emulation",
action="store_true",
default=False,
help="Enable argv emulation for macOS app bundles. If enabled, the intial open document/URL event is processed "
"by the bootloader and the passed file paths or URLs are appended to sys.argv.",
)
g.add_argument(
'--osx-bundle-identifier',
dest='bundle_identifier',
help="Mac OS .app bundle identifier is used as the default unique program name for code signing purposes. "
"The usual form is a hierarchical name in reverse DNS notation. For example: com.mycompany.department.appname "
"(default: first script's basename)",
)
g.add_argument(
'--target-architecture',
'--target-arch',
dest='target_arch',
metavar='ARCH',
default=None,
help="Target architecture (macOS only; valid values: x86_64, arm64, universal2). Enables switching between "
"universal2 and single-arch version of frozen application (provided python installation supports the target "
"architecture). If not target architecture is not specified, the current running architecture is targeted.",
)
g.add_argument(
'--codesign-identity',
dest='codesign_identity',
metavar='IDENTITY',
default=None,
help="Code signing identity (macOS only). Use the provided identity to sign collected binaries and generated "
"executable. If signing identity is not provided, ad-hoc signing is performed instead.",
)
g.add_argument(
'--osx-entitlements-file',
dest='entitlements_file',
metavar='FILENAME',
default=None,
help="Entitlements file to use when code-signing the collected binaries (macOS only).",
)
g = parser.add_argument_group('Rarely used special options')
g.add_argument(
"--runtime-tmpdir",
dest="runtime_tmpdir",
metavar="PATH",
help="Where to extract libraries and support files in `onefile`-mode. If this option is given, the bootloader "
"will ignore any temp-folder location defined by the run-time OS. The ``_MEIxxxxxx``-folder will be created "
"here. Please use this option only if you know what you are doing.",
)
g.add_argument(
"--bootloader-ignore-signals",
action="store_true",
default=False,
help="Tell the bootloader to ignore signals rather than forwarding them to the child process. Useful in "
"situations where for example a supervisor process signals both the bootloader and the child (e.g., via a "
"process group) to avoid signalling the child twice.",
)
def main(
scripts,
name=None,
onefile=False,
console=True,
debug=[],
python_options=[],
strip=False,
noupx=False,
upx_exclude=None,
runtime_tmpdir=None,
pathex=[],
version_file=None,
specpath=None,
bootloader_ignore_signals=False,
disable_windowed_traceback=False,
datas=[],
binaries=[],
icon_file=None,
manifest=None,
embed_manifest=True,
resources=[],
bundle_identifier=None,
hiddenimports=[],
hookspath=[],
key=None,
runtime_hooks=[],
excludes=[],
uac_admin=False,
uac_uiaccess=False,
win_no_prefer_redirects=False,
win_private_assemblies=False,
collect_submodules=[],
collect_binaries=[],
collect_data=[],
collect_all=[],
copy_metadata=[],
splash=None,
recursive_copy_metadata=[],
target_arch=None,
codesign_identity=None,
entitlements_file=None,
argv_emulation=False,
**_kwargs
):
# Default values for onefile and console when not explicitly specified on command-line (indicated by None)
if onefile is None:
onefile = False
if console is None:
console = True
# If appname is not specified - use the basename of the main script as name.
if name is None:
name = os.path.splitext(os.path.basename(scripts[0]))[0]
# If specpath not specified - use default value - current working directory.
if specpath is None:
specpath = DEFAULT_SPECPATH
else:
# Expand tilde to user's home directory.
specpath = expand_path(specpath)
# If cwd is the root directory of PyInstaller, generate the .spec file in ./appname/ subdirectory.
if specpath == HOMEPATH:
specpath = os.path.join(HOMEPATH, name)
# Create directory tree if missing.
if not os.path.exists(specpath):
os.makedirs(specpath)
# Handle additional EXE options.
exe_options = ''
if version_file:
exe_options += "\n version='%s'," % escape_win_filepath(version_file)
if uac_admin:
exe_options += "\n uac_admin=True,"
if uac_uiaccess:
exe_options += "\n uac_uiaccess=True,"
if icon_file:
# Icon file for Windows.
# On Windows, the default icon is embedded in the bootloader executable.
exe_options += "\n icon='%s'," % escape_win_filepath(icon_file)
        # Icon file for Mac OS.
        # We need to wrap it in apostrophes.
icon_file = "'%s'" % icon_file
else:
# On Mac OS, the default icon has to be copied into the .app bundle.
        # The text value 'None' means: use the default icon.
icon_file = 'None'
if bundle_identifier:
        # We need to wrap it in apostrophes.
bundle_identifier = "'%s'" % bundle_identifier
if manifest:
if "<" in manifest:
# Assume XML string
exe_options += "\n manifest='%s'," % manifest.replace("'", "\\'")
else:
# Assume filename
exe_options += "\n manifest='%s'," % escape_win_filepath(manifest)
if not embed_manifest:
exe_options += "\n embed_manifest=False,"
if resources:
resources = list(map(escape_win_filepath, resources))
exe_options += "\n resources=%s," % repr(resources)
hiddenimports = hiddenimports or []
upx_exclude = upx_exclude or []
# If file extension of the first script is '.pyw', force --windowed option.
if is_win and os.path.splitext(scripts[0])[-1] == '.pyw':
console = False
# If script paths are relative, make them relative to the directory containing .spec file.
scripts = [make_path_spec_relative(x, specpath) for x in scripts]
# With absolute paths replace prefix with variable HOMEPATH.
scripts = list(map(Path, scripts))
if key:
# Try to import tinyaes as we need it for bytecode obfuscation.
try:
import tinyaes # noqa: F401 (test import)
except ImportError:
logger.error(
'We need tinyaes to use byte-code obfuscation but we could not find it. You can install it '
'with pip by running:\n pip install tinyaes'
)
sys.exit(1)
cipher_init = cipher_init_template % {'key': key}
else:
cipher_init = cipher_absent_template
# Translate the default of ``debug=None`` to an empty list.
if debug is None:
debug = []
# Translate the ``all`` option.
if DEBUG_ALL_CHOICE[0] in debug:
debug = DEBUG_ARGUMENT_CHOICES
# Create preamble (for collect_*() calls)
preamble = Preamble(
datas, binaries, hiddenimports, collect_data, collect_binaries, collect_submodules, collect_all, copy_metadata,
recursive_copy_metadata
)
if splash:
splash_init = splashtmpl % {'splash_image': splash}
splash_binaries = "\n splash.binaries,"
splash_target = "\n splash,"
else:
splash_init = splash_binaries = splash_target = ""
# Create OPTIONs array
if 'imports' in debug and 'v' not in python_options:
python_options.append('v')
python_options_array = [(opt, None, 'OPTION') for opt in python_options]
d = {
'scripts': scripts,
'pathex': pathex or [],
'binaries': preamble.binaries,
'datas': preamble.datas,
'hiddenimports': preamble.hiddenimports,
'preamble': preamble.content,
'name': name,
'noarchive': 'noarchive' in debug,
'options': python_options_array,
'debug_bootloader': 'bootloader' in debug,
'bootloader_ignore_signals': bootloader_ignore_signals,
'strip': strip,
'upx': not noupx,
'upx_exclude': upx_exclude,
'runtime_tmpdir': runtime_tmpdir,
'exe_options': exe_options,
'cipher_init': cipher_init,
# Directory with additional custom import hooks.
'hookspath': hookspath,
# List with custom runtime hook files.
'runtime_hooks': runtime_hooks or [],
        # List of modules/packages to ignore.
'excludes': excludes or [],
# only Windows and Mac OS distinguish windowed and console apps
'console': console,
'disable_windowed_traceback': disable_windowed_traceback,
# Icon filename. Only Mac OS uses this item.
'icon': icon_file,
# .app bundle identifier. Only OSX uses this item.
'bundle_identifier': bundle_identifier,
# argv emulation (macOS only)
'argv_emulation': argv_emulation,
# Target architecture (macOS only)
'target_arch': target_arch,
# Code signing identity (macOS only)
'codesign_identity': codesign_identity,
# Entitlements file (macOS only)
'entitlements_file': entitlements_file,
# Windows assembly searching options
'win_no_prefer_redirects': win_no_prefer_redirects,
'win_private_assemblies': win_private_assemblies,
# splash screen
'splash_init': splash_init,
'splash_target': splash_target,
'splash_binaries': splash_binaries,
}
# Write down .spec file to filesystem.
specfnm = os.path.join(specpath, name + '.spec')
with open(specfnm, 'w', encoding='utf-8') as specfile:
if onefile:
specfile.write(onefiletmplt % d)
# For Mac OS create .app bundle.
if is_darwin and not console:
specfile.write(bundleexetmplt % d)
else:
specfile.write(onedirtmplt % d)
# For Mac OS create .app bundle.
if is_darwin and not console:
specfile.write(bundletmplt % d)
return specfnm
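

# --- Illustrative sketch (not part of PyInstaller): the spec-generation idea in
# main(), reduced to a standalone function. main() collects options into a dict `d`
# and renders it through '%'-style templates (onefiletmplt / onedirtmplt); this
# hypothetical DEMO_TEMPLATE / write_demo_spec pair shows the same mechanism.
DEMO_TEMPLATE = """# -*- mode: python -*-
a = Analysis(%(scripts)s, pathex=%(pathex)s)
exe = EXE(a.scripts, name='%(name)s', console=%(console)s)
"""


def write_demo_spec(specpath, name, scripts, pathex=(), console=True):
    # Mirror of `specfile.write(tmplt % d)` above: render the template from an options dict.
    d = {'scripts': list(scripts), 'pathex': list(pathex), 'name': name, 'console': console}
    specfnm = os.path.join(specpath, name + '.spec')
    with open(specfnm, 'w', encoding='utf-8') as specfile:
        specfile.write(DEMO_TEMPLATE % d)
    return specfnm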
|
[
"hongtianlong@corp.netease.com"
] |
hongtianlong@corp.netease.com
|
22636f1842754ee1a53fdf953af58979f814b77a
|
8e520c67f67b4989395d61bf52682a57fdc86ae6
|
/Jackknife.py
|
3b80cefc9c7a7316c0e6f8a0caa686a85b97ddab
|
[] |
no_license
|
awilson0/PhyloTools
|
783de03586c256bee3c49dbba5a72c787951ba00
|
982d58be8bd4401d02d84be6e94d320159ec6a3d
|
refs/heads/master
| 2020-08-02T15:42:24.319298
| 2019-09-19T17:15:25
| 2019-09-19T17:15:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,646
|
py
|
#Ryan A. Melnyk
#schmelnyk@gmail.com
#UBC Microbiology - Haney Lab
import os, argparse, random
from Bio import SeqIO
def parse_args():
parser = argparse.ArgumentParser(description='''
	Script that removes non-informative sites and/or performs jackknife resampling (i.e. sampling without replacement).
''')
parser.add_argument('align_file',type=str,help='path to the alignment file for sampling')
parser.add_argument('prefix',type=str,help='prefix for jackknife files')
parser.add_argument('--size',type=int,help='length of jackknife (default 10000)')
parser.add_argument('--num',type=int,help='number of jackknifes (default 1)')
parser.add_argument('--remove_redundant',action='store_true',help='use if you wish to remove sites that are non-informative (i.e. the same in all sequences)')
parser.add_argument('--remove_gapped',type=float,help='use if you wish to remove gapped sites - enter max proportion sites that can be gapped.')
return parser.parse_args()
def parse(align_file):
seqdata = {}
for seq in SeqIO.parse(open(align_file,'r'),'fasta'):
seqdata[str(seq.id)] = str(seq.seq)
return seqdata
def remove_redundant(seqdata):
	first = list(seqdata.keys())[0]  # dict views are not indexable in Python 3
newdata = {s : [] for s in seqdata.keys()}
length = len(seqdata[first])
print(length, "residues to scan...")
count = 0
uniqcount = 0
for i in range(0,length):
res = seqdata[first][i]
matching = True
for s in seqdata:
if seqdata[s][i] != res:
matching = False
if not matching:
for s in seqdata:
newdata[s].append(seqdata[s][i])
uniqcount += 1
count += 1
if count % 10000 == 0:
print("{}0K residues parsed...".format(str(count/10000)))
print("Done!")
print(uniqcount, "informative residues out of", length, "total positions.")
return {s : "".join(newdata[s]) for s in newdata}
def select_sites(prefix,seqdata,size,num):
first = list(seqdata.keys())[0]
sites = list(range(0,len(seqdata[first])))
print("Beginning jackknife replicates of size {}...".format(str(size)))
for i in range(0,num):
print("jackknife replicate {} of {}...".format(str(i+1),str(num)))
jackknife = {s : [] for s in seqdata}
selected = random.sample(sites,size)
		for s in selected:
			sites.remove(s)  # drop chosen columns so later replicates stay disjoint
for s in selected:
for seq in seqdata:
jackknife[seq].append(seqdata[seq][s])
o = open(os.path.join(prefix+"_{}.faa".format(str(i+1))),'w')
for j in jackknife:
o.write(">{}\n{}\n".format(j,"".join(jackknife[j])))
o.close()
return
def remove_gapped(seqdata,t):
first = list(seqdata.keys())[0]
newdata = {s : [] for s in seqdata.keys()}
length = len(seqdata[first])
print(length, "residues to scan...")
count = 0
gap_totalcount = 0
for i in range(0,length):
gap_rescount = 0
for s in seqdata:
if seqdata[s][i] == "-":
gap_rescount += 1
prop = float(gap_rescount)/float(len(seqdata.keys()))
if prop < t:
for s in seqdata:
newdata[s].append(seqdata[s][i])
gap_totalcount += 1
count += 1
if count % 100000 == 0:
print("{}00K residues parsed...".format(str(count/100000)))
print("Done!")
	print(gap_totalcount, "residues below the gap threshold out of", length, "total positions.")
return {s : "".join(newdata[s]) for s in newdata}
def main():
args = parse_args()
align_file = os.path.abspath(args.align_file)
prefix = os.path.abspath(args.prefix)
seqdata = parse(align_file)
if args.remove_gapped:
t = args.remove_gapped
seqdata = remove_gapped(seqdata,t)
if args.remove_redundant:
seqdata = remove_redundant(seqdata)
if args.size:
size = args.size
else:
size = 10000
if args.num:
num = args.num
else:
num = 1
select_sites(prefix,seqdata,size,num)
if __name__ == '__main__':
main()
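

# --- Minimal standalone sketch (hypothetical helper, not used above) of the
# jackknife step that select_sites() performs: draw `size` columns per replicate
# without replacement, removing them from the pool so replicates stay disjoint.
def _jackknife_columns(aln_length, size, num, seed=None):
	rng = random.Random(seed)
	sites = list(range(aln_length))
	replicates = []
	for _ in range(num):
		selected = rng.sample(sites, size)              # sample without replacement
		chosen = set(selected)
		sites = [s for s in sites if s not in chosen]   # keep replicates disjoint
		replicates.append(selected)
	return replicates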
|
[
"schmelnyk@gmail.com"
] |
schmelnyk@gmail.com
|
275a0b0254293a2c873371f26e6cb3f2c7826f43
|
9f2445e9a00cc34eebcf3d3f60124d0388dcb613
|
/2019-08-10-Izhekevich_network/izhikevich_simple_nrn/simple.py
|
40f49a85c23cb50fb313f6913fb2ab206b18b938
|
[] |
no_license
|
analkumar2/Thesis-work
|
7ee916d71f04a60afbd117325df588908518b7d2
|
75905427c2a78a101b4eed2c27a955867c04465c
|
refs/heads/master
| 2022-01-02T02:33:35.864896
| 2021-12-18T03:34:04
| 2021-12-18T03:34:04
| 201,130,673
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,587
|
py
|
'''Usage:
import simple
h.run()
simple.show()
Sets up 5 models using default parameters in the .mod files
2 versions of 2003/2004 parameterization: freestanding (3a); in section (3b)
4 versions of 2007/2008 parameterization: freestanding (7a); in section with local integration of 'u' (7b);
in sec with STATE 'u' 7bS; in sec using wrapper class (7bw)
can graph u, v for any model
simple.show('v3a','v3b') # compare voltage output for the 2 versions of the 2003/2004 parameterization; will NOT be identical
simple.show('v7a','v7b','v7bw') # compare voltage output for 3 versions of the 2007 parameterization
'''
from neuron import h, gui
import numpy as np
import izhi2007Wrapper as izh07
import pylab as plt
import pprint as pp
plt.ion()
# fih = []
dummy=h.Section()
# make a 2003a STATE {u,vv} cell (used for 2003, 2004)
iz03a = h.Izhi2003a(0.5,sec=dummy)
iz03a.Iin = 4
# make a 2003b (Section v) cell
sec03b = h.Section() # this section will actually be used
sec03b.L, sec03b.diam = 10, 10 # empirically tuned
iz03b = h.Izhi2003b(0.5,sec=sec03b)
iz03b.Iin = 4
def iz03b_init (): sec03b(0.5).v, iz03b.u = -65, -65*iz03b.b
# fih.append(h.FInitializeHandler(iz03b_init))
# make a 2007a (NMODL) cell
iz07a = h.Izhi2007a(0.5,sec=dummy)
iz07a.Iin = 70
# make a 2007b (section) cell
sec07b = h.Section()
sec07b.L, sec07b.diam, sec07b.cm = 10, 10, 31.831
iz07b = h.Izhi2007b(0.5,sec=sec07b)
iz07b.Iin = 70
def iz07b_init(): sec07b.v=-60
# fih.append(h.FInitializeHandler(iz07b_init))
# make a 2007b (section) cell using the Wrapper
iz07bw = izh07.IzhiCell() # defaults to RS
iz07bw.izh.Iin = 70
# fih.append(h.FInitializeHandler(iz07bw.init))
# vectors and plot
h.tstop=1250
#recd = {'u3a':[iz03a._ref_u], 'v3a':[iz03a._ref_V], 'u3b':[iz03b._ref_u], 'v3b':[sec03b(0.5)._ref_v],
recd={ 'u7a':[iz07a._ref_u], 'v7a':[iz07a._ref_V], 'u7b':[iz07b._ref_u], 'v7b':[sec07b(0.5)._ref_v],
'u7bw':[iz07bw.izh._ref_u], 'v7bw':[iz07bw.sec(0.5)._ref_v]}
for x, v in recd.items():
    v.append(h.Vector(h.tstop/h.dt + 100))  # storage vector pre-sized for the full run
    v[1].record(v[0])                       # record the referenced variable each dt
def vtvec(vv): return np.linspace(0, len(vv)*h.dt, len(vv), endpoint=True)
# run and plot
fig = None
def show (*vars):
pp.pprint(recd.keys())
global fig,tvec
if fig is None: fig = plt.figure(figsize=(10,6), tight_layout=True)
if len(vars)==0: vars=recd.keys()
tvec=vtvec(recd['v7a'][1])
plt.clf()
[plt.plot(tvec,v[1], label=x) for x,v in recd.items() if x in vars]
plt.legend()
pp.pprint([v[1].as_numpy()[-5:] for x,v in recd.items() if x in vars])
plt.xlim(0,h.tstop)
# h.run()
# show()
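

# --- Minimal standalone sketch (not the NMODL integration used above): the 2003
# Izhikevich equations under forward-Euler stepping, with regular-spiking
# parameters assumed (a=0.02, b=0.2, c=-65, d=8):
#   v' = 0.04*v**2 + 5*v + 140 - u + I ;  u' = a*(b*v - u) ;  reset when v >= 30.
def izhi2003_trace(Iin=4.0, dt=0.025, tstop=1250.0, a=0.02, b=0.2, c=-65.0, d=8.0):
    v, u = -65.0, b * -65.0
    vs = []
    for _ in range(int(tstop / dt)):
        v += dt * (0.04 * v * v + 5.0 * v + 140.0 - u + Iin)
        u += dt * a * (b * v - u)
        if v >= 30.0:  # spike: reset membrane potential, bump recovery variable
            v, u = c, u + d
        vs.append(v)
    return vs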
|
[
"analkumar2@gmail.com"
] |
analkumar2@gmail.com
|
7ad9e8f61007ca40b7e2ed29febbc24fd453ed71
|
473568bf080e3637ee118b374f77e9f561286c6c
|
/SudoPlacementCourse/CountTotalSetBits.py
|
792ac8808411330c1dcb4f524d5347aec611ec82
|
[] |
no_license
|
VineetPrasadVerma/GeeksForGeeks
|
c2f7fc94b0a07ba146025ca8a786581dbf7154c8
|
fdb4e4a7e742c4d67015977e3fbd5d35b213534f
|
refs/heads/master
| 2020-06-02T11:23:11.421399
| 2020-01-07T16:51:18
| 2020-01-07T16:51:18
| 191,138,925
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 301
|
py
|
# For each test case, count the total number of set bits ('1' digits in binary)
# across all integers from 1 to num.
test_cases = int(input())
for _ in range(test_cases):
    num = int(input())
    count = 0
    for j in range(1, num + 1):
        for bit in '{0:b}'.format(j):
            if bit == '1':
                count += 1
    print(count)
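

# --- Illustrative alternative (a sketch, not part of the submission above):
# Brian Kernighan's trick clears the lowest set bit on each iteration, so each
# number costs O(popcount) rather than O(bit length):
def count_set_bits_upto(num):
    total = 0
    for j in range(1, num + 1):
        while j:
            j &= j - 1  # clear the lowest set bit
            total += 1
    return total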
|
[
"vineetpd1996@gmail.com"
] |
vineetpd1996@gmail.com
|
413122105cb86bc99aeaeb7a8248848f9e2afc0b
|
f9f074c44b67a11d4630b5e1cc15e016e8d73cc8
|
/factory-ai-vision/EdgeSolution/modules/ModelManagerModule/app/downloader/tools/accuracy_checker/accuracy_checker/evaluators/custom_evaluators/sr_evaluator.py
|
f1702d6f7f957df2949e04b434b06e0c1a3553a8
|
[
"MIT"
] |
permissive
|
Azure-Samples/azure-intelligent-edge-patterns
|
361694680c7e48d3761c5416175788355b684dcd
|
1d2f42cbf9f21157c1e1abf044b26160dfed5b16
|
refs/heads/master
| 2023-05-26T13:15:47.085088
| 2023-02-28T17:25:53
| 2023-02-28T17:25:53
| 186,706,933
| 193
| 164
|
MIT
| 2023-02-28T17:25:55
| 2019-05-14T22:02:41
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 24,249
|
py
|
"""
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pathlib import Path
import pickle
from functools import partial
from collections import OrderedDict
import numpy as np
from ..base_evaluator import BaseEvaluator
from ..quantization_model_evaluator import create_dataset_attributes
from ...adapters import create_adapter
from ...config import ConfigError
from ...launcher import create_launcher
from ...utils import contains_all, contains_any, extract_image_representations, get_path
from ...progress_reporters import ProgressReporter
from ...logging import print_info
def generate_name(prefix, with_prefix, layer_name):
return prefix + layer_name if with_prefix else layer_name.split(prefix)[-1]
class SuperResolutionFeedbackEvaluator(BaseEvaluator):
def __init__(self, dataset_config, launcher, model):
self.dataset_config = dataset_config
self.preprocessing_executor = None
self.preprocessor = None
self.dataset = None
self.postprocessor = None
self.metric_executor = None
self.launcher = launcher
self.srmodel = model
self._metrics_results = []
@classmethod
def from_configs(cls, config, delayed_model_loading=False):
dataset_config = config['datasets']
launcher_config = config['launchers'][0]
if launcher_config['framework'] == 'dlsdk' and 'device' not in launcher_config:
launcher_config['device'] = 'CPU'
launcher = create_launcher(launcher_config, delayed_model_loading=True)
model = SRFModel(
config.get('network_info', {}), launcher, config.get('_models', []), config.get('_model_is_blob'),
delayed_model_loading
)
return cls(dataset_config, launcher, model)
def process_dataset(
self, subset=None,
num_images=None,
check_progress=False,
dataset_tag='',
output_callback=None,
allow_pairwise_subset=False,
dump_prediction_to_annotation=False,
calculate_metrics=True,
**kwargs):
if self.dataset is None or (dataset_tag and self.dataset.tag != dataset_tag):
self.select_dataset(dataset_tag)
self._annotations, self._predictions = [], []
self._create_subset(subset, num_images, allow_pairwise_subset)
metric_config = self.configure_intermediate_metrics_results(kwargs)
compute_intermediate_metric_res, metric_interval, ignore_results_formatting = metric_config
if 'progress_reporter' in kwargs:
_progress_reporter = kwargs['progress_reporter']
_progress_reporter.reset(self.dataset.size)
else:
_progress_reporter = None if not check_progress else self._create_progress_reporter(
check_progress, self.dataset.size
)
self.srmodel.init_feedback(self.dataset.data_reader)
for batch_id, (batch_input_ids, batch_annotation, batch_inputs, batch_identifiers) in enumerate(self.dataset):
self.srmodel.fill_feedback(batch_inputs)
batch_inputs = self.preprocessor.process(batch_inputs, batch_annotation)
batch_inputs_extr, _ = extract_image_representations(batch_inputs)
callback = None
            if output_callback:
callback = partial(output_callback,
metrics_result=None,
element_identifiers=batch_identifiers,
dataset_indices=batch_input_ids)
batch_raw_prediction, batch_prediction = self.srmodel.predict(
batch_identifiers, batch_inputs_extr, callback=callback
)
annotation, prediction = self.postprocessor.process_batch(batch_annotation, batch_prediction)
self.srmodel.feedback(prediction)
metrics_result = None
if self.metric_executor and calculate_metrics:
metrics_result, _ = self.metric_executor.update_metrics_on_batch(
batch_input_ids, annotation, prediction
)
if self.metric_executor.need_store_predictions:
self._annotations.extend(annotation)
self._predictions.extend(prediction)
if output_callback:
output_callback(
batch_raw_prediction[0],
metrics_result=metrics_result,
element_identifiers=batch_identifiers,
dataset_indices=batch_input_ids
)
if _progress_reporter:
_progress_reporter.update(batch_id, len(prediction))
if compute_intermediate_metric_res and _progress_reporter.current % metric_interval == 0:
self.compute_metrics(
print_results=True, ignore_results_formatting=ignore_results_formatting
)
if _progress_reporter:
_progress_reporter.finish()
if self.srmodel.store_predictions:
self.srmodel.save_predictions()
def compute_metrics(self, print_results=True, ignore_results_formatting=False):
if self._metrics_results:
del self._metrics_results
self._metrics_results = []
for result_presenter, evaluated_metric in self.metric_executor.iterate_metrics(
self._annotations, self._predictions):
self._metrics_results.append(evaluated_metric)
if print_results:
result_presenter.write_result(evaluated_metric, ignore_results_formatting)
return self._metrics_results
def extract_metrics_results(self, print_results=True, ignore_results_formatting=False):
if not self._metrics_results:
self.compute_metrics(False, ignore_results_formatting)
result_presenters = self.metric_executor.get_metric_presenters()
extracted_results, extracted_meta = [], []
for presenter, metric_result in zip(result_presenters, self._metrics_results):
result, metadata = presenter.extract_result(metric_result)
if isinstance(result, list):
extracted_results.extend(result)
extracted_meta.extend(metadata)
else:
extracted_results.append(result)
extracted_meta.append(metadata)
if print_results:
presenter.write_result(metric_result, ignore_results_formatting)
return extracted_results, extracted_meta
def print_metrics_results(self, ignore_results_formatting=False):
if not self._metrics_results:
self.compute_metrics(True, ignore_results_formatting)
return
result_presenters = self.metric_executor.get_metric_presenters()
for presenter, metric_result in zip(result_presenters, self._metrics_results):
presenter.write_result(metric_result, ignore_results_formatting)
@property
def dataset_size(self):
return self.dataset.size
def release(self):
self.srmodel.release()
self.launcher.release()
def reset(self):
if self.metric_executor:
self.metric_executor.reset()
if hasattr(self, '_annotations'):
del self._annotations
del self._predictions
del self._input_ids
del self._metrics_results
self._annotations = []
self._predictions = []
self._input_ids = []
self._metrics_results = []
if self.dataset:
self.dataset.reset(self.postprocessor.has_processors)
@staticmethod
def get_processing_info(config):
module_specific_params = config.get('module_config')
model_name = config['name']
dataset_config = module_specific_params['datasets'][0]
launcher_config = module_specific_params['launchers'][0]
return (
model_name, launcher_config['framework'], launcher_config['device'], launcher_config.get('tags'),
dataset_config['name']
)
def _create_subset(self, subset=None, num_images=None, allow_pairwise=False):
if self.dataset.batch is None:
self.dataset.batch = 1
if subset is not None:
self.dataset.make_subset(ids=subset, accept_pairs=allow_pairwise)
elif num_images is not None:
self.dataset.make_subset(end=num_images, accept_pairs=allow_pairwise)
@staticmethod
def configure_intermediate_metrics_results(config):
compute_intermediate_metric_res = config.get('intermediate_metrics_results', False)
metric_interval, ignore_results_formatting = None, None
if compute_intermediate_metric_res:
metric_interval = config.get('metrics_interval', 1000)
ignore_results_formatting = config.get('ignore_results_formatting', False)
return compute_intermediate_metric_res, metric_interval, ignore_results_formatting
def load_network(self, network=None):
self.srmodel.load_network(network, self.launcher)
def load_network_from_ir(self, models_list):
self.srmodel.load_model(models_list, self.launcher)
def get_network(self):
return self.srmodel.get_network()
def get_metrics_attributes(self):
if not self.metric_executor:
return {}
return self.metric_executor.get_metrics_attributes()
def register_metric(self, metric_config):
if isinstance(metric_config, str):
self.metric_executor.register_metric({'type': metric_config})
elif isinstance(metric_config, dict):
self.metric_executor.register_metric(metric_config)
else:
raise ValueError('Unsupported metric configuration type {}'.format(type(metric_config)))
def register_postprocessor(self, postprocessing_config):
pass
def register_dumped_annotations(self):
pass
def select_dataset(self, dataset_tag):
if self.dataset is not None and isinstance(self.dataset_config, list):
return
dataset_attributes = create_dataset_attributes(self.dataset_config, dataset_tag)
self.dataset, self.metric_executor, self.preprocessor, self.postprocessor = dataset_attributes
@staticmethod
def _create_progress_reporter(check_progress, dataset_size):
pr_kwargs = {}
if isinstance(check_progress, int) and not isinstance(check_progress, bool):
pr_kwargs = {"print_interval": check_progress}
return ProgressReporter.provide('print', dataset_size, **pr_kwargs)
class BaseModel:
def __init__(self, network_info, launcher, delayed_model_loading=False):
self.network_info = network_info
self.launcher = launcher
def predict(self, identifiers, input_data):
raise NotImplementedError
def release(self):
pass
# pylint: disable=E0203
class BaseDLSDKModel:
def print_input_output_info(self):
print_info('{} - Input info:'.format(self.default_model_suffix))
has_info = hasattr(self.network if self.network is not None else self.exec_network, 'input_info')
if self.network:
if has_info:
network_inputs = OrderedDict(
[(name, data.input_data) for name, data in self.network.input_info.items()]
)
else:
network_inputs = self.network.inputs
network_outputs = self.network.outputs
else:
if has_info:
network_inputs = OrderedDict([
(name, data.input_data) for name, data in self.exec_network.input_info.items()
])
else:
network_inputs = self.exec_network.inputs
network_outputs = self.exec_network.outputs
for name, input_info in network_inputs.items():
print_info('\tLayer name: {}'.format(name))
print_info('\tprecision: {}'.format(input_info.precision))
print_info('\tshape {}\n'.format(input_info.shape))
print_info('{} - Output info'.format(self.default_model_suffix))
for name, output_info in network_outputs.items():
print_info('\tLayer name: {}'.format(name))
print_info('\tprecision: {}'.format(output_info.precision))
print_info('\tshape: {}\n'.format(output_info.shape))
def automatic_model_search(self, network_info):
model = Path(network_info.get('srmodel', network_info.get('model')))
if model.is_dir():
is_blob = network_info.get('_model_is_blob')
if is_blob:
model_list = list(model.glob('*{}.blob'.format(self.default_model_suffix)))
if not model_list:
model_list = list(model.glob('*.blob'))
else:
model_list = list(model.glob('*{}.xml'.format(self.default_model_suffix)))
blob_list = list(model.glob('*{}.blob'.format(self.default_model_suffix)))
if not model_list and not blob_list:
model_list = list(model.glob('*.xml'))
blob_list = list(model.glob('*.blob'))
if not model_list:
model_list = blob_list
if not model_list:
raise ConfigError('Suitable model for {} not found'.format(self.default_model_suffix))
if len(model_list) > 1:
raise ConfigError('Several suitable models for {} found'.format(self.default_model_suffix))
model = model_list[0]
accepted_suffixes = ['.blob', '.xml']
if model.suffix not in accepted_suffixes:
raise ConfigError('Models with following suffixes are allowed: {}'.format(accepted_suffixes))
print_info('{} - Found model: {}'.format(self.default_model_suffix, model))
if model.suffix == '.blob':
return model, None
weights = get_path(network_info.get('weights', model.parent / model.name.replace('xml', 'bin')))
accepted_weights_suffixes = ['.bin']
if weights.suffix not in accepted_weights_suffixes:
raise ConfigError('Weights with following suffixes are allowed: {}'.format(accepted_weights_suffixes))
print_info('{} - Found weights: {}'.format(self.default_model_suffix, weights))
return model, weights
def load_network(self, network, launcher):
self.network = network
self.exec_network = launcher.ie_core.load_network(network, launcher.device)
def update_inputs_outputs_info(self):
raise NotImplementedError
def load_model(self, network_info, launcher, log=False):
model, weights = self.automatic_model_search(network_info)
if weights is not None:
self.network = launcher.read_network(str(model), str(weights))
self.exec_network = launcher.ie_core.load_network(self.network, launcher.device)
else:
self.exec_network = launcher.ie_core.import_network(str(model))
self.update_inputs_outputs_info()
if log:
self.print_input_output_info()
def create_model(model_config, launcher, delayed_model_loading=False):
launcher_model_mapping = {
'dlsdk': ModelDLSDKModel,
'tf': ModelTFModel,
}
framework = launcher.config['framework']
if 'predictions' in model_config and not model_config.get('store_predictions', False):
framework = 'dummy'
model_class = launcher_model_mapping.get(framework)
if not model_class:
raise ValueError('model for framework {} is not supported'.format(framework))
return model_class(model_config, launcher, delayed_model_loading)
class SRFModel(BaseModel):
def __init__(self, network_info, launcher, models_args, is_blob, delayed_model_loading=False):
super().__init__(network_info, launcher)
if models_args and not delayed_model_loading:
model = network_info.get('srmodel', {})
if not contains_any(model, ['model', 'onnx_model']) and models_args:
model['srmodel'] = models_args[0]
model['_model_is_blob'] = is_blob
            network_info.update({'srmodel': model})
if not contains_all(network_info, ['srmodel']) and not delayed_model_loading:
raise ConfigError('network_info should contain srmodel field')
self.srmodel = create_model(network_info['srmodel'], launcher, delayed_model_loading)
self.feedback = self.srmodel.feedback
self.init_feedback = self.srmodel.init_feedback
self.fill_feedback = self.srmodel.fill_feedback
self.store_predictions = network_info['srmodel'].get('store_predictions', False)
self._predictions = [] if self.store_predictions else None
self._part_by_name = {'srmodel': self.srmodel}
self._raw_outs = OrderedDict()
def predict(self, identifiers, input_data, callback=None):
predictions, raw_outputs = [], []
for data in input_data:
output, prediction = self.srmodel.predict(identifiers, data)
if self.store_predictions:
self._predictions.append(prediction)
raw_outputs.append(output)
predictions.append(prediction)
return raw_outputs, predictions
def reset(self):
self.processing_frames_buffer = []
if self._predictions is not None:
self._predictions = []
def release(self):
self.srmodel.release()
def save_predictions(self):
if self._predictions is not None:
prediction_file = Path(self.network_info['srmodel'].get('predictions', 'model_predictions.pickle'))
with prediction_file.open('wb') as file:
pickle.dump(self._predictions, file)
def load_network(self, network_list, launcher):
for network_dict in network_list:
self._part_by_name[network_dict['name']].load_network(
network_dict.get('srmodel', network_dict.get('model')), launcher)
self.update_inputs_outputs_info()
def load_model(self, network_list, launcher):
for network_dict in network_list:
self._part_by_name[network_dict.get('name', 'srmodel')].load_model(network_dict, launcher)
self.update_inputs_outputs_info()
def _add_raw_predictions(self, prediction):
for key, output in prediction.items():
if key not in self._raw_outs:
self._raw_outs[key] = []
self._raw_outs[key].append(output)
def get_network(self):
return [{'name': 'srmodel', 'model': self.srmodel.network}]
def update_inputs_outputs_info(self):
if hasattr(self.srmodel, 'update_inputs_outputs_info'):
self.srmodel.update_inputs_outputs_info()
class FeedbackMixin:
def configure_feedback(self):
self._idx_to_name = {}
self._name_to_idx = {}
self._feedback_name = self.network_info['feedback_input']
self._feedback_data = {self._feedback_name: None}
self._first_step = True
self._inputs = self.network_info['inputs']
self._feedback_inputs = {self._feedback_name: [t for t in self._inputs if t['name'] == self._feedback_name][0]}
for input_info in self._inputs:
idx = int(input_info['value'])
self._idx_to_name[idx] = input_info['name']
self._name_to_idx[input_info['name']] = idx
self._feedback_idx = self._name_to_idx[self._feedback_name]
def init_feedback(self, reader):
info = self._feedback_inputs[self._feedback_name]
self._feedback_data[self._feedback_name] = reader.read(info['initializer'])
def feedback(self, data):
data = data[0]
self._feedback_data[self._feedback_name] = data[0].value
def fill_feedback(self, data):
data[0].data[self._feedback_idx] = self._feedback_data[self._feedback_name]
return data
class ModelDLSDKModel(BaseModel, BaseDLSDKModel, FeedbackMixin):
default_model_suffix = 'srmodel'
def __init__(self, network_info, launcher, delayed_model_loading=False):
super().__init__(network_info, launcher)
self.input_blob, self.output_blob = None, None
self.with_prefix = None
if not delayed_model_loading:
self.load_model(network_info, launcher, log=True)
self.adapter = create_adapter(network_info.get('adapter', 'super_resolution'))
self.configure_feedback()
def predict(self, identifiers, input_data):
input_data = self.fit_to_input(input_data)
raw_result = self.exec_network.infer(input_data)
result = self.adapter.process([raw_result], identifiers, [{}])
return raw_result, result
def release(self):
del self.exec_network
del self.launcher
def fit_to_input(self, input_data):
has_info = hasattr(self.exec_network, 'input_info')
if has_info:
input_info = self.exec_network.input_info
else:
input_info = self.exec_network.inputs
fitted = {}
for name, info in input_info.items():
data = input_data[self._name_to_idx[name]]
data = np.expand_dims(data, axis=0)
data = np.transpose(data, [0, 3, 1, 2])
assert tuple(info.input_data.shape) == np.shape(data)
fitted[name] = data
return fitted
def update_inputs_outputs_info(self):
has_info = hasattr(self.exec_network, 'input_info')
input_info = self.exec_network.input_info if has_info else self.exec_network.inputs
input_blob = next(iter(input_info))
with_prefix = input_blob.startswith(self.default_model_suffix + '_')
if (with_prefix != self.with_prefix) and with_prefix:
self.network_info['feedback_input'] = '_'.join([self.default_model_suffix,
self.network_info['feedback_input']])
for inp in self.network_info['inputs']:
inp['name'] = '_'.join([self.default_model_suffix, inp['name']])
if 'blob' in inp.keys():
inp['blob'] = '_'.join([self.default_model_suffix, inp['blob']])
self.network_info['adapter']['target_out'] = '_'.join([self.default_model_suffix,
self.network_info['adapter']['target_out']])
self.with_prefix = with_prefix
class ModelTFModel(BaseModel, FeedbackMixin):
default_model_suffix = 'srmodel'
def __init__(self, network_info, launcher, *args, **kwargs):
super().__init__(network_info, launcher)
model = self.automatic_model_search(network_info)
self.inference_session = launcher.create_inference_session(str(model))
self.adapter = create_adapter(network_info.get('adapter', 'super_resolution'))
self.configure_feedback()
def predict(self, identifiers, input_data):
input_data = self.fit_to_input(input_data)
raw_result = self.inference_session.predict([input_data])
result = self.adapter.process(raw_result, identifiers, [{}])
return raw_result, result
def fit_to_input(self, input_data):
fitted = {}
for idx, data in enumerate(input_data):
name = self._idx_to_name[idx]
data = np.expand_dims(data, axis=0)
fitted[name] = data
return fitted
def release(self):
del self.inference_session
@staticmethod
def automatic_model_search(network_info):
model = Path(network_info['model'])
return model
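

# --- Minimal sketch (hypothetical `model_fn`; not part of this evaluator) of the
# feedback pattern FeedbackMixin implements: each frame's prediction replaces one
# designated model input on the next frame, after being seeded from an initializer.
def run_with_feedback(frames, model_fn, initial_feedback):
    feedback = initial_feedback
    outputs = []
    for frame in frames:
        prediction = model_fn(frame, feedback)  # analogous to predict() after fill_feedback()
        feedback = prediction                   # analogous to feedback(): reuse on next frame
        outputs.append(prediction)
    return outputs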
|
[
"waitingkuo0527@gmail.com"
] |
waitingkuo0527@gmail.com
|
0766ad3e1de55e681c5f1291cfd66701d939cc30
|
6597141b3ac01f083ced3dc2b476a63a4e055c20
|
/inputs.py
|
1089f09e0785b2401b43e07979df7f4c78e9708c
|
[] |
no_license
|
kittychi/adventofcode2015
|
02314dc0dc23e55dc55112343aeb50088c85b3c5
|
11b23bf0c71b392651887bd4e2ea093f4dddf5b2
|
refs/heads/master
| 2021-01-10T08:25:57.124676
| 2015-12-25T05:34:01
| 2015-12-25T05:34:01
| 47,864,292
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135,177
|
py
|
day1 = '()(((()))(()()()((((()(((())(()(()((((((()(()(((())))((()(((()))((())(()((()()()()(((())(((((((())))()()(()(()(())(((((()()()((())(((((()()))))()(())(((())(())((((((())())))(()())))()))))()())()())((()()((()()()()(()((((((((()()())((()()(((((()(((())((())(()))()((((()((((((((())()((()())(())((()))())((((()())(((((((((((()()(((((()(()))())(((()(()))())((()(()())())())(()(((())(())())()()(()(()((()))((()))))((((()(((()))))((((()(()(()())())()(((()((((())((((()(((()()(())()()()())((()((((((()((()()))()((()))()(()()((())))(((()(((()))((()((()(()))(((()()(()(()()()))))()()(((()(((())())))))((()(((())()(()(())((()())))((((())))(()(()(()())()((()())))(((()((()(())()()((()((())(()()((())(())()))()))((()(())()))())(((((((()(()()(()(())())))))))(()((((((())((((())((())())(()()))))()(())(()())()())((())(()))))(()))(()((()))()(()((((((()()()()((((((((()(()(())((()()(()()))(())()())()((())))()))()())(((()))(())()(())()))()((()((()(()()())(())()()()((())())))((()()(()()((()(())()()())(((()(()()))))(())))(()(()())()))()()))))))()))))((((((())))())))(()(())())(()())))))(()))()))))))()((()))))()))))(()(()((()())())(()()))))(((())()))())())())(((()(()()))(())()(())(())((((((()()))))((()(()))))))(()))())(((()()(()))()())()()()())))))))))))))(())(()))(()))((()(())(()())(())())(()())(())()()(()())))()()()))(())())()))())())(())((())))))))(())))(())))))()))))((())(()(((()))))(()))()((()(())))(()())(((((()))()())()()))))()))))()))())(()(()()()))()))))))((()))))))))))()((()))((()(())((())()()(()()))()(()))))()()(()))()))(((())))(())()((())(())(()())()())())))))))())))()((())))()))(()))()()))(((((((()))())(()()))(()()(()))()(()((()())()))))))(((()()()())))(())()))()())(()()))()()))))))))(())))()))()()))))))()))()())))()(())(())))))()(())()()(()()))))())((()))))()))))(()(((((()))))))))())))())()(())()()))))(())))())()()())()()())()(()))))()))()))))))))())))((()))()))()))())))()())()()())))())))(()((())()((()))())))))())()(())((())))))))))))())()())(())())())(()))(()))()))())(()(())())()())()()(()))))(()(())))))))(())))())(())))))))())()()(())())())))(())))))()))()(()())()(()))())())))))()()(()))()))))())))))))))()))))()))))))())()())()()))))()())))())))))))))))()()))))()()(((()))()()(())()))))((()))))(()))(())())))(())()))))))(()))()))))(())())))))()))(()())))))))))))))())))))))))()((()())(()())))))))((()))))(())(())))()(()())())))())())(()()()())))()))))))())))))())()()())))))))))))()()(()))))()())()))((()())(()))))()(()))))))))))()())())(((())(()))))())()))()))()))))))()))))))(()))))()))))()(())))(())))(()))())()()(()()))()))(()()))))))))()))(()))())(()()(()(()())()()))()))))))))(())))))((()()(()))())())))))()))())(()())()()))())))()(()()()()))((())())))())()(()()))()))))))))(()))(())))()))))(()(()())(()))))()())())()))()()))())))))))))))())()))))))()))))))))())))))()))))())(()())))(())()))())())))))()()(()()())(()())))()()))(((()))(()()()))))()))))()))))((())))()((((((()()))))))())))))))))))(((()))))))))))))(())())))))())(()))))))(()))((()))())))()(()((()))()))()))))))))))())()))()(()()))))())))())(())()(()))()))())(()))()))))(()()))()()(())))))()))(())(()(()()))(()()())))))(((()))))))()))))))))))))(())(()))))()())())()()((()()))())))))(()))))())))))))()()()))))))))())))()(((()()))(())))))(((())())))))((()))()(()))(()))))(()())))(()))())))))()))))(())(())))()((()))(())())))()()))()))))))))()))(()()()(()()()(()))())(())()())(((()))(())))))))))(((()())))()()))))))))()(())(()))()((((())(())(()())))()))(((())()()()))((()))(()))())())))())))(()))())()())())(()(())())()()()(())))())(())))(())))(())()))()))(()((
()))))))))())(()))))))())(()()))()()))()(()(()())))()()(()((()((((((()))(())))()()()))())()))((()()(()))())((()(()(()))(()()))))()())))()))()())))))))()()((()())(())))()))(()))(())(()))())(()(())))()()))))))(((()(((()()))()(()(())())((()()))()))()))()))()(()()()(()))((()())()(())))()()))(((())()()())(())()((()()()()(()(())(()()))()(((((()())))((())))))(()()()))))(((()(())))()))((()((()(())()(()((())))((()())()(()))(((()())()()(()))(())(((()((()())()((())()())(((()()))((()((())(()))(()())(()()()))((()))(())(()((()()())((()))(())))(())(())(())))(()())))(((((()(()(((((()())((((()(()())(())(()()(((())((()(((()()(((()()((((((())))())(()((((((()(()))()))()()((()((()))))()(()()(()((()()))))))(((((()(((((())()()()(())())))))))()))((()()(())))(())(()()()())))))(()((((())))))))()()(((()(()(()(()(()())()()()(((((((((()()())()(()))((()()()()()(((((((()())()((())()))((((((()(()(()(()())(((()(((((((()(((())(((((((((())(())())()))((()(()))(((()()())(())(()(()()(((()(())()))())))(())((((((())(()()())()()(((()(((())(()(((())(((((((()(((((((((()))(())(()(()(()))))((()))()(())())())((()(()((()()))((()()((()(())(())(()((())(((())(((()()()((((((()()(())((((())()))))(())((()(()((())))(((((()(()()())())((())())))((())((()((()()((((((())(((()()(()())())(()(()))(()(()))())())()(((((((()(((()(())()()((())((()(()()((()(()()(((((((((((())((())((((((())((()((((()(()((((()(((((((())()((()))))())()((()((((()(()(((()((()())))(())())(((()(((())((((((()(((((((((()()(())))(()(((((()((((()())))((()((()((()(()()(((())((((((((((((()(((())(()(((((()))(()()(()()()()()()((())(((((((())(((((())))))())()(()()(()(()(((()()(((((())(()((()((()(((()()((()((((())()))()((((())(())))()())(((())(())(()()((()(((()()((((((((((()()(()())())(((((((((())((((()))()()((((())(()((((()(((())())(((((((((((()((((())))(())(()(((()(((()((())(((((()((()()(()(()()((((((()((((()((()(()((()(()((((((()))))()()(((((()((()(()(())()))(())(((((((()((((()())(()((()((()(()))())))(())((()))))(((((((()()()())(()))(()()((()())()((()((()()()(()(()()))(()())(())(((((()(((((((((((()((()(((()(((((((()()((((((()(((((()(()((()(((((())((((((()))((((())((()()((())(((())()(((((()()(((((()((()(()(((((((()(((((()((()((()((())(())((())(()))()()))(()()(()(()()(((((((()(((()(((())()(((((()((((((()())((((())()((()((()(()()())(()))((((()()((((((()((()(()(()((((()((()((())((((((()(()(())((((((()((((((((((()((())()))()(()(()(((((()()()))((())))()(()((((((((((((((()(((()((((()((())((()((()(((()()(()(((()((())(()()())))()(()(()(((((()()(()(()((((()(((((())()(()(()))(((((()()(((()()(())((((((((((((((())((())(((((((((((())()()()(())()(()(()(((((((((())(((()))(()()())(()((((()(())(((((()())(())((((((((())()((((()((((((())(()((()(())(((()((((()))(((((((((()()))((((()(())()()()(())(()((())((()()))()(((())(((((())((((((()()))(((((((((()((((((())))(((((((()((()(()(())))())(()(()))()(((((()())(()))()(()(())(((()))))())()())))(((((()))())()((()(()))))((()()()((((((()))()()((((((((())((()(()(((()(()((())((()())(()((((())(()(((()()()(()(()()))())())((((((((((())())((()))()((())(())(())))())()(()()(())))())(()))(((()(()()(((()(((())))()(((()(())()((((((())()))()))()((((((()(()(((((()())))()))))())()()(((()(((((())((()()(()((()((()(()(()(())))(()()()()((()(())(((()((()))((((()))())(())))())(()))()()()())()))(((()()())()((())))(())(()()()()(()())((()(()()((((())))((()((()(())((()(()((())()(()()(((()())()()())((()))((())(((()()(())))()()))(((()((())()(((((()())(())((())()())())((((((()(()(((((()))(()('
day2 = ["20x3x11", "15x27x5", "6x29x7", "30x15x9", "19x29x21", "10x4x15", "1x26x4", "1x5x18", "10x15x23", "10x14x20", "3x5x18", "29x23x30", "7x4x10", "22x24x29", "30x1x2", "19x2x5", "11x9x22", "23x15x10", "11x11x10", "30x28x5", "22x5x4", "6x26x20", "16x12x30", "10x20x5", "25x14x24", "16x17x22", "11x28x26", "1x11x10", "1x24x15", "13x17x21", "30x3x13", "20x25x17", "22x12x5", "22x20x24", "9x2x14", "6x18x8", "27x28x24", "11x17x1", "1x4x12", "5x20x13", "24x23x23", "22x1x25", "18x19x5", "5x23x13", "8x16x4", "20x21x9", "1x7x11", "8x30x17", "3x30x9", "6x16x18", "22x25x27", "9x20x26", "16x21x23", "5x24x17", "15x17x15", "26x15x10", "22x16x3", "20x24x24", "8x18x10", "23x19x16", "1x21x24", "23x23x9", "14x20x6", "25x5x5", "16x3x1", "29x29x20", "11x4x26", "10x23x24", "29x25x16", "27x27x22", "9x7x22", "6x21x18", "25x11x19", "14x13x3", "15x28x17", "14x3x12", "29x8x19", "30x14x20", "20x23x4", "8x16x5", "4x11x18", "20x8x24", "21x13x21", "14x26x29", "27x4x17", "27x4x25", "5x28x6", "23x24x11", "29x22x5", "30x20x6", "23x2x10", "11x4x7", "27x23x6", "10x20x19", "8x20x22", "5x29x22", "16x13x2", "2x11x14", "6x12x4", "3x13x6", "16x5x18", "25x3x28", "21x1x5", "20x16x19", "28x30x27", "26x7x18", "25x27x24", "11x19x7", "21x19x17", "2x12x27", "20x5x14", "8x5x8", "6x24x8", "7x28x20", "3x20x28", "5x20x30", "13x29x1", "26x29x5", "19x28x25", "5x19x11", "11x20x22", "4x23x1", "19x25x12", "3x10x6", "3x14x10", "28x16x12", "23x12x2", "23x12x19", "20x28x10", "9x10x25", "16x21x16", "1x18x20", "9x4x26", "3x25x8", "17x16x28", "9x28x16", "27x3x12", "17x24x12", "13x21x10", "7x17x13", "6x10x9", "7x29x25", "11x19x30", "1x24x5", "20x16x23", "24x28x21", "6x29x19", "25x2x19", "12x5x26", "25x29x12", "16x28x22", "26x26x15", "9x13x5", "10x29x7", "1x24x16", "22x2x2", "6x16x13", "3x12x28", "4x12x13", "14x27x21", "14x23x26", "7x5x18", "8x30x27", "15x9x18", "26x16x5", "3x29x17", "19x7x18", "16x18x1", "26x15x30", "24x30x21", "13x20x7", "4x12x10", "27x20x11", "28x29x21", "20x14x30", "28x12x3", "19x1x8", "4x8x6", "21x14x2", "27x19x21", "17x24x14", "15x18x11", "18x7x26", "25x28x29", "27x26x9", "18x12x17", "24x28x25", "13x24x14", "26x9x28", "9x3x30", "9x2x9", "8x1x29", "18x30x10", "18x14x5", "26x8x30", "12x1x1", "30x5x28", "26x17x21", "10x10x10", "20x7x27", "13x17x6", "21x13x17", "2x16x8", "7x9x9", "15x26x4", "11x28x25", "10x6x19", "21x6x29", "15x5x6", "28x9x16", "14x3x10", "12x29x5", "22x19x19", "25x15x22", "30x6x28", "11x23x13", "20x25x14", "26x1x13", "6x14x15", "16x25x17", "28x4x13", "10x24x25", "4x13x10", "9x15x16", "15x24x6", "22x9x19", "11x11x8", "4x19x12", "24x5x4", "27x12x13", "7x27x16", "2x6x9", "29x27x15", "18x26x23", "19x16x15", "14x5x25", "9x16x30", "4x6x4", "13x10x10", "1x8x29", "23x5x17", "19x20x20", "11x27x24", "27x15x5", "15x11x12", "21x11x3", "1x13x22", "17x8x8", "13x14x14", "17x22x7", "9x5x8", "2x6x3", "25x9x15", "11x8x13", "9x25x12", "3x16x12", "12x16x8", "16x24x17", "4x6x26", "22x29x11", "14x17x19", "28x2x27", "24x22x19", "22x20x30", "23x28x4", "16x12x14", "22x24x22", "29x1x28", "26x29x16", "3x25x30", "27x3x13", "22x24x26", "25x3x2", "7x24x2", "10x5x3", "28x8x29", "25x6x4", "12x17x14", "24x3x5", "23x27x7", "26x23x30", "11x10x19", "23x7x11", "26x14x15", "14x3x25", "12x24x14", "2x14x12", "9x12x16", "9x2x28", "3x8x2", "22x6x9", "2x30x2", "25x1x9", "20x11x2", "14x11x12", "7x14x12", "24x8x26", "13x21x23", "18x17x23", "13x6x17", "20x20x19", "13x17x29", "7x24x24", "23x8x6", "19x10x28", "3x8x21", "15x20x18", "11x27x1", "11x24x28", "13x20x11", "18x19x22", "27x22x12", "28x3x2", "13x4x29", "26x5x6", "14x29x25", "7x4x7", "5x17x7", "2x8x1", 
"22x30x24", "22x21x28", "1x28x13", "11x20x4", "25x29x19", "9x23x4", "30x6x11", "25x18x10", "28x10x24", "3x5x20", "19x28x10", "27x19x2", "26x20x4", "19x21x6", "2x12x30", "8x26x27", "11x27x10", "14x13x17", "4x3x21", "2x20x21", "22x30x3", "2x23x2", "3x16x12", "22x28x22", "3x23x29", "8x25x15", "9x30x4", "10x11x1", "24x8x20", "10x7x27", "7x22x4", "27x13x17", "5x28x5", "30x15x13", "10x8x17", "8x21x5", "8x17x26", "25x16x4", "9x7x25", "13x11x20", "6x30x9", "15x14x12", "30x1x23", "5x20x24", "22x7x6", "26x11x23", "29x7x5", "13x24x28", "22x20x10", "18x3x1", "15x19x23", "28x28x20", "7x26x2", "9x12x20", "15x4x6", "1x17x21", "3x22x17", "9x4x20", "25x19x5", "9x11x22", "14x1x17", "14x5x16", "30x5x18", "19x6x12", "28x16x22", "13x4x25", "29x23x18", "1x27x3", "12x14x4", "10x25x19", "15x19x30", "11x30x4", "11x22x26", "13x25x2", "17x13x27", "11x30x24", "15x1x14", "17x18x4", "26x11x3", "16x22x28", "13x20x9", "1x18x3", "25x11x12", "20x21x1", "22x27x4", "8x28x23", "7x13x27", "17x9x26", "27x27x20", "11x20x12", "26x21x11", "29x14x12", "27x25x1", "28x29x25", "21x23x28", "5x18x18", "19x5x4", "7x6x30", "27x8x11", "12x24x12", "16x25x22", "26x11x29", "25x22x17", "15x23x23", "17x9x6", "30x10x16", "21x3x5", "18x27x2", "28x21x14", "16x18x17", "4x18x2", "9x1x14", "9x1x9", "5x27x12", "8x16x30", "3x19x19", "16x26x24", "1x6x9", "15x14x3", "11x7x19", "8x19x3", "17x26x26", "6x18x11", "19x12x4", "29x20x16", "20x17x23", "6x6x5", "20x30x19", "18x25x18", "2x26x2", "3x1x1", "14x25x18", "3x1x6", "11x14x18", "17x23x27", "25x29x9", "6x25x20", "20x10x9", "17x5x18", "29x14x8", "14x25x26", "10x15x29", "23x19x11", "22x2x2", "4x5x5", "13x23x25", "19x13x19", "20x18x6", "30x7x28", "26x18x17", "29x18x10", "30x29x1", "12x26x24", "18x17x26", "29x28x15", "3x12x20", "24x10x8", "30x15x6", "28x23x15", "14x28x11", "10x27x19", "14x8x21", "24x1x23", "1x3x27", "6x15x6", "8x25x26", "13x10x25", "6x9x8", "10x29x29", "26x23x5", "14x24x1", "25x6x22", "17x11x18", "1x27x26", "18x25x23", "20x15x6", "2x21x28", "2x10x13", "12x25x14", "2x14x23", "30x5x23", "29x19x21", "29x10x25", "14x22x16", "17x11x26", "12x17x30", "8x17x7", "20x25x28", "20x11x30", "15x1x12", "13x3x24", "16x23x23", "27x3x3", "26x3x27", "18x5x12", "12x26x7", "19x27x12", "20x10x28", "30x12x25", "3x14x10", "21x26x1", "24x26x26", "7x21x30", "3x29x12", "29x28x5", "5x20x7", "27x11x2", "15x20x4", "16x15x15", "19x13x7", "7x17x15", "27x24x15", "9x17x28", "20x21x14", "14x29x29", "23x26x13", "27x23x21", "18x13x6", "26x16x21", "18x26x27", "9x3x12", "30x18x24", "12x11x29", "5x15x1", "1x16x3", "14x28x11", "2x18x1", "19x18x19", "18x28x21", "2x3x14", "22x16x5", "28x18x28", "24x16x18", "7x4x10", "19x26x19", "24x17x7", "25x9x6", "25x17x7", "20x22x20", "3x3x7", "23x19x15", "21x27x21", "1x23x11", "9x19x4", "22x4x18", "6x15x5", "15x25x2", "23x11x20", "27x16x6", "27x8x5", "10x10x19", "22x14x1", "7x1x29", "8x11x17", "27x9x27", "28x9x24", "17x7x3", "26x23x8", "7x6x30", "25x28x2", "1x30x25", "3x18x18", "28x27x15", "14x14x1", "10x25x29", "18x12x9", "20x28x16", "26x27x22", "8x26x1", "21x2x12", "25x16x14", "21x19x5", "12x9x22", "16x5x4", "5x4x16", "25x29x3", "4x29x13", "15x16x29", "8x11x24", "30x11x20", "17x21x14", "12x24x10", "10x12x6", "3x26x30", "15x14x25", "20x12x21", "13x11x16", "15x13x3", "5x17x29", "6x3x23", "9x26x11", "30x1x8", "14x10x30", "18x30x10", "13x19x19", "16x19x17", "28x7x10", "28x29x4", "3x21x10", "4x28x24", "7x28x9", "2x4x9", "25x27x13", "6x12x15", "4x18x20", "20x1x16", "5x13x24", "11x11x10", "12x9x23", "1x9x30", "17x28x24", "9x5x27", "21x15x16", "17x4x14", "8x14x4", "13x10x7", "17x12x14", "9x19x19", 
"2x7x21", "8x24x23", "19x5x12", "11x23x21", "13x3x1", "5x27x15", "12x25x25", "13x21x16", "9x17x11", "1x15x21", "4x26x17", "11x5x15", "23x10x15", "12x17x21", "27x15x1", "4x29x14", "5x24x25", "10x10x12", "18x12x9", "11x24x23", "24x23x3", "28x12x15", "29x9x14", "11x25x8", "5x12x2", "26x26x29", "9x21x2", "8x8x25", "1x16x30", "17x29x20", "9x22x13", "7x18x16", "3x3x23", "26x25x30", "15x23x24", "20x23x5", "20x16x10", "23x7x8", "20x18x26", "8x27x6", "30x23x23", "7x7x24", "21x11x15", "1x30x25", "26x27x22", "30x28x13", "20x13x13", "3x1x15", "16x7x1", "7x25x15", "12x7x18", "16x9x23", "16x12x18", "29x5x2", "17x7x7", "21x17x5", "9x9x17", "26x16x10", "29x29x23", "17x26x10", "5x19x17", "1x10x1", "14x21x20", "13x6x4", "13x13x3", "23x4x18", "4x16x3", "16x30x11", "2x11x2", "15x30x15", "20x30x22", "18x12x16", "23x5x16", "6x14x15", "9x4x11", "30x23x21", "20x7x12", "7x18x6", "15x6x5", "18x22x19", "16x10x22", "26x20x25", "9x25x25", "29x21x10", "9x21x24", "7x18x21", "14x3x15", "18x19x19", "4x29x17", "14x10x9", "2x26x14", "13x3x24", "4x4x17", "6x27x24", "2x18x3", "14x25x2", "30x14x17", "11x6x14", "4x10x18", "15x4x2", "27x7x10", "13x24x1", "7x12x6", "25x22x26", "19x2x18", "23x29x2", "2x15x4", "12x6x9", "16x14x29", "9x17x3", "21x9x12", "23x18x22", "10x8x4", "29x2x7", "19x27x15", "4x24x27", "25x20x14", "8x23x19", "1x24x19", "6x20x10", "15x8x5", "18x28x5", "17x23x22", "9x16x13", "30x24x4", "26x3x13", "12x22x18", "29x17x29", "26x4x16", "15x7x20", "9x15x30", "12x7x18", "28x19x18", "11x23x23", "24x20x1", "20x3x24", "1x26x1", "14x10x6", "5x27x24", "13x21x12", "20x20x5", "6x28x9", "11x26x11", "26x29x12", "21x4x11", "20x11x17", "22x27x20", "19x11x21", "2x11x11", "13x5x7", "12x10x25", "21x28x1", "15x30x17", "28x19x1", "4x19x12", "11x4x12", "4x10x30", "11x18x5", "22x20x12", "3x7x27", "20x26x4", "13x27x26", "23x14x13", "4x19x7", "26x27x16", "20x5x20", "18x5x8", "19x21x1", "22x8x1", "29x4x1", "24x10x15", "24x9x20", "10x3x8", "29x30x3", "2x8x24", "16x7x18", "2x11x23", "23x15x16", "21x12x6", "24x28x9", "6x1x13", "14x29x20", "27x24x13", "16x26x8", "5x6x17", "21x8x1", "28x19x21", "1x14x16", "18x2x9", "29x28x10", "22x26x27", "18x26x23", "22x24x2", "28x26x1", "27x29x12", "30x13x11", "1x25x5", "13x30x18", "3x13x22", "22x10x11", "2x7x7", "18x17x8", "9x22x26", "30x18x16", "10x2x3", "7x27x13", "3x20x16", "9x21x16", "1x18x15", "21x30x30", "4x25x23", "3x11x7", "5x6x12", "27x1x20", "13x15x24", "23x29x2", "13x5x24", "22x16x15", "28x14x3", "29x24x9", "2x20x4", "30x10x4", "23x7x20", "22x12x21", "3x19x11", "4x28x28", "5x4x7", "28x12x25", "2x16x26", "23x20x7", "5x21x29", "9x21x16", "9x6x10", "9x6x4", "24x14x29", "28x11x6", "10x22x1", "21x30x20", "13x17x8", "2x25x24", "19x21x3", "28x8x14", "6x29x28", "27x10x28", "30x11x12", "17x2x10", "14x19x17", "2x11x4", "26x1x2", "13x4x4", "23x20x18", "2x17x21", "28x7x15", "3x3x27", "24x17x30", "28x28x20", "21x5x29", "13x12x19", "24x29x29", "19x10x6", "19x12x14", "21x4x17", "27x16x1", "4x17x30", "23x23x18", "23x15x27", "26x2x11", "12x8x8", "15x23x26", "30x17x15", "17x17x15", "24x4x30", "9x9x10", "14x25x20", "25x11x19", "20x7x1", "9x21x3", "7x19x9", "10x6x19", "26x12x30", "21x9x20", "15x11x6", "30x21x9", "10x18x17", "22x9x8", "8x30x26", "28x12x27", "17x17x7", "11x13x8", "5x3x21", "24x1x29", "1x28x2", "18x28x10", "8x29x14", "26x26x27", "17x10x25", "22x30x3", "27x9x13", "21x21x4", "30x29x16", "22x7x20", "24x10x2", "16x29x17", "28x15x17", "19x19x22", "9x8x6", "26x23x24", "25x4x27", "16x12x2", "11x6x18", "19x14x8", "9x29x13", "23x30x19", "10x16x1", "4x21x28", "23x25x25", "19x9x16", "30x11x12", "24x3x9", "28x19x4", 
"18x12x9", "7x1x25", "28x7x1", "24x3x12", "30x24x22", "27x24x26", "9x30x30", "29x10x8", "4x6x18", "10x1x15", "10x4x26", "23x20x16", "6x3x14", "30x8x16", "25x14x20", "11x9x3", "15x23x25", "8x30x22", "22x19x18", "25x1x12", "27x25x7", "25x23x3", "13x20x8", "5x30x7", "18x19x27", "20x23x3", "1x17x21", "21x21x27", "13x1x24", "7x30x20", "21x9x18", "23x26x6", "22x9x29", "17x6x21", "28x28x29", "19x25x26", "9x27x21", "5x26x8", "11x19x1", "10x1x18", "29x4x8", "21x2x22", "14x12x8"]
day3 = '>^^v^<>v<<<v<v^>>v^^^<v<>^^><^<<^vv>>>^<<^>><vv<<v^<^^><>>><>v<><>^^<^^^<><>>vv>vv>v<<^>v<>^>v<v^<>v>><>^v<<<<v^vv^><v>v^>>>vv>v^^^<^^<>>v<^^v<>^<vv^^<^><<>^>><^<>>><><vv><>v<<<><><>v><<>^^^^v>>^>^<v<<vv^^<v<^<^>^^v^^^^^v<><^v><<><^v^>v<<>^<>^^v^<>v<v^>v>^^<vv^v><^<>^v<><^><v^><><><<<<>^vv^>^vvvvv><><^<vv^v^v>v<<^<^^v^<>^<vv><v<v^v<<v<<^^>>^^^v^>v<><^vv<<^<>v<v><><v^^><v<>^^>^^>v^>^<<<<v><v<<>v><^v>^>><v^^<^>v<vvvv<>>>>>^v^^>v<v<^<vv>^>^vv^>vv^^v<<^<^^<>v>vv^v>><>>>v^>^>^^v<>^<v<<>^vv>v^<<v>v<<><v>^vvv<v<vvv^v<vv<v^^^>v><<^<>><v^^>^v^>>^v<^<><v<>>v^<>>v<>>v^^^><^>>vvvv>^v<^><<>>^<>^>vv><v<<>>^^>v^^^><^<<^^v>v<^<<>v>^^vvv^v^>v^<>^^<>v^v>v>v<v^>vv>^^v<>v>>^<>><>v>v^<<vvvv<vvv><v^<^>^v<>>^><v>><>^<v>v<v>vv^>>vvv<>v>v<v^>>^>>v<<>^<>^<>>>^v<<<^<^v>vv^>><<><v^>^v^^^v<>^^vv><>><>>^>v^<v<>v<>>^<<^v>^^^<>^v^><>v<<v>vv^>vv<<>>><<^v^<>v<vv>>>^^<>^><<^>vv>>^<<v^^vv<>>><v>v><^<v<<>>>^^<>>^<^v><>vv^^^v>vvv>^><<>^^>^<<v^<v<^v<<>vvv<^<<>^>^v<vv<^>vvv>v>vv^<v^><>>^vv<^^^vv><^vv<v^<><v^vvv><<^>^^><v<<vv^>v<vv<v>^<>^v<<>v<v^v^>^>^>v<<^vvv<<<v>^^>^<<<<>vv>>^<>^>>>v<v>^^<v^<v<>>>vv>^^v<<>>>^^v><<<v<v<^v<>^^><v<^v<<v^><><^<><v<^^v>>><v^^v<<v^><^<><<v^>><^<>v>v^<><^<v>^v^>^>^vv^>^^<<vv^>vv<^vvv<>>^^<^>v^>^>^<v^><v<v>>>v<<<><^v<<><^<vv^v^^^>v<^^<v^vvv<v<><v<vv<^vv<>vv<v^<>>vvvvv<<>^v^v>vv>>>vvv^^<^<^<><>v<v>><^v><^<<<>><<<v>^>v<>^>^v>>^<>v^<^>><<>^<v>^>^^^>^^<v>>>><>^v^v><<<<vv^<vv<>vv>v<>v^<v^>v><>>>v^<><^vvv>vv^<^<<^<^^v>^>>>v<^<^v^^<^<^>>><v>vv>^<<><>^>>v>^<<>><^<>v<>vv^^>^>vvv^v<<^<^^<vv<>^vvv<^^v^vv^>>v<^>^^<v^<>v<^<^vv>v<<vv>vv>^>vvv>>>^^>v<>^v>v^<^>>v>^^v>>>>v^<v>v<^>v<v<<>>^v<^^<v><^<>>^<<vv^>>v<<v>^v<>><^>vv<v<^>>^^<vvvvvvvvv>>>v<v<>v^<>>^vv<v^^v<<^vvv^<<^><>vv<><<>>v>vv^><>>^^v^>>v^v^><<<>>^^<^v<<^<>>>>^<^>v^><<^>v<^v<^>>^^<<<<><^<^v^v<>>^v<^<<vv^<><^^vv><v^v^v>^>>^>^vv^>^v<v^v<<vvv^><>>^v^^><>v>vv><^>>vv<vvv<<<<^<>vvv^v<v>^<v<^>^<^<v<><>v^^^^<<vv<^^vv<v>><<v^><>>><v^>^v><^>^><vv^<><^<v>><<^vv<>>v^<<v<>v><v<><><vv>^>>v^<^<v>^><>>><^><v^v<>>>^^<^>v<v>vvv<>^<<><v^^>^>>v<^v>^>v>>>vv>v>>v^^^<^<vvv^<>^>^<v^<v^v>v>^>vv>vvv<>v<^>v>^^>>^<vv^^v>v^^^^^v^vv><^<><>^>vv<^>>^vvvv^^^>^<vv>^v<<^><^^>^<>^^>^<<v<^>>>^><<^^>v^v>>^>vvvv>^^v><v>>vv><<<vv<^>v>^^^<v>v^vvv<^><<^>^<>^><<<<<v^<<vv^v>^<>v<v>^>^>><>v^v<^vv^^>vv<<v^v>vv^vvv<<<<>^v<v^^v^v>v<<v>^^<>^vv^^>^>^v^vv^>>v^vv^^<vv><<v^v^^v><vv<^vvv<vv^^<<v>v^v^^^^v<^<^>v>^>v>^vv^v^^<v<^vvvv<<<>^<^^^<^^<>^<><vv<^^<<^>>><v^vvvv>^<>>^^>v^^v^<<v^^^<<<><^<v^v^^v<v^<>v><<v<>^v>v<^><^>vv^^<vvv<^v>>v>^<><v^><^^^<v^>>vv<<<<<^<>^v^v>^vv^<>v>v<^>vv<<^vv>vv<v<><>>v>><v<^<^^>><<v^v<<^><v<^<vv<v<<vv^>^<<><^^>^<^>>^<vv>><v<<vvv<^^v^>^^<^v>^v<v<>v><v^v^<<^<><<v<<^v>v<<>>^>v>>v>>v<^<<^<^>>>v>^^^v><^>^^>>v<<>^v><v>vvv^vv<<<>vvv<<>^>>>v<v<v^<^<^>^<^>v^^v<^^<v<>v<>>^^>^v^>v<<<<^<>v^><<<v>>>><<v^<^vv>v>><>>^<<<^<^^>v<>>v<>vv<<^<<><<^>v^^^vv^>vvvv>>v>v^><<v<>vv^<<><<vvv>^>>>^<<<^<^<<v>^>v<>>v>>vv^^><<<<^^^v>><<^><v><v^^><v<<v^^v^^v>>v<><><<>^><v><^<vv>><^v<>v<vvv<>^>><v>>v<^><<v>^<>^v><^><^^<v>^><^^v^<<><>>^>v^<^v^vv<><^>vv^>v^vvv^<>>^><^<^<>^<<v^v<^v><>^v<v>>^>>^v^vv>><vv><v^^<<^v^<>^v<<>^><^>><v>>v<<<v^^vv<>^^v>>><><><<v^<<<v^<^^><v^>v^^vv<v^<>>vv^<^v<>^v>>v^v>v<^^vv><>^v<<>v^<>v^>>v>vvv<^><><^^>^vv^>>v^>^<^^<><>><<>^^^><^v^v><<<><<^v^vv>v>><^>>><v^>v<v><><v^v<>v^^>>v<<>v>v<v<v<^^<><>v^^<>>v<^v<v>v<><v<v>^<<>v>vv^^<>>^^^<>^^>^v>v>>>^v^v><v^^<><v>^^v^v<^<^^><<v<^<^<>^<>><<>^>>^>^^><v><>v<><>><<<>>>>vv>>>^>>^v<^>v^^^v<<vv>><<<^<<<>>>>>^>vv<^v^<>^<v^>^v><v>vvv<>>>^v^^^v<<<<>>^^<vv<^<^^>^<>v<^<<<>><>>v<^<>^<vvv<^<>><><<v>^^^>^^<<v<v^>^^v^>><<^vv><v>^v>>^<v>v>^^>^v>^vvv<>v^v^^<><vv>vv^>>><>v
<^><v<v^<><<<>^v>^v<<<^>^>^>v^v<<><vvv<<v^^<><v>^>>><vv>><v>>v^<vv>>vv<<^v^v<<><^v<vv>>>vv<>>>>^vv>v^<>vv>v^v<v^><v<^^^^^>vv<><<vvv^<v><^<vv><^^^vv^<>^^^^<^><^<>v^<v^v<<^v<<^^<>>^<v^^>>>vv<vvv<>v<<>><^vvv^<<^^<<>>>^<>>>v^^><>><<>><v^v>>>>>><>>><v^<<vvv^>v<>>v^<>vv<><^^^^v^<<^<v^vv><<^^>v<^vvv^v>>v>^>>v>^^><<v^<>v<>vv<^v^vv><v><<vv^v>>v^>>v<^^^>^><<v<>^><>v>>>vvv<v<vv<^>>^v<v>^<^^^^^v><>v><>v^v^v<v^vv^v>vvvv<>vv<<<vv<v<<>^<^>^^v^<<>^<v><^><v<v<><<>v^<<^<><vv>v<<^v>>^v<><v>^>>^^><>v^<^<vvv^>^>^<<<<>vv>^v^v<^^^<vv>><>^^<<v<^<^^>>>v^v<<^^^<v<v<^<>^v<v><v^vv^^v^^v^^<vv<>^<><vv^<^v^<<^><<vvv>^^<^^^<^v>^>^vv><<<^v<v>vv>v<>v^v<v^>v^>>>v^v<>^v<<>^vv>v>v>v^<^>v^^<^>^^^^vv>^^><^>vv^>>^^v>><<<<^><>v<>^<v<vv^>^^><<^><v>v^>^^<^>>><>><v^v<v^<v<vv^v^<<^<vvv>>><vv<^^>>^>^><<v^<>>v>v^v^^><<>vv^v>v^<v><^<>^^<^>v>^<><<<v>^<^<^>^>^>^^v^<<^^v^^<^<>><^>v>>^^<>^^^<<<<v^>^v<^vv>^<<<v<><<v<>vv>>>v><>>><>>v<<<vv><>^v>v<^>><^><><v<>^v^>^v>^v<<><<^<>>v>^><>^>><>><^<v^><v^^<><v><^^>^v^^<>v^<v^<^v<v^^^^^v^<<^>^^^<^v><>^^<<<><<<<<^^>v^vvvv>v<>>vv<^>^v^>v<^vv^v<<><<v>v^v>^^><><^<v^>v><vv><>>><<>^vv<>v>>v<^v>>>v<v>v>v>^vv<<>^^vv<v<^v^<v<v>vv<>^<^<vv<v^<^v^^><<>^>><^v>vv^^v<<^^><<>v^^<><><v^^<v^v>^>^>^>v<^<v>^v^^>v<>vvv<^v<v^v><<v^><<^^><^<<v^v^>v<>^>v><><v>^<v<v>^<^^^>^v<<><<><>vv>v^<>v^><v^v<v><><<v>v<vv><<v>>v>^<<<>vv>>vvv>^^vv^v^^<^^<>v^^<>v>>^^>^>^>v>><^>><>>^<<>><^>v<<<<<<<^v^v<v^<v^^>^<><<v<^>v^>v^vv<<^^vv^>>>>^<>v<^v<>v<vv<^>>v^vv>vv><vv<<^>v>><vv>>>vv^<<<<vv^>v<<<<^^>^^v^><<^<v^>v^>^^<v<>vvv^>^<>vvv<v<^^>v^<<v>><>v<v<>^^<vvv>^>vv><><<<^^vv<v^<v<>v<>><<v><^vv^>^<^>^^^<<<v>vv^<^<<>^>^<vv>v><v<<^><^>^^<vv^v^^>>>>vv^><^^vv><>^<v^v>v<vv>v><<<v>v<v>^><v^^><v>v<^v^>>^^<v^>^^>vv>>vv^><^vv^vv<<^>vv>^v<v><vv><v<vvvvv>^^v^v><v>>>^vv<>v>^^^^<^>><>^v^^^>v<^^<<^^v<vv<>vvv<^>><><^>>^><^<>v<v<<><<v><v^v<>><^>v><<v^<v>v<^<vv^v^v^>vvv^^>v>^<vv^>v^v^<>v>^>>vv>><^^<v<<>^vv<><><<^v<v>v<<vv><>><^v<v>>v^>vvv^v^<<^><v<>^vv^>v^<v<^>>v<v><v><v>>^<<<v^<><<>v>^>^^<v<>>^<>^>^><<<^<<^<<^>^v>>><vvv>><<<<v>>>>>>>^<^v<^>v<>vv<><>v>>^>>^>vv^^><<^<v<v>>^^<<^>v<^>>vv>^<>v><^>v<vv>>>>>>^v<^<<<v^><vv<<>>vv<<><v<><<<v<^<v<>>v<^^^^v^^<^^^<^<vv><<^>><>v<<>v<v<>>>><>v^vv>^>^>>vv^v<v<<><^v>vv^><v<<>v^v<^>vv<<^^v><^>>^^vv<^<>>v^^>><v>^v>>>^>>v>v<>v<^vv><>^<<^>vv>>><><>v^><>v^>v>v><^v<><v<v>^v<<^vv^><^^>><^^^<<<^>v>^v>>><^>><^>>>^^^<^>vv<><<<v^>^<^^>>^^^v^v^v>v<v>>>><^>>>v>^vv<<^^^<^^vv>v<<><v<<^^>v>><<v^^><^>^<^>^v^>v><^<^vv>v>><>^<<vv<<v>v<vv<v>^>^>><^^<v>^v^v<><<>vvv<^<v>^><>^>vvv>>>^><<>><v^^<^<<^v>>^v<v<vv>vv^v^>v<<vvv<^^v^v>^<^>>^>v<^>^v<<><<<^>^<^^^>vv<^^^^vv<v<^^v<<<<v<^v^<><v<<^><<>vv>>><^<^<>>>^>^>>^<<<<<^^v>^>^<>vvv^^<^><^>^^v>^vv^><v^<^<<v^<vvv<<^v<><^><^>>>v>^v>^>^v<vv^v>><v><^><v^^>v^>^<><<><>v<v^>vvv^>^>>v<>^><^>^><vvv>^^v^v>v<>^v^><^>>v>v^><<<^>>^<>^<>>v><>>v^>^>^^<>>v^>^<vvvv<^vvvv^>>vv^<v^v>^vv<>v<>^<v<v>v>^^><^>vv^<^v^<<^<^<><vv<^v<^v><>>>^v^<<^><^>vv<v>v<^>vv^>v<<<>^<><v<^^^>v><^^<>^<^<v^vv^<<^>><<v^v<^vvv<<<>>vvvv^v^^^>v<>>><<>vvv<<^^^>v>v>>v<<v<v^v^>^^v>^><^<><<v^<v<v^^^><>v^^^<v>vv<>^>^^vv>^<<^v<^v><v>>>^>>><^<<>^v>>^>vv<<<v<>^<v><v^<^<>v>v^^v^>><<^v<<<<>v>v>v^^<^><>^^<<<v>vv<>>>^>>v<><v^>^<><vv>v>v^v<v^<^>>^>><<^^<^^v<vv<>><<<v<^<<^^^>vvv^<vvv<^>vv><>><<<^<v^v^^<<^vvv^^<^<><<>^<^<>>vvv<>^<>v^v<><>>v^v><<>>>vvv>v<>^>>^><^>vv<<>>v<<^><>v>>^^<v>^>^<<>><^<<vv<^<vv^vv><>>>><^<v>^>vv<v><>^<>vvvvv^vv<<v<>>>^<<><>^^vvv>>>vv<<^^><^v^^v<>^^>^><^>v^^^^v<^<<vv<vv<>vv^^>v^vv>v><>>vv>^<^<v^v^>>v^v^^v>^>vv^>v<vvvv<^v<^v>^v>^^v<<^>^^<<>^><^v>>>vv^>^^>vvvv>>v<^<v>^>>>v^<><^<^^<v>vv^^><v>v^<>^^^>>><^^v>v>^<<>^<v^>vvv^>^^^><v<^>>v<v>>^v><<><<>v<^<<>^><>^>
vv>^<v>^^v<<^v^vvv^^>^vv^<^>^>^^v>v^>^<<><<^>v>>vv^vv><v>>^<<^<v^^<^<v^^vv^><^^<^^><v^^>v^^^<^<>^<>>^v<^vvv^^v^<><^>>>>>v><><<<>vv<^v>><<>vvv<><<vv<<<^>v^^>>^>^v>><><^^v<>><>>v^>^<vv><<<>><><<v>^^<>>v<><^<vv>vv<^v>^<<<<v<^<<^^>>^<><^>><<>^>v>^^^v>>^<^^v><v^v>^><<><>>^>>^<<v<>^v<>^>^<v>>vv>^vvv<<v<<^>^>^<<^^<>^^^^vvv<>^vv<vvvvv^^>^^<^>>><>v^<><^<<^>v^^v<>>^vv<>v^^<>>v^vvvvv<<v^<v^^>>><vvvvv>><^>vv>v^v^<v<^>^^><^>^^^^v<><^v<<>v^>v>>vv<<>^<v^^>vvv>^^<v^<>vv^><>><v^^v<>^>>^>v><>>^^v>^>^>>>^>v<^v>v>^<^^^^^>>v<v<>>v<<^>^<v<<>^^>><<^><>v<>^^^vv<>^^>><<^^>v>vv>vv>v^>^v>v^^<>>><<v><v<<>>v><>vvv^^v>^^>^vvvv^>^<>^vvvv><v><v<>>><>^<^vv<>^v<^v<>^vvv<<>><vvv^>>^><<vv^<v^>^<v<<^^>^^<^^v^>v<>v^v><>><v^^>>^vvv><^vv>v^<^<^v>>v^^>^vvv^<v^^v^^>v<^<>>^<>>>^^<><^^vv<>^vv^<>>>>^^<<^^<>vv^^><>^^<v<<v>^<v^^>^v<><><>vvv>^v^>>vv<<^v<<>><v>^><^>>>^<^<^^>vv^<<^<>>^^><><<v>^^<v>>v<<vvvv>^v^vv>><^^<<^>>v>v<^^^<^><^^vv>^vv<^<vv<>v><^<><v><^^^>>^<><^<v>>>>v^<v>>>>>v<><^^>v<^<^>><v<>^>vv>^^v^v^<<v<><<<^v^><<^<><<<<v<^>><<<>v>>vv><vv<><<^<^<><vv>^^^^<>v<<<<v>vv<>vv^^^>><>vv^><>>^vv<<><^^vv<>v^>>^<<>^<v^<^>v<'
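# The long '<^v>' string that closes above is presumably the Day 3 movement
# input (its variable is assigned further up). A minimal sketch of how such a
# string is consumed: walk an infinite grid and count distinct houses visited.
# `count_houses` is an illustrative name, not part of the original file.
def count_houses(moves):
    x = y = 0
    seen = {(0, 0)}  # the starting house gets the first delivery
    steps = {'^': (0, 1), 'v': (0, -1), '<': (-1, 0), '>': (1, 0)}
    for move in moves:
        dx, dy = steps[move]
        x, y = x + dx, y + dy
        seen.add((x, y))
    return len(seen)
# e.g. count_houses('^>v<') == 4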
day4 = 'bgvyzdsv'
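# Day 4 asks for the lowest positive integer n such that md5(day4 + str(n))
# starts with five zeroes. A minimal sketch; `mine_advent_coin` is an
# illustrative name, not part of the original file.
import hashlib

def mine_advent_coin(key, prefix='00000'):
    n = 1
    while True:
        if hashlib.md5((key + str(n)).encode()).hexdigest().startswith(prefix):
            return n
        n += 1
# e.g. mine_advent_coin('abcdef') == 609043 (the puzzle statement's example)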
day5 = ['uxcplgxnkwbdwhrp','suerykeptdsutidb','dmrtgdkaimrrwmej','ztxhjwllrckhakut','gdnzurjbbwmgayrg','gjdzbtrcxwprtery','fbuqqaatackrvemm','pcjhsshoveaodyko','lrpprussbesniilv','mmsebhtqqjiqrusd','vumllmrrdjgktmnb','ptsqjcfbmgwdywgi','mmppavyjgcfebgpl','zexyxksqrqyonhui','npulalteaztqqnrl','mscqpccetkktaknl','ydssjjlfejdxrztr','jdygsbqimbxljuue','ortsthjkmlonvgci','jfjhsbxeorhgmstc','vdrqdpojfuubjbbg','xxxddetvrlpzsfpq','zpjxvrmaorjpwegy','laxrlkntrukjcswz','pbqoungonelthcke','niexeyzvrtrlgfzw','zuetendekblknqng','lyazavyoweyuvfye','tegbldtkagfwlerf','xckozymymezzarpy','ehydpjavmncegzfn','jlnespnckgwmkkry','bfyetscttekoodio','bnokwopzvsozsbmj','qpqjhzdbuhrxsipy','vveroinquypehnnk','ykjtxscefztrmnen','vxlbxagsmsuuchod','punnnfyyufkpqilx','zibnnszmrmtissww','cxoaaphylmlyljjz','zpcmkcftuuesvsqw','wcqeqynmbbarahtz','kspontxsclmbkequ','jeomqzucrjxtypwl','ixynwoxupzybroij','ionndmdwpofvjnnq','tycxecjvaxyovrvu','uxdapggxzmbwrity','csskdqivjcdsnhpe','otflgdbzevmzkxzx','verykrivwbrmocta','ccbdeemfnmtputjw','suyuuthfhlysdmhr','aigzoaozaginuxcm','ycxfnrjnrcubbmzs','fgbqhrypnrpiizyy','taoxrnwdhsehywze','echfzdbnphlwjlew','jhmomnrbfaawicda','fywndkvhbzxxaihx','aftuyacfkdzzzpem','yytzxsvwztlcljvb','iblbjiotoabgnvld','kvpwzvwrsmvtdxcx','ardgckwkftcefunk','oqtivsqhcgrcmbbd','wkaieqxdoajyvaso','rkemicdsrtxsydvl','sobljmgiahyqbirc','pbhvtrxajxisuivj','ggqywcbfckburdrr','gmegczjawxtsywwq','kgjhlwyonwhojyvq','bpqlmxtarjthtjpn','pxfnnuyacdxyfclr','isdbibbtrqdfuopn','vucsgcviofwtdjcg','ywehopujowckggkg','mzogxlhldvxytsgl','mllyabngqmzfcubp','uwvmejelibobdbug','brebtoppnwawcmxa','fcftkhghbnznafie','sqiizvgijmddvxxz','qzvvjaonnxszeuar','abekxzbqttczywvy','bkldqqioyhrgzgjs','lilslxsibyunueff','ktxxltqgfrnscxnx','iwdqtlipxoubonrg','twncehkxkhouoctj','bdwlmbahtqtkduxz','smbzkuoikcyiulxq','bjmsdkqcmnidxjsr','icbrswapzdlzdanh','eyszxnhbjziiplgn','pdxhrkcbhzqditwb','nfulnpvtzimbzsze','glayzfymwffmlwhk','bejxesxdnwdlpeup','ukssntwuqvhmsgwj','hoccqxlxuuoomwyc','rapztrdfxrosxcig','cxowzhgmzerttdfq','yzhcurqhdxhmolak','kqgulndpxbwxesxi','yjkgcvtytkitvxiu','xnhfqhnnaceaqyue','qkuqreghngfndifr','xesxgeaucmhswnex','occbvembjeuthryi','dmefxmxqjncirdwj','ystmvxklmcdlsvin','pplykqlxmkdrmydq','cbbjkpbdvjhkxnuc','embhffzsciklnxrz','asrsxtvsdnuhcnco','xcbcrtcnzqedktpi','mglwujflcnixbkvn','mnurwhkzynhahbjp','cekjbablkjehixtj','kbkcmjhhipcjcwru','usifwcsfknoviasj','rsfgocseyeflqhku','prgcyqrickecxlhm','asbawplieizkavmq','sylnsirtrxgrcono','nzspjfovbtfkloya','qfxmsprfytvaxgtr','yckpentqodgzngnv','ycsfscegcexcnbwq','kbmltycafudieyuh','tpahmvkftilypxuf','qivqozjrmguypuxu','gdhbfradjuidunbk','vxqevjncsqqnhmkl','rpricegggcfeihst','xucvzpprwtdpzifq','egyjcyyrrdnyhxoo','kfbrzmbtrrwyeofp','qpjdsocrtwzpjdkd','reboldkprsgmmbit','vwkrzqvvhqkensuy','ydvmssepskzzvfdp','vqbigplejygdijuu','mzpgnahrhxgjriqm','uiejixjadpfsxqcv','tosatnvnfjkqiaha','yipuojpxfqnltclx','pcxwvgcghfpptjlf','shrudjvvapohziaj','jdckfjdtjsszdzhj','hgisfhcbdgvxuilk','gytnfjmrfujnmnpp','ohflkgffnxmpwrrs','jzxajbkwwjknasjh','xrcxfollmejrislv','djjlwykouhyfukob','rittommltkbtsequ','lpbvkxdcnlikwcxm','vkcrjmcifhwgfpdj','dkhjqwtggdrmcslq','swnohthfvjvoasvt','yrzoksmcnsagatii','duommjnueqmdxftp','inlvzlppdlgfmvmx','xibilzssabuqihtq','inkmwnvrkootrged','ldfianvyugqtemax','gbvwtiexcuvtngti','temjkvgnwxrhdidc','askbbywyyykerghp','onezejkuwmrqdkfr','kybekxtgartuurbq','ubzjotlasrewbbkl','stueymlsovqgmwkh','lhduseycrewwponi','yohdmucunrgemqcu','onnfbxcuhbuifbyc','odrjkigbrsojlqbt','imqkqqlkgmttpxtx','sxmlkspqoluidnxw','akaauujpxhnccleb','xvgpghhdtpgvefnk','jdxeqxzsbqtvgvcq','mdusenpygmer
xnni','agihtqvgkmgcbtaw','dovxcywlyvspixad','uulgazeyvgtxqkfz','ndhmvrwuflhktzyo','hcaqkmrbvozaanvm','tvfozbqavqxdqwqv','rlkpycdzopitfbsv','dmyjtmjbtnvnedhs','fmwmqeigbzrxjvdu','twgookcelrjmczqi','grxosmxvzgymjdtz','zsstljhzugqybueo','jpeapxlytnycekbd','iasykpefrwxrlvxl','azohkkqybcnsddus','aoaekngakjsgsonx','awsqaoswqejanotc','sgdxmketnjmjxxcp','ylnyuloaukdrhwuy','ewoqjmakifbefdib','ytjfubnexoxuevbp','ewlreawvddptezdd','vmkonztwnfgssdog','ahbpuqygcwmudyxn','kmahpxfjximorkrh','otjbexwssgpnpccn','aewskyipyztvskkl','urqmlaiqyfqpizje','nrfrbedthzymfgfa','vndwwrjrwzoltfgi','iiewevdzbortcwwe','qiblninjkrkhzxgi','xmvaxqruyzesifuu','yewuzizdaucycsko','hmasezegrhycbucy','dwpjrmkhsmnecill','hnffpbodtxprlhss','avmrgrwahpsvzuhm','nksvvaswujiukzxk','zzzapwhtffilxphu','vwegwyjkbzsrtnol','qurpszehmkfqwaok','iknoqtovqowthpno','brlmpjviuiagymek','efxebhputzeulthq','mzkquarxlhlvvost','xsigcagzqbhwwgps','qufztljyzjxgahdp','dlfkavnhobssfxvx','hgdpcgqxjegnhjlr','fboomzcvvqudjfbi','wnjuuiivaxynqhrd','nhcgzmpujgwisguw','wjeiacxuymuhykgk','qmeebvxijcgdlzpf','nmmnxsehhgsgoich','ejluaraxythbqfkl','mdbsbwnaypvlatcj','nnfshfibmvfqrbka','dvckdmihzamgqpxr','foztgqrjbwyxvewk','okpryqcbvorcxhoh','fpiwsndulvtthctx','zrbiovlmzdmibsiq','setwafbnnzcftutg','nyvqghxhgkxfobdm','enpvqadzarauhajl','twblhpvkazpdmhmr','lbhlllsgswvhdesh','tdfwkgxnqjxcvsuo','lnvyjjbwycjbvrrb','jsxqdvmzaydbwekg','xirbcbvwlcptuvoa','hwnukxenilatlfsk','khwopjqkxprgopmd','sljzdoviweameskw','stkrdmxmpaijximn','fdilorryzhmeqwkc','mfchaaialgvoozra','gjxhoxeqgkbknmze','beowovcoqnginrno','mkgmsgwkwhizunxo','phnhfusyoylvjdou','csehdlcmwepcpzmq','pgojomirzntgzohj','fkffgyfsvwqhmboz','mrvduasiytbzfwdn','epzrmsifpmfaewng','ooqxnoyqrlozbbyf','ahcxfmgtedywrbnx','ibqktvqmgnirqjot','xarssauvofdiaefn','xradvurskwbfzrnw','nxklmulddqcmewad','twichytatzoggchg','qmgvroqwrjgcycyv','yvezgulgrtgvyjjm','jgmcklzjdmznmuqk','bytajdwwconasjzt','apjttucpycyghqhu','flfejjzihodwtyup','gmrtrwyewucyqotv','nlohdrlymbkoenyl','wxcmqwbrwgtmkyfe','njtzlceyevmisxfn','htbbidsfbbshmzlt','gxhjeypjwghnrbsf','cifcwnbtazronikv','ezvjijcjcyszwdjy','srffeyrvyetbecmc','xpjefrtatrlkbkzl','yhncvfqjcyhsxhbb','pqhcufzlcezhihpr','qtdsfvxfqmsnzisp','dfonzdicxxhzxkrx','mqqqzhxkyfpofzty','dodjadoqyxsuazxt','jjwkrlquazzjbvlm','ttosfloajukoytfb','llateudmzxrzbqph','criqihrysgesmpsx','npszvlittbcxxknj','qmzojrvraitrktil','cfyoozzpwxwkwoto','daxohtcgvtktggfw','vthkpkoxmiuotjaj','pkfkyobvzjeecnui','ojcjiqrfltbhcdze','scbivhpvjkjbauun','ysowvwtzmqpjfwyp','laeplxlunwkfeaou','jufhcikovykwjhsa','xrucychehzksoitr','pyaulaltjkktlfkq','oypfrblfdhwvqxcv','zybrgxixvhchgzcf','puoagefcmlxelvlp','xjnhfdrsbhszfsso','ocgvzryoydaoracw','bxpnqllmptkpeena','pziyeihxlxbbgdio','bvtrhtlbfzmglsfc','ggpuvtseebylsrfk','pukenexjqecnivfj','jswabfbzpnhhdbpn','enojrtwqpfziyqsv','rjtmxudgcudefuiz','iqmjxynvtvdacffc','uheywxlsusklitvl','kwhxduejafdpmqdc','rspgblenbqlmcltn','rczhurnrqqgjutox','dqhytibjzxkdblzl','hpbieadydiycvfys','pucztfoqvenxiuym','nqpfzgpblwijiprf','ltgseeblgajbvltk','mwxukbsnapewhfrc','dvxluiflicdtnxix','pexfbpgnqiqymxcq','dakudfjjwtpxuzxy','letlceyzlgmnrewu','ojktahbsdifdfhmd','anezoybbghjudbih','sawxtlvzysaqkbbf','ttnkctcevpjiwqua','edrwrdvbaoqraejd','wnbfilvuienjxlcr','wqhzwvyybyxhhtsm','jxbgvyaqczwdlxfo','wbypqfmbwrsvfmdv','izdxjyfpidehbets','vbxbggqseurknjor','egpmpoxickhvwdlz','ivfrzklvpwoemxsy','xkziseheibmrpdww','xnrmtoihaudozksa','efemdmbxdsaymlrw','yjdjeckmsrckaagx','vlftqxxcburxnohv','fwyquwgajaxebduj','dwpmqvcxqwwnfkkr','isduxxjfsluuvwga','avdtdppodpntojgf','vrcoekdnutbnlgqk','kbhboxjmgomizxkl','cgsfpjrmewexgz
fy','usdtnhjxbvtnafvp','bjoddgxbuxzhnsqd','hoyqdzofddedevsb','rwiwbvqfjajotaoj','iabomphsuyfptoos','bubeonwbukprpvhy','xurgunofmluhisxm','puyojzdvhktawkua','dbvqhztzdsncrxkb','oaeclqzyshuuryvm','nmgwfssnflxvcupr','vjkiwbpunkahtsrw','romyflhrarxchmyo','yecssfmetezchwjc','qwtocacqdslhozkd','mesexvfbtypblmam','mtjucgtjesjppdtt','pvodhqqoeecjsvwi','vvlcwignechiqvxj','wiqmzmmjgjajwgov','kwneobiiaixhclev','lkdeglzrrxuomsyt','oqovuwcpwbghurva','lfsdcxsasmuarwwg','awkbafhswnfbhvck','sztxlnmyvqsiwljg','hozxgyxbcxjzedvs','oifkqgfqmflxvyzn','mfvnehsajlofepib','delgbyfhsyhmyrfa','uenimmwriihxoydv','vjqutpilsztquutn','kfebsaixycrodhvl','coifyqfwzlovrpaj','xiyvdxtkqhcqfsqr','hoidcbzsauirpkyt','fiumhfaazfkbaglq','fzwdormfbtkdjgfm','faxqrortjdeihjfv','ljhaszjklhkjvrfi','pzrxsffkuockoqyl','immbtokjmwyrktzn','lzgjhyiywwnuxpfx','vhkocmwzkfwjuzog','ghntjkszahmdzfbl','gbcthxesvqbmzggy','oyttamhpquflojkh','nbscpfjwzylkfbtv','wnumxzqbltvxtbzs','jfhobjxionolnouc','nrtxxmvqjhasigvm','hweodfomsnlgaxnj','lfgehftptlfyvvaj','ccoueqkocrdgwlvy','euhgvirhsaotuhgf','pdlsanvgitjvedhd','seokvlbhrfhswanv','pntdqaturewqczti','jkktayepxcifyurj','dhzzbiaisozqhown','wehtwakcmqwczpbu','zwvozvspqmuckkcd','efucjlrwxuhmjubr','lzodaxuyntrnxwvp','qdezfvpyowfpmtwd','mizijorwrkanesva','txmitbiqoiryxhpz','xhsqgobpouwnlvps','muixgprsknlqaele','disgutskxwplodra','bmztllsugzsqefrm','ymwznyowpaaefkhm','ebfifzloswvoagqh','pkldomvvklefcicw','ziqzbbfunmcgrbtq','iuekfpbkraiwqkic','jflgjidirjapcuqo','achsfbroyrnqnecg','udbhouhlgjjzapzr','arerrohyhhkmwhyo','txyjzkqexgvzdtow','ogzrjwibvzoucrpg','rfdftaesxdnghwhd','axdhwmpuxelmpabo','gtktemowbsvognac','wkfuclilhqjzxztk','qbwjouutzegaxhrz','opfziwqqbwhzzqhj','pvcvcsupfwsmeacs','xsbohvbguzsgpawn','sczoefukwywxriwj','oqkhcqfdeaifbqoc','vtsrholxbjkhwoln','yuvapljnwbssfbhi','dxdfwccqvyzeszyl','gdbmjtonbiugitmb','qunirtqbubxalmxr','zzxsirhdaippnopr','fibtndkqjfechbmq','gqgqyjvqmfiwiyio','ihwsfkwhtzuydlzw','eygyuffeyrbbhlit','zdlsaweqomzrhdyy','ptbgfzuvxiuuxyds','llxlfdquvovzuqva','wfrltggyztqtyljv','kwipfevnbralidbm','gbhqfbrvuseellbx','obkbuualrzrakknv','hlradjrwyjgfqugu','vtqlxbyiaiorzdsp','tedcbqoxsmbfjeyy','cxdppfvklbdayghy','gjnofexywmdtgeft','ldzeimbbjmgpgeax','egrwsmshbvbawvja','vadfrjvcrdlonrkg','mojorplakzfmzvtp','jyurlsoxhubferpo','ijwqogivvzpbegkm','cnmetoionfxlutzg','lawigelyhegqtyil','mqosapvnduocctcd','eqncubmywvxgpfld','vigfretuzppxkrfy','ncwynsziydoflllq','cbllqinsipfknabg','ndtbvdivzlnafziq','iqrrzgzntjquzlrs','damkuheynobqvusp','jxctymifsqilyoxa','ylritbpusymysmrf','paoqcuihyooaghfu','obhpkdaibwixeepl','igrmhawvctyfjfhd','ybekishyztlahopt','vkbniafnlfqhhsrq','kltdigxmbhazrywf','ufhcoyvvxqzeixpr','klcxdcoglwmeynjt','funpjuvfbzcgdhgs','akgyvyfzcpmepiuc','zhlkgvhmjhwrfmua','ibsowtbnrsnxexuz','vpufbqilksypwlrn','ngrintxhusvdkfib','ziuwswlbrxcxqslw','sucledgxruugrnic','zwnsfsyotmlpinew','oaekskxfcwwuzkor','qjmqwaktpzhwfldu','tmgfgqgpxaryktxo','qfaizepgauqxvffk','addkqofusrstpamf','shdnwnnderkemcts','gwfygbsugzptvena','fpziernelahopdsj','bkkrqbsjvyjtqfax','gxrljlqwxghbgjox','ipfwnqaskupkmevm','nnyoyhnqyfydqpno','lgzltbrrzeqqtydq','fgzxqurhtdfucheb','jvpthtudlsoivdwj','bmlhymalgvehvxys','fhklibetnvghlgnp','hfcyhptxzvblvlst','donanindroexgrha','oqawfmslbgjqimzx','jzgehjfjukizosep','bhlgamcjqijpvipb','jrcrdjrvsyxzidsk','ouwfwwjqezkofqck','wrvsbnkhyzayialf','knhivfqjxrxnafdl','hbxbgqsqwzijlngf','qlffukpfmnxpfiyq','evhxlouocemdkwgk','baxhdrmhaukpmatw','nwlyytsvreqaminp','ljsjjzmlsilvxgal','onunatwxfzwlmgpk','njgolfwndqnwdqde','ngdgcjzxupkzzbqi','ieawycvvmvftbikq','ccyvnexuvczvtrit','enndfwjpwjyasjvv
','tcihprzwzftaioqu','bkztdkbrxfvfeddu','qkvhtltdrmryzdco','rurtxgibkeaibofs','mjxypgscrqiglzbp','unpkojewduprmymd','csqtkhjxpbzbnqog','mednhjgbwzlhmufi','sfrwfazygygzirwd','ijqeupbrhhpqxota','cmhpncanwudyysyh','wwcxbwzrplfzrwxd','jriomldifuobjpmq','radonyagpulnnyee','ryqjwxsspbbhnptd','yeoqpnsdhludlmzf','qsqlkeetyalenueh','qnnedenwsjdrcrzt','lejkuhsllxbhfcrx','anddbvllrrqefvke','wdtljquijaksvdsv','adslgvfuqqdkzvbc','whbccefjpcnjwhaq','kqrfuankaibohqsg','fyxisfwihvylgnfd','rwqdrddghyqudcif','syhzowthaaiiouaf','zjmrtgrnohxmtidu','deecwkfvjffxrzge','dztmvolqxkhdscxe','cdghcrgavygojhqn','pepqmdbjhnbugqeu','pnumdjpnddbxhieg','jzfhxeyahiagizfw','hdkwugrhcniueyor','gmgudeqlbmqynflu','toidiotdmfkxbzvm','pyymuoevoezlfkjb','etrbwuafvteqynlr','usvytbytsecnmqtd','dfmlizboawrhmvim','vrbtuxvzzefedlvs','vslcwudvasvxbnje','xdxyvoxaubtwjoif','mduhzhascirittdf','cqoqdhdxgvvvxamk','dshnfwhqjbhuznqr','zimthfxbdmkulkjg','luylgfmmwbptyzpj','iujpcgogshhotqrc','caqcyzqcumfljvsp','sprtitjlbfpygxya','fnconnrtnigkpykt','irmqaqzjexdtnaph','bbqrtoblmltvwome','ozjkzjfgnkhafbye','hwljjxpxziqbojlw','zahvyqyoqnqjlieb','dptshrgpbgusyqsc','uzlbnrwetkbkjnlm','yccaifzmvbvwxlcc','wilnbebdshcrrnuu','evxnoebteifbffuq','khbajekbyldddzfo','kjivdcafcyvnkojr','wtskbixasmakxxnv','uzmivodqzqupqkwx','rxexcbwhiywwwwnu','rowcapqaxjzcxwqi','fkeytjyipaxwcbqn','pyfbntonlrunkgvq','qiijveatlnplaifi','ltnhlialynlafknw','urrhfpxmpjwotvdn','xklumhfyehnqssys','civrvydypynjdoap','fvbmxnfogscbbnyd','oznavyflpzzucuvg','iyshrpypfbirahqo','qmzbfgelvpxvqecy','xkkxaufomsjbofmk','irlouftdmpitwvlq','csjoptbdorqxhnjg','bkryeshfsaqpdztm','guxbdqzfafsjoadl','tgrltexgrzatzwxf','cwsgsijqdanubxad','xafnexgturwrzyrg','apcrsqdbsbaxocxr','pspgxnzcevmvvejk','szephmeegvegugdt','ndjsoloeacasxjap','bdnfksliscnirjfu','ehglacmzpcgglpux','jwweijomqfcupvzw','yesblmmkqhbazmdu','sjsmalypmuslzgac','fkiqatyttlnuhdho','tlhnyuzdocvfdihq','ngehtjmycevnybga','obxodzcdgtrycgry','stkyrvdfbwovawmk','bdkhqcfrqaxhxloo','gpvumnuoiozipnrk','jbhanddinpqhxeol','hwkzkmbmsrvunzit','rfuomegkxbyamjpw','yzbljuksletipzwm','eafedkagwitzqigl','prenqvsbotqckgwy','spedpbwzphdrfxfz','cmsuqwemhwixkxet','xgdyeqbqfldvaccq','eooxgsrfsbdaolja','kyhqylxooewrhkho','mswieugqpoefmspt','uszoqundysdyeqlc','hkmjdggxefdyykbq','dtuhjnlaliodtlvh','oalbueqbhpxoxvvx','oowxtxsoqdwhzbya','lclajfsrpmtwvzkm','fxmjufpqtpyazeqo','ozlmreegxhfwwwmf','mqzrajxtxbaemrho','nfglecsyqduhakjr','nkxqtmasjjkpkqbp','jjfonbqimybvzeus','vjqkhkhjlmvpwkud','wxxhnvfhetsamzjr','pladhajujzttgmsw','dbycgxeymodsdlhm','qxszeuaahuoxjvwu','adultomodzrljxve','dmhgrbhvvpxyzwdn','slohrlwxerpahtyp','mngbocwyqrsrrxdb','facyrtflgowfvfui','hyvazpjucgghmmxh','twtrvjtncmewcxit','uejkrpvilgccfpfr','psqvolfagjfvqkum','nvzolslmiyavugpp','lpjfutvtwbddtqiu','fkjnfcdorlugmcha','eaplrvdckbcqqvhq','xrcydhkockycburw','iswmarpwcazimqxn','kicnnkjdppitjwrl','vwywaekzxtmeqrsu','dxlgesstmqaxtjta','pmeljgpkykcbujbb','vhpknqzhgnkyeosz','jprqitpjbxkqqzmz','fiprxgsqdfymyzdl','dzvfwvhfjqqsifga','aeakhfalplltmgui','frqrchzvenhozzsu','hsvikeyewfhsdbmy','puedjjhvxayiwgvg','zmsonnclfovjoewb','bnirelcaetdyaumi','szvudroxhcitatvf','sccfweuyadvrjpys','yiouqrnjzsdwyhwa','xyjhkqbnfmjjdefz','fjwgemkfvettucvg','aapqpwapzyjnusnr','dytxpkvgmapdamtc','hgocpfoxlheqpumw','twzuiewwxwadkegg','qdbosnhyqmyollqy','fclbrlkowkzzitod','sgxnrrpwhtkjdjth','xckvsnkvnvupmirv','nioicfeudrjzgoas','lcemtyohztpurwtf','oyjxhhbswvzekiqn','idkblbyjrohxybob','rthvloudwmktwlwh','oyzhmirzrnoytaty','ysdfhuyenpktwtks','wxfisawdtbpsmwli','vgmypwlezbmzeduk','rpepcfpelvhzzxzj','zxbovsmixfvmamnj','cpkabmaahbnlrhiz',
'jvomcbqeoqrmynjj','iqdeisnegnkrkdws','ilhemlrtxdsdnirr','fjimtscrwbfuwmpo','lmfiylebtzwtztmx','ddouhysvomrkcpgu','xtjwvzdhgnwwauwi','cntzuwcumbsebwyy','hieqvdlvnxkygeda','hushfszxskjdrjxi','xvdfzqblccfoxvyq','nldnrtieteunyxnb','vszpidfocenlhzqb','ofcuvtwhortxesoq','bwniqemqwxlejcfq','wkqiwdjnytjnomps','rbadoommlmrictte','nsmxhpothlulxivt','bvzbfcvenskqxejr','sdqeczmzpqqtqabq','bjveyzniaaliatkw','zxsqlntyjajjxytk','jkoxlerbtidsuepg','ewtlibdkeqwgxnqt','lmrshemwxrdwzrgc','nekcdyxmftlymfir','edaqvmulzkskzsfy','znmvqaupykjmyebx','ximtebuxwhqpzubd','rrlstppkknqyxlho','uyibwcitxixjfwcr','chrvoierkimesqmm','dltxmwhheldvxwqe','xfuthxjuuizanfjy','vtiwavmxwonpkpug','phchnujfnxewglht','owvmetdjcynohxtw','cbtujdrumixxatry','iirzildsfxipfipe','sqxcscqyofohotcy','sbubnekndkvovuqg','jzhsqqxqdrtibtcd','mscwasyvxkhlvwbn','bpafxtagbuxivbwz','uhvueesygaxrqffw','trrxlibhtmzuwkkl','yktkmkokmfslgkml','gfzzzdptaktytnqg','pgqmaiwzhplnbyhg','qjiptlkwfshunsfb','lewvlpescsyunxck','tywsfatykshogjas','qtrnwjjgxdektjgi','arypcritpwijczkn','jwxvngigbhfpiubf','upsjdctitlbqlnhf','lvpjlrpnmdjiscrq','jvzchdrsnkgpgsti','wuoesbwunpseyqzu','xuqspvoshgxmrnrb','icdawnmfnpnmyzof','hwcwtibgpvctznuo','bzdjrniddyamfloq','hffkxtzuazageruv','deixfxjvzbitalnc','zihsohukiqrgsnvw','nwoondfnlgowavkg','qnuulsywgnoillgn','koozejhfjyzuhviy','oetcoipohymhpump','cizwpfczfoodwuly','jghlinczhtaxifau','svjejifbidnvvdvy','rxmbsnaqhzcnbfcl','vveubmiecvdtrket','sbihpvrcnzjtgfep','iqbuljuxkwrlebvw','ptrhvxrpezqvmmvv','duwzugnhktpiybjw','lijafjnujfeflkva','coylvegferuuyfop','fowsjrgammrqkkof','pgmcruaioccmbrbz','osejwflxagwqtjoi','otqflckqgxzvtper','slwyntdcrncktoka','hzcdzsppcfkrblqg','jksdmmvtzkqaompg','galwwwgugetdohkg','zbghtjvuikmfjuef','dmqwcamjtlcofqib','zbczldlfdzemxeys','mdlqoklybhppdkwe','tuyajhkexrrrvnlb','ylfolaubymxmkowo','nnsyrfnoyrxswzxn','zkhunhhhigbsslfk','spbokzdfkbmflanz','zmzxvrwdhiegfely','imywhfczvmgahxwl','fnvabvxeiqvsarqq','yschramprctnputs','ubyjrgdzsvxzvouj','qnvdhpptympctfer','smipxcntyhjpowug','ouhjibgcmotegljy','zpflubaijjqqsptz','fgysnxrnfnxprdmf','pbpznrexzxomzfvj','thhzjresjpmnwtdv','sbmokolkhvbfqmua','sxxpdohxlezmqhhx','pevvsyqgoirixtqh','wdxrornmhqsbfznb','zjqziqbctxkshqcn','nbqcwpzfwfaahylk','bxbvkonpcxprxqjf','xplbpqcnwzwqxheb','prsakggmnjibrpoy','xoguxbpnrvyqarjl','ilrgryrmgwjvpzjy','efwrmokaoigjtrij','yhcncebopycjzuli','gwcmzbzaissohjgn','lggmemwbbjuijtcf','fkqedbfrluvkrwwl','jcbppekecevkwpuk','onvolrckkxeyzfjt','zzousprgrmllxboy','cajthmamvxuesujl','rmiozfsikufkntpg','lvekypkwjbpddkcv','dwaqzfnzcnabersa','pcdsskjopcqwhyis','uabepbrrnxfbpyvx','yxlgdomczciiunrk','ccerskfzctqxvrkz','edvmkntljlncwhax','xtcbwecdwygrvowo','axqgqjqkqwrgcqot','tyjrynolpzqwnjgj','thrtmlegdjsuofga','mpgoeqkzzqqugait','emuslxgoefdjyivl','klehpcehdznpssfb','xfgvugyrdxolixkc','acenyrbdwxywmwst','yqgperajsfsamgan','dbjxlnumrmhipquw','hsnhirmswcenewxm','qehqkbhmgucjjpwo','gprjdglsbtsfzqcw','wvqkyrkoratfmvfi','myhzlerupqbduqsl','couyazesiuhwwhht','scxzehubxhkfejrr','gqlitwfriqkmzqdd','pxtbmqelssoagxko','dzhklewjqzmrfzsw','yxgeypduywntnbji','kwzbgzhkzbgedlfh','vukmuyfstgmscuab','vcmaybfvdgwnasgt','qmybkqqdhjigzmum','cbnuicuncvczyalu','qdgpsdpdlgjasjqr','kdzxqqheurupejjo','mcatrxfchbqnxelm','badunwkeggdkcgco','ntaeanvcylpoqmxi','ghnyfytpzgvuokjn','ozepydixmjijdmts','qefcfwzdhwmcyfvp','ycyktmpaqgaxqsxt','edpizkxnsxeeebfl','uwciveajsxxwoqyr','rbvjkljpxtglqjsh','nbplrskduutrptfk','vewrbadvkseuloec','upaotnjxquomoflx','qfwxkinrousqywdd','mqzxvvskslbxvyjt','oxicszyiqifoyugx','bkitxwzjpabvhraj','ydrbyjecggynjpir','hezyteaublxxpamq','hxkuektnoovsehnd','c
wtbbavnhlpiknza','qrwvkhbyasgfxwol','qryjbohkprfazczc','wjksnogpxracrbud','znmsxbhliqxhvesr','gkippedrjzmnnwkp','pklylwsnsyyxwcwg','osdpwbxoegwaiemr','kpslrrrljgtjiqka','vuqkloqucpyzfxgk','bvtdsisgvkuzghyl','qlcayluuyvlhdfyy','kbimqwnzanlygaya','nvoeanlcfhczijed','kqvcijcuobtdwvou','pmhdpcmxnprixitl','yueilssewzabzmij','zqxhafrvjyeyznyg','mhdounmxkvnnsekx','hnacyglnzicxjakg','iaxfdqibnrcjdlyl','iypoelspioegrwix','uiqouxzmlnjxnbqt','kslgjfmofraorvjo','bgvotsdqcdlpkynk','huwcgxhvrrbvmmth','vpqyfnkqqjacpffw','hpjgdfovgmrzvrcl','vbntbhbvdeszihzj','nrbyyuviwyildzuw','wckeoadqzsdnsbox','xgsobwuseofxsxox','anvhsxdshndembsd','iygmhbegrwqbqerg','ylrsnwtmdsrgsvlh','zvvejnrarsavahvc','yncxhmmdtxxeafby','kekgiglblctktnes','uoqgymsrlrwdruzc','saaoymtmnykusicw','bqvcworpqimwglcp','zbpgtheydoyzipjv','pkykzslwsjbhcvcj','jhwxxneyuuidrzvl','pafeyajcrlehmant','klszcvtmcdeyfsmj','ledsltggvrbvlefn','hubpbvxknepammep','gthxhaapfpgtilal','jtfhbozlometwztj','jrhshycyenurbpwb','fyaxbawrsievljqv','lgfcgbenlqxqcxsd','dhedabbwbdbpfmxp','mxzgwhaqobyvckcm','qboxojoykxvwexav','jcpzfjnmvguwjnum','ohpsxnspfwxkkuqe','nyekrqjlizztwjqp','thuynotacpxjzroj','wymbolrlwosnbxqx','iyaqihnqvewxdtjm','hdvdbtvfpdrejenu','gtjscincktlwwkkf','wtebigbaythklkbd']
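# day5 holds the "naughty or nice" strings. A sketch of the part-one rules
# (at least three vowels, a doubled letter, none of the forbidden pairs);
# `is_nice` is an illustrative name, not part of the original file.
def is_nice(s):
    vowels = sum(s.count(v) for v in 'aeiou')
    doubled = any(a == b for a, b in zip(s, s[1:]))
    clean = not any(bad in s for bad in ('ab', 'cd', 'pq', 'xy'))
    return vowels >= 3 and doubled and clean
# part one would then be sum(is_nice(s) for s in day5)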
day6 = ['toggle 461,550 through 564,900', 'off 370,39 through 425,839', 'off 464,858 through 833,915', 'off 812,389 through 865,874', 'on 599,989 through 806,993', 'on 376,415 through 768,548', 'on 606,361 through 892,600', 'off 448,208 through 645,684', 'toggle 50,472 through 452,788', 'toggle 205,417 through 703,826', 'toggle 533,331 through 906,873', 'toggle 857,493 through 989,970', 'off 631,950 through 894,975', 'off 387,19 through 720,700', 'off 511,843 through 581,945', 'toggle 514,557 through 662,883', 'off 269,809 through 876,847', 'off 149,517 through 716,777', 'off 994,939 through 998,988', 'toggle 467,662 through 555,957', 'on 952,417 through 954,845', 'on 565,226 through 944,880', 'on 214,319 through 805,722', 'toggle 532,276 through 636,847', 'toggle 619,80 through 689,507', 'on 390,706 through 884,722', 'toggle 17,634 through 537,766', 'toggle 706,440 through 834,441', 'toggle 318,207 through 499,530', 'toggle 698,185 through 830,343', 'toggle 566,679 through 744,716', 'toggle 347,482 through 959,482', 'toggle 39,799 through 981,872', 'on 583,543 through 846,710', 'off 367,664 through 595,872', 'on 805,439 through 964,995', 'toggle 209,584 through 513,802', 'off 106,497 through 266,770', 'on 975,2 through 984,623', 'off 316,684 through 369,876', 'off 30,309 through 259,554', 'off 399,680 through 861,942', 'toggle 227,740 through 850,829', 'on 386,603 through 552,879', 'off 703,795 through 791,963', 'off 573,803 through 996,878', 'off 993,939 through 997,951', 'on 809,221 through 869,723', 'off 38,720 through 682,751', 'off 318,732 through 720,976', 'toggle 88,459 through 392,654', 'off 865,654 through 911,956', 'toggle 264,284 through 857,956', 'off 281,776 through 610,797', 'toggle 492,660 through 647,910', 'off 879,703 through 925,981', 'off 772,414 through 974,518', 'on 694,41 through 755,96', 'on 452,406 through 885,881', 'off 107,905 through 497,910', 'off 647,222 through 910,532', 'on 679,40 through 845,358', 'off 144,205 through 556,362', 'on 871,804 through 962,878', 'on 545,676 through 545,929', 'off 316,716 through 413,941', 'toggle 488,826 through 755,971', 'toggle 957,832 through 976,992', 'toggle 857,770 through 905,964', 'toggle 319,198 through 787,673', 'on 832,813 through 863,844', 'on 818,296 through 818,681', 'on 71,699 through 91,960', 'off 838,578 through 967,928', 'toggle 440,856 through 507,942', 'toggle 121,970 through 151,974', 'toggle 391,192 through 659,751', 'on 78,210 through 681,419', 'on 324,591 through 593,939', 'toggle 159,366 through 249,760', 'off 617,167 through 954,601', 'toggle 484,607 through 733,657', 'on 587,96 through 888,819', 'off 680,984 through 941,991', 'on 800,512 through 968,691', 'off 123,588 through 853,603', 'on 1,862 through 507,912', 'on 699,839 through 973,878', 'off 848,89 through 887,893', 'toggle 344,353 through 462,403', 'on 780,731 through 841,760', 'toggle 693,973 through 847,984', 'toggle 989,936 through 996,958', 'toggle 168,475 through 206,963', 'on 742,683 through 769,845', 'toggle 768,116 through 987,396', 'on 190,364 through 617,526', 'off 470,266 through 530,839', 'toggle 122,497 through 969,645', 'off 492,432 through 827,790', 'on 505,636 through 957,820', 'on 295,476 through 698,958', 'toggle 63,298 through 202,396', 'on 157,315 through 412,939', 'off 69,789 through 134,837', 'off 678,335 through 896,541', 'toggle 140,516 through 842,668', 'off 697,585 through 712,668', 'toggle 507,832 through 578,949', 'on 678,279 through 886,621', 'toggle 449,744 through 826,910', 'off 835,354 through 921,741', 'toggle 
924,878 through 985,952', 'on 666,503 through 922,905', 'on 947,453 through 961,587', 'toggle 525,190 through 795,654', 'off 62,320 through 896,362', 'on 21,458 through 972,536', 'on 446,429 through 821,970', 'toggle 376,423 through 805,455', 'toggle 494,896 through 715,937', 'on 583,270 through 667,482', 'off 183,468 through 280,548', 'toggle 623,289 through 750,524', 'on 836,706 through 967,768', 'on 419,569 through 912,908', 'on 428,260 through 660,433', 'off 683,627 through 916,816', 'on 447,973 through 866,980', 'on 688,607 through 938,990', 'on 245,187 through 597,405', 'off 558,843 through 841,942', 'off 325,666 through 713,834', 'toggle 672,606 through 814,935', 'off 161,812 through 490,954', 'on 950,362 through 985,898', 'on 143,22 through 205,821', 'on 89,762 through 607,790', 'toggle 234,245 through 827,303', 'on 65,599 through 764,997', 'on 232,466 through 965,695', 'on 739,122 through 975,590', 'off 206,112 through 940,558', 'toggle 690,365 through 988,552', 'on 907,438 through 977,691', 'off 838,809 through 944,869', 'on 222,12 through 541,832', 'toggle 337,66 through 669,812', 'on 732,821 through 897,912', 'toggle 182,862 through 638,996', 'on 955,808 through 983,847', 'toggle 346,227 through 841,696', 'on 983,270 through 989,756', 'off 874,849 through 876,905', 'off 7,760 through 678,795', 'toggle 973,977 through 995,983', 'off 911,961 through 914,976', 'on 913,557 through 952,722', 'off 607,933 through 939,999', 'on 226,604 through 517,622', 'off 3,564 through 344,842', 'toggle 340,578 through 428,610', 'on 248,916 through 687,925', 'toggle 650,185 through 955,965', 'toggle 831,359 through 933,536', 'off 544,614 through 896,953', 'toggle 648,939 through 975,997', 'on 464,269 through 710,521', 'off 643,149 through 791,320', 'off 875,549 through 972,643', 'off 953,969 through 971,972', 'off 236,474 through 772,591', 'toggle 313,212 through 489,723', 'toggle 896,829 through 897,837', 'toggle 544,449 through 995,905', 'off 278,645 through 977,876', 'off 887,947 through 946,977', 'on 342,861 through 725,935', 'on 636,316 through 692,513', 'toggle 857,470 through 950,528', 'off 736,196 through 826,889', 'on 17,878 through 850,987', 'on 142,968 through 169,987', 'on 46,470 through 912,853', 'on 182,252 through 279,941', 'toggle 261,143 through 969,657', 'off 69,600 through 518,710', 'on 372,379 through 779,386', 'toggle 867,391 through 911,601', 'off 174,287 through 900,536', 'toggle 951,842 through 993,963', 'off 626,733 through 985,827', 'toggle 622,70 through 666,291', 'off 980,671 through 985,835', 'off 477,63 through 910,72', 'off 779,39 through 940,142', 'on 986,570 through 997,638', 'toggle 842,805 through 943,985', 'off 890,886 through 976,927', 'off 893,172 through 897,619', 'off 198,780 through 835,826', 'toggle 202,209 through 219,291', 'off 193,52 through 833,283', 'toggle 414,427 through 987,972', 'on 375,231 through 668,236', 'off 646,598 through 869,663', 'toggle 271,462 through 414,650', 'off 679,121 through 845,467', 'toggle 76,847 through 504,904', 'off 15,617 through 509,810', 'toggle 248,105 through 312,451', 'off 126,546 through 922,879', 'on 531,831 through 903,872', 'toggle 602,431 through 892,792', 'off 795,223 through 892,623', 'toggle 167,721 through 533,929', 'toggle 813,251 through 998,484', 'toggle 64,640 through 752,942', 'on 155,955 through 892,985', 'on 251,329 through 996,497', 'off 341,716 through 462,994', 'toggle 760,127 through 829,189', 'on 86,413 through 408,518', 'toggle 340,102 through 918,558', 'off 441,642 through 751,889', 'on 785,292 
through 845,325', 'off 123,389 through 725,828', 'on 905,73 through 983,270', 'off 807,86 through 879,276', 'toggle 500,866 through 864,916', 'on 809,366 through 828,534', 'toggle 219,356 through 720,617', 'off 320,964 through 769,990', 'off 903,167 through 936,631', 'toggle 300,137 through 333,693', 'toggle 5,675 through 755,848', 'off 852,235 through 946,783', 'toggle 355,556 through 941,664', 'on 810,830 through 867,891', 'off 509,869 through 667,903', 'toggle 769,400 through 873,892', 'on 553,614 through 810,729', 'on 179,873 through 589,962', 'off 466,866 through 768,926', 'toggle 143,943 through 465,984', 'toggle 182,380 through 569,552', 'off 735,808 through 917,910', 'on 731,802 through 910,847', 'off 522,74 through 731,485', 'on 444,127 through 566,996', 'off 232,962 through 893,979', 'off 231,492 through 790,976', 'on 874,567 through 943,684', 'toggle 911,840 through 990,932', 'toggle 547,895 through 667,935', 'off 93,294 through 648,636', 'off 190,902 through 532,970', 'off 451,530 through 704,613', 'toggle 936,774 through 937,775', 'off 116,843 through 533,934', 'on 950,906 through 986,993', 'on 910,51 through 945,989', 'on 986,498 through 994,945', 'off 125,324 through 433,704', 'off 60,313 through 75,728', 'on 899,494 through 940,947', 'toggle 832,316 through 971,817', 'toggle 994,983 through 998,984', 'toggle 23,353 through 917,845', 'toggle 174,799 through 658,859', 'off 490,878 through 534,887', 'off 623,963 through 917,975', 'toggle 721,333 through 816,975', 'toggle 589,687 through 890,921', 'on 936,388 through 948,560', 'off 485,17 through 655,610', 'on 435,158 through 689,495', 'on 192,934 through 734,936', 'off 299,723 through 622,847', 'toggle 484,160 through 812,942', 'off 245,754 through 818,851', 'on 298,419 through 824,634', 'toggle 868,687 through 969,760', 'toggle 131,250 through 685,426', 'off 201,954 through 997,983', 'on 353,910 through 832,961', 'off 518,781 through 645,875', 'off 866,97 through 924,784', 'toggle 836,599 through 857,767', 'on 80,957 through 776,968', 'toggle 277,130 through 513,244', 'off 62,266 through 854,434', 'on 792,764 through 872,842', 'off 160,949 through 273,989', 'off 664,203 through 694,754', 'toggle 491,615 through 998,836', 'off 210,146 through 221,482', 'off 209,780 through 572,894', 'on 766,112 through 792,868', 'on 222,12 through 856,241']
d6test = ['toggle 461,550 through 564,900', 'off 370,39 through 425,839', 'off 464,858 through 833,915']
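# day6 and d6test are light-grid instructions over a 1000x1000 grid, stored
# here with the leading 'turn ' apparently stripped (only 'on', 'off' and
# 'toggle' remain), so the parser below matches that stripped form. A minimal
# part-one sketch; `run_lights` is an illustrative name.
import re

def run_lights(instructions):
    lit = set()
    spec = re.compile(r'(on|off|toggle) (\d+),(\d+) through (\d+),(\d+)')
    for line in instructions:
        op, *coords = spec.match(line).groups()
        x1, y1, x2, y2 = map(int, coords)
        for x in range(x1, x2 + 1):
            for y in range(y1, y2 + 1):
                if op == 'on':
                    lit.add((x, y))
                elif op == 'off':
                    lit.discard((x, y))
                else:
                    lit ^= {(x, y)}
    return len(lit)
# run_lights(d6test) exercises all three instruction kinds on a small sample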
day7 = ['lf AND lq -> ls', 'iu RSHIFT 1 -> jn', 'bo OR bu -> bv', 'gj RSHIFT 1 -> hc', 'et RSHIFT 2 -> eu', 'bv AND bx -> by', 'is OR it -> iu', 'b OR n -> o', 'gf OR ge -> gg', 'NOT kt -> ku', 'ea AND eb -> ed', 'kl OR kr -> ks', 'hi AND hk -> hl', 'au AND av -> ax', 'lf RSHIFT 2 -> lg', 'dd RSHIFT 3 -> df', 'eu AND fa -> fc', 'df AND dg -> di', 'ip LSHIFT 15 -> it', 'NOT el -> em', 'et OR fe -> ff', 'fj LSHIFT 15 -> fn', 't OR s -> u', 'ly OR lz -> ma', 'ko AND kq -> kr', 'NOT fx -> fy', 'et RSHIFT 1 -> fm', 'eu OR fa -> fb', 'dd RSHIFT 2 -> de', 'NOT go -> gp', 'kb AND kd -> ke', 'hg OR hh -> hi', 'jm LSHIFT 1 -> kg', 'NOT cn -> co', 'jp RSHIFT 2 -> jq', 'jp RSHIFT 5 -> js', '1 AND io -> ip', 'eo LSHIFT 15 -> es', '1 AND jj -> jk', 'g AND i -> j', 'ci RSHIFT 3 -> ck', 'gn AND gp -> gq', 'fs AND fu -> fv', 'lj AND ll -> lm', 'jk LSHIFT 15 -> jo', 'iu RSHIFT 3 -> iw', 'NOT ii -> ij', '1 AND cc -> cd', 'bn RSHIFT 3 -> bp', 'NOT gw -> gx', 'NOT ft -> fu', 'jn OR jo -> jp', 'iv OR jb -> jc', 'hv OR hu -> hw', '19138 -> b', 'gj RSHIFT 5 -> gm', 'hq AND hs -> ht', 'dy RSHIFT 1 -> er', 'ao OR an -> ap', 'ld OR le -> lf', 'bk LSHIFT 1 -> ce', 'bz AND cb -> cc', 'bi LSHIFT 15 -> bm', 'il AND in -> io', 'af AND ah -> ai', 'as RSHIFT 1 -> bl', 'lf RSHIFT 3 -> lh', 'er OR es -> et', 'NOT ax -> ay', 'ci RSHIFT 1 -> db', 'et AND fe -> fg', 'lg OR lm -> ln', 'k AND m -> n', 'hz RSHIFT 2 -> ia', 'kh LSHIFT 1 -> lb', 'NOT ey -> ez', 'NOT di -> dj', 'dz OR ef -> eg', 'lx -> a', 'NOT iz -> ja', 'gz LSHIFT 15 -> hd', 'ce OR cd -> cf', 'fq AND fr -> ft', 'at AND az -> bb', 'ha OR gz -> hb', 'fp AND fv -> fx', 'NOT gb -> gc', 'ia AND ig -> ii', 'gl OR gm -> gn', '0 -> c', 'NOT ca -> cb', 'bn RSHIFT 1 -> cg', 'c LSHIFT 1 -> t', 'iw OR ix -> iy', 'kg OR kf -> kh', 'dy OR ej -> ek', 'km AND kn -> kp', 'NOT fc -> fd', 'hz RSHIFT 3 -> ib', 'NOT dq -> dr', 'NOT fg -> fh', 'dy RSHIFT 2 -> dz', 'kk RSHIFT 2 -> kl', '1 AND fi -> fj', 'NOT hr -> hs', 'jp RSHIFT 1 -> ki', 'bl OR bm -> bn', '1 AND gy -> gz', 'gr AND gt -> gu', 'db OR dc -> dd', 'de OR dk -> dl', 'as RSHIFT 5 -> av', 'lf RSHIFT 5 -> li', 'hm AND ho -> hp', 'cg OR ch -> ci', 'gj AND gu -> gw', 'ge LSHIFT 15 -> gi', 'e OR f -> g', 'fp OR fv -> fw', 'fb AND fd -> fe', 'cd LSHIFT 15 -> ch', 'b RSHIFT 1 -> v', 'at OR az -> ba', 'bn RSHIFT 2 -> bo', 'lh AND li -> lk', 'dl AND dn -> do', 'eg AND ei -> ej', 'ex AND ez -> fa', 'NOT kp -> kq', 'NOT lk -> ll', 'x AND ai -> ak', 'jp OR ka -> kb', 'NOT jd -> je', 'iy AND ja -> jb', 'jp RSHIFT 3 -> jr', 'fo OR fz -> ga', 'df OR dg -> dh', 'gj RSHIFT 2 -> gk', 'gj OR gu -> gv', 'NOT jh -> ji', 'ap LSHIFT 1 -> bj', 'NOT ls -> lt', 'ir LSHIFT 1 -> jl', 'bn AND by -> ca', 'lv LSHIFT 15 -> lz', 'ba AND bc -> bd', 'cy LSHIFT 15 -> dc', 'ln AND lp -> lq', 'x RSHIFT 1 -> aq', 'gk OR gq -> gr', 'NOT kx -> ky', 'jg AND ji -> jj', 'bn OR by -> bz', 'fl LSHIFT 1 -> gf', 'bp OR bq -> br', 'he OR hp -> hq', 'et RSHIFT 5 -> ew', 'iu RSHIFT 2 -> iv', 'gl AND gm -> go', 'x OR ai -> aj', 'hc OR hd -> he', 'lg AND lm -> lo', 'lh OR li -> lj', 'da LSHIFT 1 -> du', 'fo RSHIFT 2 -> fp', 'gk AND gq -> gs', 'bj OR bi -> bk', 'lf OR lq -> lr', 'cj AND cp -> cr', 'hu LSHIFT 15 -> hy', '1 AND bh -> bi', 'fo RSHIFT 3 -> fq', 'NOT lo -> lp', 'hw LSHIFT 1 -> iq', 'dd RSHIFT 1 -> dw', 'dt LSHIFT 15 -> dx', 'dy AND ej -> el', 'an LSHIFT 15 -> ar', 'aq OR ar -> as', '1 AND r -> s', 'fw AND fy -> fz', 'NOT im -> in', 'et RSHIFT 3 -> ev', '1 AND ds -> dt', 'ec AND ee -> ef', 'NOT ak -> al', 'jl OR jk -> jm', '1 AND en -> eo', 'lb OR la -> lc', 'iu AND 
jf -> jh', 'iu RSHIFT 5 -> ix', 'bo AND bu -> bw', 'cz OR cy -> da', 'iv AND jb -> jd', 'iw AND ix -> iz', 'lf RSHIFT 1 -> ly', 'iu OR jf -> jg', 'NOT dm -> dn', 'lw OR lv -> lx', 'gg LSHIFT 1 -> ha', 'lr AND lt -> lu', 'fm OR fn -> fo', 'he RSHIFT 3 -> hg', 'aj AND al -> am', '1 AND kz -> la', 'dy RSHIFT 5 -> eb', 'jc AND je -> jf', 'cm AND co -> cp', 'gv AND gx -> gy', 'ev OR ew -> ex', 'jp AND ka -> kc', 'fk OR fj -> fl', 'dy RSHIFT 3 -> ea', 'NOT bs -> bt', 'NOT ag -> ah', 'dz AND ef -> eh', 'cf LSHIFT 1 -> cz', 'NOT cv -> cw', '1 AND cx -> cy', 'de AND dk -> dm', 'ck AND cl -> cn', 'x RSHIFT 5 -> aa', 'dv LSHIFT 1 -> ep', 'he RSHIFT 2 -> hf', 'NOT bw -> bx', 'ck OR cl -> cm', 'bp AND bq -> bs', 'as OR bd -> be', 'he AND hp -> hr', 'ev AND ew -> ey', '1 AND lu -> lv', 'kk RSHIFT 3 -> km', 'b AND n -> p', 'NOT kc -> kd', 'lc LSHIFT 1 -> lw', 'km OR kn -> ko', 'id AND if -> ig', 'ih AND ij -> ik', 'jr AND js -> ju', 'ci RSHIFT 5 -> cl', 'hz RSHIFT 1 -> is', '1 AND ke -> kf', 'NOT gs -> gt', 'aw AND ay -> az', 'x RSHIFT 2 -> y', 'ab AND ad -> ae', 'ff AND fh -> fi', 'ci AND ct -> cv', 'eq LSHIFT 1 -> fk', 'gj RSHIFT 3 -> gl', 'u LSHIFT 1 -> ao', 'NOT bb -> bc', 'NOT hj -> hk', 'kw AND ky -> kz', 'as AND bd -> bf', 'dw OR dx -> dy', 'br AND bt -> bu', 'kk AND kv -> kx', 'ep OR eo -> eq', 'he RSHIFT 1 -> hx', 'ki OR kj -> kk', 'NOT ju -> jv', 'ek AND em -> en', 'kk RSHIFT 5 -> kn', 'NOT eh -> ei', 'hx OR hy -> hz', 'ea OR eb -> ec', 's LSHIFT 15 -> w', 'fo RSHIFT 1 -> gh', 'kk OR kv -> kw', 'bn RSHIFT 5 -> bq', 'NOT ed -> ee', '1 AND ht -> hu', 'cu AND cw -> cx', 'b RSHIFT 5 -> f', 'kl AND kr -> kt', 'iq OR ip -> ir', 'ci RSHIFT 2 -> cj', 'cj OR cp -> cq', 'o AND q -> r', 'dd RSHIFT 5 -> dg', 'b RSHIFT 2 -> d', 'ks AND ku -> kv', 'b RSHIFT 3 -> e', 'd OR j -> k', 'NOT p -> q', 'NOT cr -> cs', 'du OR dt -> dv', 'kf LSHIFT 15 -> kj', 'NOT ac -> ad', 'fo RSHIFT 5 -> fr', 'hz OR ik -> il', 'jx AND jz -> ka', 'gh OR gi -> gj', 'kk RSHIFT 1 -> ld', 'hz RSHIFT 5 -> ic', 'as RSHIFT 2 -> at', 'NOT jy -> jz', '1 AND am -> an', 'ci OR ct -> cu', 'hg AND hh -> hj', 'jq OR jw -> jx', 'v OR w -> x', 'la LSHIFT 15 -> le', 'dh AND dj -> dk', 'dp AND dr -> ds', 'jq AND jw -> jy', 'au OR av -> aw', 'NOT bf -> bg', 'z OR aa -> ab', 'ga AND gc -> gd', 'hz AND ik -> im', 'jt AND jv -> jw', 'z AND aa -> ac', 'jr OR js -> jt', 'hb LSHIFT 1 -> hv', 'hf OR hl -> hm', 'ib OR ic -> id', 'fq OR fr -> fs', 'cq AND cs -> ct', 'ia OR ig -> ih', 'dd OR do -> dp', 'd AND j -> l', 'ib AND ic -> ie', 'as RSHIFT 3 -> au', 'be AND bg -> bh', 'dd AND do -> dq', 'NOT l -> m', '1 AND gd -> ge', 'y AND ae -> ag', 'fo AND fz -> gb', 'NOT ie -> if', 'e AND f -> h', 'x RSHIFT 3 -> z', 'y OR ae -> af', 'hf AND hl -> hn', 'NOT h -> i', 'NOT hn -> ho', 'he RSHIFT 5 -> hh']
day7b = ['lf AND lq -> ls', 'iu RSHIFT 1 -> jn', 'bo OR bu -> bv', 'gj RSHIFT 1 -> hc', 'et RSHIFT 2 -> eu', 'bv AND bx -> by', 'is OR it -> iu', 'b OR n -> o', 'gf OR ge -> gg', 'NOT kt -> ku', 'ea AND eb -> ed', 'kl OR kr -> ks', 'hi AND hk -> hl', 'au AND av -> ax', 'lf RSHIFT 2 -> lg', 'dd RSHIFT 3 -> df', 'eu AND fa -> fc', 'df AND dg -> di', 'ip LSHIFT 15 -> it', 'NOT el -> em', 'et OR fe -> ff', 'fj LSHIFT 15 -> fn', 't OR s -> u', 'ly OR lz -> ma', 'ko AND kq -> kr', 'NOT fx -> fy', 'et RSHIFT 1 -> fm', 'eu OR fa -> fb', 'dd RSHIFT 2 -> de', 'NOT go -> gp', 'kb AND kd -> ke', 'hg OR hh -> hi', 'jm LSHIFT 1 -> kg', 'NOT cn -> co', 'jp RSHIFT 2 -> jq', 'jp RSHIFT 5 -> js', '1 AND io -> ip', 'eo LSHIFT 15 -> es', '1 AND jj -> jk', 'g AND i -> j', 'ci RSHIFT 3 -> ck', 'gn AND gp -> gq', 'fs AND fu -> fv', 'lj AND ll -> lm', 'jk LSHIFT 15 -> jo', 'iu RSHIFT 3 -> iw', 'NOT ii -> ij', '1 AND cc -> cd', 'bn RSHIFT 3 -> bp', 'NOT gw -> gx', 'NOT ft -> fu', 'jn OR jo -> jp', 'iv OR jb -> jc', 'hv OR hu -> hw', '16076 -> b', 'gj RSHIFT 5 -> gm', 'hq AND hs -> ht', 'dy RSHIFT 1 -> er', 'ao OR an -> ap', 'ld OR le -> lf', 'bk LSHIFT 1 -> ce', 'bz AND cb -> cc', 'bi LSHIFT 15 -> bm', 'il AND in -> io', 'af AND ah -> ai', 'as RSHIFT 1 -> bl', 'lf RSHIFT 3 -> lh', 'er OR es -> et', 'NOT ax -> ay', 'ci RSHIFT 1 -> db', 'et AND fe -> fg', 'lg OR lm -> ln', 'k AND m -> n', 'hz RSHIFT 2 -> ia', 'kh LSHIFT 1 -> lb', 'NOT ey -> ez', 'NOT di -> dj', 'dz OR ef -> eg', 'lx -> a', 'NOT iz -> ja', 'gz LSHIFT 15 -> hd', 'ce OR cd -> cf', 'fq AND fr -> ft', 'at AND az -> bb', 'ha OR gz -> hb', 'fp AND fv -> fx', 'NOT gb -> gc', 'ia AND ig -> ii', 'gl OR gm -> gn', '0 -> c', 'NOT ca -> cb', 'bn RSHIFT 1 -> cg', 'c LSHIFT 1 -> t', 'iw OR ix -> iy', 'kg OR kf -> kh', 'dy OR ej -> ek', 'km AND kn -> kp', 'NOT fc -> fd', 'hz RSHIFT 3 -> ib', 'NOT dq -> dr', 'NOT fg -> fh', 'dy RSHIFT 2 -> dz', 'kk RSHIFT 2 -> kl', '1 AND fi -> fj', 'NOT hr -> hs', 'jp RSHIFT 1 -> ki', 'bl OR bm -> bn', '1 AND gy -> gz', 'gr AND gt -> gu', 'db OR dc -> dd', 'de OR dk -> dl', 'as RSHIFT 5 -> av', 'lf RSHIFT 5 -> li', 'hm AND ho -> hp', 'cg OR ch -> ci', 'gj AND gu -> gw', 'ge LSHIFT 15 -> gi', 'e OR f -> g', 'fp OR fv -> fw', 'fb AND fd -> fe', 'cd LSHIFT 15 -> ch', 'b RSHIFT 1 -> v', 'at OR az -> ba', 'bn RSHIFT 2 -> bo', 'lh AND li -> lk', 'dl AND dn -> do', 'eg AND ei -> ej', 'ex AND ez -> fa', 'NOT kp -> kq', 'NOT lk -> ll', 'x AND ai -> ak', 'jp OR ka -> kb', 'NOT jd -> je', 'iy AND ja -> jb', 'jp RSHIFT 3 -> jr', 'fo OR fz -> ga', 'df OR dg -> dh', 'gj RSHIFT 2 -> gk', 'gj OR gu -> gv', 'NOT jh -> ji', 'ap LSHIFT 1 -> bj', 'NOT ls -> lt', 'ir LSHIFT 1 -> jl', 'bn AND by -> ca', 'lv LSHIFT 15 -> lz', 'ba AND bc -> bd', 'cy LSHIFT 15 -> dc', 'ln AND lp -> lq', 'x RSHIFT 1 -> aq', 'gk OR gq -> gr', 'NOT kx -> ky', 'jg AND ji -> jj', 'bn OR by -> bz', 'fl LSHIFT 1 -> gf', 'bp OR bq -> br', 'he OR hp -> hq', 'et RSHIFT 5 -> ew', 'iu RSHIFT 2 -> iv', 'gl AND gm -> go', 'x OR ai -> aj', 'hc OR hd -> he', 'lg AND lm -> lo', 'lh OR li -> lj', 'da LSHIFT 1 -> du', 'fo RSHIFT 2 -> fp', 'gk AND gq -> gs', 'bj OR bi -> bk', 'lf OR lq -> lr', 'cj AND cp -> cr', 'hu LSHIFT 15 -> hy', '1 AND bh -> bi', 'fo RSHIFT 3 -> fq', 'NOT lo -> lp', 'hw LSHIFT 1 -> iq', 'dd RSHIFT 1 -> dw', 'dt LSHIFT 15 -> dx', 'dy AND ej -> el', 'an LSHIFT 15 -> ar', 'aq OR ar -> as', '1 AND r -> s', 'fw AND fy -> fz', 'NOT im -> in', 'et RSHIFT 3 -> ev', '1 AND ds -> dt', 'ec AND ee -> ef', 'NOT ak -> al', 'jl OR jk -> jm', '1 AND en -> eo', 'lb OR la -> lc', 'iu AND 
jf -> jh', 'iu RSHIFT 5 -> ix', 'bo AND bu -> bw', 'cz OR cy -> da', 'iv AND jb -> jd', 'iw AND ix -> iz', 'lf RSHIFT 1 -> ly', 'iu OR jf -> jg', 'NOT dm -> dn', 'lw OR lv -> lx', 'gg LSHIFT 1 -> ha', 'lr AND lt -> lu', 'fm OR fn -> fo', 'he RSHIFT 3 -> hg', 'aj AND al -> am', '1 AND kz -> la', 'dy RSHIFT 5 -> eb', 'jc AND je -> jf', 'cm AND co -> cp', 'gv AND gx -> gy', 'ev OR ew -> ex', 'jp AND ka -> kc', 'fk OR fj -> fl', 'dy RSHIFT 3 -> ea', 'NOT bs -> bt', 'NOT ag -> ah', 'dz AND ef -> eh', 'cf LSHIFT 1 -> cz', 'NOT cv -> cw', '1 AND cx -> cy', 'de AND dk -> dm', 'ck AND cl -> cn', 'x RSHIFT 5 -> aa', 'dv LSHIFT 1 -> ep', 'he RSHIFT 2 -> hf', 'NOT bw -> bx', 'ck OR cl -> cm', 'bp AND bq -> bs', 'as OR bd -> be', 'he AND hp -> hr', 'ev AND ew -> ey', '1 AND lu -> lv', 'kk RSHIFT 3 -> km', 'b AND n -> p', 'NOT kc -> kd', 'lc LSHIFT 1 -> lw', 'km OR kn -> ko', 'id AND if -> ig', 'ih AND ij -> ik', 'jr AND js -> ju', 'ci RSHIFT 5 -> cl', 'hz RSHIFT 1 -> is', '1 AND ke -> kf', 'NOT gs -> gt', 'aw AND ay -> az', 'x RSHIFT 2 -> y', 'ab AND ad -> ae', 'ff AND fh -> fi', 'ci AND ct -> cv', 'eq LSHIFT 1 -> fk', 'gj RSHIFT 3 -> gl', 'u LSHIFT 1 -> ao', 'NOT bb -> bc', 'NOT hj -> hk', 'kw AND ky -> kz', 'as AND bd -> bf', 'dw OR dx -> dy', 'br AND bt -> bu', 'kk AND kv -> kx', 'ep OR eo -> eq', 'he RSHIFT 1 -> hx', 'ki OR kj -> kk', 'NOT ju -> jv', 'ek AND em -> en', 'kk RSHIFT 5 -> kn', 'NOT eh -> ei', 'hx OR hy -> hz', 'ea OR eb -> ec', 's LSHIFT 15 -> w', 'fo RSHIFT 1 -> gh', 'kk OR kv -> kw', 'bn RSHIFT 5 -> bq', 'NOT ed -> ee', '1 AND ht -> hu', 'cu AND cw -> cx', 'b RSHIFT 5 -> f', 'kl AND kr -> kt', 'iq OR ip -> ir', 'ci RSHIFT 2 -> cj', 'cj OR cp -> cq', 'o AND q -> r', 'dd RSHIFT 5 -> dg', 'b RSHIFT 2 -> d', 'ks AND ku -> kv', 'b RSHIFT 3 -> e', 'd OR j -> k', 'NOT p -> q', 'NOT cr -> cs', 'du OR dt -> dv', 'kf LSHIFT 15 -> kj', 'NOT ac -> ad', 'fo RSHIFT 5 -> fr', 'hz OR ik -> il', 'jx AND jz -> ka', 'gh OR gi -> gj', 'kk RSHIFT 1 -> ld', 'hz RSHIFT 5 -> ic', 'as RSHIFT 2 -> at', 'NOT jy -> jz', '1 AND am -> an', 'ci OR ct -> cu', 'hg AND hh -> hj', 'jq OR jw -> jx', 'v OR w -> x', 'la LSHIFT 15 -> le', 'dh AND dj -> dk', 'dp AND dr -> ds', 'jq AND jw -> jy', 'au OR av -> aw', 'NOT bf -> bg', 'z OR aa -> ab', 'ga AND gc -> gd', 'hz AND ik -> im', 'jt AND jv -> jw', 'z AND aa -> ac', 'jr OR js -> jt', 'hb LSHIFT 1 -> hv', 'hf OR hl -> hm', 'ib OR ic -> id', 'fq OR fr -> fs', 'cq AND cs -> ct', 'ia OR ig -> ih', 'dd OR do -> dp', 'd AND j -> l', 'ib AND ic -> ie', 'as RSHIFT 3 -> au', 'be AND bg -> bh', 'dd AND do -> dq', 'NOT l -> m', '1 AND gd -> ge', 'y AND ae -> ag', 'fo AND fz -> gb', 'NOT ie -> if', 'e AND f -> h', 'x RSHIFT 3 -> z', 'y OR ae -> af', 'hf AND hl -> hn', 'NOT h -> i', 'NOT hn -> ho', 'he RSHIFT 5 -> hh']
d7test = ['123 -> x', '456 -> y', 'x AND y -> d', 'x OR y -> e', 'x LSHIFT 2 -> f', 'y RSHIFT 2 -> g', 'NOT x -> h', 'NOT y -> i']
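# day7 describes a wire circuit ('x AND y -> d', 'NOT x -> h', ...); day7b is
# apparently the part-two rerun, identical except that wire b is reseeded
# ('16076 -> b' instead of '19138 -> b'). A minimal memoised evaluator over
# 16-bit values; `build_circuit` and `evaluate` are illustrative names.
def build_circuit(lines):
    return {line.split(' -> ')[1]: line.split(' -> ')[0].split()
            for line in lines}

def evaluate(wire, circuit, cache=None):
    if cache is None:
        cache = {}
    if wire.isdigit():
        return int(wire)
    if wire not in cache:
        expr = circuit[wire]
        if len(expr) == 1:        # plain wire or constant, e.g. 'lx -> a'
            value = evaluate(expr[0], circuit, cache)
        elif expr[0] == 'NOT':
            value = ~evaluate(expr[1], circuit, cache) & 0xFFFF
        else:
            a = evaluate(expr[0], circuit, cache)
            b = evaluate(expr[2], circuit, cache)
            if expr[1] == 'AND':
                value = a & b
            elif expr[1] == 'OR':
                value = a | b
            elif expr[1] == 'LSHIFT':
                value = (a << b) & 0xFFFF
            else:                 # RSHIFT
                value = a >> b
        cache[wire] = value
    return cache[wire]
# e.g. evaluate('d', build_circuit(d7test)) == 72 (123 & 456)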
day9 = ['Tristram to AlphaCentauri = 34', 'Tristram to Snowdin = 100', 'Tristram to Tambi = 63', 'Tristram to Faerun = 108', 'Tristram to Norrath = 111', 'Tristram to Straylight = 89', 'Tristram to Arbre = 132', 'AlphaCentauri to Snowdin = 4', 'AlphaCentauri to Tambi = 79', 'AlphaCentauri to Faerun = 44', 'AlphaCentauri to Norrath = 147', 'AlphaCentauri to Straylight = 133', 'AlphaCentauri to Arbre = 74', 'Snowdin to Tambi = 105', 'Snowdin to Faerun = 95', 'Snowdin to Norrath = 48', 'Snowdin to Straylight = 88', 'Snowdin to Arbre = 7', 'Tambi to Faerun = 68', 'Tambi to Norrath = 134', 'Tambi to Straylight = 107', 'Tambi to Arbre = 40', 'Faerun to Norrath = 11', 'Faerun to Straylight = 66', 'Faerun to Arbre = 144', 'Norrath to Straylight = 115', 'Norrath to Arbre = 135', 'Straylight to Arbre = 127']
d9test = ['London to Dublin = 464', 'London to Belfast = 518','Dublin to Belfast = 141']
graph = ['a to b = 1', 'a to c = 10', 'b to c = 1', 'b to d = 1']
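# day9 and d9test hold pairwise distances ('London to Dublin = 464'); graph is
# a smaller four-edge fixture (note it is not a complete distance table, so
# the brute force below suits day9/d9test only). With eight locations in day9,
# trying every visit order is cheap. `shortest_route` is an illustrative name.
from itertools import permutations

def shortest_route(lines):
    dist = {}
    for line in lines:
        pair, d = line.split(' = ')
        a, b = pair.split(' to ')
        dist[a, b] = dist[b, a] = int(d)
    places = {a for a, _ in dist}
    return min(sum(dist[p, q] for p, q in zip(route, route[1:]))
               for route in permutations(places))
# e.g. shortest_route(d9test) == 605 (London -> Dublin -> Belfast)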
day10 = '1321131112'
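# day10 seeds the look-and-say game; part one asks for the length after 40
# expansions. One step with itertools.groupby; `look_and_say` is an
# illustrative name, not part of the original file.
from itertools import groupby

def look_and_say(digits):
    return ''.join(str(len(list(run))) + ch for ch, run in groupby(digits))
# e.g. look_and_say('111221') == '312211'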
day11 = 'hepxcrrq'
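# day11 is the expired-password seed; the task is to increment it (base-26
# with carry, omitted here) until it satisfies the rules sketched below: an
# increasing straight of three letters, no 'i'/'o'/'l', and two different
# letter pairs. `is_valid_password` is an illustrative name.
import re

def is_valid_password(p):
    straight = any(ord(a) + 1 == ord(b) and ord(b) + 1 == ord(c)
                   for a, b, c in zip(p, p[1:], p[2:]))
    clean = not any(ch in 'iol' for ch in p)
    pairs = len(set(re.findall(r'(.)\1', p))) >= 2
    return straight and clean and pairs
# e.g. is_valid_password('abcdffaa') is True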
day12 = '{"e":{"a":{"e":-39,"c":119,"a":{"c":65,"a":"orange","b":"green","d":"orange"},"g":"violet","b":{"e":6,"c":{"c":"violet","a":8,"b":["red",{"a":37},"green",84,"yellow","green",[24,45,"blue","blue",56,"yellow"],"orange"]},"a":"violet","b":{"a":85},"d":[109,66,["yellow","violet",21,-30],"violet","blue",-43,{"e":"violet","c":"red","a":"blue","b":-22,"d":[71,"red",30,"violet","red",26,120],"f":["red"]},"red"]},"d":{"e":"violet","a":"blue","d":"blue","c":"blue","h":"orange","b":{"e":"red","a":{"c":115,"a":137,"b":"green"},"d":-25,"c":"blue","h":{"a":161,"b":["yellow",56,129,-31,"yellow","red","green",105,"orange",130]},"b":142,"g":194,"f":122,"i":-16},"g":173,"f":["orange","green",54,-9],"i":-23},"f":{"c":110,"a":"yellow","b":[{"a":155},156,"violet",94,"yellow"],"d":{"e":91,"a":-18,"d":"red","c":["green","orange","orange",190,"yellow",158,"blue","orange","blue",4],"h":143,"b":"orange","g":145,"f":["orange",37,"yellow",-22,{"c":30,"a":78,"b":196,"d":84},-7,["yellow"]]}}},"b":[[{"c":0,"a":108,"b":"green","d":{"e":59,"c":119,"a":104,"b":167,"d":"blue"}},[189,"blue",121,[["green","orange","orange",-17,192,"red"],{"a":"violet"},"green",{"c":42,"a":"blue","b":"red"},{"e":78,"a":"blue","d":"violet","c":-9,"h":"violet","b":115,"g":"orange","f":"violet","i":"red"}],57,"violet"],"green"],[["blue",[1,53,"orange"],{"e":["green",-12,"blue","orange","green",136,173],"a":"violet","d":-43,"c":{"e":144,"c":133,"a":"yellow","g":154,"b":"orange","d":127,"f":194},"h":{"e":52,"a":-43,"d":"orange","c":-45,"h":"orange","b":150,"g":-12,"f":91,"i":6},"b":{"e":"yellow","c":"blue","a":"violet","g":112,"b":174,"d":"violet","f":90},"g":177,"f":"blue"},"red","violet",96],"green","violet",[{"a":["red","red",46,"red"],"b":["green",193,54,"orange"]},["orange",8,1,["violet",84,"violet"],155,"yellow",151,"blue",196],"yellow","red",{"a":["green","orange","green",61,"blue",39,-2,46,"red",54]},"violet",128]]]},"a":{"e":[{"e":["yellow"],"c":93,"a":"violet","b":{"a":{"a":"yellow","b":"blue"},"b":-4},"d":"violet"},171,103,[13,"orange",[[51,"violet","yellow",{"c":85,"a":103,"b":"green"},97,{"e":"orange","a":-11,"d":62,"j":"yellow","c":"orange","h":47,"b":83,"g":119,"f":180,"i":136},{"a":177},80],{"e":{"c":"yellow","a":"orange","b":3,"d":197},"a":130,"d":"red","j":"red","c":-44,"h":-15,"b":64,"g":125,"f":82,"i":"green"}],{"e":["orange",42,["orange",197,"violet","yellow","blue",11,"yellow"],189,"yellow","blue","green","violet"],"a":{"a":149,"b":69},"d":128,"c":[["green",150,45,86,"red",-8,41,"orange","blue"]],"h":[[-4,127,"yellow","violet",124,112,196,"violet",161,40],37,0,"orange",-30,-43,[-24,"orange",142,"violet","red"],"blue",66],"b":{"c":"violet","a":["yellow",91,182,20,"orange",159,46,55,141],"b":{"c":173,"a":-40,"b":"green","d":"violet"},"d":[67,80,27,-15]},"g":"red","f":{"c":"orange","a":99,"b":"green"}},{"e":{"c":40,"a":"orange","b":"green"},"c":"green","a":-44,"b":{"e":"blue","c":56,"a":"yellow","g":62,"b":188,"d":141,"f":-21},"d":"yellow","f":{"e":"yellow","c":67,"a":33,"g":"yellow","b":"yellow","d":51,"f":195}}],["orange","violet",["red"],["green",35,[170,-30,"orange",140,"green","violet","violet",["orange","yellow","yellow",35,"blue","violet",-36,182,"yellow",141],146]],{"a":61,"b":"orange"},[31,"blue","green",65,"red","green"],"violet"],{"c":"blue","a":["blue"],"b":17},95],"c":[[48,"blue",[49],"orange",{"e":"violet","c":"green","a":"red","b":"red","d":-29,"f":["orange",20,190,97,["orange","blue",-30,"blue","green"],"blue","yellow",-47,[123,"yellow","green"],-41]},"green",{"a":170,"b":32},[{"c":"green","a":"violet","b"
:"red"},["yellow",36,"yellow","violet",149,{"e":"red","c":141,"a":-24,"b":"yellow","d":-13,"f":"red"},69,"orange",19,[87,"red",167,"red",77,110]],"orange","violet"],{"a":-16}],[["red"],"violet"],{"a":[["red",83,{"e":"red","c":"blue","a":"blue","b":"orange","d":"orange"},49,"green","violet"],{"e":"yellow","a":"violet","d":17,"j":"green","c":{"e":"orange","c":"green","a":"green","b":99,"d":"yellow","f":"orange"},"h":9,"b":159,"g":"yellow","f":167,"i":147},["blue","green","violet"],"yellow",["violet",197,"blue",[170,81,"yellow","orange",196],-24,99,193],-8,["red",81,-11,"green","red","blue","yellow","blue",["blue","violet",131,184,160,-1]],"green"]},"orange"],"a":[{"e":"blue","a":"violet","d":[{"a":"blue","b":46},"violet",72,35,61,161],"j":["green",78,144,[168,["red",77,38,"green","red"],"green","yellow",-8,"yellow"],190,40,"yellow",17,171],"c":{"e":"red","c":"violet","a":"violet","b":-14,"d":"red","f":167},"h":[[[83,"green",69,"red"],"green",155,133],106,1,"orange"],"b":-41,"g":{"a":27},"f":"red","i":{"e":"green","c":"green","a":182,"g":"orange","b":"orange","d":["orange"],"f":"orange"}},"yellow","green",-26],"b":{"e":[-32,-11,{"e":"red","c":"yellow","a":{"e":"green","a":"red","d":105,"c":-20,"h":85,"b":{"e":47,"a":89,"d":"green","c":"violet","h":"orange","b":"green","g":140,"f":"green"},"g":111,"f":"orange","i":"yellow"},"b":143,"d":{"e":{"e":"orange","c":129,"a":"blue","b":142,"d":"violet"},"c":"blue","a":122,"b":["violet","orange",84,"orange"],"d":"red","f":"blue"}},58,[147]],"a":{"e":"orange","c":[{"e":141,"c":114,"a":137,"g":"orange","b":61,"d":105,"f":33}],"a":["red","red",{"e":"green","c":4,"a":"violet","b":"red","d":"blue"},"yellow",["green",15,"green",-40,149,"orange",{"e":-7,"c":74,"a":"red","b":"green","d":32}],"blue","yellow",146,[191,"blue",["orange","blue",187,"blue","orange",127,"yellow",38],120,161,55,-30,"green",-10,"violet"],"orange"],"b":{"a":-32,"b":"blue"},"d":{"e":["green",27],"c":[94,"violet","red",18,166,"yellow"],"a":{"e":"green","a":95,"d":"green","j":176,"c":84,"h":"violet","b":"yellow","g":-25,"f":51,"i":119},"b":144,"d":{"c":"violet","a":"yellow","b":"red"}}},"d":{"e":{"e":140,"a":{"e":"green","c":"green","a":0,"b":68,"d":152,"f":"red"},"d":["blue",6],"c":-29,"h":"green","b":["violet",106,"violet","orange",-38,175],"g":54,"f":[177,31,"violet","yellow"],"i":185},"a":"green","d":[111,49,"yellow","blue","orange",{"e":"yellow","a":"orange","d":"violet","j":"blue","c":"red","h":88,"b":-1,"g":"red","f":"red","i":{"e":121,"a":112,"d":195,"j":103,"c":94,"h":"red","b":12,"g":8,"f":22,"i":"orange"}},64,["violet",["blue",76,"blue","red","red"],["violet","blue","orange","yellow",144],185,{"a":66,"b":"orange"},199,"green","green"],14],"c":{"e":"green","a":"orange","d":{"c":[-23,189,-11,"green","violet",178,-4,"blue",68,"violet"],"a":["orange","red"],"b":"yellow","d":"yellow"},"j":{"c":-33,"a":"blue","b":"violet"},"c":107,"h":{"e":-16,"a":174,"d":{"e":"orange","c":"green","a":-28,"b":-30,"d":73},"j":"yellow","c":"orange","h":"orange","b":["blue","violet",-4,76,"red","red"],"g":59,"f":12,"i":199},"b":{"a":"green","b":54},"g":"violet","f":174,"i":["orange",41,85,"yellow","green",25,"red",-20,156,143]},"h":"yellow","b":"red","g":90,"f":-37,"i":{"a":146,"b":"violet"}},"j":"blue","c":[1,["yellow",-11,"green",66,"red",90,"green","yellow",3,{"e":-34,"a":194,"d":"green","c":[-11,"yellow"],"h":59,"b":"yellow","g":"blue","f":162,"i":"violet"}],81,158,170],"h":"blue","b":"green","g":["violet",["blue","blue"]],"f":[-44,"yellow",[35,[122,"red","yellow",{"a":"red","b":"violet"},"blue","orang
e","violet"],"violet",{"e":-48,"a":6,"d":-6,"c":4,"h":"yellow","b":"blue","g":"red","f":"red"}],[{"e":-2,"a":156,"d":"red","j":69,"c":0,"h":"violet","b":"orange","g":61,"f":102,"i":["orange","blue","violet",-1,137]},{"a":183,"b":89}],{"a":167,"b":"orange"},193,95,[97,[66,154,-32,"orange",121,{"e":198,"a":"blue","d":102,"c":"red","h":"green","b":135,"g":"orange","f":-49,"i":151},97,7,"red"],195,"blue",49,"green",10]],"i":["violet",1,{"e":"orange","c":196,"a":"blue","g":"red","b":190,"d":87,"f":128},"blue",128,147]},"d":[[{"a":"green","b":43},169,-8,"orange",{"a":["violet",["red","violet","orange","yellow",-49,"violet",-25],"violet",60,33,"violet",["yellow","orange",31,144,"red","yellow",73,"orange","red","green"],["green",-31,"red"],"orange","blue"]},"violet","blue"],40,154,{"a":{"c":"violet","a":145,"b":166,"d":["blue",["blue",125,185,"yellow","red",152,89,-18,"blue",141],26,2,35,"orange",190]}},170,"violet",-31,[[151,"orange","green",["blue",114,-5,"yellow"],"blue","green","orange","yellow",62,{"c":22,"a":"green","b":"blue"}],17,[176],"violet",129,67,{"c":{"c":"red","a":140,"b":25},"a":["blue","green",163],"b":30,"d":[60,"green","red",126,[48,"green"],-26,["yellow","green",150,"red","violet","red","violet",59,-12],167,"yellow"]},"yellow","yellow"],"blue"]},"d":{"c":"green","a":52,"b":[136,{"c":{"a":[197,["orange","red",138,67,"orange",172,2,"orange"],"red"]},"a":"violet","b":{"a":{"e":172,"c":171,"a":"yellow","b":191,"d":{"e":171,"a":"red","d":"violet","c":"green","h":"yellow","b":"yellow","g":73,"f":"yellow"}}},"d":[86,-11,-5,["orange","green",64,["blue",15,"orange","yellow","violet",181,"green","blue"],"yellow","yellow",{"e":27,"c":156,"a":"blue","g":"violet","b":38,"d":51,"f":23},"orange","violet"],10]},[55,{"e":191,"c":"blue","a":"orange","b":"yellow","d":109},"blue",{"c":164,"a":[198,-9,183,{"e":14,"a":176,"d":117,"c":"violet","h":"violet","b":-44,"g":"violet","f":"red","i":"orange"},"yellow",0],"b":58,"d":33},"red",[125,23,"blue",149,[["blue",-44,22,133,"orange","yellow","yellow","violet","violet",131],"violet",-1,"red",66,"blue"],"green","red","red",[-31,"blue",["orange","orange","yellow",44,"green","yellow","green",160,"violet"],"yellow",["orange","violet","green","violet",194,"blue",-27],"green",{"e":"violet","a":"red","d":67,"c":68,"h":"blue","b":"orange","g":"orange","f":"violet","i":"violet"},"yellow"]],"violet",[59,158,{"e":"blue","c":"blue","a":"orange","g":73,"b":97,"d":"red","f":"orange"}],["red",[31,[95,72,"orange","yellow"],"blue",192,63],[197,"green",{"e":112,"a":"violet","d":"blue","c":-2,"h":"blue","b":"green","g":124,"f":"blue"},177,"green","blue",162,107]]],43,[{"e":"red","c":{"e":["green",-33,11,154,"yellow",-4],"c":"yellow","a":[7,"orange","orange","yellow",118,169,"red","blue"],"b":"green","d":114},"a":{"e":"green","a":"green","d":187,"j":{"e":106,"c":-41,"a":"violet","b":173,"d":12},"c":"red","h":"violet","b":-4,"g":{"c":"violet","a":-26,"b":87,"d":-33},"f":"green","i":-46},"b":"green","d":13,"f":"yellow"}],["blue","violet","orange","red",{"e":"green","a":["violet",140,[76,"green",94,33,"green",31,"orange",53],"violet","yellow","violet","blue",["blue",196,"orange","yellow","orange",-42,"orange",171,-47,"violet"],"green",110],"d":31,"c":"orange","h":{"e":-45,"a":"green","d":["violet"],"c":"red","h":105,"b":130,"g":-36,"f":"orange","i":{"e":"red","c":137,"a":54,"g":"blue","b":"violet","d":102,"f":178}},"b":-41,"g":{"e":6,"a":-25,"d":66,"j":"violet","c":39,"h":30,"b":63,"g":46,"f":56,"i":"blue"},"f":6,"i":"violet"},[84,175,"orange",{"a":"green"},"green",66,{"e":"red
","a":{"a":178,"b":"yellow"},"d":"yellow","c":["red","red",38,"orange","blue",136,"red",137,"red"],"h":163,"b":["orange",84,"red",178],"g":65,"f":"blue"}],[98,"yellow",[46,"orange","yellow","yellow"],"blue",151,189,["yellow",{"e":"orange","a":27,"d":45,"c":48,"h":"green","b":90,"g":180,"f":-43},"yellow"],"blue"],{"a":"green"},"blue",{"c":37,"a":{"e":59,"c":["blue"],"a":"yellow","g":38,"b":"blue","d":"violet","f":"orange"},"b":"orange","d":"violet"}],{"c":75,"a":{"e":{"a":130},"a":5,"d":1,"c":72,"h":{"e":"red","a":11,"d":157,"c":97,"h":-24,"b":"red","g":111,"f":21},"b":{"e":145,"a":"red","d":"violet","j":[-11,191,-43,"blue","orange",105,158],"c":"red","h":143,"b":{"e":"orange","c":"yellow","a":-31,"g":177,"b":"violet","d":"blue","f":"green"},"g":"green","f":48,"i":{"e":18,"a":142,"d":"yellow","c":116,"h":"violet","b":135,"g":37,"f":36}},"g":-26,"f":[186,192,"orange",117,-9,"violet",-19,55,"green",167],"i":"green"},"b":"yellow","d":{"a":138}}]},"c":"blue","h":[[19],"orange",[{"e":["yellow",68,28,29,{"e":"red","a":"violet","d":"green","c":143,"h":"red","b":"orange","g":44,"f":123,"i":"orange"},38,28,65,{"e":-26,"c":["red",132,"red",124,"yellow",115],"a":170,"g":"yellow","b":"green","d":-18,"f":{"c":1,"a":"red","b":-1}}],"a":"orange","d":129,"c":33,"h":"violet","b":"orange","g":"green","f":-24},["violet",-22],[64,-20,{"e":46,"a":76,"d":97,"c":-21,"h":98,"b":"violet","g":{"e":-7,"c":"violet","a":190,"b":"violet","d":138,"f":"violet"},"f":[178,"blue","blue"],"i":"red"},{"e":"orange","c":"red","a":{"e":"orange","a":["green"],"d":[19,"red"],"c":-32,"h":-15,"b":"yellow","g":116,"f":"blue","i":"orange"},"b":96,"d":"green","f":176},[[104,99,"yellow",-13,"red",{"c":"blue","a":"red","b":46,"d":60},-13,9],-22,-26,133,["green",["red",188,"green","green"],166,"yellow",{"e":"yellow","c":197,"a":65,"b":83,"d":-19,"f":"yellow"},[157,"violet","blue","yellow",-30,"violet",-3],21,{"a":-15}],43,["orange","blue",88,"yellow",103,31],"orange",-24]],{"c":"violet","a":{"a":26,"b":"yellow"},"b":["violet","violet",{"e":102,"a":"red","d":178,"j":187,"c":"orange","h":"violet","b":72,"g":-37,"f":"violet","i":"orange"},44,114,"yellow",85]}],{"e":150,"c":"green","a":{"e":{"e":{"e":{"e":"orange","a":48,"d":41,"j":"orange","c":"orange","h":30,"b":"yellow","g":41,"f":-40,"i":8},"c":199,"a":32,"b":"yellow","d":-28},"a":46,"d":[50,"red","violet",63,"red",56,-18,"orange","violet","red"],"c":"blue","h":{"e":"green","c":172,"a":"green","b":-30,"d":22,"f":"yellow"},"b":"orange","g":65,"f":"yellow","i":174},"a":"green","d":[196,{"e":{"e":176,"a":"violet","d":"orange","j":-37,"c":19,"h":31,"b":155,"g":"red","f":106,"i":"green"},"c":"red","a":64,"b":"orange","d":179,"f":8},"yellow",103,"violet",{"e":142,"a":111,"d":"yellow","c":"violet","h":"red","b":148,"g":29,"f":179,"i":"green"},-26],"c":-19,"h":["red",134,"green","green",105],"b":"orange","g":126,"f":76,"i":158},"b":55,"d":"green"},[[[61],[["orange","red"],[151,"yellow",127,"yellow",185,"yellow",{"e":105,"c":"yellow","a":198,"b":"orange","d":"blue","f":89},140,{"e":"violet","a":-15,"d":169,"j":3,"c":"yellow","h":74,"b":-41,"g":29,"f":112,"i":18}]],101,{"e":[45,"green","yellow","blue","violet",["violet","yellow",159,0,"orange","yellow",100,"green","blue",49],[5,-37],"blue","orange"],"c":88,"a":"blue","b":-32,"d":23},77,{"e":["yellow",119,197,["orange",-28,"yellow",179,130,74,-10,115,"violet",79],"orange",63,-15,17,"blue","violet"],"c":{"e":164,"a":83,"d":"yellow","c":119,"h":"yellow","b":148,"g":-22,"f":[-17,17,"violet","green","red"],"i":67},"a":{"e":54,"a":147,"d":"yellow","c":86,"
h":113,"b":"yellow","g":77,"f":101,"i":"blue"},"g":"orange","b":[193,"orange","orange","red",39,44,43,-29],"d":"violet","f":191}],"orange",["red","violet","yellow",["red",54,{"e":{"a":41,"b":"violet"},"c":"red","a":{"e":"red","a":"yellow","d":"green","j":-33,"c":96,"h":137,"b":"yellow","g":30,"f":"green","i":"blue"},"b":181,"d":"violet","f":"green"},49,"yellow"],"orange","yellow",{"e":"orange","c":[46,"orange",["blue","green","blue","yellow","yellow","violet","orange","orange",1],"yellow",155,194,"yellow",149],"a":"green","b":-3,"d":153,"f":[-21,-26,-25,"blue","red",108,169,["green",100,43],51,-9]}],71,[[[{"a":"violet","b":27},148,109,["blue",60,47,"violet","yellow",-47,"violet"],"red",{"e":97,"c":-21,"a":"yellow","b":"green","d":126},"yellow",85,89],{"a":["red","orange","violet","blue","blue","blue",128,"blue"]},{"e":-48,"c":{"c":90,"a":"orange","b":"yellow"},"a":"yellow","b":"red","d":172},"yellow",{"a":"orange"},[{"e":"yellow","a":40,"d":-9,"j":"violet","c":153,"h":79,"b":"violet","g":178,"f":2,"i":"yellow"},"green",-29],"red",-9,[9,{"a":39,"b":"green"},5,"violet",26,{"e":167,"c":"blue","a":"yellow","b":90,"d":33},"green",88,12,"blue"]],"red",{"c":{"e":"yellow","a":-44,"d":62,"j":-19,"c":96,"h":"green","b":-12,"g":"green","f":"red","i":134},"a":22,"b":24,"d":33},"red","violet",{"e":181,"c":64,"a":"violet","g":"green","b":"yellow","d":"violet","f":101},"green",[132,93,[-6,[-3,28,"red"],-21,"red","violet",{"a":180},"red","blue",-15],{"e":171,"a":{"e":"green","c":49,"a":"orange","g":22,"b":"violet","d":"orange","f":"orange"},"d":"red","c":113,"h":"green","b":[30,"blue"],"g":{"a":139,"b":47},"f":"red","i":"red"},"violet",158,"green",5,-1],19],"red"],{"e":{"c":["orange",93,162,"green","violet",["green",178],{"a":"green","b":{"e":-12,"a":-16,"d":144,"c":"red","h":"violet","b":43,"g":"green","f":-42}}],"a":"blue","b":"blue","d":{"e":["orange","red",{"c":-19,"a":"green","b":93},"yellow","green","orange","orange","red","green"],"a":"green","d":{"e":"yellow","a":2,"d":"violet","j":"green","c":"blue","h":19,"b":"violet","g":"blue","f":[157,"green",109,59,"red",74,"red","blue","green"],"i":"orange"},"j":"yellow","c":166,"h":"yellow","b":"yellow","g":["violet",138,["violet",141,"green"]],"f":28,"i":{"e":52,"c":"yellow","a":"green","b":5,"d":{"e":153,"a":"yellow","d":191,"j":"green","c":"green","h":124,"b":"green","g":181,"f":134,"i":"yellow"},"f":193}}},"c":"violet","a":{"a":{"a":"green"},"b":[132]},"b":[{"e":11,"a":"green","d":{"e":{"e":119,"a":"violet","d":"red","c":"red","h":"violet","b":-6,"g":"blue","f":"orange","i":"orange"},"a":183,"d":[-36,"yellow"],"c":"red","h":71,"b":"yellow","g":2,"f":"orange"},"c":"green","h":"yellow","b":29,"g":"green","f":"blue","i":{"e":"yellow","a":-24,"d":[55,125,193,70,60,190,199],"c":"green","h":[49,"yellow","yellow",74,"red",163],"b":198,"g":50,"f":"blue","i":70}},"orange"],"d":"blue"},{"c":{"e":-20,"a":8,"d":["orange",157,152,"green",46,"green",7,89,"violet",[-22,-49,81,127]],"c":35,"h":["blue","yellow","orange",94,"orange","yellow"],"b":"red","g":{"a":"green","b":["yellow","orange",198]},"f":"violet","i":17},"a":-48,"b":"blue"}],"b":[[149,[{"e":"blue","c":-30,"a":"violet","g":"violet","b":"yellow","d":178,"f":-4}],"orange",131,"yellow",{"a":"red","b":"violet"},"blue"],[11],{"e":[35,118,{"e":39,"a":"yellow","d":[119,"orange",120,-43],"j":"violet","c":"orange","h":"blue","b":-32,"g":[{"e":"green","c":106,"a":144,"b":147,"d":"green"},"violet","orange"],"f":116,"i":"orange"},112,"yellow"],"a":{"e":19,"a":"orange","d":61,"c":"red","h":"blue","b":164,"g":{"c":"red","
a":-10,"b":{"e":{"e":"green","c":82,"a":103,"g":67,"b":153,"d":"violet","f":22},"c":-22,"a":101,"b":71,"d":{"a":152,"b":"green"},"f":"orange"}},"f":157,"i":{"e":"green","a":"orange","d":-8,"j":[159,73,182,"red","green"],"c":13,"h":"blue","b":"yellow","g":186,"f":"orange","i":81}},"d":13,"c":[186,["yellow",["violet","violet"],"green",-28],54,["blue",[119,"red",119,91,181],117],-15,190,{"c":"red","a":13,"b":[{"c":"green","a":70,"b":8,"d":175},"orange","green","yellow","green"]},"blue",-43],"h":{"e":[{"a":"green"},88,"red","violet",10],"a":{"a":"orange","b":[62,"yellow","green"]},"d":151,"c":"red","h":"orange","b":126,"g":{"a":"orange"},"f":"orange","i":46},"b":[[-2,"violet","violet","red",{"a":192},"green",122],["orange","red",{"c":62,"a":52,"b":-45},{"e":{"a":"violet","b":-45},"a":"red","d":"orange","c":"yellow","h":{"e":67,"c":-27,"a":116,"b":"violet","d":"green","f":-18},"b":"yellow","g":"blue","f":"blue"},27,{"c":37,"a":-39,"b":"blue"}],107,"yellow",["blue","red",143],"blue",{"e":"orange","a":["yellow",["blue",8,149,141,"red",-28,"red"],18],"d":29,"c":"violet","h":-21,"b":[{"e":66,"c":"green","a":"blue","b":-29,"d":"orange","f":"violet"},"blue",-21],"g":"green","f":112},73],"g":["red",{"a":{"e":93,"a":13,"d":"violet","c":175,"h":158,"b":9,"g":194,"f":-10},"b":"blue"}],"f":130}],"g":[["red","orange","orange",172,154,{"e":{"a":"blue","b":{"a":"yellow","b":53}},"c":-11,"a":"orange","g":"yellow","b":{"e":{"e":94,"a":123,"d":184,"j":-4,"c":193,"h":152,"b":"blue","g":"red","f":101,"i":178},"c":"blue","a":178,"b":154,"d":[103,109,190,"yellow",29,"red","orange","yellow",79,"green"],"f":"orange"},"d":{"e":25,"a":{"a":"blue"},"d":"green","c":"orange","h":{"e":144,"c":["red","blue","violet",15,"green",109,72],"a":"yellow","b":"orange","d":"yellow"},"b":151,"g":[141,"orange",134,"blue","blue",4,21,"blue","green"],"f":96},"f":"blue"},"blue"],80,186,[[194,"violet",70,"green"],{"a":"orange","b":"green"},[{"e":"orange","a":179,"d":{"e":164,"c":-14,"a":"blue","g":"yellow","b":"violet","d":76,"f":-33},"c":"green","h":"violet","b":"orange","g":"blue","f":"orange","i":"green"},"blue"]],"orange","yellow",66],"f":{"a":["violet"],"b":87},"i":{"e":{"c":[{"e":"red","a":19,"d":100,"j":"red","c":"red","h":"red","b":"yellow","g":-41,"f":10,"i":"blue"},42,92,"violet","red",[149,"green",91,"blue"],-33,["green",73,129],110,{"e":168,"c":153,"a":-30,"b":"yellow","d":[192,{"c":"blue","a":"blue","b":-16,"d":-18},{"e":"red","c":"yellow","a":"violet","b":31,"d":"green"},"blue"],"f":{"c":9,"a":"yellow","b":-16,"d":128}}],"a":[[["green","violet",136,59,"orange",173,116,113,"yellow"],"green",{"e":"blue","c":"green","a":"blue","g":"red","b":98,"d":-25,"f":21},"orange",184],4,"yellow","red",-24,{"c":"orange","a":15,"b":{"e":109,"c":179,"a":61,"b":"orange","d":190,"f":9}}],"b":[[92,148,{"e":"blue","a":0,"d":108,"c":197,"h":"red","b":"orange","g":-22,"f":105,"i":"blue"},"blue"],-7,149]},"a":{"e":{"a":{"e":"green","a":["violet",172],"d":-10,"c":42,"h":"blue","b":80,"g":{"e":"red","c":"orange","a":"yellow","g":87,"b":{"e":31,"c":129,"a":"orange","b":43,"d":"blue"},"d":"blue","f":81},"f":"violet"},"b":"orange"},"a":182,"d":{"c":{"a":154},"a":"blue","b":{"e":31,"a":["yellow","blue","red",{"a":"orange"},"red",62,39,"red",["green","orange","yellow",47,"orange",55,"blue"]],"d":[72,187,"red","orange",59,"yellow","violet","green"],"j":"orange","c":"yellow","h":185,"b":{"e":"blue","c":"violet","a":["orange",7,180,150,46,"yellow",176,"orange"],"b":148,"d":"blue"},"g":"blue","f":"yellow","i":102}},"c":"red","h":-9,"b":14,"g":{"a":"green","b":{
"c":-18,"a":81,"b":104}},"f":[[{"e":-33,"c":"green","a":"orange","b":"blue","d":"blue"},"yellow",141,[42,197],[-12,61,{"e":"violet","a":"violet","d":"green","c":-21,"h":-5,"b":"orange","g":39,"f":"green"},"blue"],31,[[101,"blue",-14,"red",88],58,["red","blue","violet",34],-14,"yellow",98,106,91,131],"yellow",[151,"red","green",{"e":"violet","c":"green","a":49,"g":155,"b":96,"d":"blue","f":"orange"},-18,184,{"c":"blue","a":1,"b":162},{"e":115,"a":94,"d":97,"j":-34,"c":"blue","h":115,"b":"red","g":"orange","f":149,"i":105}],38],{"c":{"e":"blue","c":90,"a":"yellow","b":142,"d":"violet"},"a":-38,"b":"violet"}],"i":189},"d":"orange","c":[[103,8,"green",13,23,"violet",55],[{"e":79,"c":74,"a":{"e":"violet","a":{"c":"red","a":"violet","b":168},"d":142,"j":"blue","c":173,"h":"yellow","b":"green","g":"blue","f":"blue","i":"blue"},"g":[{"c":"violet","a":82,"b":90},-24,{"a":"orange"},"yellow",[126,53,153,6],52,137,"violet",181],"b":105,"d":166,"f":{"e":"blue","c":-35,"a":"blue","b":188,"d":-14,"f":63}},{"e":95,"a":"green","d":"yellow","j":"red","c":81,"h":107,"b":-46,"g":162,"f":"green","i":"red"},[{"e":"violet","a":150,"d":126,"c":10,"h":{"e":"green","a":"orange","d":19,"c":"green","h":"green","b":79,"g":"red","f":"yellow","i":"blue"},"b":{"a":25,"b":147},"g":180,"f":126},{"e":-48,"a":192,"d":-45,"c":25,"h":"green","b":{"c":165,"a":"orange","b":"red","d":"blue"},"g":"green","f":-24},95,{"e":"blue","a":"violet","d":"yellow","j":"blue","c":44,"h":["blue","green","red",142,"red"],"b":[-43,"violet","green",53],"g":33,"f":"orange","i":196},"orange","green",43,[113,"violet","orange",129,{"c":6,"a":"violet","b":"green","d":"green"},20]],"green"]],"h":{"a":-11},"b":{"e":"violet","c":174,"a":"violet","g":[{"e":105,"a":{"c":-5,"a":177,"b":63},"d":-41,"c":80,"h":[110,109,113,"blue"],"b":-28,"g":"red","f":129},["green",[108],{"a":4},[182,96,29,[181,14,"yellow","violet"],13,{"e":132,"a":115,"d":"red","c":"violet","h":"violet","b":"violet","g":"green","f":"green"},{"e":"orange","a":"yellow","d":"blue","j":"red","c":137,"h":"violet","b":"green","g":-22,"f":"yellow","i":-12},"green","yellow",57],-44,{"a":88,"b":-3},22,{"c":"red","a":"violet","b":-21}],[[14,"orange"]],-36,128,"yellow",[-47,[-7,36,177],"blue","blue",["yellow","violet",-23,"violet",-40,"orange",{"e":129,"a":21,"d":51,"j":"violet","c":"red","h":15,"b":174,"g":191,"f":101,"i":105}],23,[-47,[133,66,"violet"],177,"violet","yellow","green",159,"yellow"]],{"e":[{"a":"orange"},"green",69,"orange",43,"violet","violet",192,140,"green"],"a":62,"d":"violet","j":{"c":"violet","a":-25,"b":"violet"},"c":120,"h":"red","b":{"a":13},"g":"green","f":124,"i":33},[[19,176,174,"orange","violet","violet",105,128,"red"],{"a":10,"b":"red"},["orange",37,187,"green",176],"blue","red",[153,"yellow","violet",137,"orange","blue",[70,"red",174,"blue","green","yellow",99,"red"],"violet","violet","red"]]],"b":[{"c":133,"a":"orange","b":98,"d":{"c":"red","a":-9,"b":103}},"blue",[155,"yellow"],[["yellow",-8,{"e":"blue","c":59,"a":71,"g":"orange","b":88,"d":"orange","f":17},"blue",-37],{"e":82,"c":"violet","a":99,"b":81,"d":"yellow","f":161},"blue",{"c":-7,"a":154,"b":"violet"},-9,-13,53,{"e":"violet","a":"yellow","d":-26,"c":179,"h":"green","b":"red","g":-24,"f":133,"i":-36}]],"d":{"e":[9,[49],28,"red",{"c":23,"a":157,"b":{"c":"violet","a":"green","b":-11,"d":"green"},"d":139},"orange","green",93,44,[-19]],"a":-6,"d":7,"c":{"e":{"c":"violet","a":-21,"b":12},"c":["green",[-23,-40,157],"red",["orange","blue"],"violet","red",85,"violet","yellow",150],"a":162,"g":"yellow","b":"green","d":["
green","yellow",181,"green",-2,{"e":"red","a":"yellow","d":129,"c":-34,"h":129,"b":152,"g":"yellow","f":80,"i":"red"},"green","red",-9,"red"],"f":6},"h":{"e":136,"a":"violet","d":["violet",-15,129,"green",5,"green",[-28,156,141,"blue",22,"green",34],"green"],"c":149,"h":["violet"],"b":149,"g":"green","f":149},"b":{"e":123,"a":"red","d":{"e":0,"c":"green","a":"violet","b":"yellow","d":"red","f":"green"},"c":{"e":{"e":"yellow","c":"red","a":"red","b":"violet","d":"yellow"},"a":"yellow","d":{"e":-39,"a":-11,"d":63,"c":179,"h":4,"b":44,"g":"orange","f":"violet","i":"yellow"},"c":-43,"h":"green","b":"violet","g":"blue","f":"yellow","i":124},"h":48,"b":129,"g":["orange",147,174,"blue","green",115],"f":172,"i":-36},"g":"red","f":30,"i":"violet"},"f":"red"},"g":{"e":{"e":[[126,"orange",196,"orange","red",{"e":23,"a":"yellow","d":128,"c":12,"h":-49,"b":"green","g":"yellow","f":41,"i":45},"red","violet"]],"c":[["red",103,{"e":69,"a":"violet","d":"yellow","c":"green","h":"red","b":133,"g":25,"f":"violet"}],88,"green","red",-29,"red"],"a":37,"b":"green","d":{"e":"orange","c":{"e":{"e":109,"c":144,"a":"yellow","b":70,"d":83},"c":-3,"a":"green","b":"yellow","d":146},"a":86,"b":-1,"d":{"a":68}}},"c":{"c":[155,18,"blue",-16,"orange",-36,49,"red",["yellow",136,140,-10,11,"violet","red",134,156,"violet"]],"a":64,"b":{"e":"violet","c":193,"a":101,"b":["green"],"d":182,"f":86},"d":[198]},"a":"yellow","g":"violet","b":1,"d":-27,"f":"orange"},"f":{"e":20,"c":{"e":155,"c":{"c":[181,{"e":-32,"a":"orange","d":"orange","j":"yellow","c":66,"h":-39,"b":"violet","g":"violet","f":"red","i":23},4,"blue",70,"violet","blue",141,{"a":"yellow","b":184},"violet"],"a":160,"b":158,"d":197},"a":138,"g":[48],"b":26,"d":11,"f":{"c":"yellow","a":179,"b":"red","d":90}},"a":148,"g":"yellow","b":[[{"a":["yellow"]},"yellow",19,"green",39],{"c":{"a":"blue"},"a":{"e":41,"c":191,"a":173,"b":"green","d":-14,"f":19},"b":{"a":["orange",4,48],"b":193},"d":"green"},["yellow",{"e":"orange","a":"orange","d":"blue","c":-39,"h":28,"b":"yellow","g":"red","f":"orange","i":[116,"red",173,76,24,-1,"green",101,-10]},70,{"c":"violet","a":44,"b":"violet","d":36},128,{"c":"yellow","a":"red","b":["yellow",-10]},-10,6]],"d":"violet","f":{"e":"violet","c":5,"a":"violet","g":173,"b":100,"d":["violet",194,{"e":["blue",181,"violet","yellow","blue",-7,137,43,112],"c":-19,"a":120,"b":"green","d":165},-1,195,"green",104],"f":128}}}}'
day13 = ['Alice would lose 57 happiness units by sitting next to Bob.', 'Alice would lose 62 happiness units by sitting next to Carol.', 'Alice would lose 75 happiness units by sitting next to David.', 'Alice would gain 71 happiness units by sitting next to Eric.', 'Alice would lose 22 happiness units by sitting next to Frank.', 'Alice would lose 23 happiness units by sitting next to George.', 'Alice would lose 76 happiness units by sitting next to Mallory.', 'Bob would lose 14 happiness units by sitting next to Alice.', 'Bob would gain 48 happiness units by sitting next to Carol.', 'Bob would gain 89 happiness units by sitting next to David.', 'Bob would gain 86 happiness units by sitting next to Eric.', 'Bob would lose 2 happiness units by sitting next to Frank.', 'Bob would gain 27 happiness units by sitting next to George.', 'Bob would gain 19 happiness units by sitting next to Mallory.', 'Carol would gain 37 happiness units by sitting next to Alice.', 'Carol would gain 45 happiness units by sitting next to Bob.', 'Carol would gain 24 happiness units by sitting next to David.', 'Carol would gain 5 happiness units by sitting next to Eric.', 'Carol would lose 68 happiness units by sitting next to Frank.', 'Carol would lose 25 happiness units by sitting next to George.', 'Carol would gain 30 happiness units by sitting next to Mallory.', 'David would lose 51 happiness units by sitting next to Alice.', 'David would gain 34 happiness units by sitting next to Bob.', 'David would gain 99 happiness units by sitting next to Carol.', 'David would gain 91 happiness units by sitting next to Eric.', 'David would lose 38 happiness units by sitting next to Frank.', 'David would gain 60 happiness units by sitting next to George.', 'David would lose 63 happiness units by sitting next to Mallory.', 'Eric would gain 23 happiness units by sitting next to Alice.', 'Eric would lose 69 happiness units by sitting next to Bob.', 'Eric would lose 33 happiness units by sitting next to Carol.', 'Eric would lose 47 happiness units by sitting next to David.', 'Eric would gain 75 happiness units by sitting next to Frank.', 'Eric would gain 82 happiness units by sitting next to George.', 'Eric would gain 13 happiness units by sitting next to Mallory.', 'Frank would gain 77 happiness units by sitting next to Alice.', 'Frank would gain 27 happiness units by sitting next to Bob.', 'Frank would lose 87 happiness units by sitting next to Carol.', 'Frank would gain 74 happiness units by sitting next to David.', 'Frank would lose 41 happiness units by sitting next to Eric.', 'Frank would lose 99 happiness units by sitting next to George.', 'Frank would gain 26 happiness units by sitting next to Mallory.', 'George would lose 63 happiness units by sitting next to Alice.', 'George would lose 51 happiness units by sitting next to Bob.', 'George would lose 60 happiness units by sitting next to Carol.', 'George would gain 30 happiness units by sitting next to David.', 'George would lose 100 happiness units by sitting next to Eric.', 'George would lose 63 happiness units by sitting next to Frank.', 'George would gain 57 happiness units by sitting next to Mallory.', 'Mallory would lose 71 happiness units by sitting next to Alice.', 'Mallory would lose 28 happiness units by sitting next to Bob.', 'Mallory would lose 10 happiness units by sitting next to Carol.', 'Mallory would gain 44 happiness units by sitting next to David.', 'Mallory would gain 22 happiness units by sitting next to Eric.', 'Mallory would gain 79 happiness units by '
         'sitting next to Frank.', 'Mallory would lose 16 happiness units by sitting next to George.']
day14 = 'Vixen can fly 8 km/s for 8 seconds, but then must rest for 53 seconds. Blitzen can fly 13 km/s for 4 seconds, but then must rest for 49 seconds. Rudolph can fly 20 km/s for 7 seconds, but then must rest for 132 seconds. Cupid can fly 12 km/s for 4 seconds, but then must rest for 43 seconds. Donner can fly 9 km/s for 5 seconds, but then must rest for 38 seconds. Dasher can fly 10 km/s for 4 seconds, but then must rest for 37 seconds. Comet can fly 3 km/s for 37 seconds, but then must rest for 76 seconds. Prancer can fly 9 km/s for 12 seconds, but then must rest for 97 seconds. Dancer can fly 37 km/s for 1 seconds, but then must rest for 36 seconds.'
day14time = 2503
day15 = 'Sugar: capacity 3, durability 0, flavor 0, texture -3, calories 2 Sprinkles: capacity -3, durability 3, flavor 0, texture 0, calories 9 Candy: capacity -1, durability 0, flavor 4, texture 0, calories 1 Chocolate: capacity 0, durability 0, flavor -2, texture 2, calories 8'
day15test = 'Butterscotch: capacity -1, durability -2, flavor 6, texture 3, calories 8 Cinnamon: capacity 2, durability 3, flavor -2, texture -1, calories 3'
day16sue = ('Sue 1: goldfish: 6, trees: 9, akitas: 0 Sue 2: goldfish: 7, trees: 1, akitas: 0 Sue 3: cars: 10, akitas: 6, perfumes: 7 Sue 4: perfumes: 2, vizslas: 0, cars: 6 Sue 5: goldfish: 1, trees: 3, perfumes: 10 Sue 6: children: 9, vizslas: 7, cars: 9 Sue 7: cars: 6, vizslas: 5, cats: 3 Sue 8: akitas: 10, vizslas: 9, children: 3 Sue 9: vizslas: 8, cats: 2, trees: 1 Sue 10: perfumes: 10, trees: 6, cars: 4 Sue 11: cars: 9, children: 1, cats: 1 Sue 12: pomeranians: 4, akitas: 6, goldfish: 8 Sue 13: cats: 10, children: 5, trees: 9 Sue 14: perfumes: 8, vizslas: 3, samoyeds: 1 Sue 15: vizslas: 2, perfumes: 8, trees: 3 Sue 16: pomeranians: 10, trees: 9, samoyeds: 4 Sue 17: akitas: 7, vizslas: 0, goldfish: 6 Sue 18: trees: 5, vizslas: 9, cars: 0 Sue 19: akitas: 3, goldfish: 9, trees: 10 Sue 20: perfumes: 7, samoyeds: 3, vizslas: 10 Sue 21: perfumes: 7, pomeranians: 10, akitas: 8 Sue 22: vizslas: 6, trees: 8, akitas: 10 Sue 23: goldfish: 0, trees: 4, children: 9 Sue 24: goldfish: 7, pomeranians: 9, akitas: 4 Sue 25: cars: 7, trees: 4, pomeranians: 4 Sue 26: trees: 9, akitas: 9, pomeranians: 7 Sue 27: samoyeds: 0, perfumes: 9, goldfish: 10 Sue 28: cars: 5, trees: 7, vizslas: 1 Sue 29: perfumes: 9, trees: 1, children: 6 Sue 30: goldfish: 10, trees: 0, cars: 4 Sue 31: akitas: 2, perfumes: 5, goldfish: 5 Sue 32: goldfish: 0, akitas: 5, trees: 0 Sue 33: vizslas: 2, akitas: 2, samoyeds: 3 Sue 34: goldfish: 8, perfumes: 5, cars: 3 Sue 35: akitas: 1, cats: 4, trees: 9 Sue 36: cars: 4, vizslas: 4, goldfish: 7 Sue 37: akitas: 5, perfumes: 7, trees: 3 Sue 38: goldfish: 10, trees: 2, vizslas: 9 Sue 39: goldfish: 4, pomeranians: 5, vizslas: 5 Sue 40: perfumes: 5, samoyeds: 4, akitas: 6 Sue 41: goldfish: 9, cars: 4, perfumes: 5 Sue 42: trees: 6, pomeranians: 9, goldfish: 8 Sue 43: perfumes: 7, pomeranians: 1, akitas: 2 Sue 44: vizslas: 9, cars: 5, cats: 0 Sue 45: akitas: 1, goldfish: 6, trees: 0 Sue 46: akitas: 5, vizslas: 8, trees: 2 Sue 47: trees: 9, akitas: 2, vizslas: 9 Sue 48: goldfish: 10, trees: 5, akitas: 2 Sue 49: cars: 7, vizslas: 2, perfumes: 6 Sue 50: akitas: 5, goldfish: 6, perfumes: 0 Sue 51: cars: 9, cats: 7, trees: 5 Sue 52: akitas: 7, goldfish: 10, cars: 0 Sue 53: cars: 10, cats: 4, perfumes: 2 Sue 54: goldfish: 2, pomeranians: 5, perfumes: 10 Sue 55: vizslas: 5, akitas: 4, cars: 8 Sue 56: goldfish: 9, vizslas: 4, akitas: 5 Sue 57: perfumes: 8, samoyeds: 7, cars: 9 Sue 58: cars: 5, akitas: 7, perfumes: 8 Sue 59: samoyeds: 8, cars: 10, vizslas: 10 Sue 60: akitas: 6, samoyeds: 0, goldfish: 3 Sue 61: trees: 8, pomeranians: 0, akitas: 2 Sue 62: trees: 1, perfumes: 3, vizslas: 4 Sue 63: vizslas: 6, samoyeds: 9, goldfish: 8 Sue 64: goldfish: 7, trees: 6, vizslas: 3 Sue 65: cars: 1, vizslas: 0, akitas: 6 Sue 66: cats: 6, pomeranians: 4, cars: 9 Sue 67: trees: 10, pomeranians: 7, samoyeds: 3 Sue 68: pomeranians: 5, goldfish: 9, akitas: 1 Sue 69: akitas: 1, vizslas: 0, trees: 9 Sue 70: cats: 4, goldfish: 4, vizslas: 10 Sue 71: vizslas: 7, perfumes: 7, trees: 8 Sue 72: children: 2, vizslas: 9, cats: 3 Sue 73: cars: 8, pomeranians: 0, perfumes: 6 Sue 74: akitas: 1, pomeranians: 8, vizslas: 10 Sue 75: vizslas: 5, perfumes: 5, cars: 7 Sue 76: cars: 3, vizslas: 3, goldfish: 0 Sue 77: akitas: 9, samoyeds: 1, pomeranians: 3 Sue 78: trees: 0, vizslas: 0, akitas: 6 Sue 79: pomeranians: 9, cars: 1, perfumes: 0 Sue 80: perfumes: 10, trees: 1, cats: 0 Sue 81: goldfish: 5, akitas: 9, trees: 0 Sue 82: vizslas: 1, akitas: 6, children: 4 Sue 83: samoyeds: 7, perfumes: 8, pomeranians: 4 Sue 84: perfumes: 3, children: '
            '3, cats: 7 Sue 85: goldfish: 9, trees: 3, cars: 9 Sue 86: cars: 0, perfumes: 9, vizslas: 0 Sue 87: children: 3, trees: 4, akitas: 3 Sue 88: trees: 1, samoyeds: 1, goldfish: 0 Sue 89: akitas: 8, cars: 3, vizslas: 9 Sue 90: pomeranians: 9, trees: 9, goldfish: 8 Sue 91: goldfish: 7, trees: 10, children: 0 Sue 92: cats: 9, cars: 7, perfumes: 7 Sue 93: vizslas: 2, goldfish: 7, cats: 9 Sue 94: akitas: 5, cars: 8, vizslas: 4 Sue 95: goldfish: 7, vizslas: 1, perfumes: 2 Sue 96: goldfish: 5, trees: 6, perfumes: 10 Sue 97: trees: 0, perfumes: 7, cars: 0 Sue 98: cars: 2, perfumes: 6, trees: 8 Sue 99: trees: 10, children: 7, cats: 9 Sue 100: samoyeds: 5, goldfish: 6, vizslas: 6 Sue 101: cars: 10, perfumes: 9, vizslas: 3 Sue 102: pomeranians: 6, trees: 1, samoyeds: 4 Sue 103: cars: 2, perfumes: 1, goldfish: 5 Sue 104: goldfish: 2, cars: 8, pomeranians: 2 Sue 105: goldfish: 6, vizslas: 0, trees: 10 Sue 106: trees: 10, akitas: 10, pomeranians: 0 Sue 107: vizslas: 2, pomeranians: 10, trees: 3 Sue 108: children: 3, vizslas: 8, akitas: 7 Sue 109: perfumes: 2, akitas: 2, samoyeds: 3 Sue 110: goldfish: 7, trees: 1, perfumes: 1 Sue 111: akitas: 2, cars: 9, perfumes: 2 Sue 112: children: 10, cars: 0, akitas: 3 Sue 113: akitas: 9, vizslas: 4, children: 3 Sue 114: pomeranians: 3, trees: 2, goldfish: 5 Sue 115: perfumes: 8, cars: 6, trees: 0 Sue 116: samoyeds: 6, children: 3, pomeranians: 1 Sue 117: goldfish: 1, trees: 2, akitas: 1 Sue 118: goldfish: 10, akitas: 10, samoyeds: 0 Sue 119: vizslas: 10, perfumes: 6, cars: 0 Sue 120: cars: 2, perfumes: 9, goldfish: 5 Sue 121: vizslas: 2, trees: 2, cars: 6 Sue 122: vizslas: 3, trees: 0, akitas: 2 Sue 123: akitas: 5, samoyeds: 7, goldfish: 1 Sue 124: goldfish: 8, samoyeds: 7, trees: 8 Sue 125: trees: 3, goldfish: 8, perfumes: 5 Sue 126: cats: 3, vizslas: 9, goldfish: 0 Sue 127: pomeranians: 9, goldfish: 3, perfumes: 6 Sue 128: vizslas: 4, cars: 8, goldfish: 5 Sue 129: vizslas: 8, children: 5, perfumes: 8 Sue 130: cars: 7, children: 7, cats: 3 Sue 131: perfumes: 1, akitas: 8, vizslas: 9 Sue 132: perfumes: 7, samoyeds: 10, pomeranians: 6 Sue 133: cars: 5, perfumes: 3, goldfish: 7 Sue 134: perfumes: 9, akitas: 2, cats: 3 Sue 135: perfumes: 1, trees: 9, vizslas: 9 Sue 136: akitas: 7, cars: 3, perfumes: 7 Sue 137: vizslas: 9, goldfish: 8, cars: 5 Sue 138: trees: 0, samoyeds: 1, cars: 3 Sue 139: cars: 0, perfumes: 6, trees: 0 Sue 140: pomeranians: 4, cars: 1, perfumes: 7 Sue 141: vizslas: 10, akitas: 8, cats: 3 Sue 142: trees: 1, cats: 6, vizslas: 5 Sue 143: pomeranians: 9, cars: 7, perfumes: 9 Sue 144: cars: 0, perfumes: 2, pomeranians: 1 Sue 145: trees: 1, goldfish: 9, perfumes: 8 Sue 146: cars: 8, children: 5, vizslas: 2 Sue 147: perfumes: 2, goldfish: 5, cars: 0 Sue 148: akitas: 2, perfumes: 7, pomeranians: 6 Sue 149: goldfish: 8, cars: 0, trees: 1 Sue 150: akitas: 6, perfumes: 5, trees: 0 Sue 151: vizslas: 6, samoyeds: 8, akitas: 10 Sue 152: trees: 7, akitas: 7, perfumes: 6 Sue 153: goldfish: 9, cats: 9, cars: 3 Sue 154: vizslas: 10, trees: 0, cars: 9 Sue 155: perfumes: 3, children: 2, goldfish: 1 Sue 156: goldfish: 7, perfumes: 5, akitas: 6 Sue 157: cats: 10, trees: 1, goldfish: 0 Sue 158: cats: 7, children: 7, vizslas: 6 Sue 159: perfumes: 9, akitas: 0, cars: 0 Sue 160: akitas: 3, goldfish: 10, pomeranians: 2 Sue 161: goldfish: 10, cars: 6, perfumes: 3 Sue 162: trees: 0, cars: 9, goldfish: 1 Sue 163: cars: 8, perfumes: 9, vizslas: 5 Sue 164: goldfish: 1, trees: 10, children: 6 Sue 165: goldfish: 0, vizslas: 6, cars: 0 Sue 166: akitas: 5, vizslas: 1, cars: 5 Sue 167: '
            'vizslas: 1, samoyeds: 1, children: 4 Sue 168: samoyeds: 7, vizslas: 7, akitas: 3 Sue 169: goldfish: 3, cats: 9, trees: 2 Sue 170: cars: 5, perfumes: 9, vizslas: 5 Sue 171: goldfish: 7, cars: 6, perfumes: 10 Sue 172: cats: 6, akitas: 1, children: 6 Sue 173: cats: 4, goldfish: 1, children: 3 Sue 174: cars: 2, pomeranians: 2, vizslas: 7 Sue 175: trees: 0, children: 4, goldfish: 7 Sue 176: children: 8, cars: 5, cats: 9 Sue 177: pomeranians: 4, vizslas: 7, trees: 3 Sue 178: vizslas: 6, perfumes: 10, akitas: 6 Sue 179: cars: 4, akitas: 4, trees: 4 Sue 180: akitas: 8, goldfish: 6, trees: 9 Sue 181: perfumes: 3, vizslas: 10, cars: 3 Sue 182: vizslas: 3, samoyeds: 3, goldfish: 7 Sue 183: goldfish: 10, perfumes: 2, cats: 1 Sue 184: goldfish: 5, trees: 1, perfumes: 1 Sue 185: vizslas: 10, trees: 9, perfumes: 2 Sue 186: goldfish: 6, perfumes: 9, trees: 1 Sue 187: cars: 0, trees: 9, goldfish: 6 Sue 188: cars: 0, trees: 1, vizslas: 9 Sue 189: akitas: 7, vizslas: 2, trees: 0 Sue 190: pomeranians: 5, perfumes: 8, akitas: 10 Sue 191: vizslas: 5, akitas: 3, cats: 0 Sue 192: children: 1, trees: 1, cars: 2 Sue 193: cars: 3, goldfish: 9, trees: 2 Sue 194: samoyeds: 3, akitas: 4, perfumes: 8 Sue 195: trees: 1, vizslas: 8, akitas: 10 Sue 196: akitas: 6, cars: 5, pomeranians: 0 Sue 197: akitas: 5, vizslas: 5, cats: 1 Sue 198: trees: 4, cars: 6, goldfish: 6 Sue 199: cats: 7, cars: 5, goldfish: 6 Sue 200: vizslas: 4, cats: 0, akitas: 9 Sue 201: pomeranians: 1, perfumes: 4, children: 2 Sue 202: cats: 1, perfumes: 4, vizslas: 3 Sue 203: vizslas: 1, akitas: 9, children: 5 Sue 204: perfumes: 8, cars: 7, trees: 4 Sue 205: perfumes: 7, pomeranians: 5, cats: 9 Sue 206: vizslas: 8, trees: 2, akitas: 2 Sue 207: akitas: 6, vizslas: 2, perfumes: 10 Sue 208: vizslas: 1, children: 7, akitas: 4 Sue 209: perfumes: 4, trees: 2, children: 1 Sue 210: goldfish: 0, vizslas: 2, samoyeds: 10 Sue 211: cars: 8, perfumes: 3, trees: 1 Sue 212: cars: 8, samoyeds: 5, pomeranians: 8 Sue 213: akitas: 2, goldfish: 8, pomeranians: 2 Sue 214: akitas: 6, pomeranians: 2, cars: 0 Sue 215: trees: 10, pomeranians: 4, vizslas: 0 Sue 216: perfumes: 0, cars: 8, trees: 0 Sue 217: samoyeds: 8, akitas: 7, children: 10 Sue 218: perfumes: 1, vizslas: 6, children: 0 Sue 219: children: 1, goldfish: 4, trees: 1 Sue 220: akitas: 10, goldfish: 10, trees: 5 Sue 221: cars: 7, pomeranians: 6, perfumes: 3 Sue 222: vizslas: 6, children: 0, akitas: 5 Sue 223: perfumes: 9, cars: 1, trees: 6 Sue 224: pomeranians: 1, trees: 0, vizslas: 0 Sue 225: goldfish: 8, akitas: 4, perfumes: 10 Sue 226: pomeranians: 7, cats: 7, children: 4 Sue 227: trees: 0, akitas: 2, perfumes: 1 Sue 228: vizslas: 6, cars: 10, perfumes: 9 Sue 229: cars: 0, perfumes: 6, trees: 4 Sue 230: pomeranians: 7, perfumes: 5, trees: 2 Sue 231: goldfish: 9, cars: 6, trees: 7 Sue 232: akitas: 1, vizslas: 5, cars: 3 Sue 233: akitas: 7, samoyeds: 2, vizslas: 5 Sue 234: akitas: 6, cats: 8, pomeranians: 0 Sue 235: pomeranians: 5, akitas: 5, vizslas: 3 Sue 236: goldfish: 5, trees: 6, akitas: 5 Sue 237: goldfish: 9, perfumes: 5, cats: 5 Sue 238: cats: 8, goldfish: 4, perfumes: 0 Sue 239: samoyeds: 8, children: 6, pomeranians: 6 Sue 240: akitas: 4, samoyeds: 10, trees: 8 Sue 241: trees: 2, goldfish: 8, cars: 1 Sue 242: perfumes: 2, cars: 0, akitas: 10 Sue 243: pomeranians: 1, cars: 7, trees: 2 Sue 244: trees: 9, vizslas: 2, akitas: 10 Sue 245: cars: 9, pomeranians: 4, trees: 0 Sue 246: cars: 9, pomeranians: 7, perfumes: 1 Sue 247: trees: 0, goldfish: 1, akitas: 8 Sue 248: vizslas: 1, cats: 4, akitas: 4 Sue 249: cats: 6, '
            'children: 4, goldfish: 9 Sue 250: vizslas: 1, cars: 10, samoyeds: 5 Sue 251: cars: 0, goldfish: 1, vizslas: 7 Sue 252: cars: 7, akitas: 9, vizslas: 10 Sue 253: akitas: 7, vizslas: 2, perfumes: 5 Sue 254: vizslas: 10, akitas: 5, samoyeds: 0 Sue 255: pomeranians: 8, goldfish: 0, cats: 6 Sue 256: cars: 10, goldfish: 8, vizslas: 9 Sue 257: goldfish: 3, perfumes: 9, cats: 3 Sue 258: trees: 6, goldfish: 6, cars: 6 Sue 259: trees: 0, goldfish: 2, perfumes: 8 Sue 260: trees: 5, akitas: 0, cars: 0 Sue 261: pomeranians: 9, goldfish: 7, perfumes: 8 Sue 262: perfumes: 8, vizslas: 6, goldfish: 2 Sue 263: vizslas: 6, trees: 5, goldfish: 9 Sue 264: vizslas: 4, perfumes: 7, cars: 9 Sue 265: goldfish: 10, trees: 3, perfumes: 1 Sue 266: trees: 10, akitas: 8, goldfish: 8 Sue 267: goldfish: 4, trees: 0, samoyeds: 9 Sue 268: vizslas: 1, trees: 0, goldfish: 8 Sue 269: cars: 2, perfumes: 10, goldfish: 5 Sue 270: perfumes: 7, cars: 2, vizslas: 1 Sue 271: cars: 6, perfumes: 10, goldfish: 6 Sue 272: samoyeds: 4, goldfish: 2, vizslas: 9 Sue 273: perfumes: 4, goldfish: 4, vizslas: 1 Sue 274: children: 4, cars: 4, perfumes: 3 Sue 275: children: 8, vizslas: 3, trees: 2 Sue 276: vizslas: 5, children: 7, perfumes: 3 Sue 277: perfumes: 3, cats: 4, vizslas: 5 Sue 278: cars: 1, samoyeds: 10, akitas: 2 Sue 279: trees: 9, perfumes: 9, cars: 10 Sue 280: vizslas: 5, trees: 0, perfumes: 6 Sue 281: vizslas: 3, akitas: 10, pomeranians: 7 Sue 282: trees: 1, children: 2, akitas: 8 Sue 283: akitas: 9, goldfish: 6, cats: 5 Sue 284: cars: 9, children: 10, pomeranians: 2 Sue 285: pomeranians: 0, perfumes: 4, cars: 7 Sue 286: perfumes: 0, vizslas: 10, akitas: 10 Sue 287: cats: 2, perfumes: 3, trees: 5 Sue 288: akitas: 9, vizslas: 8, samoyeds: 9 Sue 289: perfumes: 6, children: 2, cars: 7 Sue 290: akitas: 0, children: 5, cars: 5 Sue 291: cars: 4, perfumes: 0, trees: 1 Sue 292: cats: 0, cars: 8, perfumes: 6 Sue 293: akitas: 9, cats: 5, children: 5 Sue 294: akitas: 4, cars: 9, goldfish: 3 Sue 295: cars: 2, akitas: 3, perfumes: 7 Sue 296: perfumes: 4, cars: 7, goldfish: 10 Sue 297: trees: 5, akitas: 8, vizslas: 1 Sue 298: perfumes: 0, goldfish: 6, trees: 9 Sue 299: perfumes: 6, samoyeds: 8, cars: 1 Sue 300: goldfish: 10, perfumes: 4, akitas: 2 Sue 301: cars: 3, trees: 0, goldfish: 8 Sue 302: perfumes: 7, samoyeds: 2, vizslas: 7 Sue 303: children: 10, goldfish: 7, perfumes: 2 Sue 304: samoyeds: 8, vizslas: 2, cars: 1 Sue 305: trees: 1, cats: 0, goldfish: 10 Sue 306: trees: 4, perfumes: 2, cars: 7 Sue 307: cars: 6, vizslas: 2, children: 6 Sue 308: vizslas: 2, cars: 0, akitas: 7 Sue 309: cars: 3, vizslas: 8, perfumes: 6 Sue 310: goldfish: 7, perfumes: 7, vizslas: 3 Sue 311: pomeranians: 10, trees: 2, cars: 0 Sue 312: samoyeds: 2, vizslas: 9, akitas: 1 Sue 313: cars: 4, pomeranians: 7, goldfish: 7 Sue 314: akitas: 2, pomeranians: 9, samoyeds: 10 Sue 315: akitas: 3, vizslas: 2, trees: 0 Sue 316: cars: 0, perfumes: 4, pomeranians: 6 Sue 317: akitas: 10, goldfish: 3, pomeranians: 7 Sue 318: cars: 9, trees: 0, pomeranians: 9 Sue 319: akitas: 3, vizslas: 7, children: 10 Sue 320: vizslas: 0, akitas: 8, pomeranians: 4 Sue 321: cars: 10, akitas: 9, vizslas: 3 Sue 322: perfumes: 0, akitas: 8, vizslas: 6 Sue 323: vizslas: 10, perfumes: 5, cars: 3 Sue 324: akitas: 0, goldfish: 6, vizslas: 7 Sue 325: perfumes: 9, vizslas: 5, pomeranians: 2 Sue 326: vizslas: 6, goldfish: 10, pomeranians: 8 Sue 327: vizslas: 10, cars: 1, akitas: 7 Sue 328: trees: 1, perfumes: 10, cars: 10 Sue 329: pomeranians: 5, samoyeds: 3, cars: 10 Sue 330: akitas: 6, cars: 1, pomeranians: '
            '4 Sue 331: cars: 5, children: 2, trees: 0 Sue 332: vizslas: 6, pomeranians: 1, perfumes: 0 Sue 333: akitas: 7, trees: 1, cats: 9 Sue 334: vizslas: 6, goldfish: 9, akitas: 7 Sue 335: akitas: 3, samoyeds: 3, cars: 3 Sue 336: samoyeds: 10, perfumes: 9, trees: 6 Sue 337: vizslas: 2, cars: 9, akitas: 0 Sue 338: akitas: 6, perfumes: 9, vizslas: 3 Sue 339: cars: 3, samoyeds: 8, trees: 2 Sue 340: cats: 7, perfumes: 8, cars: 9 Sue 341: goldfish: 9, perfumes: 5, cars: 10 Sue 342: trees: 0, akitas: 3, perfumes: 5 Sue 343: perfumes: 2, children: 0, cars: 6 Sue 344: goldfish: 8, trees: 8, perfumes: 0 Sue 345: perfumes: 6, cars: 6, goldfish: 5 Sue 346: vizslas: 8, trees: 1, cars: 6 Sue 347: cars: 0, cats: 3, perfumes: 7 Sue 348: children: 7, perfumes: 10, cars: 7 Sue 349: pomeranians: 8, akitas: 5, children: 2 Sue 350: perfumes: 9, pomeranians: 4, goldfish: 3 Sue 351: perfumes: 8, pomeranians: 7, trees: 4 Sue 352: samoyeds: 1, goldfish: 9, akitas: 8 Sue 353: akitas: 6, goldfish: 10, vizslas: 8 Sue 354: akitas: 7, cars: 2, goldfish: 6 Sue 355: cars: 3, goldfish: 6, akitas: 5 Sue 356: akitas: 2, goldfish: 9, pomeranians: 1 Sue 357: goldfish: 10, cars: 6, pomeranians: 9 Sue 358: trees: 0, children: 2, goldfish: 6 Sue 359: samoyeds: 3, cars: 2, akitas: 4 Sue 360: trees: 1, goldfish: 8, cars: 5 Sue 361: akitas: 5, vizslas: 7, perfumes: 1 Sue 362: cats: 5, vizslas: 9, children: 4 Sue 363: goldfish: 9, perfumes: 3, vizslas: 9 Sue 364: children: 7, samoyeds: 2, pomeranians: 10 Sue 365: perfumes: 9, akitas: 10, pomeranians: 4 Sue 366: cars: 10, trees: 3, cats: 4 Sue 367: vizslas: 6, akitas: 10, perfumes: 5 Sue 368: akitas: 9, vizslas: 9, children: 4 Sue 369: goldfish: 8, trees: 2, perfumes: 5 Sue 370: trees: 0, children: 4, cars: 8 Sue 371: cats: 6, perfumes: 0, vizslas: 2 Sue 372: akitas: 7, cars: 5, perfumes: 3 Sue 373: cars: 0, perfumes: 4, pomeranians: 10 Sue 374: akitas: 5, perfumes: 5, vizslas: 2 Sue 375: goldfish: 7, trees: 10, pomeranians: 7 Sue 376: cars: 8, trees: 1, pomeranians: 8 Sue 377: cars: 0, akitas: 9, vizslas: 1 Sue 378: akitas: 5, perfumes: 3, vizslas: 7 Sue 379: trees: 2, goldfish: 8, pomeranians: 8 Sue 380: akitas: 5, cars: 9, perfumes: 9 Sue 381: cars: 2, perfumes: 6, trees: 3 Sue 382: perfumes: 6, vizslas: 2, goldfish: 9 Sue 383: akitas: 8, vizslas: 7, cats: 1 Sue 384: akitas: 9, trees: 10, vizslas: 7 Sue 385: cars: 0, perfumes: 7, vizslas: 2 Sue 386: vizslas: 10, akitas: 4, perfumes: 9 Sue 387: perfumes: 6, pomeranians: 5, samoyeds: 8 Sue 388: vizslas: 10, trees: 9, goldfish: 9 Sue 389: goldfish: 8, akitas: 4, perfumes: 10 Sue 390: goldfish: 6, trees: 8, akitas: 1 Sue 391: vizslas: 4, akitas: 10, goldfish: 7 Sue 392: akitas: 1, vizslas: 6, samoyeds: 5 Sue 393: trees: 6, cars: 3, akitas: 5 Sue 394: goldfish: 9, trees: 3, cars: 5 Sue 395: akitas: 6, samoyeds: 4, goldfish: 4 Sue 396: akitas: 2, trees: 1, cats: 5 Sue 397: cars: 0, children: 9, trees: 10 Sue 398: pomeranians: 3, samoyeds: 9, goldfish: 10 Sue 399: cars: 7, akitas: 4, goldfish: 8 Sue 400: cars: 4, akitas: 5, vizslas: 4 Sue 401: pomeranians: 5, akitas: 8, vizslas: 5 Sue 402: cats: 7, cars: 6, goldfish: 6 Sue 403: samoyeds: 8, perfumes: 4, cars: 5 Sue 404: akitas: 10, goldfish: 4, trees: 2 Sue 405: trees: 8, perfumes: 1, cars: 2 Sue 406: trees: 0, perfumes: 9, pomeranians: 10 Sue 407: perfumes: 4, trees: 7, goldfish: 3 Sue 408: akitas: 1, perfumes: 3, cars: 5 Sue 409: trees: 6, samoyeds: 3, cars: 9 Sue 410: vizslas: 3, goldfish: 5, akitas: 7 Sue 411: goldfish: 10, trees: 1, vizslas: 9 Sue 412: cars: 0, akitas: 6, trees: 6 Sue '
            '413: goldfish: 7, trees: 0, cars: 3 Sue 414: pomeranians: 10, samoyeds: 3, cars: 10 Sue 415: perfumes: 6, trees: 9, cars: 4 Sue 416: trees: 2, cars: 4, goldfish: 8 Sue 417: goldfish: 2, cars: 9, cats: 5 Sue 418: vizslas: 1, cars: 9, akitas: 0 Sue 419: perfumes: 6, cats: 3, children: 9 Sue 420: cats: 5, goldfish: 7, akitas: 9 Sue 421: trees: 1, samoyeds: 6, pomeranians: 1 Sue 422: trees: 10, goldfish: 6, children: 7 Sue 423: cars: 8, goldfish: 7, vizslas: 3 Sue 424: samoyeds: 9, akitas: 7, trees: 5 Sue 425: akitas: 5, children: 4, perfumes: 9 Sue 426: goldfish: 1, children: 9, cats: 2 Sue 427: vizslas: 9, akitas: 7, goldfish: 9 Sue 428: pomeranians: 7, akitas: 5, vizslas: 1 Sue 429: vizslas: 7, goldfish: 7, cars: 9 Sue 430: trees: 7, perfumes: 0, pomeranians: 5 Sue 431: children: 9, perfumes: 5, vizslas: 7 Sue 432: trees: 6, samoyeds: 7, cats: 1 Sue 433: goldfish: 5, trees: 5, children: 6 Sue 434: goldfish: 9, akitas: 7, cars: 3 Sue 435: samoyeds: 10, perfumes: 2, cars: 0 Sue 436: akitas: 5, pomeranians: 4, perfumes: 7 Sue 437: vizslas: 5, cats: 6, perfumes: 5 Sue 438: trees: 2, goldfish: 6, vizslas: 7 Sue 439: samoyeds: 8, pomeranians: 10, goldfish: 1 Sue 440: akitas: 6, children: 9, perfumes: 4 Sue 441: cars: 2, goldfish: 9, children: 0 Sue 442: goldfish: 7, cars: 2, vizslas: 8 Sue 443: goldfish: 6, samoyeds: 3, perfumes: 2 Sue 444: trees: 2, goldfish: 7, cars: 8 Sue 445: trees: 2, pomeranians: 0, children: 0 Sue 446: perfumes: 4, akitas: 4, goldfish: 6 Sue 447: vizslas: 7, akitas: 9, cars: 3 Sue 448: goldfish: 6, trees: 9, cars: 0 Sue 449: samoyeds: 7, perfumes: 4, vizslas: 10 Sue 450: akitas: 7, cars: 10, goldfish: 7 Sue 451: goldfish: 4, children: 7, pomeranians: 4 Sue 452: cats: 4, vizslas: 6, trees: 7 Sue 453: cars: 1, trees: 10, goldfish: 9 Sue 454: trees: 2, goldfish: 3, vizslas: 10 Sue 455: pomeranians: 9, vizslas: 3, akitas: 2 Sue 456: vizslas: 10, akitas: 2, goldfish: 1 Sue 457: trees: 5, cats: 5, children: 8 Sue 458: cars: 6, goldfish: 3, akitas: 9 Sue 459: goldfish: 7, akitas: 2, cats: 7 Sue 460: akitas: 1, cars: 5, children: 8 Sue 461: cars: 8, perfumes: 0, goldfish: 6 Sue 462: pomeranians: 6, cats: 2, perfumes: 6 Sue 463: vizslas: 7, perfumes: 3, goldfish: 3 Sue 464: akitas: 10, goldfish: 10, trees: 1 Sue 465: vizslas: 0, akitas: 2, trees: 2 Sue 466: perfumes: 6, akitas: 8, cars: 2 Sue 467: goldfish: 1, cars: 10, perfumes: 3 Sue 468: goldfish: 4, trees: 2, cars: 9 Sue 469: perfumes: 6, pomeranians: 0, vizslas: 10 Sue 470: samoyeds: 8, children: 0, akitas: 7 Sue 471: children: 3, goldfish: 9, cats: 9 Sue 472: samoyeds: 0, goldfish: 0, trees: 0 Sue 473: trees: 3, goldfish: 4, vizslas: 1 Sue 474: perfumes: 10, cars: 3, trees: 7 Sue 475: akitas: 5, vizslas: 4, goldfish: 5 Sue 476: children: 2, akitas: 7, vizslas: 3 Sue 477: vizslas: 6, pomeranians: 9, trees: 6 Sue 478: vizslas: 7, pomeranians: 6, akitas: 7 Sue 479: trees: 2, perfumes: 2, children: 2 Sue 480: cars: 8, cats: 5, vizslas: 0 Sue 481: trees: 5, goldfish: 0, akitas: 3 Sue 482: cars: 8, perfumes: 6, goldfish: 10 Sue 483: goldfish: 0, cars: 3, perfumes: 10 Sue 484: pomeranians: 1, samoyeds: 1, perfumes: 3 Sue 485: trees: 0, akitas: 2, vizslas: 4 Sue 486: cars: 3, vizslas: 8, goldfish: 1 Sue 487: pomeranians: 9, vizslas: 2, children: 10 Sue 488: akitas: 6, vizslas: 10, perfumes: 9 Sue 489: goldfish: 6, vizslas: 4, cars: 2 Sue 490: vizslas: 10, cats: 8, samoyeds: 1 Sue 491: cats: 9, cars: 1, perfumes: 10 Sue 492: goldfish: 6, cars: 9, pomeranians: 9 Sue 493: children: 10, goldfish: 10, vizslas: 0 Sue 494: pomeranians: 5, '
            'cars: 0, vizslas: 0 Sue 495: vizslas: 7, perfumes: 6, samoyeds: 3 Sue 496: trees: 1, cats: 4, cars: 10 Sue 497: cats: 1, perfumes: 0, cars: 7 Sue 498: perfumes: 7, vizslas: 6, cats: 9 Sue 499: vizslas: 8, perfumes: 1, akitas: 3 Sue 500: perfumes: 4, cars: 9, trees: 4')
day16tape = 'children: 3 cats: 7 samoyeds: 2 pomeranians: 3 akitas: 0 vizslas: 0 goldfish: 5 trees: 3 cars: 2 perfumes: 1'
day16gt = 'cats trees'
day16lt = 'pomeranians goldfish'
day17 = [11, 30, 47, 31, 32, 36, 3, 1, 5, 3, 32, 36, 15, 11, 46, 26, 28, 1, 19, 3]
day18 = '''#...##......#......##.##..#...##......##.#.#.###.#.#..#..#......####..#......###.#.#....#..##..###..
####..#.#...#....#.#####.##.##.#..#.......#....#.##...###.###..#.#.#........#..#.#.##...##..#.####.#
...#..##...#.#.###.#.###..#.##.####.###...#...........#.###..##.#.##.#.###...#.#..###....#.###.#..#.
.#...##...####.#..#.....#..#...#.#.##...#...##..#.#.###....#..###.....##..#.###..###.....##..###...#
..##.#####....##..#.#..##.##..######...#..###.######.....#..##...#.#..##..##..#..#..#..##.#.#.#.#...
.###.###.###...##...##..###..##.###.#.....##..##.#.#########...##..##.#..##.#..##..####..#.#.#.#####
#.#####..###.###.##.##.#...#.#.#.#..#.###...#..##.###.#...####.#..#.#.....###..#..####..#.#.#...##..
....#...##.....#....####.##.#.###..#.#.##..#.#...##.###.###..#.##..#.#.##..##..#.##.###..#.#.###.###
##.##...#.##...#.#..#.#..#...###...###.#..#..#.#####..###.#......#.....###.#####.#.#..#.#.#.##..#.#.
#.#..#.....#.....##.#..##...###..##...##...###.#.###.#..#.#.###...##..##..#.###...#.#######.#...#.#.
#.#.....####.#..#.##...#.##....#####.###.#.....#####....###..#........##..####...#...#.###....#..###
##.#.##..#.#.##.#.....##.#.....###.####.#..######.....####.#.#..##.#.##...#..#.#.....#.####.#.......
#..#..#.#..#.######.##..##.####.....##.#.##.#.######..#.#....#.#...#.#..#..#.#.###.#..#.#.#..#...###
####..####.#.#.###.....#.#.#.##..#.##.##.##.#..##..##.#.##.....#.#..#.####.....###.#..#.####.#.#..##
###.##..##.#.##..#..##...#.#####.##.#....##.####.#.##....#..###.#.#.##...#.....#.#.#.#.#..##.#.#..#.
......#..####...##.##...#.##.##...##..#..##.###..#...#..##...#.#....###.####...#.##.###.#.##.####.##
..#...#####.#.#..#.##....#..#...#..####.....###...##.###....#..#.###...#........#.#.##..#..#.#.....#
#######.#.#.###.###..######.##..#####.##.###.###....####.#..##.##...###.#..############.#.##....##.#
#.#...##.###.#.###..#.#.#.#.#.#..##..####.#..##.....#.##..#.##...##.#..##..#.#.#....##....##.#..#.#.
..#.#.####.....###..#######.#.#.#.#...##.#####.....##...##...##.###..######.###..#...####.#..###.###
.#.##....#.#.##..##.#.##.##..######...#.....#..#.#.#.#.....#.#..##.#.#.......#######....#.......#...
..###.##.##..##....#.###...#.....##..##......###...##..###.##...##.###.#.#.#.###.###.#.#...###..#...
.##.#.#...#...##.#.#...#..#..#.#...##.#.##...##..#....#.#..##.#..#.#..#.#.....#..#.#...#######.#.##.
...####....#.###.#..###..##...##..#.#.#.###...#..##.##.##..##.#...#..#.##.....#.#........#..#.#.####
.....##..###...#....#.#.#.#...###.###...#.#...#.#.####....#..####...###..#..######..##.##..###.#####
#####.##..#....###.###....##.....#.#..#....#.#####.##.#.####.#.##...#..###...###..##...#.###.#####..
###.##..........########.######....####.###.#..##...#.##.####.#.....##..#####..###...#####.....#.#.#
##..#####.##.#.#####.#.##.##..#.##....########.#####.#...#.###.##...#.###.#.#..#....##.#..#...#.#.#.
.##.#....#..#...#..#####..#..##.#......#..#....########...#..#...#.....####.#...##...#.###.#.#..##.#
.##.##.#.##.#.##...#.#.#..##.##.###.#..##..#...###.##.###.#####.#.###..#..###.#...#.###.#...#..#.#.#
.#..#..#.#..#..###..#....###.####.##.#.###.#.##.###.#.##.###.###...###...###.#...####...#.##.##.#.#.
###..##...###...#..##.#..#.#...##....###.##.##..#####....###..#..#....#..###.###.#...#.##...#.#.#..#
#....#.......##.....#.##...#..#.###.#.##..##..#.##..#.###..##.##...#####.#..#####..#####..#####....#
.####.####....###..###.#.##.####.##.#...####.#.###.#.....#...####..#####.###..#.#.###.##.##...##..#.
####..##...##.########...##..###..#..###.##.#.#.#........#.#####.#...#.###.####.#..####..#.#.#....##
###.#..#...###.#..#..#.###...##..###.##.#.#...#..#...####..##....#.#..#..##.#.#...#####.###.#..#.#.#
...##....#.###.#.#..##...##.###.#..#..#......#...#.#..####.#.##..######.####.#...#..#..#..##.#.#.##.
##.####.#...#..#.#.##..##.#.#.###..##...####......#..######.#......#.##.#....##...###.#.#..#......##
#.....#...#######.##.#..#.#...###.#..#.####....#.#.##.#.##...###..#...#.###.##..#.###..#.##...#####.
#####.##...#..#.#.#.......#.##..#####..#####...###..##.#.#..###.#.#####.####..#.#..##...#.##...#.###
.##.#..#######.###.#.####.....##...#.##.#.#..#...##....####......######.#..######.....##########.##.
##...#.#..#.##.###.#.#.#.##.###.##..##.##.##...#.#..###.#######..#.....#####..#....######.#..##..###
.#.#.###.....#..##..#.#..##..#.###...###.#..##...#...#.#####.#.#####..###.#..#...##..#.#..#..####...
.#......##..#.....####.###....##.###.....###.##........#.###.##..#..#.#######.#.######..##..###.....
..##.#.#..#.##...#.###.###...######..#..#.#..#....###.#.#....#..........#...##.##.##.#..##..#.#####.
###.###.#..#.##..##.#..#..##.....##.....#..#######.#..#.#.#.####.###..###.#.#..#.##.##.####.###.####
#.#.#..#....########.#..#..#...##..#.##..#.#..##..####...##.....#.##.#.#...########..#.###.#..#.#.##
.##.....#...#.#...##.##....###...##..#.####...#..#.#..#..#.##..#.###.##.####.##..####.....##.#.....#
....####.#.##.#.##.#..##.#.######.##.####..#...####.#..###.#.#..#..##.#.#.....##.#####.#.####...#.#.
#..#####.#####.....##....######..##....#..#.#.###.#####.....##.##.####.#...##...#.##.#.#####.##.#...
##.####..###.#....#...#.#.#.#.###.#####.#.####..####...####......##..#..#..#.#.##...########....#...
.###.#.#.#.#..####.##.#..######..#.#.###.....#.#......#.#.#.#..####.##...##.#####.#.##..##..#..#.#..
.....###...#...#.####.###.#.#.#.#.....#....#.####.###.##.##.##.#######......#.####......#....##.....
##..#..#.#.##..#...#..##.##.##..###.#....##.##....####.#.##.###....#.##.#.#.##...##.###...#..#..####
...#.#..##..##.#...##.##...#.#......#.#.##..###....####.##...#.#.###.#..#..#.####..##..##..#####.###
.##.##..##########.##...#.##.####.#.#######.##.#.##.##..#...##....########.###..##.##.##.#..##.#.#.#
#####.#....#.##..#.....#......##.##..#.##.###..##.......###..##.#.###.##.###....####.#..#.###..#.#.#
.#...#..#.##....##....#...####....#...#..#...####...########.###.#..##.#.#.##..###..#.#.###.....##.#
##..##.....###......#..###.##.####.##.####.#.#....#..#...#..#.#..#.###.#...#...#..##.##...#..#######
.....##..###..##...#####.#.#.....###.#.#..####...#.#.#..#..####..##.#..###.####.#....##..###....#..#
#.#.##.#....#.#####.#....##...#...##...##....#.#.......#....#..#...###.###.#.####..####....#.##.#.#.
..##...##..###.#.#.##.#..#....#.#.....##.###.#.###.###.....#...#.#..#######.#####..#.###...##......#
#......###..#....#.#..#.###.##.#...##..###.####.#.#....#.##..#.###..##.#..#####..##.###.....#..###..
##.#.##..##.###.#..##.....#.##.....###....##.####.######.#...#..###....#.#...#.##.....###....#..#.#.
.##.#.#.#.##..#.#.#..##..#.###.####....#..###.######..####.#.....###.##..#...###.#..######.##.#.##..
...##.####.#..##.#####.##.#...##..#..#...#.#.#.#####...#....#..###...#..#....#.#.##.#.######.#..####
..#.#.#.#...#.######.#.....#..#.#..###....#.#.########...#....#.#.##..#...##...#.#..#.#.###....##...
#####..#..##..#..##..#..#.#.##.#....#####.####.##.#.###..##..##....#.....#.#####.#...#.#####.##.#.#.
#.#..#####...####.###.###.....####.###.....##...##...#..#..#######.#.##....##..####.....##...#..#..#
#.#.###.#.#..##..#....#.#...#.#.##.##..#.##.....##...#.#..##.......##.#.###..#####.#.##....#.##.....
...#.......#....#.#.####.#.###.###..#....#..##.#..####........#.##..#...#.#...###.#..#.#.#...#...#..
...##.#####.##.#.###.##.##.#.##..##.#.#.#.#.#.##.#..##...##.#.#..#..##.##.#####.#.###...#####..#..#.
#######.#..#..#....##.#.#..####.#..#..###...#..#.......###.#.#.####....#.###...#.#.###.#.#.#.#..###.
..##.##.#.##.###....###.##.#.###.#...#....#.####..###..###.#.#..#...##.#.#.#..##.###..###.#.##...###
######..######..##..##.#.#.##.##.#..##..#.#.#.##..#.#...#...#.#.#..######.#..#.#.######..#......##.#
#.#####.....#.......#########..###.##...#...##.#.#..#...#####...#...#..#.###.#..#.#...###.#.#.#...#.
#....##....###...##.##.#...##.........##.#.#..#.#.##.#.######.#####..#..###.###.#...#.#.##.######...
#.#...###.#.###.##.#.######.#######.###.##..#.#.#...######.##.####.##..#.#.#.#......##..##.........#
..###..##....#.....##...#.#.###.#.#.....##.#...###.####.#...#...##..##.#.#.####..###...######....#.#
..###.#.##.####.#..#.##....##..#####....#..##.##.#..#######...#.####...##.#.#.##.........#....#....#
.##.#...#.####..#.#...#.##..######.##..##.#.###.##..###.###....##..#.##.##..##.#...###.##.##.###....
#...###.###.#..#....#.......#..#.....###..#.###.##.##....#.####.#.####.##..##..#..#.....#....##.#.#.
.##.#..#..#.##.......#.####.#######.....#.##.##.#.....#.#..#....######.#..###.##.##.....#.####..##.#
###..#.###.#..####.....##....#..####....#.##.##..#...######.#########...#.#....##...###.#..#.##...#.
#..###..##..#.#.##.###.#.#.##...###.#...##.##..#.###....###..#.#...#.###..######.#..#.###..#..#..#.#
.#........##.#.###..###.#.#.##.....##.##.#.#...##..#.##....###..#.#.#.#.##....#.##..#.#...###...#...
####.####..#....#.#.#..#..##.......##.####...###.##..#.#.##.#..##..######.......##.#.##..#...#.....#
..#..#..###..##.##..######.#..###..###.#.##..##.#..#####.#.#.#.##..#.##..##.##......####.#..........
...##.##..###.#...###....#.#.#.#.....#.##.....##...#...#......####...##.##....##.#..#.####.#..###.#.
..#.....####.#.###.#####..#..###..#..#.#...#####...###.###....#.###..#...#..#..#.#..#.##..##.#.#....
..##.#####...###.###.........#....##.####.##..#.#..#.#...#...##.##.##..#.#.##.########......#####...
...###.#.#..#...#.###.###.......##.###.#..#.##########...#..#.#.#.##.#.###...######..#.#...###.##...
.#.#.#######.#..##.##..##...#...####...#..#####.#..##...###.#.#...#.##...#......#..##.####..#.....##
.##.##.#.#......#######..###.....##.#.##..###......#....####...#.###.#.##.#........#..#....##.....##
#...#.###.#.##...##.####....#...#.###..#.#.....#.#....#.#.#.##...#.#..#####.#.#..#..#..#....#...####
.....##...###......#####..##.##.##...##.#.#####..##...#.#.#.#.###...###.##.####..#.#..#.#..#.####.##
#..#..##.#.##.#.##.#.#.#..###....###.##.#.##.#...#.#..#...#....###.#..#.#.######.#...####..#..##.#.#
#..#.#..#...###.#..##.#...#...##.#......#...#..#..####..##.....#.###...#.#..#.#....#.#####.##.###...
###....#.#..#.#..###..#.##......#...#..#..##.#..###..##..#..#.####..#...########..##.#.##.#.#.#...#.
.#.#.##.##.###..#...#.#....#..#.##..#.#.#.#.##.##.#####...#........####..###..####.#####..#.##.#.##.'''
d19rules = '''Al => ThF
Al => ThRnFAr
B => BCa
B => TiB
B => TiRnFAr
Ca => CaCa
Ca => PB
Ca => PRnFAr
Ca => SiRnFYFAr
Ca => SiRnMgAr
Ca => SiTh
F => CaF
F => PMg
F => SiAl
H => CRnAlAr
H => CRnFYFYFAr
H => CRnFYMgAr
H => CRnMgYFAr
H => HCa
H => NRnFYFAr
H => NRnMgAr
H => NTh
H => OB
H => ORnFAr
Mg => BF
Mg => TiMg
N => CRnFAr
N => HSi
O => CRnFYFArF
O => CRnMgAr
O => HP
O => NRnFAr
O => OTi
P => CaP
P => PTi
P => SiRnFAr
Si => CaSi
Th => ThCa
Ti => BP
Ti => TiTi
e => HF
e => NAl
e => OMg'''
d19in = 'CRnSiRnCaPTiMgYCaPTiRnFArSiThFArCaSiThSiThPBCaCaSiRnSiRnTiTiMgArPBCaPMgYPTiRnFArFArCaSiRnBPMgArPRnCaPTiRnFArCaSiThCaCaFArPBCaCaPTiTiRnFArCaSiRnSiAlYSiThRnFArArCaSiRnBFArCaCaSiRnSiThCaCaCaFYCaPTiBCaSiThCaSiThPMgArSiRnCaPBFYCaCaFArCaCaCaCaSiThCaSiRnPRnFArPBSiThPRnFArSiRnMgArCaFYFArCaSiRnSiAlArTiTiTiTiTiTiTiRnPMgArPTiTiTiBSiRnSiAlArTiTiRnPMgArCaFYBPBPTiRnSiRnMgArSiThCaFArCaSiThFArPRnFArCaSiRnTiBSiThSiRnSiAlYCaFArPRnFArSiThCaFArCaCaSiThCaCaCaSiRnPRnCaFArFYPMgArCaPBCaPBSiRnFYPBCaFArCaSiAl'
d20 = 36000000
weapons = '''Dagger 8 4 0
Shortsword 10 5 0
Warhammer 25 6 0
Longsword 40 7 0
Greataxe 74 8 0'''
armor = '''Leather 13 0 1
Chainmail 31 0 2
Splintmail 53 0 3
Bandedmail 75 0 4
Platemail 102 0 5'''
rings = '''Damage +1 25 1 0
Damage +2 50 2 0
Damage +3 100 3 0
Defense +1 20 0 1
Defense +2 40 0 2
Defense +3 80 0 3'''
d21boss = [100, 8, 2]
d23 = '''jio a, +18
inc a
tpl a
inc a
tpl a
tpl a
tpl a
inc a
tpl a
inc a
tpl a
inc a
inc a
tpl a
tpl a
tpl a
inc a
jmp +22
tpl a
inc a
tpl a
inc a
inc a
tpl a
inc a
tpl a
inc a
inc a
tpl a
tpl a
inc a
inc a
tpl a
inc a
inc a
tpl a
inc a
inc a
tpl a
jio a, +8
inc b
jie a, +4
tpl a
inc a
jmp +2
hlf a
jmp -7'''
d23r = '''jio a, +22
inc a
tpl a
tpl a
tpl a
inc a
tpl a
inc a
tpl a
inc a
inc a
tpl a
inc a
inc a
tpl a
inc a
inc a
tpl a
inc a
inc a
tpl a
jmp +19
tpl a
tpl a
tpl a
tpl a
inc a
inc a
tpl a
inc a
tpl a
inc a
inc a
tpl a
inc a
inc a
tpl a
inc a
tpl a
tpl a
jio a, +8
inc b
jie a, +4
tpl a
inc a
jmp +2
hlf a
jmp -7'''
d24 = [1, 2, 3, 5, 7, 13, 17, 19, 23, 29, 31, 37, 41, 43, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113]
d24r = [1, 2, 3, 7, 11, 13, 17, 19, 23, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113]

authors: ["kitty.sham@gmail.com"]
author_id: kitty.sham@gmail.com

blob_id: 10c1266dba82fbfb4b1f0de4e8c97dd2e11690e0
directory_id: 786bff0016c63ee71bd77a99d4c04ad3d52f3ff9
path: /little-env/bin/gunicorn
content_id: e58e32de01413d13d4fbdf4bff5952559034de27
detected_licenses: []
license_type: no_license
repo_name: VikrantAgrahari/py-anywhere
snapshot_id: 8341a1e2b9054f0849eb2a41352c4403047c6cfb
revision_id: fdc093250bf3748518c946b321ef9da181aa5b9f
branch_name: refs/heads/master
visit_date: 2023-08-15T05:47:11.975728
revision_date: 2020-05-08T03:45:21
committer_date: 2020-05-08T03:45:21
github_id: 258689221
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: 2021-09-22T18:55:41
gha_created_at: 2020-04-25T04:46:33
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 252
extension:
content:
#!/home/ubuntu/py-anywhere/little-env/bin/python3
# -*- coding: utf-8 -*-
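# Pip-generated console-script wrapper for gunicorn: it strips any
# "-script.py"/".exe" suffix from argv[0], then hands off to gunicorn's
# WSGI application runner.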
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run())

authors: ["ubuntu@ip-172-31-21-243.ap-southeast-1.compute.internal"]
author_id: ubuntu@ip-172-31-21-243.ap-southeast-1.compute.internal

blob_id: 62e41c6d0fd1c22ce6617734e914d6f476063740
directory_id: 9ad7bc0be637b629300f4e8a9f316e9e2dc35341
path: /Other/trinket.py
content_id: e4b6027baa5bb707930a48108310eb3261e07db3
detected_licenses: []
license_type: no_license
repo_name: ZachGonsiorowski44/Cloud-9---1
snapshot_id: 0e92fcf7783c9e903727cc4dec97a169bd43065a
revision_id: ca950106722a79ec797662bb9c072c66eef1756e
branch_name: refs/heads/master
visit_date: 2020-04-02T07:53:59.970045
revision_date: 2018-10-29T20:28:44
committer_date: 2018-10-29T20:28:44
github_id: 154219953
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 28
extension: py
content:
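# Add two numbers and print their sum.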
y = 6
x = 63
z = x + y
print(z)

authors: ["zachary.gonsiorowski@student.rjuhsd.us"]
author_id: zachary.gonsiorowski@student.rjuhsd.us

blob_id: 50264f6b8d8c30236c7bda25d40874115f7a362c
directory_id: dcec7bc309c281f2d09d7d1e05270adfe1652586
path: /tests/override_config_table/test_override_config_table.py
content_id: 7bd55f1d0ac5879ffebdd697300a023230d1f9ac
detected_licenses: ["LicenseRef-scancode-generic-cla", "Apache-2.0"]
license_type: permissive
repo_name: keboliu/sonic-mgmt
snapshot_id: 908a43b1beaaff098e60ae8a292fa4b5dd100b0f
revision_id: 06efba15557e81f5d840c1bf5ee1ff17da8f4ada
branch_name: refs/heads/master
visit_date: 2022-12-08T12:10:17.233047
revision_date: 2022-11-30T10:17:31
committer_date: 2022-11-30T10:17:31
github_id: 123398333
star_events_count: 0
fork_events_count: 0
gha_license_id: NOASSERTION
gha_event_created_at: 2019-11-20T06:26:33
gha_created_at: 2018-03-01T07:24:47
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5978
extension: py
content:
import json
import logging

import pytest

from tests.common.helpers.assertions import pytest_assert
from tests.common.config_reload import config_reload
from tests.common.utilities import skip_release

# On-DUT locations of the golden config and the running config, plus the
# backup paths used to restore the original files after the test.
GOLDEN_CONFIG = "/etc/sonic/golden_config_db.json"
GOLDEN_CONFIG_BACKUP = "/etc/sonic/golden_config_db.json_before_override"
CONFIG_DB = "/etc/sonic/config_db.json"
CONFIG_DB_BACKUP = "/etc/sonic/config_db.json_before_override"

logger = logging.getLogger(__name__)

pytestmark = [
    pytest.mark.disable_loganalyzer,
]


@pytest.fixture(scope="module", autouse=True)
def check_image_version(duthost):
    """Skip this test if the SONiC image installed on the DUT is 202111 or older.

    Args:
        duthost: DUT host object.

    Returns:
        None.
    """
    skip_release(duthost, ["201811", "201911", "202012", "202106", "202111"])


def file_exists_on_dut(duthost, filename):
    return duthost.stat(path=filename).get('stat', {}).get('exists', False)


@pytest.fixture(scope="module")
def golden_config_exists_on_dut(duthost):
    return file_exists_on_dut(duthost, GOLDEN_CONFIG)


def backup_config(duthost, config, config_backup):
    logger.info("Backup {} to {} on {}".format(
        config, config_backup, duthost.hostname))
    duthost.shell("cp {} {}".format(config, config_backup))


def restore_config(duthost, config, config_backup):
    logger.info("Restore {} with {} on {}".format(
        config, config_backup, duthost.hostname))
    duthost.shell("mv {} {}".format(config_backup, config))


def get_running_config(duthost):
    return json.loads(duthost.shell("sonic-cfggen -d --print-data")['stdout'])
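

# Push the supplied golden-config JSON to the DUT, then reload from
# minigraph with override_config enabled so the golden config is applied
# on top of the minigraph-generated config.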
def reload_minigraph_with_golden_config(duthost, json_data):
duthost.copy(content=json.dumps(json_data, indent=4), dest=GOLDEN_CONFIG)
config_reload(duthost, config_source="minigraph", safe_reload=True, override_config=True)
@pytest.fixture(scope="module")
def setup_env(duthost, golden_config_exists_on_dut):
"""
Setup/teardown
Args:
duthost: DUT.
golden_config_exists_on_dut: Check if golden config exists on DUT.
"""
# Backup configDB
backup_config(duthost, CONFIG_DB, CONFIG_DB_BACKUP)
# Backup Golden Config if exists.
if golden_config_exists_on_dut:
backup_config(duthost, GOLDEN_CONFIG, GOLDEN_CONFIG_BACKUP)
# Reload test env with minigraph
config_reload(duthost, config_source="minigraph", safe_reload=True)
running_config = get_running_config(duthost)
yield running_config
# Restore configDB after test.
restore_config(duthost, CONFIG_DB, CONFIG_DB_BACKUP)
# Restore Golden Config after test, else cleanup test file.
if golden_config_exists_on_dut:
restore_config(duthost, GOLDEN_CONFIG, GOLDEN_CONFIG_BACKUP)
else:
duthost.file(path=GOLDEN_CONFIG, state='absent')
# Restore config before test
config_reload(duthost)
def load_minigraph_with_golden_empty_input(duthost):
"""Test Golden Config with empty input
"""
initial_config = get_running_config(duthost)
empty_input = {}
reload_minigraph_with_golden_config(duthost, empty_input)
current_config = get_running_config(duthost)
pytest_assert(initial_config == current_config,
"Running config differs.")
def load_minigraph_with_golden_partial_config(duthost):
"""Test Golden Config with partial config.
    Here we assume every config contains a SYSLOG_SERVER table.
"""
partial_config = {
"SYSLOG_SERVER": {
"10.0.0.100": {},
"10.0.0.200": {}
}
}
reload_minigraph_with_golden_config(duthost, partial_config)
current_config = get_running_config(duthost)
pytest_assert(
current_config['SYSLOG_SERVER'] == partial_config['SYSLOG_SERVER'],
"Partial config override fail: {}".format(current_config['SYSLOG_SERVER'])
)
def load_minigraph_with_golden_new_feature(duthost):
"""Test Golden Config with new feature
"""
new_feature_config = {
"NEW_FEATURE_TABLE": {
"entry": {
"field": "value",
"state": "disabled"
}
}
}
reload_minigraph_with_golden_config(duthost, new_feature_config)
current_config = get_running_config(duthost)
pytest_assert(
'NEW_FEATURE_TABLE' in current_config and
current_config['NEW_FEATURE_TABLE'] == new_feature_config['NEW_FEATURE_TABLE'],
"new feature config update fail: {}".format(current_config['NEW_FEATURE_TABLE'])
)
def load_minigraph_with_golden_full_config(duthost, full_config):
"""Test Golden Config fully override minigraph config
"""
    # Test whether the config has been overridden by full_config
reload_minigraph_with_golden_config(duthost, full_config)
current_config = get_running_config(duthost)
for table in full_config:
pytest_assert(
full_config[table] == current_config[table],
"full config override fail! {}".format(table)
)
def load_minigraph_with_golden_empty_table_removal(duthost):
"""Test Golden Config with empty table removal.
    Here we assume every config contains a SYSLOG_SERVER table.
"""
empty_table_removal = {
"SYSLOG_SERVER": {
}
}
reload_minigraph_with_golden_config(duthost, empty_table_removal)
current_config = get_running_config(duthost)
pytest_assert(
current_config.get('SYSLOG_SERVER', None) is None,
"Empty table removal fail: {}".format(current_config)
)
def test_load_minigraph_with_golden_config(duthost, setup_env):
"""Test Golden Config override during load minigraph
"""
load_minigraph_with_golden_empty_input(duthost)
load_minigraph_with_golden_partial_config(duthost)
load_minigraph_with_golden_new_feature(duthost)
full_config = setup_env
load_minigraph_with_golden_full_config(duthost, full_config)
load_minigraph_with_golden_empty_table_removal(duthost)
|
[
"noreply@github.com"
] |
keboliu.noreply@github.com
|
4dd6a1aae29f351c7bd4bedc5f43210d26444a7f
|
7abd69a0c91eaacd93bccafa16733a31f6fb2d2d
|
/exercises_4/task_2/task_2_b.py
|
fcc669a0b2f8e42551152d56338015b645042bc8
|
[] |
no_license
|
mrmattuschka/python-lecture-mobi-17
|
da337adaf94a04f76094af8199592c20a3efc423
|
c76d6e348dcbf82c409b3f3ee508b379f592d10c
|
refs/heads/master
| 2021-05-07T19:13:13.036279
| 2019-04-29T12:46:46
| 2019-04-29T12:46:46
| 108,855,046
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,203
|
py
|
# Task 2b: split the CpG content file into lower and upper quartiles
import sys
import numpy as np
cpg_content_file_path = sys.argv[1]
lower_quartile_file = sys.argv[2]
upper_quartile_file = sys.argv[3]
# Read the promoter-wise CpG content file
with open(cpg_content_file_path, mode='r') as cpg_file:
    cpg_data = cpg_file.readlines()
cpg_content = [float(line.split('\t')[3]) for line in cpg_data if not line[0] == '#']
# Use numpy's percentile function to get the respective percentiles
percentile_25 = np.percentile(cpg_content, 25)
percentile_75 = np.percentile(cpg_content, 75)
# Create lists for the upper and lower quartiles by picking each promoter whose CpG content lies below/above the respective percentile
cpg_lower_quartile = [line for line in cpg_data if float(line.split('\t')[3]) < percentile_25]
cpg_upper_quartile = [line for line in cpg_data if float(line.split('\t')[3]) > percentile_75]
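# Note: the strict comparisons above drop promoters whose CpG content equals the
# 25th or 75th percentile exactly, so the two quartile files together may hold
# slightly fewer than half of the input promoters.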
# Create new output files for upper and lower quartile
with open(lower_quartile_file, mode='w') as fobj:
for line in cpg_lower_quartile:
print(line, file=fobj, end='')
with open(upper_quartile_file, mode='w') as fobj:
for line in cpg_upper_quartile:
print(line, file=fobj, end='')
|
[
"alexander.mattausch@embl.de"
] |
alexander.mattausch@embl.de
|
319a2728d4cbe69dbe65c4f19e5ff5c89d8506b6
|
d7649ec1d70dd4f973caf1a2f4b067c6ff6db327
|
/LU_LUP_Simplex/LU_LUP_Simplex.py
|
f759676974cc8c29b50ebd5719958ee120ca5bcd
|
[] |
no_license
|
suprajkolluri/Data-Structures-Python
|
bbcad67dbed652caeae3b2a3044d9e264a0a0fc3
|
b2e6bce07d5593f4ded9aaedae85ee3a41dd4149
|
refs/heads/master
| 2020-07-15T09:07:17.320954
| 2016-08-21T07:00:42
| 2016-08-21T07:00:42
| 66,185,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,566
|
py
|
# -*- coding: utf-8 -*-
import sys
from mprint import printmat
NAME = '[Your Name Here]'
def lu(A):
    # LU decomposition without pivoting (CLRS LU-DECOMPOSITION); L is stored
    # below the diagonal of LU, U on and above it.
    n = len(A)
    LU = [[float(A[i][j]) for j in range(n)] for i in range(n)]
    for k in range(n):
        for i in range(k + 1, n):
            LU[i][k] /= LU[k][k]                 # multiplier, an entry of L
            for j in range(k + 1, n):
                LU[i][j] -= LU[i][k] * LU[k][j]  # Schur complement update
    return LU, 'solved'
def lup(A):
    # LUP decomposition with partial pivoting (CLRS LUP-DECOMPOSITION).
    n = len(A)
    LU = [[float(A[i][j]) for j in range(n)] for i in range(n)]
    pi = list(range(n))
    for k in range(n):
        k2 = max(range(k, n), key=lambda i: abs(LU[i][k]))  # pivot row
        pi[k], pi[k2] = pi[k2], pi[k]
        LU[k], LU[k2] = LU[k2], LU[k]
        for i in range(k + 1, n):
            LU[i][k] /= LU[k][k]
            for j in range(k + 1, n):
                LU[i][j] -= LU[i][k] * LU[k][j]
    return LU, pi, 'solved'
def lupsolve(LU, pi, b):
    # Forward substitution solves Ly = Pb, then back substitution solves Ux = y;
    # x doubles as y because back substitution only reads already-final entries.
    n = len(b)
    x = [float(b[pi[i]]) for i in range(n)]
    for i in range(n):
        x[i] -= sum(LU[i][j] * x[j] for j in range(i))
    for i in range(n - 1, -1, -1):
        x[i] = (x[i] - sum(LU[i][j] * x[j] for j in range(i + 1, n))) / LU[i][i]
    return x
def pivot(A, row, col):
    # Gauss-Jordan pivot: scale the pivot row so A[row][col] becomes 1, then
    # zero out every other entry in the pivot column.
    m = len(A)
    n = len(A[0])
    Ahat = [[0.0] * n for i in range(m)]
    Ahat[row] = [A[row][j] / float(A[row][col]) for j in range(n)]
    for i in range(m):
        if i != row:
            Ahat[i] = [A[i][j] - A[i][col] * Ahat[row][j] for j in range(n)]
    return Ahat
#
# Do not modify below this point
# This is a freebie -- I will make future semesters implement this....
#
def simplex(A):
# Assume that the input is already put into a table.
# Assume that the input is feasible already (no need to check)
m = len(A)
n = len(A[0])
x = [0] * (n - 1)
wiggle = [0] * m
while min(A[0][1:-1]) < 0:
# find entering variable
col = A[0].index(min(A[0][1:-1]))
min_wiggle = float('inf')
row = None
for i in range(1,m):
if A[i][col] > 0:
wiggle = A[i][-1]/float(A[i][col])
if wiggle < min_wiggle:
min_wiggle = wiggle
row = i
if row is None:
return A, x, 'unbounded'
printmat(A, pos=(row, col), row=row, col=col)
A = pivot(A, row, col)
printmat(A, pos=(row, col), row=row, col=col)
# Look for the basic variables and copy into x
for j in range(0, n-1):
colvec = [A[i][j] for i in range(m)] # copy out the column
# Check if colvec is a column of the identity matrix.
if colvec.count(0.0) == m-1 and colvec.count(1.0) == 1:
x[j] = A[colvec.index(1)][-1]
else:
x[j] = 0
return A, x, 'solved'
LU_MATRIX = [[4, -5, 6],
[8, -6, 7],
[12, -7, 12]]
LUP_MATRIX = [[2, 0, 2, 0.6],
[3, 3, 4, -2],
[5, 5, 4, 2],
[-1, -2, 3.4, -1]]
SIMPLEX_MATRIX = [[1, -3, -1, -2, 0, 0, 0, 0],
[0, 1, 1, 3, 1, 0, 0, 30],
[0, 2, 2, 5, 0, 1, 0, 24],
[0, 4, 1, 2, 0, 0, 1, 36]]
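# For reference: SIMPLEX_MATRIX encodes what appears to be the textbook (CLRS)
# example LP -- maximize 3*x1 + x2 + 2*x3 subject to the three constraint rows
# above -- whose optimum is z = 28 at (x1, x2, x3) = (8, 4, 0); this makes a
# handy sanity check for check_simplex().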
LUPSOLVE_P = [2, 0, 1]
LUPSOLVE_MATRIX = [[5.0, 6.0, 3.0],
[0.2, 0.8, -0.6],
[0.6, 0.5, 2.5]]
LUPSOLVE_B = [3.0, 7.0, 8.0]
def check_lu():
A = LU_MATRIX
print "==========================="
print "Submitted by ", NAME
print "LU:"
print "Input:"
printmat(A)
print "Steps:"
LU, result = lu(A)
print "---------------------------"
print "Output:"
printmat(LU)
print 'result = ', result
print "---------------------------"
def check_lup():
A = LUP_MATRIX
print "==========================="
print "Submitted by ", NAME
print "LUP:"
print "---------------------------"
print "Input:"
printmat(A)
print "---------------------------"
print "Steps:"
LUP, p, result = lup(A)
print "---------------------------"
print "Output:"
printmat(LUP, perm=p)
print "---------------------------"
print "result = ", result
print "---------------------------"
def check_lupsolve():
pi = LUPSOLVE_P
LU = LUPSOLVE_MATRIX
b = LUPSOLVE_B
print "==========================="
print "Submitted by ", NAME
print "LUP-SOLVE:"
print "---------------------------"
print "Input:"
printmat(LU, pi)
print "RHS:"
print b
print "---------------------------"
x = lupsolve(LU, pi, b)
print "x = [" + ', '.join(['%3.1f' % z for z in x]) + ']'
print "---------------------------"
def check_simplex():
A = SIMPLEX_MATRIX
print "==========================="
print "Submitted by ", NAME
print "SIMPLEX:"
print "Input:"
printmat(A)
print "Steps:"
SIMP, x, result = simplex(A)
print "---------------------------"
print "Output:"
printmat(SIMP)
print "result = ", result
print " z = ", x[0]
print " x = ", x[1:]
print "---------------------------"
def check_all():
check_lu()
check_lup()
check_simplex()
check_lupsolve()
if __name__ == '__main__':
USE_COLORS = True
check_all()
|
[
"supcena@gmail.com"
] |
supcena@gmail.com
|
afa93c05589e4b0fa9ea61510c995d3ebcd0af95
|
54dcff764a6d4cdbd75cbe1d7020c786e24e80ff
|
/backend/cart/migrations/0002_auto_20210722_1710.py
|
bfaf932ddc06a5ab76bac64e031ae82f31935a56
|
[
"MIT"
] |
permissive
|
RifkiEr24/La-Virtuele
|
426649210cf7fa151c6b8aa845b393a985185d2f
|
0c770a4101b951603e80a0784354e2835725da4d
|
refs/heads/main
| 2023-07-08T22:45:07.212089
| 2021-08-14T05:15:01
| 2021-08-14T05:15:01
| 341,459,330
| 1
| 1
|
MIT
| 2021-08-14T05:15:02
| 2021-02-23T07:00:08
|
Python
|
UTF-8
|
Python
| false
| false
| 785
|
py
|
# Generated by Django 3.1.7 on 2021-07-22 17:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('cart', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='productcart',
old_name='checked_out',
new_name='selected',
),
migrations.RemoveField(
model_name='cart',
name='products',
),
migrations.AddField(
model_name='productcart',
name='cart',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='product_cart', to='cart.cart'),
preserve_default=False,
),
]
|
[
"munawarhariz@gmail.com"
] |
munawarhariz@gmail.com
|
f16301f7289411ef2b100853fd451c4fdeb56d90
|
4a06144733285c614db2fff7744bd7d2b6a213d7
|
/ITP115_L8_Andrews_Dylan.py
|
c4556cbd3ac2f1e2b7b4a0a7f35d7f9391fd6071
|
[] |
no_license
|
dandrews19/CoinFlipperAverage
|
a11f592760882609a2bc0fd69678c92c0f21265d
|
0da49a5c0b7c81e2fdd42abc8bb85e7c3226f55c
|
refs/heads/main
| 2023-02-02T18:49:07.733569
| 2020-12-22T17:09:11
| 2020-12-22T17:09:11
| 323,687,055
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 813
|
py
|
# Dylan Andrews, dmandrew@usc.edu
# ITP 115, Fall 2020
# Lab 8
import random
# Flips a coin and returns "heads" or "tails" based on a random number
def coin():
num = random.randrange(0,2)
if num == 0:
return "heads"
elif num == 1:
return "tails"
# function counts number of flips it takes to get three heads in a row
def experiment():
counter = 0
heads = 0
while heads < 3:
flip = coin()
if flip == "heads":
counter += 1
heads += 1
else:
counter += 1
heads = 0
return counter
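# Sanity check: for a fair coin the expected number of flips until the first run
# of k consecutive heads is 2**(k + 1) - 2, so for k = 3 the average printed by
# main() should hover around 14.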
# main runs the experiment 10 times
def main():
    i = 0
    total = 0
    while i < 10:
        flips = experiment()
        total += flips
        i += 1
    print("The average for 3 heads in a row is:", str(total / 10))
main()
|
[
"noreply@github.com"
] |
dandrews19.noreply@github.com
|
de976350cb51fb36805bbbd5d8ff565050fe4c1d
|
c5f96ef33d31fb75c016f1477ff9482e4fcf9298
|
/elev_diff.py
|
22bc8949bcff1bd4e82df6e6e5e1362483734b5b
|
[
"MIT"
] |
permissive
|
Aidan-Bharath/code_and_stuffs
|
c2e6b86d89b934a165ebf29b0ad5d926b5dbd0e1
|
daa6e4056ebdb25860c3757adf51ba19fbd9da33
|
refs/heads/master
| 2021-04-22T13:28:06.801982
| 2014-05-27T19:45:42
| 2014-05-27T19:45:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,312
|
py
|
import numpy as np
import pandas as pd
from multiprocessing import Pool
from matplotlib import pyplot as plt
def load_panel(a):
a = pd.read_pickle(a)
return a
def time_index(a):
a = a.reindex(index=a.index.to_datetime())
return a
def submean(a):
b = a.mean()
a = a.sub(b)
return a
def resamp(a):
a = a.resample('1T')
return a
if __name__=="__main__":
'''
This calculates the differences between elevations from Pandas Series data
and plots the results.
'''
# Directory of the Elevation Series.
f0 = '/home/aidan/thesis/probe_data/panels/2013/fri_frames/june-july/BPb_el_f.01'
f1 = '/home/aidan/thesis/probe_data/panels/2013/fri_frames/june-july/BPb_el_f.0125'
f2 = '/home/aidan/thesis/probe_data/panels/2013/fri_frames/june-july/BPb_el_f.015'
f3 = '/home/aidan/thesis/probe_data/panels/2013/fri_frames/june-july/BPb_el_f.02'
f4 = '/home/aidan/thesis/probe_data/panels/2013/fri_frames/june-july/BPb_el_f.025'
f5 = '/home/aidan/thesis/probe_data/panels/2013/fri_frames/june-july/BPb_el_f.05'
fa = '/home/aidan/thesis/probe_data/panels/2013/fri_frames/june-july/BPb_el_adcp'
# Create a list of directories, needed for Pool().
f = [f0,f1,f2,f3,f4,f5,fa]
# Load Series
f = Pool().map(load_panel,f)
# Reindex the Series and convert date index to datetime objects. Not
# necessary if the index is already a datetime object. This is slow.
#f = Pool().map(time_index,f)
# Subtract the mean elevations from each dataset.
f = Pool().map(submean,f)
# Resample the raw data to mean data over a set time interval.
f = Pool().map(resamp,f)
# Rename data columns so they can be distinguished when joined together.
f0 = f[0].rename(columns={'FVCOM_el':'f0.01'})
f1 = f[1].rename(columns={'FVCOM_el':'f0.0125'})
f2 = f[2].rename(columns={'FVCOM_el':'f0.015'})
f3 = f[3].rename(columns={'FVCOM_el':'f0.02'})
f4 = f[4].rename(columns={'FVCOM_el':'f0.025'})
f5 = f[5].rename(columns={'FVCOM_el':'f0.05'})
f = [f0,f1,f2,f3,f4,f5,f[6]]
    print(f[6])
# Combine the Series into a Dataframe and then subtract the lowest friction
# value elevations from the others. The joining may not be necessary here
    # and the subtraction could be done directly from the previous step.
joined = pd.concat(f,axis=1)
j0 = np.abs(joined['f0.01'].sub(joined['f0.01']))
j1 = np.abs(joined['f0.0125'].sub(joined['f0.01']))
j2 = np.abs(joined['f0.015'].sub(joined['f0.01']))
j3 = np.abs(joined['f0.02'].sub(joined['f0.01']))
j4 = np.abs(joined['f0.025'].sub(joined['f0.01']))
j5 = np.abs(joined['f0.05'].sub(joined['f0.01']))
j = [j0,j1,j2,j3,j4,j5]
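    # j0 is identically zero by construction (f0.01 minus itself); it seems to be
    # kept so the baseline friction value still appears as a flat line in the plot.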
# Join the results from the subtraction into a dataframe. Joining the
# results back up makes plotting easier.
joined = pd.concat(j,axis=1)
# Name the columns for plot legend.
    joined = joined.rename(columns={0:'f0.01',1:'f0.0125',2:'f0.015',3:'f0.02',4:'f0.025',5:'f0.05'})
# Resample data to ten minute averages for plotting.
joined = joined.resample('10T')
joined = joined[j0.index[0]:j0.index[-1]]
# Plot the difference data.
plt.figure()
plt.rc('font',size='22')
joined.plot()
plt.ylabel('Elevation Difference (m)')
plt.show()
|
[
"aidanbharath@gmail.com"
] |
aidanbharath@gmail.com
|
4441818af298bbd24b2d5faa87a018fb8b876cc6
|
5aca09907f96cbae5ceb2f56d56ceffb789ecfa8
|
/nevermore/lightning/gradnorm.py
|
ad38b8060083c6d435940293e40f3d011ead89b8
|
[
"Apache-2.0"
] |
permissive
|
shadowy000/nevermore
|
84fb9d751f6ee82fb18ecb137e2e14b038a3b6b4
|
b46bc957dee283a02a19d1c6e6cb2d7b5eea86ca
|
refs/heads/main
| 2023-07-19T08:29:38.170685
| 2021-08-30T03:49:46
| 2021-08-30T03:49:46
| 397,844,969
| 0
| 0
|
Apache-2.0
| 2021-08-19T06:47:59
| 2021-08-19T06:47:59
| null |
UTF-8
|
Python
| false
| false
| 14,123
|
py
|
import os
import hydra
import pytorch_lightning as pl
import torch
import torchmetrics
import logging
from easydict import EasyDict as edict
from omegaconf import DictConfig
from pytorch_lightning.callbacks import ModelCheckpoint
from torch.nn import functional as F
from torch.utils.data import DataLoader
from nevermore.dataset import NUM_CLASSES, NYUv2Dateset
from nevermore.metric import Abs_CosineSimilarity
from nevermore.model import SegNet
from nevermore.layers import GradLoss
logger = logging.getLogger(__name__)
class DataModule(pl.LightningDataModule):
def __init__(
self,
data_root=None,
batch_size=24,
input_size=None,
output_size=None
):
super().__init__()
self.data_root = data_root
self.train_list_file = os.path.join(data_root, "train.txt")
self.val_list_file = os.path.join(data_root, "val.txt")
self.img_dir = os.path.join(data_root, "images")
self.mask_dir = os.path.join(data_root, "segmentation")
self.depth_dir = os.path.join(data_root, "depths")
self.normal_dir = os.path.join(data_root, "normals")
self.batch_size = batch_size
self.input_size = input_size
self.output_size = output_size
# self.transform = transforms.Compose([
# transforms.ToTensor(),
# transforms.Normalize((0.1307,), (0.3081,))
# ])
def prepare_data(self):
pass
def setup(self, stage=None):
# Assign train/val datasets for use in dataloaders
if stage == 'fit' or stage is None:
self.train_dataset = NYUv2Dateset(
list_file=self.train_list_file,
img_dir=os.path.join(self.img_dir, "train"),
mask_dir=os.path.join(self.mask_dir, "train"),
depth_dir=os.path.join(self.depth_dir, "train"),
normal_dir=os.path.join(self.normal_dir, "train"),
input_size=self.input_size,
output_size=self.output_size
)
self.val_dataset = NYUv2Dateset(
list_file=self.val_list_file,
img_dir=os.path.join(self.img_dir, "test"),
mask_dir=os.path.join(self.mask_dir, "test"),
depth_dir=os.path.join(self.depth_dir, "test"),
normal_dir=os.path.join(self.normal_dir, "test"),
input_size=self.input_size,
output_size=self.output_size
)
# Assign test dataset for use in dataloader(s)
if stage == 'test' or stage is None:
self.test_dataset = NYUv2Dateset(
list_file=self.val_list_file,
img_dir=os.path.join(self.img_dir, "test"),
mask_dir=os.path.join(self.mask_dir, "test"),
depth_dir=os.path.join(self.depth_dir, "test"),
normal_dir=os.path.join(self.normal_dir, "test"),
input_size=self.input_size,
output_size=self.output_size
)
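        # Note: the validation and test datasets are both built from
        # val_list_file and the "test" directories, so val and test metrics
        # will coincide.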
def train_dataloader(self):
return DataLoader(
self.train_dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=4
)
def val_dataloader(self):
return DataLoader(
self.val_dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=4
)
def test_dataloader(self):
return DataLoader(
self.test_dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=4
)
#########
# MODEL #
#########
class Model(pl.LightningModule):
def __init__(
self,
learning_rate,
task,
n_task,
alpha,
use_gradnorm
):
super().__init__()
self.save_hyperparameters()
self.segnet = SegNet(
input_channels=3,
seg_output_channels=NUM_CLASSES,
dep_output_channels=1,
nor_output_channels=3
)
allowed_task = ("segmentation", "depth", "normal", "multitask")
if task not in allowed_task:
raise ValueError(
f"Expected argument `tsak` to be one of "
f"{allowed_task} but got {task}"
)
self.task = task
self.n_task = n_task
self.alpha = alpha
self.gradloss = GradLoss(alpha=self.alpha,n_task=self.n_task)
self.miou = torchmetrics.IoU(
num_classes=NUM_CLASSES, ignore_index=0
)
self.rmse = torchmetrics.MeanSquaredError(squared=False)
self.cos = Abs_CosineSimilarity(reduction='abs')
self.use_gradnorm = use_gradnorm
def forward(self, x):
return self.segnet.forward(x)
    def on_train_start(self):
        if self.use_gradnorm:
            self.initial_losses = torch.tensor([1, 1, 1]).cuda()
def training_step(self, batch, batch_idx, optimizer_idx):
x = batch['image']
y_seg_hat, y_dep_hat, y_nor_hat, _ = self(x)
if self.task == 'multitask' or self.task == 'segmentation':
y_seg = batch['mask']
loss_seg = F.cross_entropy(y_seg_hat, y_seg)
if self.task == 'multitask' or self.task == 'depth':
y_dep = batch['depth']
y_dep_hat = y_dep_hat.squeeze()
loss_dep = F.mse_loss(y_dep_hat, y_dep)
if self.task == 'multitask' or self.task == 'normal':
y_nor = batch['normal'].flatten(start_dim=1)
y_nor_hat = y_nor_hat.flatten(
start_dim=1
)
loss_nor = torch.mean(F.cosine_similarity(y_nor_hat, y_nor))
if self.task == 'multitask':
if self.use_gradnorm and optimizer_idx == 1:
loss = self.gradloss.forward([loss_seg, loss_dep, loss_nor])
else:
loss = loss_seg + loss_dep + loss_nor
self.log('train_loss', loss)
self.log('train_loss_seg', loss_seg, prog_bar=True)
self.log('train_loss_dep', loss_dep, prog_bar=True)
self.log('train_loss_nor', loss_nor, prog_bar=True)
elif self.task == 'segmentation':
loss = loss_seg
self.log('train_loss', loss)
elif self.task == 'depth':
loss = loss_dep
self.log('train_loss', loss)
elif self.task == 'normal':
loss = loss_nor
self.log('train_loss', loss)
# gradnorm
if self.use_gradnorm:
# if self.segnet.weights.grad:
# self.segnet.weights.grad.data = self.segnet.weights.grad.data * 0.0
# get the gradient norms for each of the tasks
norms = []
W = self.segnet.decoder_convtr_01
gygw_seg = torch.autograd.grad(loss_seg, W.parameters(), retain_graph=True)
norms.append(torch.norm(torch.mul(self.gradloss.weights[0], gygw_seg[0])))
gygw_dep = torch.autograd.grad(loss_dep, W.parameters(), retain_graph=True)
norms.append(torch.norm(torch.mul(self.gradloss.weights[1], gygw_dep[0])))
gygw_nor = torch.autograd.grad(loss_nor, W.parameters(), retain_graph=True)
norms.append(torch.norm(torch.mul(self.gradloss.weights[2], gygw_nor[0])))
norms = torch.stack(norms)
# compute the inverse training rate r_i(t)
task_losses = torch.stack((loss_seg.clone().detach(),loss_dep.clone().detach(),loss_nor.clone().detach()))
loss_ratio = task_losses / self.initial_losses
inverse_train_rate = loss_ratio / torch.mean(loss_ratio)
# compute the mean norm \tilde{G}_w(t)
mean_norm = torch.mean(norms.clone().detach())
# compute the GradNorm loss
# this term has to remain constant
# constant_term = torch.tensor(mean_norm * (inverse_train_rate ** self.alpha), requires_grad=False)
constant_term = (mean_norm * (inverse_train_rate ** self.gradloss.alpha)).clone().detach().requires_grad_(False)
# this is the GradNorm loss itself
self.grad_norm_loss = torch.sum(torch.abs(norms - constant_term))
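            # This is the GradNorm objective from Chen et al. (2018):
            #   L_grad = sum_i | G_W^(i)(t) - mean_i(G_W(t)) * r_i(t)**alpha |
            # where G_W^(i) is the weighted gradient norm of task i w.r.t. the
            # shared layer W and r_i is the inverse training rate computed above.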
# compute the gradient for the weights
# self.weights_temp = torch.autograd.grad(grad_norm_loss, self.gradloss.weights)[0]
return loss
    def backward(self, loss, optimizer, optimizer_idx):
        if self.use_gradnorm:
            if optimizer_idx == 0:
                loss.backward()
            if optimizer_idx == 1:
                # `grad` is a multi-element tensor, so compare against None
                # instead of relying on truthiness; zero any stale gradient.
                if self.gradloss.weights.grad is not None:
                    self.gradloss.weights.grad.data = self.gradloss.weights.grad.data * 0.0
                self.gradloss.weights.grad = torch.autograd.grad(self.grad_norm_loss, self.gradloss.weights)[0]
                # print("grad:",self.gradloss.weights.grad)
        else:
            loss.backward()
# if self.use_gradnorm:
# self.weights.grad = self.weights_temp
# pass
def training_epoch_end(self, training_step_outputs):
print(self.trainer.lr_schedulers[0]['scheduler'].get_lr())
# print(self.trainer.lr_schedulers[1]['scheduler'].get_lr())
# print(self.gradloss.weights)
for out in training_step_outputs:
pass
def validation_step(self, batch, batch_idx):
x = batch['image']
y_seg_hat, y_dep_hat, y_nor_hat, _ = self(x)
if self.task == 'multitask' or self.task == 'segmentation':
y_seg = batch['mask']
loss_seg = F.cross_entropy(y_seg_hat, y_seg)
if self.task == 'multitask' or self.task == 'depth':
y_dep = batch['depth']
y_dep_hat = y_dep_hat.squeeze()
loss_dep = F.mse_loss(y_dep_hat, y_dep)
if self.task == 'multitask' or self.task == 'normal':
y_nor = batch['normal'].flatten(start_dim=1)
y_nor_hat = y_nor_hat.flatten(
start_dim=1
)
loss_nor = torch.mean(F.cosine_similarity(y_nor_hat, y_nor))
if self.task == 'multitask':
loss = loss_seg + loss_dep + loss_nor
self.log('val_loss', loss)
self.log('val_seg_iou_step', self.miou(y_seg_hat, y_seg))
self.log('val_dep_rmse_step', self.rmse(y_dep_hat, y_dep))
self.log('val_dep_cos_step', self.cos(y_nor_hat, y_nor))
elif self.task == 'segmentation':
loss = loss_seg
self.log('val_loss', loss)
self.log('val_seg_iou_step', self.miou(y_seg_hat, y_seg))
elif self.task == 'depth':
loss = loss_dep
self.log('val_loss', loss)
self.log('val_dep_rmse_step', self.rmse(y_dep_hat, y_dep))
elif self.task == 'normal':
loss = loss_nor
self.log('val_loss', loss)
self.log('val_dep_cos_step', self.cos(y_nor_hat, y_nor))
def validation_epoch_end(self, validation_step_outputs):
if self.task == 'segmentation' or self.task == 'multitask':
val_miou = self.miou.compute()
self.log('val_seg_iou', val_miou)
logger.info("val_seg_iou:", val_miou)
self.miou.reset()
if self.task == 'depth' or self.task == 'multitask':
val_rmse = self.rmse.compute()
self.log('val_dep_mse', val_rmse)
logger.info("val_dep_mse:", val_rmse)
self.rmse.reset()
if self.task == 'normal' or self.task == 'multitask':
val_cos = self.cos.compute()
self.log('val_nor_cos', val_cos)
logger.info("val_nor_cos:", val_cos)
self.cos.reset()
def test_step(self, batch, batch_idx):
x = batch['image']
y_seg_hat, y_dep_hat, y_nor_hat, _ = self(x)
pass
def configure_optimizers(self):
# optimizer = torch.optim.Adam(
# [
# {'params': self.segnet.parameters()},
# {'params': self.gradloss.parameters(), 'lr': 0.025}
# ]
# , lr=self.hparams.learning_rate
# )
optimizer_segnet = torch.optim.Adam(
self.segnet.parameters(), lr=2e-5
)
optimizer_gradloss = torch.optim.Adam(
self.gradloss.parameters(), lr=0.025
)
# lr_lambda = lambda epoch: 0.2 ** (
# epoch // 1
# ) if epoch > 1 else 1
# lr_schedule = torch.optim.lr_scheduler.LambdaLR(
# optimizer, lr_lambda, last_epoch=-1
# )
# lr_schedule = torch.optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.2)
lr_schedule_segnet = torch.optim.lr_scheduler.StepLR(optimizer_segnet, step_size=3, gamma=0.2)
lr_schedule_gradloss = torch.optim.lr_scheduler.StepLR(optimizer_gradloss, step_size=3, gamma=0.2)
optim_dict = ({'optimizer': optimizer_segnet, 'lr_scheduler': lr_schedule_segnet},
{'optimizer': optimizer_gradloss, 'lr_scheduler': lr_schedule_gradloss})
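        # Returning two optimizer dicts makes Lightning call training_step once
        # per optimizer: optimizer_idx 0 updates the SegNet parameters and
        # optimizer_idx 1 updates the GradNorm task weights (assuming the
        # pre-2.0 automatic multi-optimizer behaviour this code targets).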
# optim_dict = {'optimizer': optimizer, 'lr_scheduler': lr_schedule}
# if self.task == 'multitask':
# return optimizer
# else:
return optim_dict
def main():
pl.seed_everything(3462)
INPUT_SIZE = (320,320)
OUTPUT_SIZE = (320,320)
if os.path.exists('/running_package'):
# run in remote, not local
data_root = "/cluster_home/custom_data/NYU"
save_dir ="/job_data"
else:
data_root ="/data/dixiao.wei/NYU"
save_dir ="/data/NYU/output"
dm = DataModule(
data_root=data_root,
batch_size=24,
input_size=INPUT_SIZE,
output_size=OUTPUT_SIZE
)
model = Model(
learning_rate=2e-5,
task='multitask',
n_task=3,
alpha=1.5,
use_gradnorm=True
)
trainer = pl.Trainer(
max_epochs=1540,
gpus=[0],
check_val_every_n_epoch=10,
accelerator="ddp",
log_every_n_steps=5,
num_sanity_val_steps=0,
precision=16
)
trainer.fit(model, dm)
if __name__ == "__main__":
    main()
|
[
"dixiao.wei@horizon.ai"
] |
dixiao.wei@horizon.ai
|
b7a4d4b92f34336c0dd2f52ba49bf8a6aa6ae0ff
|
aa892311407cb82530593f81832655face18edc5
|
/src/eval.py
|
5fff7fd0145b311684b5c4d69f9c5efd33c75ff7
|
[] |
no_license
|
shakti365/ResNet
|
2006fa32db455dbce56e3574fd892740d699af73
|
19f7e74735c5a8b5479a653dfa0918c1e6eb09fc
|
refs/heads/master
| 2022-12-08T19:35:50.032584
| 2020-08-23T06:42:44
| 2020-08-23T06:42:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,944
|
py
|
import torch
import torchvision
import torchvision.transforms as transforms
from absl import app
from absl import flags
from absl import logging
from sklearn.metrics import classification_report
from resnet import Net
torch.manual_seed(0)
FLAGS = flags.FLAGS
flags.DEFINE_string("data_path", "../data", "Path to store dataset")
flags.DEFINE_boolean("debug", False, "Runs in debug mode")
flags.DEFINE_integer("batch_size", 256, "Batch size")
flags.DEFINE_string("model_path", "../model/model_2.pth", "model path")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
classes = ['plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def download_test_data(data_path, transform=None):
"""
Downloads the CIFAR10 dataset to data_path.
Doesn't download if it already exists.
"""
# Get the CIFAR 10 data
testset = torchvision.datasets.CIFAR10(root=data_path, train=False,
transform=transform, download=True)
logging.debug(testset)
testloader = torch.utils.data.DataLoader(testset,
batch_size=FLAGS.batch_size,
shuffle=False, num_workers=1)
return testloader
def get_transform():
"""
    Convert images into torch tensors.
"""
transform = transforms.Compose([
transforms.ToTensor()
])
return transform
def accuracy(true,pred):
acc = (true == pred.argmax(-1)).float().detach().cpu().numpy()
return float(100 * acc.sum() / float(len(acc)))
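# Illustrative example (not from the original source): with integer labels and
# raw logits,
#   true = torch.tensor([0, 1]); pred = torch.tensor([[0.9, 0.1], [0.2, 0.8]])
#   accuracy(true, pred)  # -> 100.0, since both argmax predictions are correct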
def main(argv):
if FLAGS.debug:
logging.set_verbosity(logging.DEBUG)
else:
logging.set_verbosity(logging.INFO)
# Transform and Load the dataset
transform = get_transform()
testloader = download_test_data(FLAGS.data_path, transform)
# Get the model
model = Net()
checkpoint = torch.load(FLAGS.model_path)
model.load_state_dict(checkpoint['model_state_dict'])
model.eval()
if torch.cuda.is_available():
model = model.to(device)
total_acc = []
all_targets = []
all_pred = []
for i, data in enumerate(testloader):
# Take the inputs and labels
inputs, labels = data
all_targets.extend(labels.detach().numpy())
if torch.cuda.is_available():
inputs = inputs.to(device)
labels = labels.to(device)
with torch.no_grad():
outputs = model(inputs)
all_pred.extend(outputs.argmax(-1).detach().cpu().numpy())
acc_batch = accuracy(labels, outputs)
total_acc.append(acc_batch)
logging.info(f"Batch Accuracy: {acc_batch}")
avg_acc = sum(total_acc) / float(len(total_acc))
logging.info(f"Average Accuracy: {avg_acc}")
logging.info(classification_report(all_targets, all_pred,target_names=classes))
if __name__ == "__main__":
app.run(main)
|
[
"apekshapriya@gmail.com"
] |
apekshapriya@gmail.com
|
aa724d48a5eb10ad16015f27edb10999ffd61ea6
|
4125bad7406f9f44a5a83101cee4b81142c8de73
|
/paypro/paypro/doctype/salary_structure/test_salary_structure.py
|
180adbce1a2b3653db5baa148175ddf44a649a0b
|
[
"MIT"
] |
permissive
|
lightningmaharaja/payPRO
|
1eb3e271864d3d4b4c2473b61f65aac5c1ad39fe
|
15389ce24bd3b5825c65f91ad2f85a9a29342e5f
|
refs/heads/master
| 2023-03-15T14:18:27.097526
| 2020-11-09T10:35:31
| 2020-11-09T10:35:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Teampro and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestSalaryStructure(unittest.TestCase):
pass
|
[
"subash13579@gmail.com"
] |
subash13579@gmail.com
|
6c628eb392d664905091177ae7f0bdeba3c0f7c5
|
88ce88b6bd1094b36d6cde6e6f0468505874944b
|
/auth_management/apps.py
|
1722a794da53f82f3660eed1c2d91254b91a9e3b
|
[] |
no_license
|
ncadet-dev/spotify_app
|
7577c88cca8e62399ee608b0741e97f9edeed820
|
d96abf6e89794146844aa61339c8b4fe82af4e47
|
refs/heads/main
| 2023-08-06T11:19:52.067745
| 2021-10-06T15:05:23
| 2021-10-06T15:05:23
| 413,426,433
| 0
| 0
| null | 2021-10-05T09:07:02
| 2021-10-04T13:10:06
|
Python
|
UTF-8
|
Python
| false
| false
| 161
|
py
|
from django.apps import AppConfig
class AuthManagementConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'auth_management'
|
[
"c.nicolas334@gmail.com"
] |
c.nicolas334@gmail.com
|
2c3aa8da6fc5378639a403f5268a7692b3f85ed1
|
d96bc27d8d8143caf67a580cf669424c00cc4c16
|
/snippets/for-loop-sample.py
|
4a211a23af47845b7045f627d8ec576dd4c6cb2a
|
[
"MIT"
] |
permissive
|
coshkun/6.00.1x-MITx-Course-Training-Lab-Notes
|
1896dec4281eb0363ef2559a13a40952077096aa
|
63e755dc81fd50a7b1372074a4a73e50021a233b
|
refs/heads/master
| 2021-01-09T05:51:08.556277
| 2017-03-14T18:18:47
| 2017-03-14T18:19:16
| 80,845,382
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 118
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 11 19:39:13 2017
@author: coskun
"""
for n in range(5):
print(n)
|
[
"cskncnr@gmail.com"
] |
cskncnr@gmail.com
|
482253149dc58e3f0050fd0a45769c27e33d6d64
|
8afb5afd38548c631f6f9536846039ef6cb297b9
|
/_REPO/MICROSOFT/computervision-recipes/utils_cv/tracking/references/fairmot/tracking_utils/log.py
|
e09c21e15029fef2f0eaa73fd3a0e9dc749ffd75
|
[
"BSD-3-Clause",
"LGPL-2.1-or-later",
"Apache-2.0",
"MIT"
] |
permissive
|
bgoonz/UsefulResourceRepo2.0
|
d87588ffd668bb498f7787b896cc7b20d83ce0ad
|
2cb4b45dd14a230aa0e800042e893f8dfb23beda
|
refs/heads/master
| 2023-03-17T01:22:05.254751
| 2022-08-11T03:18:22
| 2022-08-11T03:18:22
| 382,628,698
| 10
| 12
|
MIT
| 2022-10-10T14:13:54
| 2021-07-03T13:58:52
| null |
UTF-8
|
Python
| false
| false
| 518
|
py
|
import logging
def get_logger(name="root"):
formatter = logging.Formatter(
# fmt='%(asctime)s [%(levelname)s]: %(filename)s(%(funcName)s:%(lineno)s) >> %(message)s')
fmt="%(asctime)s [%(levelname)s]: %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger = logging.getLogger(name)
logger.setLevel(logging.WARNING) # EDITED
logger.addHandler(handler)
return logger
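# Caveat: calling get_logger() again with the same name attaches a second
# StreamHandler to the same logger object, so every record would be emitted twice.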
logger = get_logger("root")
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
ff5a41f53fea89f20b9ec01b5c556b2d0454c9b8
|
244e254aa1ede0337f5d5796781648da6c7dbc78
|
/apps/core/forms.py
|
6be261051d524db52168ed07a5a08f047d2444e6
|
[] |
no_license
|
huyhoang1996vn/custom_oscar
|
fb4968fb7c68a4651ddd55ea1fdf4962a73f4a0a
|
2f4efc3923428bcfddbe9aff6743f865e5eec125
|
refs/heads/master
| 2021-09-25T00:01:50.653334
| 2018-10-16T02:26:29
| 2018-10-16T02:26:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 179
|
py
|
from django.forms import ModelForm
from .models import Payment_Plan
class PlanForm(ModelForm):
class Meta:
model = Payment_Plan
fields = ['name', 'description', 'price']
|
[
"huyhoang1996ha@gmail.com"
] |
huyhoang1996ha@gmail.com
|
ab24475ba292de13d6bd961ecf71301c9347a353
|
6e4ea04d30fec41f96736c9df34712c9407c7c08
|
/src/train.py
|
59fb208bf67b6fe057b9a90d1ff24cb42e26f9e7
|
[] |
no_license
|
sohj94/dl_study
|
b4e4ee44437487943fc6b83f05ec7f8cbd669091
|
46be7c4424ae0c79f92cc8375be7f39645ba34c8
|
refs/heads/master
| 2023-05-30T09:42:29.577469
| 2021-05-12T13:55:52
| 2021-05-12T13:55:52
| 366,718,444
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,236
|
py
|
import os, sys
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import torch
import argparse
from torch.utils.data import DataLoader
from torch import nn, optim
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import pandas as pd
from dataset_builder import load_data_set
from trainer import Trainer
# import model
from model.hw1_model import hw1_model
parser = argparse.ArgumentParser()
parser.add_argument('--data', dest='data', default="cifar-10")
parser.add_argument('--result_dir', dest='result_dir', default="../data/result/hw1_result_cifar-10.csv")
parser.add_argument('--model_dir', dest='model_dir', default="../data/temp/")
parser.add_argument('--epochs', dest='epochs', type=int, default=20)
parser.add_argument('--learning_rate', dest='learning_rate', type=float, default=0.001)
parser.add_argument('--wd', dest='wd', type=float, default=1e-5)
parser.add_argument('--batch_size', dest='batch_size', type=int, default=64)
parser.add_argument('--train', dest='train', action='store_false', default=True)
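# Note: because '--train' uses action='store_false' with default=True, passing
# --train on the command line actually disables training.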
parser.add_argument('--continue_train', dest='continue_train', action='store_true', default=False)
parser.add_argument('--load_epoch', dest='load_epoch', type=int, default=29)
args = parser.parse_args()
# Initial torch setup
use_cuda = torch.cuda.is_available()
torch.manual_seed(1)
device = torch.device("cuda:0" if use_cuda else "cpu")
kwargs = {'num_workers': 0, 'pin_memory': True} if use_cuda else {}
print("set vars and device done")
# Create the model directory if it does not exist
if not os.path.isdir(args.model_dir):
    os.makedirs(args.model_dir)
# writer = SummaryWriter('runs/alexnet')
# Define the Dataset and DataLoader objects
train_dataset, test_dataset = load_data_set(args.data)
train_data = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)
test_data = DataLoader(test_dataset, batch_size=1, shuffle=False, **kwargs)
accuracies = []
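# Grid search over MLP shapes: widths 10..150 in steps of 10 (15 values) and
# depths 3..15 (13 values), so `accuracies` ends up as a 15 x 13 matrix.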
for width in range(10,151,10) :
tmp_accuracy = []
for depth in range(3,16) :
model = hw1_model(input_size = torch.numel(train_dataset[0][0]), width = width, depth = depth)
model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.wd)
# set trainer
trainer = Trainer(model, criterion, optimizer, args)
#train
if args.train:
            if args.continue_train:
                # last_epoch = int(os.listdir(args.model_dir)[-1].split('epoch_')[1][:3])
                last_epoch = 30
                model.load_state_dict(torch.load(args.model_dir + args.data + "_width_{0:03}_depth_{1:03}.pth".format(width, depth)))
                # Resume training from the epoch after the checkpoint
                trainer.fit(train_data, last_epoch+1)
            else:
                trainer.fit(train_data)
else:
model.load_state_dict(torch.load(args.model_dir + args.data + "_width_{0:03}_depth_{1:03}.pth".format(width, depth)))
torch.save(model.state_dict(), args.model_dir + args.data + "_width_{0:03}_depth_{1:03}.pth".format(width, depth))
accuracy = trainer.test(test_data)
print("accuracy of model with width {} depth {}: {}".format(width, depth, accuracy))
tmp_accuracy.append(accuracy)
accuracies.append(tmp_accuracy)
accuracies = np.array(accuracies)
print(accuracies)
result = pd.DataFrame(accuracies)
result.to_csv(args.result_dir, index = False)
|
[
"sohj94@gmail.com"
] |
sohj94@gmail.com
|
149aefe6e9472c86db11aa0650275067f15c22b2
|
d7d26c42cd541417edcd7b1992027286ecef7f04
|
/application/dataworld/collegeentranceexam/datacleaner/app_dataset_construction.py
|
e7501a699e4267ca7e71e6a73bd185d640e3d1c3
|
[] |
no_license
|
plutoese/pluto_archive
|
bfba8df48ee5639a2666b33432004519b93ecbf7
|
e6ea64aaf867fd0433714293eb65a18a28d3136d
|
refs/heads/master
| 2021-10-22T14:46:20.540770
| 2019-03-11T12:31:08
| 2019-03-11T12:31:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 29,844
|
py
|
# coding: utf-8
import re
import pysal
from pymongo import ASCENDING
import pandas as pd
from lib.base.database.class_mongodb import MongoDB, MonCollection
from application.dataworld.admindivision.class_admindivision import AdminDivision
# 1. Database connections
mongo = MongoDB(conn_str='localhost:27017')
college_info_con = MonCollection(mongo, database='webdata', collection_name='college_info').collection
entrance_score_con = MonCollection(mongo, database='webdata', collection_name='gaokao_entrancescore').collection
# 2. Processing-step switches
# a. Export the raw entrance-exam score data for each year
IS_EXPORT_RAW_EXAM_SCORE = False
# b. Export the raw college information data
IS_EXPORT_RAW_COLLEGE_INFO = False
# c. Merge the 2011-2013 cross sections into panel data
IS_MERGE_INTO_PANEL = False
# d. Merge in the college information data
IS_MERGE_COLLEGE_INFO = False
# e. Merge in the university ranking information
IS_MERGE_COLLEGE_RATE = False
# f. Merge in the province-level economic information
IS_MERGE_PROVINCE_PERGDP = False
# g. Merge in the prefecture-level city information (TEMP1/TEMP2 prepare city GDP data and city codes)
TEMP1 = False
TEMP2 = False
IS_MERGE_CITY_STAT = False
# h. Merge in the university founding years
IS_MERGE_START_YEAR = False
# i. Add dummy variables for local and nearby universities
IS_ADD_LOCAL_VAR = False
IS_ADD_NEARBY_VAR = False
# j. Add the local real per-capita GDP information
IS_ADD_LOCAL_PERGDP = True
if IS_EXPORT_RAW_EXAM_SCORE:
for year in range(2010, 2018):
found = entrance_score_con.find({'年份':year, 'type':'文科', "录取批次" : "第一批"},
sort=[('regioncode',ASCENDING),('university',ASCENDING)])
raw_dataframe = pd.DataFrame(list(found))
raw_dataframe.to_excel(r'E:\cyberspace\worklot\college\dataset\raw\{}年高考文科第一批录取分数横截面数据.xlsx'.format(str(year)))
found = entrance_score_con.find({'年份': year, 'type': '文科', "录取批次": "第二批"},
sort=[('regioncode', ASCENDING), ('university', ASCENDING)])
raw_dataframe = pd.DataFrame(list(found))
raw_dataframe.to_excel(r'E:\cyberspace\worklot\college\dataset\raw\{}年高考文科第二批录取分数横截面数据.xlsx'.format(str(year)))
found = entrance_score_con.find({'年份': year, 'type': '理科', "录取批次" : "第一批"},
sort=[('regioncode', ASCENDING), ('university', ASCENDING)])
raw_dataframe = pd.DataFrame(list(found))
raw_dataframe.to_excel(r'E:\cyberspace\worklot\college\dataset\raw\{}年高考理科第一批录取分数横截面数据.xlsx'.format(str(year)))
found = entrance_score_con.find({'年份': year, 'type': '理科', "录取批次": "第二批"},
sort=[('regioncode', ASCENDING), ('university', ASCENDING)])
raw_dataframe = pd.DataFrame(list(found))
raw_dataframe.to_excel(r'E:\cyberspace\worklot\college\dataset\raw\{}年高考理科第二批录取分数横截面数据.xlsx'.format(str(year)))
if IS_EXPORT_RAW_COLLEGE_INFO:
found = college_info_con.find(sort=[('高校所在地行政代码',ASCENDING)])
raw_dataframe = pd.DataFrame(list(found))
raw_dataframe.to_excel(r'E:\cyberspace\worklot\college\dataset\raw\高校信息数据.xlsx')
if IS_MERGE_INTO_PANEL:
    # 2011-2013 science-track first-batch admission score panel data
exam_score_science_first_2011 = pd.read_excel(r'E:\cyberspace\worklot\college\dataset\process\2011年高考理科第一批录取分数横截面数据.xlsx')
exam_score_science_first_2012 = pd.read_excel(r'E:\cyberspace\worklot\college\dataset\process\2012年高考理科第一批录取分数横截面数据.xlsx')
exam_score_science_first_2013 = pd.read_excel(r'E:\cyberspace\worklot\college\dataset\process\2013年高考理科第一批录取分数横截面数据.xlsx')
pdataframe_science_first = pd.concat([exam_score_science_first_2011,
exam_score_science_first_2012,
exam_score_science_first_2013])
pdataframe_science_first.to_excel(r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第一批录取分数面板数据.xlsx')
    # 2011-2013 science-track second-batch admission score panel data
exam_score_science_second_2011 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011年高考理科第二批录取分数横截面数据.xlsx')
exam_score_science_second_2012 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2012年高考理科第二批录取分数横截面数据.xlsx')
exam_score_science_second_2013 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2013年高考理科第二批录取分数横截面数据.xlsx')
pdataframe_science_second = pd.concat([exam_score_science_second_2011,
exam_score_science_second_2012,
exam_score_science_second_2013])
pdataframe_science_second.to_excel(r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第二批录取分数面板数据.xlsx')
    # 2011-2013 arts-track first-batch admission score panel data
exam_score_art_first_2011 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011年高考文科第一批录取分数横截面数据.xlsx')
exam_score_art_first_2012 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2012年高考文科第一批录取分数横截面数据.xlsx')
exam_score_art_first_2013 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2013年高考文科第一批录取分数横截面数据.xlsx')
pdataframe_art_first = pd.concat([exam_score_art_first_2011,
exam_score_art_first_2012,
exam_score_art_first_2013])
pdataframe_art_first.to_excel(r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第一批录取分数面板数据.xlsx')
    # 2011-2013 arts-track second-batch admission score panel data
exam_score_art_second_2011 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011年高考文科第二批录取分数横截面数据.xlsx')
exam_score_art_second_2012 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2012年高考文科第二批录取分数横截面数据.xlsx')
exam_score_art_second_2013 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2013年高考文科第二批录取分数横截面数据.xlsx')
pdataframe_art_second = pd.concat([exam_score_art_second_2011,
exam_score_art_second_2012,
exam_score_art_second_2013])
pdataframe_art_second.to_excel(r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第二批录取分数面板数据.xlsx')
if IS_MERGE_COLLEGE_INFO:
university_info = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\高校信息第一次处理数据.xlsx')
pdataframe_science_first = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第一批录取分数面板数据.xlsx')
pdataframe_science_first_merged_info = pd.merge(pdataframe_science_first, university_info, how='left', on='university')
pdataframe_science_first_merged_info.to_excel(r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第一批录取分数面板数据_添加大学信息.xlsx')
pdataframe_science_second = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第二批录取分数面板数据.xlsx')
pdataframe_science_second_merged_info = pd.merge(pdataframe_science_second, university_info, how='left',
on='university')
pdataframe_science_second_merged_info.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第二批录取分数面板数据_添加大学信息.xlsx')
pdataframe_art_first = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第一批录取分数面板数据.xlsx')
pdataframe_art_first_merged_info = pd.merge(pdataframe_art_first, university_info, how='left',
on='university')
pdataframe_art_first_merged_info.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第一批录取分数面板数据_添加大学信息.xlsx')
pdataframe_art_second = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第二批录取分数面板数据.xlsx')
pdataframe_art_second_merged_info = pd.merge(pdataframe_art_second, university_info, how='left',
on='university')
pdataframe_art_second_merged_info.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第二批录取分数面板数据_添加大学信息.xlsx')
if IS_MERGE_COLLEGE_RATE:
university_rate_2011 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\raw\2011年校友会大学排名_v2.xlsx')
university_rate_2012 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\raw\2012年校友会大学排名_v2.xlsx')
university_rate_2013 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\raw\2013年校友会大学排名_v2.xlsx')
university_rate = pd.concat([university_rate_2011,
university_rate_2012,
university_rate_2013])
pdataframe_science_first_v1 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第一批录取分数面板数据_添加大学信息.xlsx')
pdataframe_science_first_v1_merged_rate = pd.merge(pdataframe_science_first_v1, university_rate, how='left',
on=['university','年份'])
pdataframe_science_first_v1_merged_rate.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第一批录取分数面板数据_v2.xlsx')
pdataframe_science_second_v1 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第二批录取分数面板数据_添加大学信息.xlsx')
pdataframe_science_second_v1_merged_rate = pd.merge(pdataframe_science_second_v1, university_rate, how='left',
on=['university', '年份'])
pdataframe_science_second_v1_merged_rate.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第二批录取分数面板数据_v2.xlsx')
pdataframe_art_first_v1 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第一批录取分数面板数据_添加大学信息.xlsx')
pdataframe_art_first_v1_merged_rate = pd.merge(pdataframe_art_first_v1, university_rate, how='left',
on=['university', '年份'])
pdataframe_art_first_v1_merged_rate.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第一批录取分数面板数据_v2.xlsx')
pdataframe_art_second_v1 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第二批录取分数面板数据_添加大学信息.xlsx')
pdataframe_art_second_v1_merged_rate = pd.merge(pdataframe_art_second_v1, university_rate, how='left',
on=['university', '年份'])
pdataframe_art_second_v1_merged_rate.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第二批录取分数面板数据_v2.xlsx')
if IS_MERGE_PROVINCE_PERGDP:
province_real_perGDP = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\raw\province_real_perGDP.xlsx')
pdataframe_science_first_v2 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第一批录取分数面板数据_v2.xlsx')
pdataframe_science_first_v2_merged_rate = pd.merge(pdataframe_science_first_v2, province_real_perGDP, how='left',
on=['高校所在地行政代码', '年份'])
pdataframe_science_first_v2_merged_rate.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第一批录取分数面板数据_v3.xlsx')
pdataframe_science_second_v2 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第二批录取分数面板数据_v2.xlsx')
pdataframe_science_second_v2_merged_rate = pd.merge(pdataframe_science_second_v2, province_real_perGDP, how='left',
on=['高校所在地行政代码', '年份'])
pdataframe_science_second_v2_merged_rate.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第二批录取分数面板数据_v3.xlsx')
pdataframe_art_first_v2 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第一批录取分数面板数据_v2.xlsx')
pdataframe_art_first_v2_merged_rate = pd.merge(pdataframe_art_first_v2, province_real_perGDP, how='left',
on=['高校所在地行政代码', '年份'])
pdataframe_art_first_v2_merged_rate.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第一批录取分数面板数据_v3.xlsx')
pdataframe_art_second_v2 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第二批录取分数面板数据_v2.xlsx')
pdataframe_art_second_v2_merged_rate = pd.merge(pdataframe_art_second_v2, province_real_perGDP, how='left',
on=['高校所在地行政代码', '年份'])
pdataframe_art_second_v2_merged_rate.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第二批录取分数面板数据_v3.xlsx')
if TEMP1:
city_raw_stat = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\raw\中国城市统计年鉴GDP数据.xlsx')
city_raw_stat['人均GDP2011'] = city_raw_stat['地区生产总值2011'].div(city_raw_stat['年末总人口2011'])
city_raw_stat['GDP2012'] = city_raw_stat['地区生产总值2011'].mul(1+city_raw_stat['地区生产总值增长率2012']/100)
city_raw_stat['人均GDP2012'] = city_raw_stat['GDP2012'].div(city_raw_stat['年末总人口2012'])
city_raw_stat['GDP2013'] = city_raw_stat['GDP2012'].mul(1 + city_raw_stat['地区生产总值增长率2013'] / 100)
city_raw_stat['人均GDP2013'] = city_raw_stat['GDP2013'].div(city_raw_stat['年末总人口2013'])
city_raw_stat.to_excel(
r'E:\cyberspace\worklot\college\dataset\raw\中国城市统计年鉴真实GDP数据.xlsx')
if TEMP2:
city_info = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\raw\colleges_with_city.xlsx')
adivision = AdminDivision(year='2012')
cities = list(city_info['city'])
city_code = []
for city in cities:
result = adivision[city]
city_code.append(result['acode'].values[0])
city_info['city_code'] = city_code
city_info.to_excel(
r'E:\cyberspace\worklot\college\dataset\raw\大学所在的地级城市.xlsx')
if IS_MERGE_CITY_STAT:
city_info = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\raw\大学所在的地级城市.xlsx')
city_stat = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\raw\中国城市统计数据v1.xlsx')
pdataframe_science_first_v3 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第一批录取分数面板数据_v3.xlsx')
pdataframe_science_first_v3_merged_rate = pd.merge(pdataframe_science_first_v3, city_info, how='left',
on='university')
pdataframe_science_first_v4_merged_rate = pd.merge(pdataframe_science_first_v3_merged_rate, city_stat, how='left',
on=['city_code','年份'])
pdataframe_science_first_v3_merged_rate.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第一批录取分数面板数据_v4.xlsx')
pdataframe_science_first_v4_merged_rate.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第一批录取分数面板数据_v5.xlsx')
pdataframe_science_second_v3 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第二批录取分数面板数据_v3.xlsx')
pdataframe_science_second_v3_merged_rate = pd.merge(pdataframe_science_second_v3, city_info, how='left',
on='university')
pdataframe_science_second_v4_merged_rate = pd.merge(pdataframe_science_second_v3_merged_rate, city_stat, how='left',
on=['city_code', '年份'])
pdataframe_science_second_v3_merged_rate.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第二批录取分数面板数据_v4.xlsx')
pdataframe_science_second_v4_merged_rate.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第二批录取分数面板数据_v5.xlsx')
pdataframe_art_first_v3 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第一批录取分数面板数据_v3.xlsx')
pdataframe_art_first_v3_merged_rate = pd.merge(pdataframe_art_first_v3, city_info, how='left',
on='university')
pdataframe_art_first_v4_merged_rate = pd.merge(pdataframe_art_first_v3_merged_rate, city_stat, how='left',
on=['city_code', '年份'])
pdataframe_art_first_v3_merged_rate.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第一批录取分数面板数据_v4.xlsx')
pdataframe_art_first_v4_merged_rate.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第一批录取分数面板数据_v5.xlsx')
pdataframe_art_second_v3 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第二批录取分数面板数据_v3.xlsx')
pdataframe_art_second_v3_merged_rate = pd.merge(pdataframe_art_second_v3, city_info, how='left',
on='university')
pdataframe_art_second_v4_merged_rate = pd.merge(pdataframe_art_second_v3_merged_rate, city_stat, how='left',
on=['city_code', '年份'])
pdataframe_art_second_v3_merged_rate.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第二批录取分数面板数据_v4.xlsx')
pdataframe_art_second_v4_merged_rate.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第二批录取分数面板数据_v5.xlsx')
if IS_MERGE_START_YEAR:
college_start_year = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\raw\colleges_start_date.xlsx')
pdataframe_science_first_v5 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第一批录取分数面板数据_v5.xlsx')
pdataframe_science_first_v5_merged_start_year = pd.merge(pdataframe_science_first_v5, college_start_year,
how='left',
on='university')
pdataframe_science_first_v5_merged_start_year.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第一批录取分数面板数据_v6.xlsx')
pdataframe_science_second_v5 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第二批录取分数面板数据_v5.xlsx')
pdataframe_science_second_v5_merged_start_year = pd.merge(pdataframe_science_second_v5, college_start_year,
how='left',
on='university')
pdataframe_science_second_v5_merged_start_year.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第二批录取分数面板数据_v6.xlsx')
pdataframe_art_first_v5 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第一批录取分数面板数据_v5.xlsx')
pdataframe_art_first_v5_merged_start_year = pd.merge(pdataframe_art_first_v5, college_start_year,
how='left',
on='university')
pdataframe_art_first_v5_merged_start_year.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第一批录取分数面板数据_v6.xlsx')
pdataframe_art_second_v5 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第二批录取分数面板数据_v5.xlsx')
pdataframe_art_second_v5_merged_start_year = pd.merge(pdataframe_art_second_v5, college_start_year,
how='left',
on='university')
pdataframe_art_second_v5_merged_start_year.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第二批录取分数面板数据_v6.xlsx')
if IS_ADD_LOCAL_VAR:
pdataframe_science_first_v6 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第一批录取分数面板数据_v6.xlsx')
pdataframe_science_first_v6['local'] = 0
pdataframe_science_first_v6.loc[pdataframe_science_first_v6['regioncode'].eq(pdataframe_science_first_v6['高校所在地行政代码']), 'local'] = 1
pdataframe_science_first_v6.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第一批录取分数面板数据_v7.xlsx')
pdataframe_science_second_v6 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第二批录取分数面板数据_v6.xlsx')
pdataframe_science_second_v6['local'] = 0
pdataframe_science_second_v6.loc[
pdataframe_science_second_v6['regioncode'].eq(pdataframe_science_second_v6['高校所在地行政代码']), 'local'] = 1
pdataframe_science_second_v6.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第二批录取分数面板数据_v7.xlsx')
pdataframe_art_first_v6 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第一批录取分数面板数据_v6.xlsx')
pdataframe_art_first_v6['local'] = 0
pdataframe_art_first_v6.loc[
pdataframe_art_first_v6['regioncode'].eq(pdataframe_art_first_v6['高校所在地行政代码']), 'local'] = 1
pdataframe_art_first_v6.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第一批录取分数面板数据_v7.xlsx')
pdataframe_art_second_v6 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第二批录取分数面板数据_v6.xlsx')
pdataframe_art_second_v6['local'] = 0
pdataframe_art_second_v6.loc[
pdataframe_art_second_v6['regioncode'].eq(pdataframe_art_second_v6['高校所在地行政代码']), 'local'] = 1
pdataframe_art_second_v6.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第二批录取分数面板数据_v7.xlsx')
if IS_ADD_NEARBY_VAR:
stata_txt = pysal.open(r'E:\cyberspace\worklot\college\dataset\raw\province2004W.txt', 'r',
'stata_text')
w = stata_txt.read()
stata_txt.close()
neighbors = dict()
for key in w.neighbors:
neighbors[key] = [item for item in w.neighbors[key]]
neighbors[460000] = [440000]
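    # 460000 is Hainan, an island province with no land neighbours in the
    # contiguity matrix, so Guangdong (440000) is assigned as its neighbour by hand.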
pdataframe_science_first_v7 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第一批录取分数面板数据_v7.xlsx')
pdataframe_science_first_v7['nearby'] = 0
for ind in pdataframe_science_first_v7.index:
exam_region = pdataframe_science_first_v7.loc[ind,'regioncode']
college_region = pdataframe_science_first_v7.loc[ind,'高校所在地行政代码']
if college_region in neighbors[exam_region]:
pdataframe_science_first_v7.loc[ind, 'nearby'] = 1
pdataframe_science_first_v7.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第一批录取分数面板数据_v8.xlsx')
pdataframe_science_second_v7 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第二批录取分数面板数据_v7.xlsx')
pdataframe_science_second_v7['nearby'] = 0
for ind in pdataframe_science_second_v7.index:
exam_region = pdataframe_science_second_v7.loc[ind, 'regioncode']
college_region = pdataframe_science_second_v7.loc[ind, '高校所在地行政代码']
if college_region in neighbors[exam_region]:
pdataframe_science_second_v7.loc[ind, 'nearby'] = 1
pdataframe_science_second_v7.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第二批录取分数面板数据_v8.xlsx')
pdataframe_art_first_v7 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第一批录取分数面板数据_v7.xlsx')
pdataframe_art_first_v7['nearby'] = 0
for ind in pdataframe_art_first_v7.index:
exam_region = pdataframe_art_first_v7.loc[ind, 'regioncode']
college_region = pdataframe_art_first_v7.loc[ind, '高校所在地行政代码']
if college_region in neighbors[exam_region]:
pdataframe_art_first_v7.loc[ind, 'nearby'] = 1
pdataframe_art_first_v7.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第一批录取分数面板数据_v8.xlsx')
pdataframe_art_second_v7 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第二批录取分数面板数据_v7.xlsx')
pdataframe_art_second_v7['nearby'] = 0
for ind in pdataframe_art_second_v7.index:
exam_region = pdataframe_art_second_v7.loc[ind, 'regioncode']
college_region = pdataframe_art_second_v7.loc[ind, '高校所在地行政代码']
if college_region in neighbors[exam_region]:
pdataframe_art_second_v7.loc[ind, 'nearby'] = 1
pdataframe_art_second_v7.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第二批录取分数面板数据_v8.xlsx')
if IS_ADD_LOCAL_PERGDP:
local_province_real_perGDP = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\raw\province_real_perGDP2.xlsx')
pdataframe_science_first_v8 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第一批录取分数面板数据_v8.xlsx')
pdataframe_science_first_v8_add_local_PERGDP = pd.merge(pdataframe_science_first_v8, local_province_real_perGDP, how='left',
on=['regioncode', '年份'])
pdataframe_science_first_v8_add_local_PERGDP.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第一批录取分数面板数据_v9.xlsx')
pdataframe_science_second_v8 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第二批录取分数面板数据_v8.xlsx')
pdataframe_science_second_v8_add_local_PERGDP = pd.merge(pdataframe_science_second_v8, local_province_real_perGDP,
how='left',
on=['regioncode', '年份'])
pdataframe_science_second_v8_add_local_PERGDP.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考理科第二批录取分数面板数据_v9.xlsx')
pdataframe_art_first_v8 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第一批录取分数面板数据_v8.xlsx')
pdataframe_art_first_v8_add_local_PERGDP = pd.merge(pdataframe_art_first_v8, local_province_real_perGDP,
how='left',
on=['regioncode', '年份'])
pdataframe_art_first_v8_add_local_PERGDP.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第一批录取分数面板数据_v9.xlsx')
pdataframe_art_second_v8 = pd.read_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第二批录取分数面板数据_v8.xlsx')
pdataframe_art_second_v8_add_local_PERGDP = pd.merge(pdataframe_art_second_v8, local_province_real_perGDP,
how='left',
on=['regioncode', '年份'])
pdataframe_art_second_v8_add_local_PERGDP.to_excel(
r'E:\cyberspace\worklot\college\dataset\process\2011-2013高考文科第二批录取分数面板数据_v9.xlsx')
|
[
"glen.zhang7@gmail.com"
] |
glen.zhang7@gmail.com
|
73c0269f5cbc46a8c2badeb9fe65e2342886f8cf
|
394deeb967bee4609f92c2dcb0cddb3cca5b639c
|
/tf_agents/bandits/policies/lin_ucb_policy.py
|
eb3a7ccc8db9640149e358470b888213f92cdef3
|
[
"Apache-2.0"
] |
permissive
|
tagomatech/agents
|
8234e286576d287c4f2cdabe461f342d2fe1cda4
|
4a624694f6ef05b8c92805decc23732e625ff9af
|
refs/heads/master
| 2020-05-30T18:04:29.062082
| 2019-11-11T20:53:32
| 2019-11-11T20:53:32
| 189,889,368
| 0
| 0
|
Apache-2.0
| 2019-11-10T14:02:41
| 2019-06-02T19:58:43
|
Python
|
UTF-8
|
Python
| false
| false
| 10,586
|
py
|
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Linear UCB Policy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
import tensorflow_probability as tfp
from tf_agents.bandits.policies import linalg
from tf_agents.bandits.policies import policy_utilities
from tf_agents.policies import tf_policy
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
tfd = tfp.distributions
PolicyInfo = collections.namedtuple('PolicyInfo', # pylint: disable=invalid-name
(policy_step.CommonFields.LOG_PROBABILITY,
'predicted_rewards'))
PolicyInfo.__new__.__defaults__ = ((),) * len(PolicyInfo._fields)
class LinearUCBPolicy(tf_policy.Base):
"""Linear UCB Policy.
Implements the Linear UCB Policy from the following paper:
"A Contextual Bandit Approach to Personalized News Article Recommendation",
Lihong Li, Wei Chu, John Langford, Robert Schapire, WWW 2010.
"""
def __init__(self,
action_spec,
cov_matrix,
data_vector,
num_samples,
time_step_spec=None,
alpha=1.0,
eig_vals=(),
eig_matrix=(),
tikhonov_weight=1.0,
expose_predicted_rewards=False,
emit_log_probability=False,
observation_and_action_constraint_splitter=None,
name=None):
"""Initializes `LinUCBPolicy`.
    The `cov_matrix` (the A matrices) and `data_vector` (the b vectors)
    arguments may be either `Tensor`s or `tf.Variable`s. If they are variables,
    then any assignments to those variables will be reflected in the output of
    the policy.
Args:
action_spec: `TensorSpec` containing action specification.
cov_matrix: list of the covariance matrices A in the paper. There exists
one A matrix per arm.
data_vector: list of the b vectors in the paper. The b vector is a
weighted sum of the observations, where the weight is the corresponding
reward. Each arm has its own vector b.
num_samples: list of number of samples per arm.
time_step_spec: A `TimeStep` spec of the expected time_steps.
alpha: a float value used to scale the confidence intervals.
eig_vals: list of eigenvalues for each covariance matrix (one per arm).
eig_matrix: list of eigenvectors for each covariance matrix (one per arm).
tikhonov_weight: (float) tikhonov regularization term.
expose_predicted_rewards: (bool) Whether to expose the predicted rewards
in the policy info field under the name 'predicted_rewards'.
emit_log_probability: Whether to emit log probabilities.
observation_and_action_constraint_splitter: A function used for masking
valid/invalid actions with each state of the environment. The function
takes in a full observation and returns a tuple consisting of 1) the
part of the observation intended as input to the bandit policy and 2)
the mask. The mask should be a 0-1 `Tensor` of shape
`[batch_size, num_actions]`. This function should also work with a
`TensorSpec` as input, and should output `TensorSpec` objects for the
observation and mask.
name: The name of this policy.
"""
if not isinstance(cov_matrix, (list, tuple)):
raise ValueError('cov_matrix must be a list of matrices (Tensors).')
self._cov_matrix = cov_matrix
if not isinstance(data_vector, (list, tuple)):
raise ValueError('data_vector must be a list of vectors (Tensors).')
self._data_vector = data_vector
if not isinstance(num_samples, (list, tuple)):
raise ValueError('num_samples must be a list of vectors (Tensors).')
self._num_samples = num_samples
if not isinstance(eig_vals, (list, tuple)):
raise ValueError('eig_vals must be a list of vectors (Tensors).')
self._eig_vals = eig_vals
if not isinstance(eig_matrix, (list, tuple)):
raise ValueError('eig_matrix must be a list of vectors (Tensors).')
self._eig_matrix = eig_matrix
self._alpha = alpha
self._use_eigendecomp = False
if eig_matrix:
self._use_eigendecomp = True
self._tikhonov_weight = tikhonov_weight
if len(cov_matrix) != len(data_vector):
raise ValueError('The size of list cov_matrix must match the size of '
'list data_vector. Got {} for cov_matrix and {} '
'for data_vector'.format(
                           len(self._cov_matrix), len(data_vector)))
if len(num_samples) != len(cov_matrix):
raise ValueError('The size of num_samples must match the size of '
'list cov_matrix. Got {} for num_samples and {} '
'for cov_matrix'.format(
                           len(self._num_samples), len(cov_matrix)))
if tf.nest.is_nested(action_spec):
raise ValueError('Nested `action_spec` is not supported.')
self._num_actions = action_spec.maximum + 1
if self._num_actions != len(cov_matrix):
raise ValueError(
'The number of elements in `cov_matrix` ({}) must match '
'the number of actions derived from `action_spec` ({}).'.format(
len(cov_matrix), self._num_actions))
if observation_and_action_constraint_splitter is not None:
context_shape = observation_and_action_constraint_splitter(
time_step_spec.observation)[0].shape.as_list()
else:
context_shape = time_step_spec.observation.shape.as_list()
self._context_dim = (
tf.compat.dimension_value(context_shape[0]) if context_shape else 1)
cov_matrix_dim = tf.compat.dimension_value(cov_matrix[0].shape[0])
if self._context_dim != cov_matrix_dim:
raise ValueError('The dimension of matrix `cov_matrix` must match '
                       'context dimension {}. '
'Got {} for `cov_matrix`.'.format(
self._context_dim, cov_matrix_dim))
data_vector_dim = tf.compat.dimension_value(data_vector[0].shape[0])
if self._context_dim != data_vector_dim:
raise ValueError('The dimension of vector `data_vector` must match '
'context dimension {}. '
'Got {} for `data_vector`.'.format(
self._context_dim, data_vector_dim))
self._dtype = self._data_vector[0].dtype
self._expose_predicted_rewards = expose_predicted_rewards
if expose_predicted_rewards:
info_spec = PolicyInfo(
predicted_rewards=tensor_spec.TensorSpec(
[self._num_actions], dtype=self._dtype))
else:
info_spec = ()
super(LinearUCBPolicy, self).__init__(
time_step_spec=time_step_spec,
action_spec=action_spec,
info_spec=info_spec,
emit_log_probability=emit_log_probability,
observation_and_action_constraint_splitter=(
observation_and_action_constraint_splitter),
name=name)
def _variables(self):
all_vars = (self._cov_matrix + self._data_vector + self._num_samples +
list(self._eig_matrix) + list(self._eig_vals))
return [v for v in all_vars if isinstance(v, tf.Variable)]
def _distribution(self, time_step, policy_state):
observation = time_step.observation
observation_and_action_constraint_splitter = (
self.observation_and_action_constraint_splitter)
if observation_and_action_constraint_splitter is not None:
observation, mask = observation_and_action_constraint_splitter(
observation)
# Check the shape of the observation matrix. The observations can be
# batched.
if not observation.shape.is_compatible_with([None, self._context_dim]):
raise ValueError('Observation shape is expected to be {}. Got {}.'.format(
[None, self._context_dim], observation.shape.as_list()))
observation = tf.reshape(observation, [-1, self._context_dim])
observation = tf.cast(observation, dtype=self._dtype)
p_values = []
est_rewards = []
for k in range(self._num_actions):
if self._use_eigendecomp:
q_t_b = tf.matmul(
self._eig_matrix[k],
tf.linalg.matrix_transpose(observation),
transpose_a=True)
lambda_inv = tf.divide(
tf.ones_like(self._eig_vals[k]),
self._eig_vals[k] + self._tikhonov_weight)
a_inv_x = tf.matmul(
self._eig_matrix[k], tf.einsum('j,jk->jk', lambda_inv, q_t_b))
else:
a_inv_x = linalg.conjugate_gradient_solve(
self._cov_matrix[k] +
self._tikhonov_weight * tf.eye(self._context_dim),
tf.linalg.matrix_transpose(observation))
est_mean_reward = tf.einsum('j,jk->k', self._data_vector[k], a_inv_x)
est_rewards.append(est_mean_reward)
ci = tf.reshape(
tf.linalg.tensor_diag_part(tf.matmul(observation, a_inv_x)),
[-1, 1])
p_values.append(
tf.reshape(est_mean_reward, [-1, 1]) + self._alpha * tf.sqrt(ci))
# Keeping the batch dimension during the squeeze, even if batch_size == 1.
optimistic_reward_estimates = tf.squeeze(
tf.stack(p_values, axis=-1), axis=[1])
if observation_and_action_constraint_splitter is not None:
chosen_actions = policy_utilities.masked_argmax(
optimistic_reward_estimates,
mask,
output_type=self._action_spec.dtype)
else:
chosen_actions = tf.argmax(
optimistic_reward_estimates,
axis=-1,
output_type=self._action_spec.dtype)
action_distributions = tfp.distributions.Deterministic(loc=chosen_actions)
if self._expose_predicted_rewards:
policy_info = PolicyInfo(
predicted_rewards=tf.stack(est_rewards, axis=-1))
else:
policy_info = ()
return policy_step.PolicyStep(
action_distributions, policy_state, policy_info)
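# A minimal construction sketch (illustrative only; `context_dim` and
# `num_actions` are hypothetical values, and the specs follow the tf_agents
# API imported above):
#
#   from tf_agents.trajectories import time_step as ts
#   context_dim, num_actions = 4, 3
#   obs_spec = tensor_spec.TensorSpec([context_dim], tf.float32)
#   act_spec = tensor_spec.BoundedTensorSpec((), tf.int32, 0, num_actions - 1)
#   policy = LinearUCBPolicy(
#       action_spec=act_spec,
#       cov_matrix=[tf.eye(context_dim)] * num_actions,
#       data_vector=[tf.zeros([context_dim])] * num_actions,
#       num_samples=[tf.zeros([], tf.int64)] * num_actions,
#       time_step_spec=ts.time_step_spec(obs_spec))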
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
4f917d5c34fdf3ab06bc5065b47c2f5a2c5a1d88
|
066f031b8c9fcfd825c99a199e8d8692a3eead46
|
/Completed/Day01/part02.py
|
a8d682f0c77412294410acf41a0cb9d0fde85ba8
|
[] |
no_license
|
VonGriff/AdventofCode-15
|
55c9500e4994e4ea8a8a5366dadf862bed1fe157
|
8509c9e420ae9a7094eabfef3fcc002d703dc5dd
|
refs/heads/master
| 2020-06-19T08:37:57.647695
| 2016-10-12T12:08:17
| 2016-10-12T12:08:17
| 68,147,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
def read(file):
level = 0
pos = 0
text = open(file, 'r')
while (True):
pos += 1
c = text.read(1)
if c == '(':
level += 1
elif c == ')':
level -= 1
if level == -1:
return pos
print(read("input.txt"))
|
[
"autumndiscord@gmail.com"
] |
autumndiscord@gmail.com
|
bc3fb50cd23946e054b837d284f6a00a6320bad7
|
49dc88dbabab84e53496a058daf9e30987fd46a7
|
/sketches/py2b/python_classes/controllers.py
|
e3a13196d499a734280036f0151dc0a261d0f7c7
|
[
"MIT"
] |
permissive
|
sauloal/arduino
|
4f97f6e4d9822dc1516c395f7e140c061d6c319f
|
e8acbf53b5fb23be02c168dc70ee897f345f4c76
|
refs/heads/master
| 2020-05-28T07:43:02.545545
| 2013-07-29T19:40:27
| 2013-07-29T19:40:27
| 3,607,207
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,667
|
py
|
##########################################
###### PYTHON 2B #########################
##########################################
# #
# MARCO SANGALLI marco.sangalli@ovas.it #
# ALEX RIGAMONTI alex.rigamonti@ovas.it #
# #
##########################################
from ser import *
from threading import Thread
from portManager import *
import time
import math
#############################################
class pwmTest(Thread):
def __init__(self,s,pin,resolution=10):
Thread.__init__(self)
#resolution
self.wait=1.0/float(resolution)
#
self.pwmport=outPort(s,pin)
#cnt
self.cnt=0.0
self.maxpwm=255.0
#start
self.start()
def run(self):
while 1:
self.cnt+=0.05
#pass
val=abs(int(math.sin(self.cnt)*self.maxpwm))
#print "%s=========>" % (val)
#10 values at time
self.pwmport.setValue(val)
time.sleep(self.wait)
################################################
class analogPort(object):
def __init__(self,s,pinIn,pinOut):
self.portIn=inPort(s,pinIn,self.onData)
self.portOut=outPort(s,pinOut)
def onData(self,val):
#@@print "A%s" % val
self.portOut.setValue(val)
################################################
class digitalPort(object):
def __init__(self,s,pinIn,pinOut):
self.portIn=inPort(s,pinIn,self.onData)
self.portOut=outPort(s,pinOut)
def onData(self,val):
#@@print "D%s" % val
self.portOut.setValue(val)
#start serial manager####################
if __name__ == "__main__":
s=ser()
#DIGITAL
dp=digitalPort(s,DIN0,DOUT2)
#PWM
pwm=pwmTest(s,PWM1,15)
#ANALOG
ap=analogPort(s,AIN1,PWM2)
|
[
"sauloal@yahoo.com.br"
] |
sauloal@yahoo.com.br
|
e2d4d26c96c44db8eed437d662ba29726d3d0ad2
|
d555e3d318512964b95d4cd3d7489e380ec04e99
|
/api/quickstart/dbrouters.py
|
5376acf152c4f519a01cc78d997ac260f88592a4
|
[] |
no_license
|
andrewadcock/django-mssql-api
|
9fa1bd3ae324726a23cca3706b270abae5957cb0
|
636d4caa3d86242dca75abed45a7c789a916ca7e
|
refs/heads/develop
| 2022-11-29T08:00:45.762445
| 2020-05-07T12:23:08
| 2020-05-07T12:23:08
| 262,082,415
| 0
| 0
| null | 2020-08-08T00:37:08
| 2020-05-07T15:04:51
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 261
|
py
|
class DbRouter(object):
def db_for_read(self, model, **hints):
if model._meta.app_label in ['quickstart']:
return 'SCHIPAnnualReports'
# Returning None is no opinion, defer to other routers or default database
return None
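    # db_for_write, allow_relation and allow_migrate are not defined, so
    # writes, relations and migrations fall through to Django's defaults.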
|
[
"ssacher@collabralink.com"
] |
ssacher@collabralink.com
|
07a9ec4ee5447d27de723c82a64c24ab000b21eb
|
c62700cfb1e17fd314ebbbef3d99104575b8bad9
|
/tests/TestInstallation.py
|
7a5502d20cb6cb15b2f213cce53680ae3929d023
|
[
"Apache-2.0"
] |
permissive
|
ellipsys/urh
|
0f245b4c142d670f31cea5791346f26e978f2807
|
3a7735f3e09fec29d728fd0313a7ade21ec59a4c
|
refs/heads/master
| 2021-01-13T03:42:21.241267
| 2016-12-23T14:05:50
| 2016-12-23T14:05:50
| 77,269,092
| 1
| 0
| null | 2016-12-24T06:12:29
| 2016-12-24T06:12:29
| null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
import unittest
from tests.docker import docker_util
class TestInstallation(unittest.TestCase):
def test_archlinux(self):
self.assertTrue(docker_util.run_image("archlinux"))
def test_debian8(self):
self.assertTrue(docker_util.run_image("debian8"))
def test_ubuntu1404(self):
self.assertTrue(docker_util.run_image("ubuntu1404"))
def test_ubuntu1604(self):
self.assertTrue(docker_util.run_image("ubuntu1604"))
def test_kali(self):
self.assertTrue(docker_util.run_image("kali"))
def tearDown(self):
docker_util.remove_containers()
|
[
"Johannes.Pohl90@gmail.com"
] |
Johannes.Pohl90@gmail.com
|
09cf60c899cb9e513e5dbe6f4ab8529de16236a8
|
c7169415ae8abedd29ab83cddbcccb6768663062
|
/graph_processing/dataset_a/gephi_lists/flickr/create_flickr_gephi_lists.py
|
1343e8d14f66d7261853eff3037648b031c52867
|
[] |
no_license
|
chrisWWU/cross_platform_feature_analysis
|
26c33dd2adc00b7d8fbc24bfef45d6757b81ae1a
|
572732554c73bdcb22f31bce5718fdf8beb77bd8
|
refs/heads/master
| 2022-12-29T11:11:18.910805
| 2020-10-16T11:38:04
| 2020-10-16T11:38:04
| 296,666,366
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,073
|
py
|
import pandas as pd
import os
def clear_filename(filename):
return filename.replace('.csv', '')
def get_fl_nodelist(path_fl, path_connection, path_fl_nodelist, path_fl_core_nodelist, csv):
"""
creates complete nodelist with labels 'core' and 'follow' readable by Gephi
creates core nodelist containing flickrids usernames labels etc.
"""
nsids = pd.Series()
core_ids = []
# iterate all .csv 'following' files, each file belongs to one core user
for filename in os.listdir(path_fl):
# append id to core user list
core_ids.append(clear_filename(filename))
# also append core user to complete ids
nsids = nsids.append(pd.Series(clear_filename(filename)))
# read following info
df = pd.read_csv(path_fl + filename, index_col=0)
if not df.empty:
# append friend (following) contacts to complete id series
nsids = nsids.append(df['nsid'])
nsids = nsids.unique()
# create nodelist
nodelist = pd.DataFrame(columns=['id', 'label', 'timeset', 'relevant'])
# fill complete ids
nodelist['id'] = nsids
# read connection info
connect = pd.read_csv(path_connection, index_col=0).drop(['twitterid'], axis=1)
# rename flickrid for merge
connect.rename(columns={'flickrid': 'id'}, inplace=True)
# label complete list as core or follow node
nodelist.loc[nodelist['id'].isin(core_ids), 'relevant'] = 'core'
nodelist['relevant'].fillna('follow', inplace=True)
nodelist['label'] = nodelist['relevant']
# create core nodelist by merging complete nodelist with connection df
core_nodelist = pd.merge(nodelist, connect, on='id')
core_nodelist['label'] = core_nodelist['flickrusername']
if csv:
#nodelist.to_csv(path_fl_nodelist, index=False)
core_nodelist.to_csv(path_fl_core_nodelist, index=False)
def get_fl_edgelist(path_fl, path_connection, path_fl_edgelist, path_fl_core_edgelist, csv):
"""
creates complete edgelist
creates core edgelist
"""
# read connection info
connect = pd.read_csv(path_connection, index_col=0)
core_ids = []
edge_list = pd.DataFrame(columns=['source', 'target'])
# only keep files that are in connect
filenames = [x for x in os.listdir(path_fl) if clear_filename(x) in connect['flickrid'].values]
# iterate through all twitter follow files
for filename in filenames:
# each file name is a core node name
core_ids.append(clear_filename(filename))
df = pd.read_csv(path_fl + filename, index_col=0)
if not df.empty:
# name of file is source node
source_id = pd.Series(clear_filename(filename))
# create df containing all edges of respective file
inter_edge_list = pd.DataFrame(columns=['source', 'target'])
# repeat source node to length of df
inter_edge_list['source'] = source_id.repeat(len(df)).reset_index(drop=True)
# add content of df as target column
inter_edge_list['target'] = df['nsid']
edge_list = edge_list.append(inter_edge_list)
# create core edgelist by selecting all rows where target node is a core node
core_edgelist = edge_list[edge_list['target'].isin(core_ids)]
if csv:
#edge_list.to_csv(path_fl_edgelist, index=False)
core_edgelist.to_csv(path_fl_core_edgelist, index=False)
if __name__ == '__main__':
dataset = 'dataset_a'
path_fl = '/Users/kiki/sciebo/personality_trait_paper/flickr_and_twitter/flickr/following_flickr/'
path_fl_nodelist = 'flickr_nodelist.csv'
path_fl_core_nodelist = 'flickr_core_nodelist.csv'
path_fl_edgelist = 'flickr_edgelist.csv'
path_fl_core_edgelist = 'flickr_core_edgelist.csv'
path_connection = f'../../../../data/{dataset}/connection.csv'
csv = False
get_fl_edgelist(path_fl, path_connection, path_fl_edgelist, path_fl_core_edgelist, csv)
#get_fl_nodelist(path_fl, path_connection, path_fl_nodelist, path_fl_core_nodelist, csv)
|
[
"christian28bewerbung@gmail.com"
] |
christian28bewerbung@gmail.com
|
8f1dc0297e4d67ad83ca5b3c971095c587a3c3f3
|
78be63927dbc43c4977e690e47e38c4bdad74421
|
/ring_sim_classes.py
|
d7f6f68511f58cc411f1ff74cf49267cee0c1822
|
[] |
no_license
|
thomape/RingSim
|
012dfa00119a9de4c22cbf6f4e43d3f3c2c12a8e
|
497edc8c065235d7e288bdac9ca587169f83398d
|
refs/heads/main
| 2023-04-07T00:12:04.544407
| 2021-04-23T20:39:13
| 2021-04-23T20:39:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,831
|
py
|
"""All Classes for RingSim"""
class Address():
"""Base address class"""
def __init__(self, complete_address):
is_empty = False
if complete_address == ():
is_empty = True
if is_empty:
self.home = None
self.complete_address = ()
else:
self.home = complete_address[len(complete_address) - 1]
self.complete_address = complete_address
# Getters and Setters
def get_complete_address(self):
"""Returns complete address"""
return self.complete_address
def get_home(self):
"""The home symbol is know as the last of 7 symbols."""
return self.complete_address[6]
def get_digit(self, position):
"""Pass in integer to return specific location of symbol."""
return self.complete_address[position - 1]
def get_address_id(self):
"""Returns id of address"""
return self.complete_address[-1]
def set_complete_address(self, complete_address):
"""Pass in tuple to create new address."""
self.complete_address = complete_address
# Basic methods
def print_address(self):
"""Prints address"""
print(self.complete_address)
def print_home(self):
"""Prints home symbol"""
print(self.complete_address[6])
class AddressBook(Address):
"""Inherits from address"""
    # This will only be used if you want to manipulate multiple addresses for an
    # instance of the app. It can be used like a local db instead of querying
    # the db frequently. Will implement last; not sure if needed.
class Ring():
"""Base class for Ring object"""
__symbol_set = {"base" : ("Es", "Cla", "Shi", "O", "UL", "Wex",
"Fin", "Pi", "Sa", "Zi", "Tar", "Desh",
"Cor", "Jyn", "Ra", "Nas", "Han", "Rey",
"Jo", "Jav", "Vel", "En", "Kech", "Bo",
"Ste", "Va", "Ta", "Bre", "Rush", "Yar",
"De", "Ka", "Pro", "The", "Gil", "Les", "Mu")}
def __init__(self, origin):
if origin is None:
self.origin = {}
else:
self.origin = origin
self.__symbol_set.update(origin)
def get_base_symbol_set(self):
"""Returns the base symbols"""
return self.__symbol_set["base"]
# Get/Set for Point of Origin
def get_origin(self):
"""Returns the origin dict"""
return self.origin
def set_origin(self, origin):
"""Pass in dictionary with "origin" key and string name"""
self.origin = origin
self.__symbol_set.update(origin)
# Get/Set for complete set
def get_complete_set(self):
"""Returns the entire set"""
return self.__symbol_set
|
[
"tom.errington58@gmail.com"
] |
tom.errington58@gmail.com
|
9da9edd9709412b6adedf84d8e54d35bedc1478e
|
ea638660eba44752ea4292c40c0903e70fc624b9
|
/0x0F-python-object_relational_mapping/101-relationship_states_cities_list.py
|
9492b1668e3fca3fbc49e6437daff08eb0ff6f31
|
[] |
no_license
|
mounirchebbi/holbertonschool-higher_level_programming
|
38df80f1ddb291badb31968ff766e1ebfe23cb57
|
5bbf6265c1c661d9f4de8cc451b67100e8b91fa3
|
refs/heads/master
| 2023-03-31T15:58:47.837219
| 2021-04-03T00:01:49
| 2021-04-03T00:01:49
| 319,455,788
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 739
|
py
|
#!/usr/bin/python3
"""
List States and corresponding Cities in the database hbtn_0e_101_usa
"""
import sys
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from relationship_state import State
from relationship_city import City
if __name__ == "__main__":
engine = create_engine("mysql+mysqldb://{}:{}@localhost/{}"
.format(sys.argv[1], sys.argv[2], sys.argv[3]),
pool_pre_ping=True)
Session = sessionmaker(bind=engine)
session = Session()
for state in session.query(State).order_by(State.id):
print("{}: {}".format(state.id, state.name))
for city in state.cities:
print(" {}: {}".format(city.id, city.name))
|
[
"2157@holbertonschool.com"
] |
2157@holbertonschool.com
|
f195541a1499432e5009441129ca7466983f7fda
|
fff80f77c7af53a45bc8d613c017444e09bef195
|
/Mysite/website/models.py
|
2a4738493970f5992e1de042d9ebec611de26f03
|
[] |
no_license
|
saikiran1111/Djangoprojects
|
1b34967feafc09a7a1f6343b5d373e3d7114a908
|
fc4194c4d748ee28d126cf78b055ba30eb89c85d
|
refs/heads/master
| 2020-04-11T00:05:46.986621
| 2018-12-12T14:25:02
| 2018-12-12T14:25:02
| 161,375,102
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
from django.db import models
# Create your models here.
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
class Choice(models.Model):
question = models.ForeignKey('Question', on_delete=models.PROTECT)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
|
[
"noreply@github.com"
] |
saikiran1111.noreply@github.com
|
720e11eb69115bbfd802814514dac27e48340bb1
|
be2083eb77e8626fe0ef729e3ac958d7da07ca83
|
/qa/rpc-tests/multi_rpc.py
|
233eb2990f8582d8eb6e133123ae45a7fdda8554
|
[
"MIT"
] |
permissive
|
dpayne9000/Rubixz-Coin
|
ad7516868664fbd9b42746992b617eeeaa77871f
|
13dccf942e0fa8bc6ea27b66c49120dc0e21762f
|
refs/heads/master
| 2021-01-22T01:48:36.957123
| 2017-09-23T05:38:47
| 2017-09-23T05:38:47
| 102,241,550
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,578
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test multiple rpc user config option rpcauth
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import str_to_b64str, assert_equal
import os
import http.client
import urllib.parse
class HTTPBasicsTest (BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = False
self.num_nodes = 1
def setup_chain(self):
super().setup_chain()
        #Append rpcauth to rubixzcoin.conf before initialization
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
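        # each rpcauth value has the form <user>:<salt>$<hmac-sha256 hash>,
        # as produced by the share/rpcuser tool mentioned below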
with open(os.path.join(self.options.tmpdir+"/node0", "rubixzcoin.conf"), 'a', encoding='utf8') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
def setup_network(self):
self.nodes = self.setup_nodes()
def run_test(self):
##################################################
# Check correctness of the rpcauth config option #
##################################################
url = urllib.parse.urlparse(self.nodes[0].url)
#Old authpair
authpair = url.username + ':' + url.password
#New authpair generated via share/rpcuser tool
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
#Second authpair with different username
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
authpairnew = "rt:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Use new authpair to confirm both work
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Wrong login name with rt's password
authpairnew = "rtwrong:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
#Wrong password for rt
authpairnew = "rt:"+password+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
#Correct for rt2
authpairnew = "rt2:"+password2
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Wrong password for rt2
authpairnew = "rt2:"+password2+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
if __name__ == '__main__':
HTTPBasicsTest ().main ()
|
[
"daniel.payne.unlimited@gmail.com"
] |
daniel.payne.unlimited@gmail.com
|
1bed8b2a8bccfba906c396a6ef545f2133988e2e
|
20fb3ca196eb3cd09515853e29d39d784a7da536
|
/Example5/first_parameter_example.py
|
d33cfcdfa8ff234ba9c54d1f3adf413d26d221dd
|
[] |
no_license
|
mdurmuss/ROS-FUNDAMENTALS
|
2645fadf8dd934fe7f831184cbff3aa88dc15c7d
|
4ebd77a137fd8c28d6a03432fa96d782f2522f57
|
refs/heads/master
| 2020-06-18T04:52:18.986655
| 2020-01-16T09:34:16
| 2020-01-16T09:34:16
| 196,169,834
| 0
| 2
| null | 2019-07-25T08:25:56
| 2019-07-10T08:55:06
|
Python
|
UTF-8
|
Python
| false
| false
| 924
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author : Mustafa Durmuş
import rospy
from std_msgs.msg import Int64
NODE_NAME = "parameter_publisher"
PUB_TOPIC_NAME = "/parameter_number"
PARAM_NAMES = ["/rosversion", "/rosdistro", "/another_param"]
def parameter():
"""
creates a ros node and publisher.
gets a parameter and publishes.
sets a parameter.
"""
rospy.init_node(NODE_NAME, anonymous=True)
pub = rospy.Publisher(PUB_TOPIC_NAME, Int64, queue_size=10)
    # fetch a parameter from the parameter server ("/rosversion" is a string);
    # it is retrieved here only for illustration and is not used below
    publish_frequency = rospy.get_param(PARAM_NAMES[0])
    rate = rospy.Rate(2)  # publish at 2 Hz
number = rospy.get_param(PARAM_NAMES[1])
# create another parameter
rospy.set_param(PARAM_NAMES[2], "HelloROS")
while not rospy.is_shutdown():
msg = Int64()
        # "/rosdistro" yields a string (e.g. "melodic"); Int64.data needs an
        # int, so fall back to 0 when the parameter is not numeric
        msg.data = int(number) if str(number).isdigit() else 0
pub.publish(msg)
rate.sleep()
if __name__ == "__main__":
parameter()
|
[
"noreply@github.com"
] |
mdurmuss.noreply@github.com
|
faa1322d9a2ff06b525ec03bdd104773a67826d6
|
14762129284bea08150e90cdfb283fb0859d6b04
|
/dist/snippets/maps_http_geocode_zero_results/maps_http_geocode_zero_results.py
|
0ca399d82c51f1e549b046051adac9e4df40ebfc
|
[
"Apache-2.0"
] |
permissive
|
shabbir135/openapi-specification
|
7f5e83916fc6f6ca06cf65f7cda0b986faa18b93
|
94a6c4a0d886d192544d8072c0fdff6c628325b1
|
refs/heads/main
| 2023-07-19T06:40:33.016144
| 2021-08-25T15:16:05
| 2021-08-25T15:16:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 302
|
py
|
# [START maps_http_geocode_zero_results]
import requests
url = "https://maps.googleapis.com/maps/api/geocode/json?latlng=0,0&key=YOUR_API_KEY"
payload={}
headers = {}
response = requests.request("GET", url, headers=headers, data=payload)
print(response.text)
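# NOTE: YOUR_API_KEY is a placeholder; substitute a valid Maps API key to run this.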
# [END maps_http_geocode_zero_results]
|
[
"noreply@github.com"
] |
shabbir135.noreply@github.com
|
ae72de755b3e6a32ab643baf8d96bb9e853ce7fa
|
3a4c5860ba3f2e88d4315bc27eb2277219235c13
|
/save_sample_image.py
|
487cc24ffcf58e0d66c01ec231c10e80ead99aa6
|
[] |
no_license
|
Mrsonwden/Artificial-Potential-Field
|
8ba617499e0ee192863cf5dbf68b6d91112d43ba
|
972256d94122ee2928eb1fe0af15a57a82a2ff2b
|
refs/heads/master
| 2020-09-24T00:41:27.227147
| 2017-03-31T13:38:01
| 2017-03-31T13:38:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 88
|
py
|
import cv2
cap = cv2.VideoCapture(1)
_, im = cap.read()
cv2.imwrite('sample1.jpg', im)
|
[
"iiita.coder@gmail.com"
] |
iiita.coder@gmail.com
|
368a8d6fb2f983a54efc388552ecd4085fcd7acd
|
f43d030b05519e74584a8d72e8f67cd5a632be84
|
/table.py
|
6c94d328a6b46b2115973603230b8694c26aa536
|
[] |
no_license
|
AndresFlorez/countries
|
a2928162c0eac9b12e591fca9ac1eca25543f448
|
98c2ec3288ae0437d36e03a9170e1c95fdc80ea9
|
refs/heads/main
| 2023-06-28T19:24:40.741091
| 2021-08-02T05:22:44
| 2021-08-02T05:22:44
| 345,413,407
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,124
|
py
|
import hashlib
import sqlite3 as sql
import time
from collections import defaultdict
from json import loads
import pandas as pd
from bd import Database
from get_data import get_data
class Table:
def __init__(self):
self.database = Database()
self.regions_df = None
self.countries_df = None
self.table_df = None
self.countries = []
self.times = defaultdict(lambda: 0)
self.languages = defaultdict(lambda: '')
def build_data(self):
self.get_regions()
self.get_country_by_region()
self.concat_coutries()
self.set_rows_language()
self.set_times()
self.set_languages()
self.build_table()
def get_regions(self):
""" Obtener todos los paises con su región
"""
regions_str = get_data("regions")
regions = loads(regions_str)
self.regions_df = pd.DataFrame(regions)
def get_country_by_region(self):
""" Se obtienen regiones de los paises obtenidos, por cada una se consultan
los paises y se toma el primero y se guarda un dataframe en self.countries
"""
if self.regions_df is None or self.regions_df.empty:
return None
region_list = self.regions_df["region"].value_counts().keys().to_list()
for region in region_list:
start_time = time.time()
if not region: continue
countries_str = get_data("countries", region=region)
countries = loads(countries_str)
self.countries.append(pd.DataFrame([countries[0]]))
self.times[region] += time.time() - start_time
def set_rows_language(self):
""" Por cada país se consulta en
https://restcountries.eu/rest/v2/name/{country_code}?fields=languages
sus lenguajes y se guardan en self.languages por región.
Se acumula el tiempo que toma consultar el primer lenguaje de cada
país y guardar el nombre en countries_df en self.times
"""
if self.countries_df is None or self.countries_df.empty:
return None
columns = list(self.countries_df)
for index, row in self.countries_df.iterrows():
start_time = time.time()
language_str = get_data("language", country_code=row['alpha2Code'])
language = loads(language_str)
if language:
language = language[0]["languages"][0]["name"]
language = hashlib.sha1(language.encode('UTF-8')).hexdigest()
self.languages[row['region']] = language
else:
self.languages[row['region']] = ''
self.times[row['region']] += time.time() - start_time
def concat_coutries(self):
""" Se une el dataframe de cada país en self.countries_df
(self.countries es una lista con dataframes)
Se acumula el tiempo que toma consultar cada país en self.times
"""
self.countries_df = pd.concat(self.countries)
def start_database(self, name):
self.database.open(name)
def create_db_table(self):
fields = [
'`index` INTEGER PRIMARY KEY AUTOINCREMENT',
'`region` TEXT',
'`name` TEXT',
'`language_sha1` TEXT',
'`time` REAL',
]
self.database.create_table('countries', fields)
def insert_dataframe_db(self):
self.database.datraframe_to_db(
'database.db', 'countries', self.table_df)
def generate_json_file(self):
self.table_df.to_json(r'data.json', orient='records')
def set_times(self):
""" Se agrega el tiempo a cada fila en self.countries_df
"""
self.countries_df['time'] = self.countries_df['region'].map(self.times)
def set_languages(self):
""" Se agrega el lenguaje encriptado con SHA1 a cada fila en self.countries_df
"""
self.countries_df['language_sha1'] = self.countries_df['region'].map(
self.languages)
def build_table(self):
""" Apartir de self.countries_df se arma un dataframe con los datos requeridos:
región, nombre del país, lenguaje y tiempo que toma armar cada fila en
"""
self.table_df = self.countries_df[[
'region', 'name', 'language_sha1', 'time']].copy()
self.table_df.reset_index(drop=True, inplace=True)
def show_times(self):
""" Muestra el tiempo total, promedio, minimo y maximo con funciones de pandas
"""
if self.table_df is None or self.table_df.empty:
return None
times_str = ""\
"-------------- Tiempos --------------\n"\
"Tiempo total: {total}\n"\
"Tiempo promedio: {mean}\n"\
"Tiempo minimo: {minimum}\n"\
"Tiempo maximo: {maximum}".format(
total=self.table_df['time'].sum(),
mean=self.table_df['time'].mean(),
minimum=self.table_df['time'].min(),
maximum=self.table_df['time'].max()
)
print(times_str)
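# A minimal usage sketch (hypothetical driver code; assumes the restcountries
# API and the local bd.Database helper are reachable):
#
#   t = Table()
#   t.build_data()
#   t.show_times()
#   t.generate_json_file()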
|
[
"aflorez@opalo.com.co"
] |
aflorez@opalo.com.co
|
ef704d3459a64430e7458dbb263dc784250106cf
|
dbafbea874751231428ab451520f79bb4656196d
|
/hw1/models.py
|
8ff5d7e1c611e923c9e72f62701f89506e873fa2
|
[
"MIT"
] |
permissive
|
vermouth1992/CS294-158-homework
|
1953e813a392181f01e4012d7d6a01d21de625b4
|
1c35f07374f4bb5d31cff1e2b12f37ae81f838be
|
refs/heads/master
| 2020-05-16T22:48:35.705960
| 2019-08-01T16:33:23
| 2019-08-01T16:33:23
| 183,345,149
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,083
|
py
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
from torchlib.common import FloatTensor
from torchlib.dataset.utils import create_data_loader
from torchlib.generative_model.made import MADE
class WarmUpModel(nn.Module):
def __init__(self, n=100):
super(WarmUpModel, self).__init__()
self.n = n
self.theta = nn.Parameter(torch.randn(1, self.n))
def forward(self, x):
return self.theta.repeat((x.shape[0], 1))
@property
def pmf(self):
return F.softmax(self.theta[0].cpu().detach(), dim=-1).numpy()
def sample(self, shape):
p = self.pmf
return np.random.choice(np.arange(self.n), size=shape, p=p)
class MLP(nn.Module):
def __init__(self, n, nn_size=32, n_layers=3):
super(MLP, self).__init__()
self.n = n
self.embedding = nn.Embedding(n, nn_size)
models = []
models.append(self.embedding)
models.append(nn.Dropout(0.5))
for i in range(n_layers - 1):
models.append(nn.Linear(nn_size, nn_size))
models.append(nn.ReLU())
models.append(nn.Linear(nn_size, n))
self.model = nn.Sequential(*models)
def forward(self, x1):
"""
Args:
            x1: the condition variable x1, of shape (batch_size); integer class
                indices (embedded internally, not a one-hot vector).
        Returns: logits over x2
"""
return self.model.forward(x1)
class TwoDimensionModel(nn.Module):
def __init__(self, n=200):
super(TwoDimensionModel, self).__init__()
self.x2_cond_x1 = MLP(n=n)
self.x1_model = WarmUpModel(n=n)
def forward(self, x):
x1 = x[:, 0]
return self.x1_model.forward(x1), self.x2_cond_x1.forward(x1)
def sample(self, num_samples):
self.eval()
with torch.no_grad():
x1 = self.x1_model.sample(num_samples)
x2_temp = []
data_loader = create_data_loader((x1,), batch_size=1000, drop_last=False, shuffle=False)
for data in data_loader:
data = data[0]
x2_logits = self.x2_cond_x1.forward(data)
x2_prob = F.softmax(x2_logits, dim=-1)
distribution = Categorical(probs=x2_prob)
x2 = distribution.sample().cpu().numpy()
x2_temp.append(x2)
x2 = np.concatenate(x2_temp, axis=0)
self.train()
return x1, x2
class TwoDimensionMADE(nn.Module):
def __init__(self):
super(TwoDimensionMADE, self).__init__()
self.model = MADE(nin=2, hidden_sizes=[32], nout=2 * 200, natural_ordering=True)
def forward(self, x):
x = x.type(FloatTensor)
x = (x - 99.5) / 99.5
output = self.model.forward(x)
return output[:, 0::2], output[:, 1::2]
def sample(self, num_samples):
self.eval()
batch_size = 1000
left_samples = num_samples
result = []
while left_samples > 0:
current_size = min(batch_size, left_samples)
with torch.no_grad():
input = np.random.randint(0, 200, (current_size, 2))
input = torch.from_numpy(input)
x1_logits, _ = self.forward(input)
x1_prob = F.softmax(x1_logits, dim=-1)
distribution = Categorical(probs=x1_prob)
x1_hat = distribution.sample().cpu().numpy()
x2 = np.random.randint(0, 200, current_size)
input = np.stack((x1_hat, x2), axis=-1)
input = torch.from_numpy(input)
_, x2_logits = self.forward(input)
x2_prob = F.softmax(x2_logits, dim=-1)
distribution = Categorical(probs=x2_prob)
x2_hat = distribution.sample().cpu().numpy()
result.append(np.stack((x1_hat, x2_hat), axis=-1))
left_samples -= current_size
result = np.concatenate(result, axis=0)
self.train()
return result[:, 0], result[:, 1]
|
[
"czhangseu@gmail.com"
] |
czhangseu@gmail.com
|
677aec012ef12d32a26d2ebdfe96a27b5ab5b49f
|
b9c33f67fa66839ee18930e2679ac8f3a1b450fe
|
/build/ur3_with_tool/catkin_generated/pkg.develspace.context.pc.py
|
14e370313c4c093b87ce01ecdbd297a3a1a5358f
|
[] |
no_license
|
Haoran-Zhao/Ultrasound_and_UR3
|
e397e66207789c50b8fe7ca7c7be9ac9dfa6e2da
|
bb0e4b19216a4b21a1af4b6524f4ed98fee8d83c
|
refs/heads/master
| 2023-01-07T13:46:56.723360
| 2020-11-11T01:14:18
| 2020-11-11T01:14:18
| 274,579,555
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 371
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "ur3_with_tool"
PROJECT_SPACE_DIR = "/home/haoran/UR_ws/devel"
PROJECT_VERSION = "0.0.0"
|
[
"zhaohaorandl@gmail.com"
] |
zhaohaorandl@gmail.com
|
8cafe3e0b5fdd25079d26d4a50092aa8d191e8fc
|
814b28ecc858ceb08d1ccb11a6ad7449f5916b45
|
/MiniProject3.py
|
f6f1302cf0877c2e8aeeda162787c85177c85304
|
[] |
no_license
|
CBASoftwareDevolopment2020/searching-shakespeare
|
1a21532c70c99b2f0b871fe957a404860bd8dd00
|
1fd5cdd2df187ff0b9e9be471e3f0bd8c478cc7f
|
refs/heads/master
| 2022-04-18T22:04:46.883315
| 2020-04-15T09:24:28
| 2020-04-15T09:24:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,602
|
py
|
import tkinter as tk
from time import time
from suffix_tree_v1 import SuffixTree as st_v1
from suffix_tree_v2 import SuffixTree as st_v2
def read_file(path):
with open(path, encoding="utf-8-sig") as f:
return f.read()
def time_create_suffix_tree(string: str, out: str, case_insensitive=False):
start = time()
# st = st_v1(string)
st = st_v2(string, case_insensitive=case_insensitive)
end = time()
time_elapsed = end - start
print(out, time_elapsed)
return st
def search_text():
global text
global bible_st
global search
global output
substring = search.get()
index_start = bible_st.find_substring(substring)
index_end = index_start + 100
msg = text[index_start:index_end] if index_start != -1 else 'Not found in text'
output.configure(text=msg)
if __name__ == '__main__':
file = 'king-james-bible.txt'
text = read_file(file)
bible_st = time_create_suffix_tree(text, file, True)
main = tk.Tk()
in_frame = tk.Frame(main)
in_frame.pack()
out_frame = tk.Frame(main)
out_frame.pack()
close_frame = tk.Frame(main)
close_frame.pack()
tk.Label(in_frame, text=f'Search {file[:-4]}').grid(row=0)
search = tk.Entry(in_frame)
search.grid(row=0, column=2)
confirm = tk.Button(in_frame, text='Search', width=10, command=search_text)
confirm.grid(row=0, column=3)
output = tk.Message(out_frame, text=text[:1000], width=1920)
output.grid()
button = tk.Button(close_frame, text='Close', width=25, command=main.destroy)
button.grid()
main.mainloop()
|
[
"supernikolaj@hotmail.com"
] |
supernikolaj@hotmail.com
|
b53db18cdb6f5ae52e0cc31ad767bf570fd56925
|
60b37c0eee280aea6ddc5f61f704568af08c4cd9
|
/infrastructure/models/user.py
|
5c11914a3ddb6d3af050d14d24a47827e1da72bc
|
[] |
no_license
|
radostkali/arena-battle-tg-bot
|
2cc318d28617e40a2596a58bcc1b2ef288a08aaa
|
98f1d51c73a70f7ac3588a699b30ae3e0dc4884f
|
refs/heads/master
| 2023-04-11T10:14:37.156137
| 2021-04-25T14:53:28
| 2021-04-25T14:53:28
| 352,330,834
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 730
|
py
|
from sqlalchemy import Boolean, Column, Enum, Integer, String
from infrastructure.models.base import Base
from domain.entities import RankChoices
DB_TABLE_NAME_USER = 'user'
class User(Base):
__tablename__ = DB_TABLE_NAME_USER
id = Column(Integer, primary_key=True)
user_id = Column(Integer)
chat_id = Column(Integer)
username = Column(String)
rate = Column(Integer, default=1)
rank = Column(Enum(RankChoices), default=RankChoices.salaga)
wins = Column(Integer, default=0)
looses = Column(Integer, default=0)
admin = Column(Boolean, default=False)
def __repr__(self):
return '<User(id={}, username={})>'.format(
self.id,
self.username,
)
|
[
"t.rodionov@admitad.com"
] |
t.rodionov@admitad.com
|
68399a70f47dcd936d87e925bc60570a7ebb3791
|
81f06670e9e2e5e9e0641c3963d91bef45612d84
|
/model.py
|
012d3b71cc66170d5c12098a9a5f588c3850db3e
|
[] |
no_license
|
cindyvillanuevads/individual_project
|
44672255265b1ce8d10e9945f2975a1b24cf6f84
|
64d2f7c9b91a85915b88571046ee95e6414a3edd
|
refs/heads/main
| 2023-06-20T21:35:38.609727
| 2021-07-14T14:41:09
| 2021-07-14T14:41:09
| 384,486,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,045
|
py
|
import pandas as pd
from sklearn.tree import DecisionTreeClassifier, plot_tree, export_text
from io import StringIO
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.dummy import DummyClassifier
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
from IPython.display import display, display_html
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.feature_selection import RFE
def select_rfe (X_df, y_df, n_features, method):
'''
Takes in the predictors, the target, and the number of features to select (k) ,
and returns the names of the top k selected features based on the Recursive Feature Elimination (RFE)
X_df : the predictors
y_df : the target
n_features : the number of features to select (k)
method : LinearRegression, LassoLars, TweedieRegressor
Example
select_rfe(X_train_scaled, y_train, 2, LinearRegression())
'''
lm = method
rfe = RFE(estimator=lm, n_features_to_select= n_features)
rfe.fit(X_df, y_df)
top_rfe = list(X_df.columns[rfe.support_])
    print(f'The top {n_features} selected features based on the RFE class are: {top_rfe}')
print(pd.Series(dict(zip(X_df.columns, rfe.ranking_))).sort_values())
return top_rfe
def select_kbest (X_df, y_df, n_features):
'''
Takes in the predictors, the target, and the number of features to select (k),
and returns the names of the top k selected features based on the SelectKBest class
X_df : the predictors
y_df : the target
n_features : the number of features to select (k)
Example
select_kbest(X_train_scaled, y_train, 2)
'''
f_selector = SelectKBest(score_func=f_classif, k= n_features)
f_selector.fit(X_df, y_df)
mask = f_selector.get_support()
X_df.columns[mask]
top = list(X_df.columns[mask])
    print(f'The top {n_features} selected features based on the SelectKBest class are: {top}')
return top
def model_performs (X_df, y_df, model):
'''
    Take in an X_df, y_df and model; fit the model, make a prediction, and calculate
    the score (accuracy), confusion matrix, rates, and classification report.
    X_df: train, validate or test. Select one
    y_df: it has to be the same split as X_df.
    model: name of the model that you previously created
    Example:
    model_performs (X_train, y_train, model1)
'''
#prediction
pred = model.predict(X_df)
#score = accuracy
acc = model.score(X_df, y_df)
#conf Matrix
conf = confusion_matrix(y_df, pred)
mat = pd.DataFrame ((confusion_matrix(y_df, pred )),index = ['actual_no_approved','actual_approved'], columns =['pred_no_approved','pred_approved' ])
rubric_df = pd.DataFrame([['True Negative', 'False positive'], ['False Negative', 'True Positive']], columns=mat.columns, index=mat.index)
cf = rubric_df + ': ' + mat.values.astype(str)
#assign the values
tp = conf[1,1]
fp =conf[0,1]
fn= conf[1,0]
tn =conf[0,0]
#calculate the rate
tpr = tp/(tp+fn)
fpr = fp/(fp+tn)
tnr = tn/(tn+fp)
fnr = fn/(fn+tp)
#classification report
clas_rep =pd.DataFrame(classification_report(y_df, pred, output_dict=True)).T
    clas_rep.rename(index={'0': "Not Approved", '1': "Approved"}, inplace=True)
print(f'''
The accuracy for our model is {acc:.4%}
The True Positive Rate is {tpr:.3%}, The False Positive Rate is {fpr:.3%},
The True Negative Rate is {tnr:.3%}, The False Negative Rate is {fnr:.3%}
________________________________________________________________________________
''')
print('''
    The positive class is 'Loan Approved'
Confusion Matrix
''')
display(cf)
print('''
________________________________________________________________________________
Classification Report:
''')
display(clas_rep)
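# A minimal usage sketch (hypothetical names; assumes train splits and a
# fitted sklearn classifier, using the DecisionTreeClassifier imported above):
#
#   model1 = DecisionTreeClassifier(max_depth=3).fit(X_train, y_train)
#   model_performs(X_train, y_train, model1)
#   top = select_kbest(X_train, y_train, 5)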
|
[
"cindy.villanueva.ds@gmail.com"
] |
cindy.villanueva.ds@gmail.com
|
8a0e475d0df5ec6c3c698e4578b5126acfb7b6c3
|
ad7e5e17e60dbcc5ff1e79196dd3a4c2dd4c4535
|
/utils/eval_speed.py
|
f14f8c8cf66fb5b7bcb1ff845df8cdb58355542a
|
[] |
no_license
|
silkylove/Pytorch-ImageSegmentation
|
103e92630b9f0109808cc9c994e6ebaa6b68978c
|
f674a6ccfb4eb83a926f6f589045aadf166c0051
|
refs/heads/master
| 2020-04-10T11:57:52.052158
| 2018-12-23T10:43:22
| 2018-12-23T10:43:22
| 161,007,681
| 10
| 3
| null | 2019-11-07T07:34:27
| 2018-12-09T05:30:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,034
|
py
|
# -*- coding: utf-8 -*-
import torch
import time
import numpy as np
from models import DeepLabv3_plus
from models.backbone import shufflenet_v2, mobilenet_v2
def get_time(model, h, w):
run_time = list()
for i in range(0, 100):
input = torch.randn(1, 3, h, w).cuda()
torch.cuda.synchronize()
torch.cuda.synchronize()
start = time.perf_counter()
with torch.no_grad():
_ = model(input)
torch.cuda.synchronize() # wait for mm to finish
end = time.perf_counter()
run_time.append(end - start)
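    # discard the first measurement, which includes CUDA/cuDNN warm-up overhead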
run_time.pop(0)
print('Mean running time is ', np.mean(run_time))
m1 = DeepLabv3_plus(3, 19, 'mobilenet_v2').cuda().eval()
m2 = DeepLabv3_plus(3, 19, 'shufflenet_v2').cuda().eval()
m3 = mobilenet_v2().cuda().eval()
m4 = shufflenet_v2().cuda().eval()
get_time(m1, 512, 512)
get_time(m2, 512, 512)
get_time(m3, 512, 512)
get_time(m4, 512, 512)
get_time(m1, 512, 1024)
get_time(m2, 512, 1024)
get_time(m3, 512, 1024)
get_time(m4, 512, 1024)
|
[
"353837214@qq.com"
] |
353837214@qq.com
|
947cfb012f911f411350834b9df978314ccbec9f
|
53ce4546455f71462ab7f190ca4242c92812bd96
|
/CCC '01/CCC '01 J2 - Mod Inverse.py
|
b495052cfcb69987f8deb6df43b2c3474ba561f9
|
[] |
no_license
|
Ri-Hong/CCC-Solutions
|
a1fb1f6eaabd72395590993672b2b8080b5d931c
|
91e700c4c3b85490c41f3f26dbe2f96165dd0f61
|
refs/heads/master
| 2023-05-04T17:59:04.599290
| 2021-05-21T18:32:26
| 2021-05-21T18:32:26
| 280,291,363
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,199
|
py
|
'''
Author: Ri Hong
Date AC'd: Jan. 29, 2020
'''
#Explanation
'''
*Note that I will be using variable names that are identical to those provided by the problem statement.
We can loop through all candidate numbers (the code tries n = 0..99) and call each of them n. If the remainder upon dividing (x * n) by m is 1, then
n is the modulus inverse. In other words, if (n * x) % m == 1, then n is the modulus inverse. If by the end of all the ns we have not found a modulus inverse
yet, then we can assume that no such integer exists to satisfy the equation (n * x) % m == 1.
'''
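# Worked example: x = 3, m = 7 -> n = 5, since (5 * 3) % 7 = 15 % 7 = 1.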
x = int(input()) #get x as an integer
m = int(input()) #get m as an integer
modulusInverseFound = False #this stores whether a modulus inverse has been found
for n in range(100): #loop through all the possible values of n
if (n * x) % m == 1: #if the remainder upon dividing (x * n) by m is 1
modulusInverseFound = True #set modulusInverseFound to true because we have found a modulus inverse
print(n) #print n
break #exit the for loop
if modulusInverseFound == False: #if a modulus inverse has not been found after all the looping
print("No such integer exists.") #print No such integer exists.
|
[
"65200215+Ri-Hong@users.noreply.github.com"
] |
65200215+Ri-Hong@users.noreply.github.com
|
e319df8e0d4da45127c328c085f2b0b42699b796
|
76c58dd979770472bea3fc6d59986d6bee4e5b66
|
/week_1/get_frames.py
|
ff1caf3847da319adf35fa35b302b7c77eaaf5cb
|
[] |
no_license
|
alexkhrystoforov/It-Jim-Internship
|
253c9a29763d52e1d1837f8591c26d8f3d10ca02
|
904c699bdf9d992568766b57d0b3690166869ff9
|
refs/heads/master
| 2023-01-30T05:50:14.391267
| 2020-12-09T08:33:21
| 2020-12-09T08:33:21
| 278,134,546
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
import cv2
input_video = 'input_video.avi'
cap = cv2.VideoCapture(input_video)
# number of frames in the video
print(cap.get(cv2.CAP_PROP_FRAME_COUNT))
current_frame = 0
ret, frame = cap.read()
while ret:
cv2.imwrite('frames/frame' + str(current_frame) + '.jpg', frame)
current_frame += 1
ret, frame = cap.read()
cap.release()
|
[
"noreply@github.com"
] |
alexkhrystoforov.noreply@github.com
|
fca0f8c92bc29fc97b02f6f75e34fa9a9f4344f8
|
f82757475ea13965581c2147ff57123b361c5d62
|
/gi-stubs/repository/EDataBook/BookMetaBackend.py
|
203648556f38a3d240028a1ddfee58e1ab34c6ad
|
[] |
no_license
|
ttys3/pygobject-stubs
|
9b15d1b473db06f47e5ffba5ad0a31d6d1becb57
|
d0e6e93399212aada4386d2ce80344eb9a31db48
|
refs/heads/master
| 2022-09-23T12:58:44.526554
| 2020-06-06T04:15:00
| 2020-06-06T04:15:00
| 269,693,287
| 8
| 2
| null | 2020-06-05T15:57:54
| 2020-06-05T15:57:54
| null |
UTF-8
|
Python
| false
| false
| 46,398
|
py
|
# encoding: utf-8
# module gi.repository.EDataBook
# from /usr/lib64/girepository-1.0/EDataBook-1.2.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.EBackend as __gi_repository_EBackend
import gi.repository.EDataServer as __gi_repository_EDataServer
import gi.repository.Gio as __gi_repository_Gio
import gobject as __gobject
from .BookBackendSync import BookBackendSync
class BookMetaBackend(BookBackendSync):
"""
:Constructors:
::
BookMetaBackend(**properties)
"""
def add_view(self, view): # real signature unknown; restored from __doc__
""" add_view(self, view:EDataBook.DataBookView) """
pass
def bind_property(self, *args, **kwargs): # real signature unknown
pass
def bind_property_full(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def chain(self, *args, **kwargs): # real signature unknown
pass
def compat_control(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def configure_direct(self, config): # real signature unknown; restored from __doc__
""" configure_direct(self, config:str) """
pass
def connect(self, *args, **kwargs): # real signature unknown
pass
def connect_after(self, *args, **kwargs): # real signature unknown
pass
def connect_data(self, detailed_signal, handler, *data, **kwargs): # reliably restored by inspect
"""
Connect a callback to the given signal with optional user data.
:param str detailed_signal:
A detailed signal to connect to.
:param callable handler:
Callback handler to connect to the signal.
:param *data:
Variable data which is passed through to the signal handler.
:param GObject.ConnectFlags connect_flags:
Flags used for connection options.
:returns:
A signal id which can be used with disconnect.
"""
pass
def connect_object(self, *args, **kwargs): # real signature unknown
pass
def connect_object_after(self, *args, **kwargs): # real signature unknown
pass
def connect_sync(self, credentials=None, cancellable=None): # real signature unknown; restored from __doc__
""" connect_sync(self, credentials:EDataServer.NamedParameters=None, cancellable:Gio.Cancellable=None) -> bool, out_auth_result:EDataServer.SourceAuthenticationResult, out_certificate_pem:str, out_certificate_errors:Gio.TlsCertificateFlags """
return False
def create_contacts(self, vcards, opflags, cancellable=None): # real signature unknown; restored from __doc__
""" create_contacts(self, vcards:str, opflags:int, cancellable:Gio.Cancellable=None) -> bool, out_contacts:list """
return False
def create_contacts_finish(self, result, out_contacts): # real signature unknown; restored from __doc__
""" create_contacts_finish(self, result:Gio.AsyncResult, out_contacts:GLib.Queue) -> bool """
return False
def create_contacts_sync(self, vcards, opflags, out_contacts, cancellable=None): # real signature unknown; restored from __doc__
""" create_contacts_sync(self, vcards:str, opflags:int, out_contacts:GLib.Queue, cancellable:Gio.Cancellable=None) -> bool """
return False
def create_cursor(self, sort_fields, sort_types, n_fields): # real signature unknown; restored from __doc__
""" create_cursor(self, sort_fields:EBookContacts.ContactField, sort_types:EBookContacts.BookCursorSortType, n_fields:int) -> EDataBook.DataBookCursor """
pass
def credentials_required(self, reason, certificate_pem, certificate_errors, op_error=None, cancellable=None, callback=None, user_data=None): # real signature unknown; restored from __doc__
""" credentials_required(self, reason:EDataServer.SourceCredentialsReason, certificate_pem:str, certificate_errors:Gio.TlsCertificateFlags, op_error:error=None, cancellable:Gio.Cancellable=None, callback:Gio.AsyncReadyCallback=None, user_data=None) """
pass
def credentials_required_finish(self, result): # real signature unknown; restored from __doc__
""" credentials_required_finish(self, result:Gio.AsyncResult) -> bool """
return False
def credentials_required_sync(self, reason, certificate_pem, certificate_errors, op_error=None, cancellable=None): # real signature unknown; restored from __doc__
""" credentials_required_sync(self, reason:EDataServer.SourceCredentialsReason, certificate_pem:str, certificate_errors:Gio.TlsCertificateFlags, op_error:error=None, cancellable:Gio.Cancellable=None) -> bool """
return False
def delete_cursor(self, cursor): # real signature unknown; restored from __doc__
""" delete_cursor(self, cursor:EDataBook.DataBookCursor) -> bool """
return False
def disconnect(*args, **kwargs): # reliably restored by inspect
""" signal_handler_disconnect(instance:GObject.Object, handler_id:int) """
pass
def disconnect_by_func(self, *args, **kwargs): # real signature unknown
pass
def disconnect_sync(self, cancellable=None): # real signature unknown; restored from __doc__
""" disconnect_sync(self, cancellable:Gio.Cancellable=None) -> bool """
return False
def do_authenticate_sync(self, *args, **kwargs): # real signature unknown
""" authenticate_sync(self, credentials:EDataServer.NamedParameters, out_certificate_pem:str, out_certificate_errors:Gio.TlsCertificateFlags, cancellable:Gio.Cancellable=None) -> EDataServer.SourceAuthenticationResult """
pass
def do_closed(self, *args, **kwargs): # real signature unknown
""" closed(self, sender:str) """
pass
def do_connect_sync(self, *args, **kwargs): # real signature unknown
""" connect_sync(self, credentials:EDataServer.NamedParameters=None, cancellable:Gio.Cancellable=None) -> bool, out_auth_result:EDataServer.SourceAuthenticationResult, out_certificate_pem:str, out_certificate_errors:Gio.TlsCertificateFlags """
pass
def do_disconnect_sync(self, *args, **kwargs): # real signature unknown
""" disconnect_sync(self, cancellable:Gio.Cancellable=None) -> bool """
pass
def do_get_changes_sync(self, *args, **kwargs): # real signature unknown
""" get_changes_sync(self, last_sync_tag:str=None, is_repeat:bool, cancellable:Gio.Cancellable=None) -> bool, out_new_sync_tag:str, out_repeat:bool, out_created_objects:list, out_modified_objects:list, out_removed_objects:list """
pass
def do_get_destination_address(self, *args, **kwargs): # real signature unknown
""" get_destination_address(self) -> bool, host:str, port:int """
pass
def do_get_ssl_error_details(self, *args, **kwargs): # real signature unknown
""" get_ssl_error_details(self) -> bool, out_certificate_pem:str, out_certificate_errors:Gio.TlsCertificateFlags """
pass
def do_impl_configure_direct(self, *args, **kwargs): # real signature unknown
""" impl_configure_direct(self, config:str) """
pass
def do_impl_create_contacts(self, *args, **kwargs): # real signature unknown
""" impl_create_contacts(self, book:EDataBook.DataBook, opid:int, cancellable:Gio.Cancellable=None, vcards:str, opflags:int) """
pass
def do_impl_delete_cursor(self, *args, **kwargs): # real signature unknown
""" impl_delete_cursor(self, cursor:EDataBook.DataBookCursor) -> bool """
pass
def do_impl_dup_locale(self, *args, **kwargs): # real signature unknown
""" impl_dup_locale(self) -> str """
pass
def do_impl_get_backend_property(self, *args, **kwargs): # real signature unknown
""" impl_get_backend_property(self, prop_name:str) -> str """
pass
def do_impl_get_contact(self, *args, **kwargs): # real signature unknown
""" impl_get_contact(self, book:EDataBook.DataBook, opid:int, cancellable:Gio.Cancellable=None, id:str) """
pass
def do_impl_get_contact_list(self, *args, **kwargs): # real signature unknown
""" impl_get_contact_list(self, book:EDataBook.DataBook, opid:int, cancellable:Gio.Cancellable=None, query:str) """
pass
def do_impl_get_contact_list_uids(self, *args, **kwargs): # real signature unknown
""" impl_get_contact_list_uids(self, book:EDataBook.DataBook, opid:int, cancellable:Gio.Cancellable=None, query:str) """
pass
def do_impl_modify_contacts(self, *args, **kwargs): # real signature unknown
""" impl_modify_contacts(self, book:EDataBook.DataBook, opid:int, cancellable:Gio.Cancellable=None, vcards:str, opflags:int) """
pass
def do_impl_notify_update(self, *args, **kwargs): # real signature unknown
""" impl_notify_update(self, contact:EBookContacts.Contact) """
pass
def do_impl_open(self, *args, **kwargs): # real signature unknown
""" impl_open(self, book:EDataBook.DataBook, opid:int, cancellable:Gio.Cancellable=None) """
pass
def do_impl_refresh(self, *args, **kwargs): # real signature unknown
""" impl_refresh(self, book:EDataBook.DataBook, opid:int, cancellable:Gio.Cancellable=None) """
pass
def do_impl_remove_contacts(self, *args, **kwargs): # real signature unknown
""" impl_remove_contacts(self, book:EDataBook.DataBook, opid:int, cancellable:Gio.Cancellable=None, uids:str, opflags:int) """
pass
def do_impl_set_locale(self, *args, **kwargs): # real signature unknown
""" impl_set_locale(self, locale:str, cancellable:Gio.Cancellable=None) -> bool """
pass
def do_impl_start_view(self, *args, **kwargs): # real signature unknown
""" impl_start_view(self, view:EDataBook.DataBookView) """
pass
def do_impl_stop_view(self, *args, **kwargs): # real signature unknown
""" impl_stop_view(self, view:EDataBook.DataBookView) """
pass
def do_list_existing_sync(self, *args, **kwargs): # real signature unknown
""" list_existing_sync(self, cancellable:Gio.Cancellable=None) -> bool, out_new_sync_tag:str, out_existing_objects:list """
pass
def do_load_contact_sync(self, *args, **kwargs): # real signature unknown
""" load_contact_sync(self, uid:str, extra:str=None, cancellable:Gio.Cancellable=None) -> bool, out_contact:EBookContacts.Contact, out_extra:str """
pass
def do_open_sync(self, *args, **kwargs): # real signature unknown
""" open_sync(self, cancellable:Gio.Cancellable=None) -> bool """
pass
def do_prepare_shutdown(self, *args, **kwargs): # real signature unknown
""" prepare_shutdown(self) """
pass
def do_refresh_sync(self, *args, **kwargs): # real signature unknown
""" refresh_sync(self, cancellable:Gio.Cancellable=None) -> bool """
pass
def do_remove_contact_sync(self, *args, **kwargs): # real signature unknown
""" remove_contact_sync(self, conflict_resolution:EDataServer.ConflictResolution, uid:str, extra:str=None, object:str=None, opflags:int, cancellable:Gio.Cancellable=None) -> bool """
pass
def do_requires_reconnect(self, *args, **kwargs): # real signature unknown
""" requires_reconnect(self) -> bool """
pass
def do_save_contact_sync(self, *args, **kwargs): # real signature unknown
""" save_contact_sync(self, overwrite_existing:bool, conflict_resolution:EDataServer.ConflictResolution, contact:EBookContacts.Contact, extra:str=None, opflags:int, cancellable:Gio.Cancellable=None) -> bool, out_new_uid:str, out_new_extra:str """
pass
def do_search_sync(self, *args, **kwargs): # real signature unknown
""" search_sync(self, expr:str=None, meta_contact:bool, cancellable:Gio.Cancellable=None) -> bool, out_contacts:list """
pass
def do_search_uids_sync(self, *args, **kwargs): # real signature unknown
""" search_uids_sync(self, expr:str=None, cancellable:Gio.Cancellable=None) -> bool, out_uids:list """
pass
def do_shutdown(self, *args, **kwargs): # real signature unknown
""" shutdown(self) """
pass
def do_source_changed(self, *args, **kwargs): # real signature unknown
""" source_changed(self) """
pass
def dup_cache_dir(self): # real signature unknown; restored from __doc__
""" dup_cache_dir(self) -> str """
return ""
def dup_locale(self): # real signature unknown; restored from __doc__
""" dup_locale(self) -> str """
return ""
def dup_sync_tag(self): # real signature unknown; restored from __doc__
""" dup_sync_tag(self) -> str or None """
return ""
def emit(self, *args, **kwargs): # real signature unknown
pass
def emit_stop_by_name(self, detailed_signal): # reliably restored by inspect
""" Deprecated, please use stop_emission_by_name. """
pass
def empty_cache_sync(self, cancellable=None): # real signature unknown; restored from __doc__
""" empty_cache_sync(self, cancellable:Gio.Cancellable=None) -> bool """
return False
def ensure_connected_sync(self, cancellable=None): # real signature unknown; restored from __doc__
""" ensure_connected_sync(self, cancellable:Gio.Cancellable=None) -> bool """
return False
def ensure_online_state_updated(self, cancellable=None): # real signature unknown; restored from __doc__
""" ensure_online_state_updated(self, cancellable:Gio.Cancellable=None) """
pass
def ensure_source_status_connected(self): # real signature unknown; restored from __doc__
""" ensure_source_status_connected(self) """
pass
def find_property(self, property_name): # real signature unknown; restored from __doc__
""" find_property(self, property_name:str) -> GObject.ParamSpec """
pass
def force_floating(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def foreach_view(self, func=None, user_data=None): # real signature unknown; restored from __doc__
""" foreach_view(self, func:EDataBook.BookBackendForeachViewFunc=None, user_data=None) -> bool """
return False
def foreach_view_notify_progress(self, only_completed_views, percent, message=None): # real signature unknown; restored from __doc__
""" foreach_view_notify_progress(self, only_completed_views:bool, percent:int, message:str=None) """
pass
def freeze_notify(self): # reliably restored by inspect
"""
Freezes the object's property-changed notification queue.
:returns:
A context manager which optionally can be used to
automatically thaw notifications.
This will freeze the object so that "notify" signals are blocked until
the thaw_notify() method is called.
.. code-block:: python
with obj.freeze_notify():
pass
"""
pass
def getv(self, names, values): # real signature unknown; restored from __doc__
""" getv(self, names:list, values:list) """
pass
def get_backend_property(self, prop_name): # real signature unknown; restored from __doc__
""" get_backend_property(self, prop_name:str) -> str """
return ""
def get_cache_dir(self): # real signature unknown; restored from __doc__
""" get_cache_dir(self) -> str """
return ""
def get_capabilities(self): # real signature unknown; restored from __doc__
""" get_capabilities(self) -> str """
return ""
    def get_changes_sync(self, last_sync_tag=None, is_repeat=False, cancellable=None): # real signature unknown; restored from __doc__ (is_repeat given a default so the signature is valid Python)
""" get_changes_sync(self, last_sync_tag:str=None, is_repeat:bool, cancellable:Gio.Cancellable=None) -> bool, out_new_sync_tag:str, out_repeat:bool, out_created_objects:list, out_modified_objects:list, out_removed_objects:list """
return False
def get_connected_writable(self): # real signature unknown; restored from __doc__
""" get_connected_writable(self) -> bool """
return False
def get_contact(self, uid, cancellable=None): # real signature unknown; restored from __doc__
""" get_contact(self, uid:str, cancellable:Gio.Cancellable=None) -> EBookContacts.Contact """
pass
def get_contact_finish(self, result): # real signature unknown; restored from __doc__
""" get_contact_finish(self, result:Gio.AsyncResult) -> EBookContacts.Contact """
pass
def get_contact_list(self, query, cancellable=None): # real signature unknown; restored from __doc__
""" get_contact_list(self, query:str, cancellable:Gio.Cancellable=None) -> bool, out_contacts:list """
return False
def get_contact_list_finish(self, result, out_contacts): # real signature unknown; restored from __doc__
""" get_contact_list_finish(self, result:Gio.AsyncResult, out_contacts:GLib.Queue) -> bool """
return False
def get_contact_list_sync(self, query, out_contacts, cancellable=None): # real signature unknown; restored from __doc__
""" get_contact_list_sync(self, query:str, out_contacts:GLib.Queue, cancellable:Gio.Cancellable=None) -> bool """
return False
def get_contact_list_uids(self, query, cancellable=None): # real signature unknown; restored from __doc__
""" get_contact_list_uids(self, query:str, cancellable:Gio.Cancellable=None) -> bool, out_uids:list """
return False
def get_contact_list_uids_finish(self, result, out_uids): # real signature unknown; restored from __doc__
""" get_contact_list_uids_finish(self, result:Gio.AsyncResult, out_uids:GLib.Queue) -> bool """
return False
def get_contact_list_uids_sync(self, query, out_uids, cancellable=None): # real signature unknown; restored from __doc__
""" get_contact_list_uids_sync(self, query:str, out_uids:GLib.Queue, cancellable:Gio.Cancellable=None) -> bool """
return False
def get_contact_sync(self, uid, cancellable=None): # real signature unknown; restored from __doc__
""" get_contact_sync(self, uid:str, cancellable:Gio.Cancellable=None) -> EBookContacts.Contact """
pass
def get_data(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def get_destination_address(self): # real signature unknown; restored from __doc__
""" get_destination_address(self) -> bool, host:str, port:int """
return False
def get_direct_book(self): # real signature unknown; restored from __doc__
""" get_direct_book(self) -> EDataBook.DataBookDirect or None """
pass
def get_ever_connected(self): # real signature unknown; restored from __doc__
""" get_ever_connected(self) -> bool """
return False
def get_online(self): # real signature unknown; restored from __doc__
""" get_online(self) -> bool """
return False
def get_properties(self, *args, **kwargs): # real signature unknown
pass
def get_property(self, *args, **kwargs): # real signature unknown
pass
def get_qdata(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def get_registry(self): # real signature unknown; restored from __doc__
""" get_registry(self) -> EDataServer.SourceRegistry """
pass
def get_source(self): # real signature unknown; restored from __doc__
""" get_source(self) -> EDataServer.Source """
pass
def get_ssl_error_details(self): # real signature unknown; restored from __doc__
""" get_ssl_error_details(self) -> bool, out_certificate_pem:str, out_certificate_errors:Gio.TlsCertificateFlags """
return False
def get_user_prompter(self): # real signature unknown; restored from __doc__
""" get_user_prompter(self) """
pass
def get_writable(self): # real signature unknown; restored from __doc__
""" get_writable(self) -> bool """
return False
def handler_block(obj, handler_id): # reliably restored by inspect
"""
Blocks the signal handler from being invoked until
handler_unblock() is called.
:param GObject.Object obj:
Object instance to block handlers for.
:param int handler_id:
Id of signal to block.
:returns:
A context manager which optionally can be used to
automatically unblock the handler:
.. code-block:: python
with GObject.signal_handler_block(obj, id):
pass
"""
pass
def handler_block_by_func(self, *args, **kwargs): # real signature unknown
pass
def handler_disconnect(*args, **kwargs): # reliably restored by inspect
""" signal_handler_disconnect(instance:GObject.Object, handler_id:int) """
pass
def handler_is_connected(*args, **kwargs): # reliably restored by inspect
""" signal_handler_is_connected(instance:GObject.Object, handler_id:int) -> bool """
pass
def handler_unblock(*args, **kwargs): # reliably restored by inspect
""" signal_handler_unblock(instance:GObject.Object, handler_id:int) """
pass
def handler_unblock_by_func(self, *args, **kwargs): # real signature unknown
pass
def inline_local_photos_sync(self, contact, cancellable=None): # real signature unknown; restored from __doc__
""" inline_local_photos_sync(self, contact:EBookContacts.Contact, cancellable:Gio.Cancellable=None) -> bool """
return False
def install_properties(self, pspecs): # real signature unknown; restored from __doc__
""" install_properties(self, pspecs:list) """
pass
def install_property(self, property_id, pspec): # real signature unknown; restored from __doc__
""" install_property(self, property_id:int, pspec:GObject.ParamSpec) """
pass
def interface_find_property(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def interface_install_property(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def interface_list_properties(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def is_destination_reachable(self, cancellable=None): # real signature unknown; restored from __doc__
""" is_destination_reachable(self, cancellable:Gio.Cancellable=None) -> bool """
return False
def is_floating(self): # real signature unknown; restored from __doc__
""" is_floating(self) -> bool """
return False
def is_opened(self): # real signature unknown; restored from __doc__
""" is_opened(self) -> bool """
return False
def is_readonly(self): # real signature unknown; restored from __doc__
""" is_readonly(self) -> bool """
return False
def list_existing_sync(self, cancellable=None): # real signature unknown; restored from __doc__
""" list_existing_sync(self, cancellable:Gio.Cancellable=None) -> bool, out_new_sync_tag:str, out_existing_objects:list """
return False
def list_properties(self): # real signature unknown; restored from __doc__
""" list_properties(self) -> list, n_properties:int """
return []
def list_views(self): # real signature unknown; restored from __doc__
""" list_views(self) -> list """
return []
def load_contact_sync(self, uid, extra=None, cancellable=None): # real signature unknown; restored from __doc__
""" load_contact_sync(self, uid:str, extra:str=None, cancellable:Gio.Cancellable=None) -> bool, out_contact:EBookContacts.Contact, out_extra:str """
return False
def modify_contacts(self, vcards, opflags, cancellable=None): # real signature unknown; restored from __doc__
""" modify_contacts(self, vcards:str, opflags:int, cancellable:Gio.Cancellable=None) -> bool, out_contacts:list """
return False
def modify_contacts_finish(self, result): # real signature unknown; restored from __doc__
""" modify_contacts_finish(self, result:Gio.AsyncResult) -> bool """
return False
def modify_contacts_sync(self, vcards, opflags, cancellable=None): # real signature unknown; restored from __doc__
""" modify_contacts_sync(self, vcards:str, opflags:int, cancellable:Gio.Cancellable=None) -> bool """
return False
def newv(self, object_type, parameters): # real signature unknown; restored from __doc__
""" newv(object_type:GType, parameters:list) -> GObject.Object """
pass
def notify(self, property_name): # real signature unknown; restored from __doc__
""" notify(self, property_name:str) """
pass
def notify_by_pspec(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def notify_complete(self): # real signature unknown; restored from __doc__
""" notify_complete(self) """
pass
def notify_error(self, message): # real signature unknown; restored from __doc__
""" notify_error(self, message:str) """
pass
def notify_property_changed(self, prop_name, prop_value=None): # real signature unknown; restored from __doc__
""" notify_property_changed(self, prop_name:str, prop_value:str=None) """
pass
def notify_remove(self, id): # real signature unknown; restored from __doc__
""" notify_remove(self, id:str) """
pass
def notify_update(self, contact): # real signature unknown; restored from __doc__
""" notify_update(self, contact:EBookContacts.Contact) """
pass
def open(self, cancellable=None): # real signature unknown; restored from __doc__
""" open(self, cancellable:Gio.Cancellable=None) -> bool """
return False
def open_finish(self, result): # real signature unknown; restored from __doc__
""" open_finish(self, result:Gio.AsyncResult) -> bool """
return False
def open_sync(self, cancellable=None): # real signature unknown; restored from __doc__
""" open_sync(self, cancellable:Gio.Cancellable=None) -> bool """
return False
def override_property(self, property_id, name): # real signature unknown; restored from __doc__
""" override_property(self, property_id:int, name:str) """
pass
def prepare_for_completion(self, opid, result_queue): # real signature unknown; restored from __doc__
""" prepare_for_completion(self, opid:int, result_queue:GLib.Queue) -> Gio.SimpleAsyncResult """
pass
def prepare_shutdown(self): # real signature unknown; restored from __doc__
""" prepare_shutdown(self) """
pass
def process_changes_sync(self, created_objects=None, modified_objects=None, removed_objects=None, cancellable=None): # real signature unknown; restored from __doc__
""" process_changes_sync(self, created_objects:list=None, modified_objects:list=None, removed_objects:list=None, cancellable:Gio.Cancellable=None) -> bool """
return False
def ref(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def refresh(self, cancellable=None): # real signature unknown; restored from __doc__
""" refresh(self, cancellable:Gio.Cancellable=None) -> bool """
return False
def refresh_finish(self, result): # real signature unknown; restored from __doc__
""" refresh_finish(self, result:Gio.AsyncResult) -> bool """
return False
def refresh_sync(self, cancellable=None): # real signature unknown; restored from __doc__
""" refresh_sync(self, cancellable:Gio.Cancellable=None) -> bool """
return False
def ref_cache(self): # real signature unknown; restored from __doc__
""" ref_cache(self) -> EDataBook.BookCache """
pass
def ref_connectable(self): # real signature unknown; restored from __doc__
""" ref_connectable(self) -> Gio.SocketConnectable or None """
pass
def ref_data_book(self): # real signature unknown; restored from __doc__
""" ref_data_book(self) -> EDataBook.DataBook or None """
pass
def ref_main_context(self): # real signature unknown; restored from __doc__
""" ref_main_context(self) -> GLib.MainContext """
pass
def ref_proxy_resolver(self): # real signature unknown; restored from __doc__
""" ref_proxy_resolver(self) -> Gio.ProxyResolver or None """
pass
def ref_sink(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def remove_contacts(self, uids, opflags, cancellable=None): # real signature unknown; restored from __doc__
""" remove_contacts(self, uids:str, opflags:int, cancellable:Gio.Cancellable=None) -> bool, out_removed_uids:list """
return False
def remove_contacts_finish(self, result): # real signature unknown; restored from __doc__
""" remove_contacts_finish(self, result:Gio.AsyncResult) -> bool """
return False
def remove_contacts_sync(self, uids, opflags, cancellable=None): # real signature unknown; restored from __doc__
""" remove_contacts_sync(self, uids:str, opflags:int, cancellable:Gio.Cancellable=None) -> bool """
return False
    def remove_contact_sync(self, conflict_resolution, uid, extra=None, p_object=None, opflags=0, cancellable=None): # real signature unknown; restored from __doc__ (opflags given a default so the signature is valid Python)
""" remove_contact_sync(self, conflict_resolution:EDataServer.ConflictResolution, uid:str, extra:str=None, object:str=None, opflags:int, cancellable:Gio.Cancellable=None) -> bool """
return False
def remove_view(self, view): # real signature unknown; restored from __doc__
""" remove_view(self, view:EDataBook.DataBookView) """
pass
def replace_data(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def replace_qdata(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def requires_reconnect(self): # real signature unknown; restored from __doc__
""" requires_reconnect(self) -> bool """
return False
def run_dispose(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
    def save_contact_sync(self, overwrite_existing, conflict_resolution, contact, extra=None, opflags=0, cancellable=None): # real signature unknown; restored from __doc__ (opflags given a default so the signature is valid Python)
""" save_contact_sync(self, overwrite_existing:bool, conflict_resolution:EDataServer.ConflictResolution, contact:EBookContacts.Contact, extra:str=None, opflags:int, cancellable:Gio.Cancellable=None) -> bool, out_new_uid:str, out_new_extra:str """
return False
def schedule_authenticate(self, credentials=None): # real signature unknown; restored from __doc__
""" schedule_authenticate(self, credentials:EDataServer.NamedParameters=None) """
pass
def schedule_credentials_required(self, reason, certificate_pem, certificate_errors, op_error=None, cancellable=None, who_calls=None): # real signature unknown; restored from __doc__
""" schedule_credentials_required(self, reason:EDataServer.SourceCredentialsReason, certificate_pem:str, certificate_errors:Gio.TlsCertificateFlags, op_error:error=None, cancellable:Gio.Cancellable=None, who_calls:str=None) """
pass
    def schedule_custom_operation(self, use_cancellable=None, func=None, user_data=None): # real signature unknown; restored from __doc__ (func given a default so the signature is valid Python)
""" schedule_custom_operation(self, use_cancellable:Gio.Cancellable=None, func:EDataBook.BookBackendCustomOpFunc, user_data=None) """
pass
def schedule_refresh(self): # real signature unknown; restored from __doc__
""" schedule_refresh(self) """
pass
    def search_sync(self, expr=None, meta_contact=False, cancellable=None): # real signature unknown; restored from __doc__ (meta_contact given a default so the signature is valid Python)
""" search_sync(self, expr:str=None, meta_contact:bool, cancellable:Gio.Cancellable=None) -> bool, out_contacts:list """
return False
def search_uids_sync(self, expr=None, cancellable=None): # real signature unknown; restored from __doc__
""" search_uids_sync(self, expr:str=None, cancellable:Gio.Cancellable=None) -> bool, out_uids:list """
return False
def set_cache(self, cache): # real signature unknown; restored from __doc__
""" set_cache(self, cache:EDataBook.BookCache) """
pass
def set_cache_dir(self, cache_dir): # real signature unknown; restored from __doc__
""" set_cache_dir(self, cache_dir:str) """
pass
def set_connectable(self, connectable): # real signature unknown; restored from __doc__
""" set_connectable(self, connectable:Gio.SocketConnectable) """
pass
def set_connected_writable(self, value): # real signature unknown; restored from __doc__
""" set_connected_writable(self, value:bool) """
pass
def set_data(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def set_data_book(self, data_book): # real signature unknown; restored from __doc__
""" set_data_book(self, data_book:EDataBook.DataBook) """
pass
def set_ever_connected(self, value): # real signature unknown; restored from __doc__
""" set_ever_connected(self, value:bool) """
pass
def set_locale(self, locale, cancellable=None): # real signature unknown; restored from __doc__
""" set_locale(self, locale:str, cancellable:Gio.Cancellable=None) -> bool """
return False
def set_online(self, online): # real signature unknown; restored from __doc__
""" set_online(self, online:bool) """
pass
def set_properties(self, *args, **kwargs): # real signature unknown
pass
def set_property(self, *args, **kwargs): # real signature unknown
pass
def set_writable(self, writable): # real signature unknown; restored from __doc__
""" set_writable(self, writable:bool) """
pass
def split_changes_sync(self, objects, cancellable=None): # real signature unknown; restored from __doc__
""" split_changes_sync(self, objects:list, cancellable:Gio.Cancellable=None) -> bool, objects:list, out_created_objects:list, out_modified_objects:list, out_removed_objects:list """
return False
def start_view(self, view): # real signature unknown; restored from __doc__
""" start_view(self, view:EDataBook.DataBookView) """
pass
def steal_data(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def steal_qdata(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def stop_emission(self, detailed_signal): # reliably restored by inspect
""" Deprecated, please use stop_emission_by_name. """
pass
def stop_emission_by_name(*args, **kwargs): # reliably restored by inspect
""" signal_stop_emission_by_name(instance:GObject.Object, detailed_signal:str) """
pass
def stop_view(self, view): # real signature unknown; restored from __doc__
""" stop_view(self, view:EDataBook.DataBookView) """
pass
def store_inline_photos_sync(self, contact, cancellable=None): # real signature unknown; restored from __doc__
""" store_inline_photos_sync(self, contact:EBookContacts.Contact, cancellable:Gio.Cancellable=None) -> bool """
return False
def sync(self): # real signature unknown; restored from __doc__
""" sync(self) """
pass
def thaw_notify(self): # real signature unknown; restored from __doc__
""" thaw_notify(self) """
pass
def trust_prompt(self, parameters, cancellable=None, callback=None, user_data=None): # real signature unknown; restored from __doc__
""" trust_prompt(self, parameters:EDataServer.NamedParameters, cancellable:Gio.Cancellable=None, callback:Gio.AsyncReadyCallback=None, user_data=None) """
pass
def trust_prompt_finish(self, result): # real signature unknown; restored from __doc__
""" trust_prompt_finish(self, result:Gio.AsyncResult) -> EDataServer.TrustPromptResponse """
pass
def trust_prompt_sync(self, parameters, cancellable=None): # real signature unknown; restored from __doc__
""" trust_prompt_sync(self, parameters:EDataServer.NamedParameters, cancellable:Gio.Cancellable=None) -> EDataServer.TrustPromptResponse """
pass
def unref(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def watch_closure(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def weak_ref(self, *args, **kwargs): # real signature unknown
pass
def _force_floating(self, *args, **kwargs): # real signature unknown
""" force_floating(self) """
pass
def _ref(self, *args, **kwargs): # real signature unknown
""" ref(self) -> GObject.Object """
pass
def _ref_sink(self, *args, **kwargs): # real signature unknown
""" ref_sink(self) -> GObject.Object """
pass
def _unref(self, *args, **kwargs): # real signature unknown
""" unref(self) """
pass
def _unsupported_data_method(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def _unsupported_method(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def __copy__(self, *args, **kwargs): # real signature unknown
pass
def __deepcopy__(self, *args, **kwargs): # real signature unknown
pass
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self, **properties): # real signature unknown; restored from __doc__
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
g_type_instance = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
parent = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
priv = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
qdata = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
ref_count = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__gpointer__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__grefcount__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
props = None # (!) real value is '<gi._gi.GProps object at 0x7f09d4187070>'
__class__ = None # (!) real value is "<class 'gi.types.GObjectMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': ObjectInfo(BookMetaBackend), '__module__': 'gi.repository.EDataBook', '__gtype__': <GType EBookMetaBackend (94654337915056)>, '__doc__': None, '__gsignals__': {}, 'connect_sync': gi.FunctionInfo(connect_sync), 'disconnect_sync': gi.FunctionInfo(disconnect_sync), 'dup_sync_tag': gi.FunctionInfo(dup_sync_tag), 'empty_cache_sync': gi.FunctionInfo(empty_cache_sync), 'ensure_connected_sync': gi.FunctionInfo(ensure_connected_sync), 'get_capabilities': gi.FunctionInfo(get_capabilities), 'get_changes_sync': gi.FunctionInfo(get_changes_sync), 'get_connected_writable': gi.FunctionInfo(get_connected_writable), 'get_ever_connected': gi.FunctionInfo(get_ever_connected), 'get_ssl_error_details': gi.FunctionInfo(get_ssl_error_details), 'inline_local_photos_sync': gi.FunctionInfo(inline_local_photos_sync), 'list_existing_sync': gi.FunctionInfo(list_existing_sync), 'load_contact_sync': gi.FunctionInfo(load_contact_sync), 'process_changes_sync': gi.FunctionInfo(process_changes_sync), 'ref_cache': gi.FunctionInfo(ref_cache), 'refresh_sync': gi.FunctionInfo(refresh_sync), 'remove_contact_sync': gi.FunctionInfo(remove_contact_sync), 'requires_reconnect': gi.FunctionInfo(requires_reconnect), 'save_contact_sync': gi.FunctionInfo(save_contact_sync), 'schedule_refresh': gi.FunctionInfo(schedule_refresh), 'search_sync': gi.FunctionInfo(search_sync), 'search_uids_sync': gi.FunctionInfo(search_uids_sync), 'set_cache': gi.FunctionInfo(set_cache), 'set_connected_writable': gi.FunctionInfo(set_connected_writable), 'set_ever_connected': gi.FunctionInfo(set_ever_connected), 'split_changes_sync': gi.FunctionInfo(split_changes_sync), 'store_inline_photos_sync': gi.FunctionInfo(store_inline_photos_sync), 'do_connect_sync': gi.VFuncInfo(connect_sync), 'do_disconnect_sync': gi.VFuncInfo(disconnect_sync), 'do_get_changes_sync': gi.VFuncInfo(get_changes_sync), 'do_get_ssl_error_details': gi.VFuncInfo(get_ssl_error_details), 'do_list_existing_sync': gi.VFuncInfo(list_existing_sync), 'do_load_contact_sync': gi.VFuncInfo(load_contact_sync), 'do_remove_contact_sync': gi.VFuncInfo(remove_contact_sync), 'do_requires_reconnect': gi.VFuncInfo(requires_reconnect), 'do_save_contact_sync': gi.VFuncInfo(save_contact_sync), 'do_search_sync': gi.VFuncInfo(search_sync), 'do_search_uids_sync': gi.VFuncInfo(search_uids_sync), 'do_source_changed': gi.VFuncInfo(source_changed), 'parent': <property object at 0x7f09d41b5a90>, 'priv': <property object at 0x7f09d41b5b80>})"
__gdoc__ = "Object EBookMetaBackend\n\nSignals from EBookMetaBackend:\n refresh-completed ()\n source-changed ()\n\nProperties from EBookMetaBackend:\n cache -> EBookCache: Cache\n Book Cache\n\nSignals from EBookBackend:\n closed (gchararray)\n shutdown ()\n\nProperties from EBookBackend:\n cache-dir -> gchararray: Cache Dir\n The backend's cache directory\n proxy-resolver -> GProxyResolver: Proxy Resolver\n The proxy resolver for this backend\n registry -> ESourceRegistry: Registry\n Data source registry\n writable -> gboolean: Writable\n Whether the backend will accept changes\n\nProperties from EBackend:\n connectable -> GSocketConnectable: Connectable\n Socket endpoint of a network service\n main-context -> GMainContext: Main Context\n The main loop context on which to attach event sources\n online -> gboolean: Online\n Whether the backend is online\n source -> ESource: Source\n The data source being acted upon\n user-prompter -> EUserPrompter: User Prompter\n User prompter instance\n\nSignals from GObject:\n notify (GParam)\n\n"
__gsignals__ = {}
__gtype__ = None # (!) real value is '<GType EBookMetaBackend (94654337915056)>'
__info__ = ObjectInfo(BookMetaBackend)
|
[
"ttys3@outlook.com"
] |
ttys3@outlook.com
|
47577a79e715e84a832d15d60bde6c109b72c080
|
b89ec524bd793305ff1400abca95520a939274ef
|
/Graph.py
|
9687f14afcef7d21a0fb4bc6225e3c68ab165ed7
|
[] |
no_license
|
HarryBatchelor/CM1103-Problem-solving-with-Python
|
251b8e6102eca6e9dd53190cbd9fe6d72a343952
|
ba4bd5b34e12aa3f95e0c632ba481a8770f85362
|
refs/heads/master
| 2021-10-08T15:06:13.965621
| 2018-12-13T22:20:30
| 2018-12-13T22:20:30
| 160,398,117
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,196
|
py
|
import matplotlib.pyplot as plt
import csv
from random import randint
from collections import Counter
from Q1_1816377 import *
def gen_counts(race_standing):
count = []
count_output = []
for s in race_standing:
count.append(int(race_standing[s]))
    data = Counter(count)
for numbers in data:
count_output.append([numbers,data[numbers]])
return count_output
def add_sailor_to_graph(loops):
with open('sailor_performances.csv', mode = 'w', newline="") as f:
writer = csv.writer(f)
for i in range(loops):
writer.writerow(['Example'+str(i), randint(0,100),20])
def plot():
    race_standing = generate_performances(read_sailor_data())
    x = []
    y = []
standing_count = sorted(gen_counts(race_standing))
print(standing_count)
for items in standing_count:
x.append(items[0])
y.append(items[1])
plt.plot(x, y, 'ro')
plt.axis([min(x),max(x)*1.1, min(y),max(y)*1.1])
plt.xlabel('Skill')
plt.ylabel('Amount of people that got the race score')
plt.title('Score')
plt.show()
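# NOTE: the call below writes 1,000,000 synthetic sailors to
# sailor_performances.csv before plotting; reduce the count for a quicker run.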
add_sailor_to_graph(1000000)
plot()
|
[
"hmbatchelor8@gmail.com"
] |
hmbatchelor8@gmail.com
|
8a0f06f60dc724d159edb416c6dfeb83e5234638
|
5a5264e34854ba744905728e5473fb9e942bd12d
|
/4 karakterli sifre.py
|
908250bea095622fce1e2130cc4625ad61bbe2d6
|
[] |
no_license
|
yasmingurses/Esra_Hocanin_Odevleri
|
665b0d8b3accf1c16b852a1c46bba255cfd92b8f
|
ea75de210f5aacd7cf066abcf99eaffd51334b29
|
refs/heads/main
| 2023-02-28T23:04:04.654972
| 2021-02-02T21:18:17
| 2021-02-02T21:18:17
| 306,127,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
# Update
# Now also checks that the input consists only of digits
# Checks whether a valid 4-character password was entered
x = input("Dört karakterli bir şifre giriniz: ")
while len(x) != 4 or not x.isdigit():
    print("4 karakterli ve rakamlardan oluşan bir şifre olmalı")
    x = input("Dört karakterli bir şifre giriniz: ")
else:
    print("Girilen şifre:", x)
|
[
"noreply@github.com"
] |
yasmingurses.noreply@github.com
|
98657318f92b25aa704e2127697efcaa68dc3808
|
81fbede20d9963915fa7ad53385c8becf0795a8c
|
/hyperparameters_GridSearch_scripts/algorithm_3/6_1_tuning_SVR_Sigmoid.py
|
c90f7cd8120c5b4b2c1e130dab6c7f1c2128f698
|
[] |
no_license
|
MarinaKrivova/DrugProfiles
|
777dd9ca81264a06a2b9da2dbca63a015d245611
|
14b503d650a7d816f3b461cbc8d08db48066123c
|
refs/heads/master
| 2023-01-11T19:23:54.556073
| 2020-11-14T23:01:21
| 2020-11-14T23:01:21
| 269,201,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,928
|
py
|
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings("ignore")
from sklearn.svm import SVR
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import MinMaxScaler
np.random.seed(123)
_FOLDER = "/home/acq18mk/master/results/results/"
# _FOLDER = "../drug_results/"
### Coding Part
with open(_FOLDER + "drug_ids_50.txt", 'r') as f:
drug_ids_50 = [np.int32(line.rstrip('\n')) for line in f]
#columns to normalise:
with open(_FOLDER+"columns_to_normalise.txt", 'r') as f:
columns_to_normalise = [line.rstrip('\n') for line in f]
# *****************************************
with open(_FOLDER+"X_features_cancer_cell_lines.txt", 'r') as f:
X_cancer_cell_lines = [line.rstrip('\n') for line in f]
# *****************************************
with open(_FOLDER+"X_PubChem_properties.txt", 'r') as f:
X_PubChem_properties = [line.rstrip('\n') for line in f]
# *****************************************
with open(_FOLDER+"X_features_Targets.txt", 'r') as f:
X_targets = [line.rstrip('\n') for line in f]
# *****************************************
with open(_FOLDER+"X_features_Target_Pathway.txt", 'r') as f:
X_target_pathway = [line.rstrip('\n') for line in f]
# *****************************************
all_columns = X_cancer_cell_lines + X_PubChem_properties + X_targets + X_target_pathway +["MAX_CONC"]
train_df = pd.read_csv(_FOLDER+"train08_merged_fitted_sigmoid4_123_with_drugs_properties_min10.csv").drop(["Unnamed: 0","Unnamed: 0.1"], axis=1)
train_df_50 = train_df.set_index("DRUG_ID").loc[drug_ids_50, :].copy()
train_drug = pd.DataFrame()
for i in range(10):
train_drug = pd.concat([train_drug, train_df_50[["COSMIC_ID", "fd_num_"+str(i), "norm_cells_"+str(i)]+all_columns].rename(
columns={"fd_num_"+str(i): "scaled_x",
"norm_cells_"+str(i): "norm_y"})],
axis=0, ignore_index = True)
X_columns = ["scaled_x"] + ["MAX_CONC"] + X_PubChem_properties + X_targets + X_target_pathway + X_cancer_cell_lines
scaler = MinMaxScaler().fit(train_drug[X_columns])
Xtrain_drug = scaler.transform(train_drug[X_columns])
y_train_drug = train_drug["norm_y"].values
print("Sigmoid SVR")
param_tested_epsilon = [0.001, 0.01, 0.1, 1]
param_tested_C = [0.1, 1, 5, 10, 100, 500]
param_tested_coef0 = [0.01, 0.1, 1]
param_grid = dict(C = param_tested_C, epsilon = param_tested_epsilon, coef0 = param_tested_coef0)
splitter_loo = LeaveOneOut()
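# Leave-one-out CV refits the model once per training row for each of the
# 6 * 4 * 3 = 72 parameter combinations below, so this search gets expensive
# quickly as the training set grows.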
grid = GridSearchCV(SVR(kernel = "sigmoid"), param_grid = param_grid, cv = splitter_loo, scoring= "neg_mean_absolute_error")
grid.fit(Xtrain_drug, y_train_drug)
print("Dataset:4, best C:", grid.best_params_["C"])
print("Dataset:4, best_epsilon", grid.best_params_["epsilon"])
print("Dataset:4, best_coef0", grid.best_params_["coef0"])
|
[
"mg.krivova@gmail.com"
] |
mg.krivova@gmail.com
|
c91378501f60fc5562c8f55e39f53a99e42da299
|
419a5b8b8e64771b5e82b39c16fd861194aa9023
|
/test3.py
|
6672865fb6dae5a91406717d179e4a334c8e02cd
|
[] |
no_license
|
clauortellado/Python-Excel-OpenPyXl
|
b21642670834b9df75910018a5ce7af1fb2ce99a
|
78bfde57a5f138d70fec03d2d53aafe6f8c68078
|
refs/heads/master
| 2023-03-10T05:13:03.148027
| 2021-02-19T18:21:05
| 2021-02-19T18:21:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 578
|
py
|
# https://medium.com/aubergine-solutions/working-with-excel-sheets-in-python-using-openpyxl-4f9fd32de87f
# Working with Excel sheets in Python using openpyxl
# Appeding Group of Values at the botton of the current Sheet
from openpyxl import Workbook
wb = Workbook()
filepath = "C:/Users/Klau/Documents/Python/XLS"
file1 = "demo3.xlsx"
sheet = wb.active
data = [('Id','Name', 'Seguro'),
(5001,'Claudia','OSDE'),
(5002,'Juan','SETIA'),
(1002,'Clarita','AOT')]
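# Each append() below writes one tuple as the next row, so the header tuple
# lands in row 1 and the three records fill rows 2-4.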
for row in data:
sheet.append(row)
wb.save(filepath+'/'+file1)
|
[
"noreply@github.com"
] |
clauortellado.noreply@github.com
|
81fd386201a963b93c59c98e682a986d83412d2c
|
207bc9a3e7a9f035353963876757745ddbcfc384
|
/knet/tests/utils.py
|
8180c9f6b22b12c2a88baff8cbada1aeee2b8f36
|
[] |
no_license
|
oddbird/knet
|
e6322cbca0350dc78d2a4e824a84d81f42960878
|
30e41c37dd608cbc8f1bd794cb30c7d935cf6723
|
refs/heads/master
| 2021-01-25T07:18:53.337897
| 2013-07-27T20:19:33
| 2013-07-27T20:19:33
| 9,507,222
| 0
| 0
| null | 2013-07-27T20:43:07
| 2013-04-17T20:46:55
|
Python
|
UTF-8
|
Python
| false
| false
| 910
|
py
|
from django.template.loader import render_to_string
from bs4 import BeautifulSoup
def redirects_to(response):
"""Assert that the given response redirects to the given URL."""
return response.headers['location'].replace('http://localhost:80', '')
def render_to_soup(*args, **kwargs):
"""Render a template and return a BeautifulSoup instance."""
html = render_to_string(*args, **kwargs)
    return BeautifulSoup(html, 'html.parser')  # name the parser explicitly so results don't depend on what's installed
def innerhtml(element):
"""Return whitespace-stripped inner HTML of a BeautifulSoup element."""
return element.decode_contents().strip()
def is_deleted(instance):
"""Return ``True`` if given model instance has been deleted in the db."""
return not type(instance)._base_manager.filter(pk=instance.pk).exists()
def refresh(instance):
"""Refresh given model instance from the database."""
return type(instance)._base_manager.get(pk=instance.pk)
|
[
"carl@oddbird.net"
] |
carl@oddbird.net
|
7e4361ba053743d636cdbf3e110a861ed69299a9
|
f0cf8eb77c87083ad8e02b17183dc966593e8a93
|
/Codility/FrogRiverOne.py
|
3f77c95d7155400771f532678b70441a39f13374
|
[] |
no_license
|
adityaalifn/Coding-Excercise
|
b1fbf25a486619c5b7c55a6a559f052a893a6188
|
35d250afdb3affce0e95010d1b89417929c64cdb
|
refs/heads/master
| 2021-09-08T18:00:32.654940
| 2018-03-11T16:30:20
| 2018-03-11T16:30:20
| 117,181,909
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 267
|
py
|
def solution(X, A):
    # Track which positions 1..X are still uncovered; a set makes the update
    # and the "all covered" check cheap, unlike re-sorting a list every step.
    remaining = set(range(1, X + 1))
    for i, leaf in enumerate(A):
        remaining.discard(leaf)
        if not remaining:
            return i
    return -1
print(solution(5, [1, 3, 1, 4, 2, 3, 5, 4]))
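# Prints 6: second 6 is the earliest time at which positions 1..5 are all covered.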
|
[
"adityaalifnugraha@gmail.com"
] |
adityaalifnugraha@gmail.com
|
b84c5640716e3f238fc73bd5c8712f058e57eb8a
|
bfb6ebdb9c6f9e7f5dca0befc8085f6d8156e68a
|
/bims/utils/gbif.py
|
a5abcbee3dc1fbf5348e3072a8b531b659b15684
|
[
"MIT"
] |
permissive
|
ismailsunni/django-bims
|
128dbdb21cc35f7651e6ead5dc774f9fb929af86
|
b13df4ce9f632102e54b45aff89fd9c36adc6c23
|
refs/heads/develop
| 2020-03-21T02:24:58.800186
| 2018-06-20T10:19:15
| 2018-06-20T10:25:18
| 137,997,261
| 0
| 0
|
MIT
| 2018-08-14T07:19:09
| 2018-06-20T07:40:13
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,506
|
py
|
# coding: utf-8
from requests.exceptions import HTTPError
from pygbif import species
from bims.models import Taxon
def update_taxa():
"""Get all taxon, then update the data bimsd on the gbif id.
"""
taxa = Taxon.objects.all()
for taxon in taxa:
print('Update taxon for %s with gbif id %s' % (
taxon.common_name, taxon.gbif_id
))
try:
response = species.name_usage(key=taxon.gbif_id)
if response:
if 'canonicalName' in response:
taxon.common_name = response['canonicalName']
if 'scientificName' in response:
taxon.scientific_name = response['scientificName']
if 'authorship' in response:
taxon.author = response['authorship']
taxon.save()
print('Taxon updated')
except HTTPError as e:
print('Taxon not updated')
print(e)
def find_species(original_species_name):
"""
Find species from gbif with lookup query.
:param original_species_name: the name of species we want to find
:return: List of species
"""
print('Find species : %s' % original_species_name)
list_of_species = []
try:
response = species.name_lookup(
q=original_species_name,
limit=3,
offset=2
)
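        # NOTE: offset=2 skips the first two GBIF matches, and only results
        # exposing a backbone taxonomy key ('nubKey') are kept below.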
if 'results' in response:
results = response['results']
for result in results:
if 'nubKey' in result:
list_of_species.append(result)
except HTTPError:
print('Species not found')
return list_of_species
def update_fish_collection_record(fish_collection):
"""
Update taxon for a fish collection.
:param fish_collection: Fish collection record model
"""
results = find_species(fish_collection.original_species_name)
for result in results:
if 'nubKey' in result:
taxon, created = Taxon.objects.get_or_create(
gbif_id=result['nubKey'])
if 'canonicalName' in result:
taxon.common_name = result['canonicalName']
if 'scientificName' in result:
taxon.scientific_name = result['scientificName']
if 'authorship' in result:
taxon.author = result['authorship']
taxon.save()
fish_collection.taxon_gbif_id = taxon
fish_collection.save()
|
[
"dimas.ciputra@gmail.com"
] |
dimas.ciputra@gmail.com
|
f26d9651a600872cdc23c0b36f306d9823a6910a
|
10f71154b2fb62eda33062d8e9f111d78cedabe7
|
/PrimeMinistersByPython/primeministers/io.py
|
d2f8e35f1d946c922608a612eb0383bb971f6bde
|
[] |
no_license
|
PrimeMinisters/PrimeMinisters
|
d19498faf376b51530b31dc1d3e6f2e673659ecf
|
d730f3935f90969dccbcaeea02f73b3275fb2c74
|
refs/heads/master
| 2020-06-05T13:44:26.259827
| 2014-12-25T10:27:04
| 2014-12-25T10:27:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 651
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import csv
class IO(object):
"""入出力:リーダ・ダウンローダ・ライタを抽象する。"""
def read_csv(self, filename):
"""指定されたファイルをCSVとして読み込む。"""
print "[io]readの起動を確認"
rows = []
with open(filename,'rU') as file:
reader = csv.reader(file)
#header = next(reader)
for row in reader:
rows.append(row)
#print rows
return rows
    def write_csv(self, filename, rows):
        """Write the given rows (rows) out to the specified file as CSV."""
        with open(filename, 'wb') as f:  # minimal implementation; 'wb' suits Python 2's csv module
            csv.writer(f).writerows(rows)
        return
#def test(self):
# return 'test'
|
[
"g1244163@cse.kyoto-su.ac.jp"
] |
g1244163@cse.kyoto-su.ac.jp
|
a95e9da619c93fa85d008be7f17687ca23cf69b5
|
309d86a579ac76cfd4387a39268c99deed75b3bb
|
/usr/share/pygobject/2.0/codegen/code-coverage.py
|
0d0b22979bfbda0469d5c57f302d2891a0d0092d
|
[] |
no_license
|
adiabuk/arch-tf701t
|
2288a54887ab75944aba9327b05802622ed036a1
|
07d45b901fe2fa572b9aff4ccc1f46885e20216f
|
refs/heads/master
| 2022-11-06T01:00:08.564774
| 2015-09-07T14:37:03
| 2015-09-07T14:37:03
| 40,411,267
| 0
| 1
| null | 2022-10-11T13:32:09
| 2015-08-08T17:45:37
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,329
|
py
|
#! /usr/bin/env python2
from __future__ import generators
import sys, os
def read_symbols(file, type=None, dynamic=0):
if dynamic:
cmd = 'nm -D %s' % file
else:
cmd = 'nm %s' % file
for line in os.popen(cmd, 'r'):
if line[0] != ' ': # has an address as first bit of line
while line[0] != ' ':
line = line[1:]
while line[0] == ' ':
line = line[1:]
# we should be up to "type symbolname" now
sym_type = line[0]
symbol = line[1:].strip()
if not type or type == sym_type:
yield symbol
def main():
if len(sys.argv) != 3:
sys.stderr.write('usage: coverage-check library.so wrapper.so\n')
sys.exit(1)
library = sys.argv[1]
wrapper = sys.argv[2]
# first create a dict with all referenced symbols in the wrapper
# should really be a set, but a dict will do ...
wrapper_symbols = {}
for symbol in read_symbols(wrapper, type='U', dynamic=1):
wrapper_symbols[symbol] = 1
# now go through the library looking for matches on the defined symbols:
for symbol in read_symbols(library, type='T', dynamic=1):
if symbol[0] == '_': continue
if symbol not in wrapper_symbols:
print symbol
if __name__ == '__main__':
main()
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
22ec0c6ae1aff85bf5b7069cd5eb229537ee79bf
|
281b8ccf5632af32bc0cfdb32a8ee7e7d570add9
|
/GPFileConversionTools.pyt
|
abfcd59fa4bef76d1a04613b89ee2393292f36da
|
[] |
no_license
|
gistom/GPFileConversionTools
|
408b6660250c62436ca4e65b7b7d7c971e0b49ee
|
7a38f037f1c66af31782253b8f53bd71691bf90d
|
refs/heads/master
| 2020-09-29T11:12:08.944524
| 2019-12-10T04:10:01
| 2019-12-10T04:10:01
| 227,026,654
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,564
|
pyt
|
import arcpy, os, zipfile
class Toolbox(object):
def __init__(self):
"""Define the toolbox (the name of the toolbox is the name of the
.pyt file)."""
self.label = "File Conversion Toolbox"
self.alias = ""
# List of tool classes associated with this toolbox
self.tools = [CsvToTable, ZipShapeFileToFC]
class CsvToTable(object):
def __init__(self):
"""Define the tool (tool name is the name of the class)."""
self.label = "CSV to Table"
self.description = "Converts a CSV file to a table"
self.canRunInBackground = True
def getParameterInfo(self):
"""Define parameter definitions"""
paramInCsvFile = arcpy.Parameter(
displayName='CSV File',
name='in_csvFile',
datatype='DEFile',
parameterType='Required',
direction='Input')
paramInCsvFile.filter.list = ['csv']
paramOutTable = arcpy.Parameter(
displayName='Output Table',
name='out_csvTable',
datatype='DETable',
parameterType='Required',
direction='Output')
params = [paramInCsvFile, paramOutTable]
return params
def isLicensed(self):
"""Set whether tool is licensed to execute."""
return True
def updateParameters(self, parameters):
"""Modify the values and properties of parameters before internal
validation is performed. This method is called whenever a parameter
has been changed."""
return
def updateMessages(self, parameters):
"""Modify the messages created by internal validation for each tool
parameter. This method is called after internal validation."""
return
def execute(self, parameters, messages):
"""The source code of the tool."""
inputCSV = parameters[0].valueAsText
outputTable = parameters[1].valueAsText
outPathTuple = os.path.split(outputTable)
arcpy.TableToTable_conversion(inputCSV, outPathTuple[0], outPathTuple[1], None)
return
class ZipShapeFileToFC(object):
def __init__(self):
"""Define the tool (tool name is the name of the class)."""
self.label = "ZIP Shapefile to Feature Class"
self.description = "Convert a ZIP file with one ShapeFile into a feature class"
self.canRunInBackground = True
def getParameterInfo(self):
"""Define parameter definitions"""
paramInZipFile = arcpy.Parameter(
displayName='ZIP File',
name='in_zipFile',
datatype='DEFile',
parameterType='Required',
direction='Input')
paramUnzipFolder = arcpy.Parameter(
displayName='Unzip Folder',
name='out_zipfolder',
datatype='DEFolder',
parameterType='Required',
direction='Output')
paramOutFC = arcpy.Parameter(
displayName='Output Feature Class',
name='out_FeatureClass',
datatype='DEFeatureClass',
parameterType='Derived',
direction='Output')
params = [paramInZipFile, paramUnzipFolder, paramOutFC]
return params
def isLicensed(self):
"""Set whether tool is licensed to execute."""
return True
def updateParameters(self, parameters):
"""Modify the values and properties of parameters before internal
validation is performed. This method is called whenever a parameter
has been changed."""
return
def updateMessages(self, parameters):
"""Modify the messages created by internal validation for each tool
parameter. This method is called after internal validation."""
return
def execute(self, parameters, messages):
"""The source code of the tool."""
inputZIP = parameters[0].valueAsText
outputFolder = parameters[1].valueAsText
shapeFileName = ''
with zipfile.ZipFile(inputZIP, 'r') as zip_ref:
listOfFileNames = zip_ref.namelist()
for fileName in listOfFileNames:
if fileName.endswith('.shp'):
shapeFileName = fileName
break
zip_ref.extractall(outputFolder)
fullShapePath = os.path.join(outputFolder, shapeFileName)
arcpy.SetParameterAsText(2, fullShapePath)
return
|
[
"noreply@github.com"
] |
gistom.noreply@github.com
|
262e061a5df73cade4ad74e389275c63001c40b4
|
475ae6c9fd4eb95c5d63f4dc4230baebbb15e2ac
|
/{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/taskapp/celery.py
|
6ba32f5660f8721dfe4c71ea784463202845f2a3
|
[
"MIT"
] |
permissive
|
yulio94/cookiecutter-django-rest
|
9151bfc5a8fa3dc66fb204420d6ce10d4df247a5
|
f8ad695f2a4526bf2247508c1d127b35313761ae
|
refs/heads/main
| 2023-02-04T14:26:04.788515
| 2020-12-23T05:59:29
| 2020-12-23T05:59:29
| 321,573,045
| 7
| 0
|
MIT
| 2020-12-23T05:59:30
| 2020-12-15T06:23:26
|
Python
|
UTF-8
|
Python
| false
| false
| 976
|
py
|
"""Celery app config"""
import os
from celery import Celery
from django.apps import apps, AppConfig
from django.conf import settings
if not settings.configured:
    # Set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local')
app = Celery('{{cookiecutter.project_slug}}')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY')
class CeleryAppConfig(AppConfig):
name = 'taskapp'
verbose_name = 'Celery Config'
def ready(self):
installed_apps = [app_config.name for app_config in apps.get_app_configs()]
app.autodiscover_tasks(lambda: installed_apps, force=True)
@app.task(bind=True)
def debug_task(self):
print(f'Request: {self.request!r}')
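# Hypothetical usage note (not part of the template itself): with this layout
# a worker would typically be launched along the lines of
#   celery -A {{cookiecutter.project_slug}}.taskapp worker -l info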
|
[
"j.cesarestrada@hotmail.com"
] |
j.cesarestrada@hotmail.com
|
b2c8cd4368cc92c27d271b4a5c833297ccad6217
|
2a330c4cd0933f14eaff2ffd045a6b377b7a28d3
|
/attic/python-serial-test.py
|
bdc85801daf3cccb375d11e29128fd207b737482
|
[] |
no_license
|
insc/txt-api
|
eeab2b2a8474625eff51b40b59710f767d6e800d
|
5446ec5781dac1271e7ea852e117609d8b56280a
|
refs/heads/master
| 2018-08-27T21:55:55.268719
| 2018-06-03T11:06:16
| 2018-06-03T11:06:16
| 116,029,319
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 644
|
py
|
from __future__ import print_function
import serial
import threading
import struct
import time
import binascii
print('Connect to /dev/ttyO2')
ser_ms = serial.Serial('/dev/ttyO2', 230000, timeout=1)
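# struct layout: '<' = little-endian, B = unsigned byte, H = unsigned 16-bit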
fmtstr = '<BBB BBBB H BBBBBB'
fields = [81, 1, 0, 17, 17, 17, 17, 0, 0, 0, 0, 0, 0, 0]
buflen = struct.calcsize(fmtstr)
buf = struct.pack(fmtstr, *fields)
print('fields >', fields)
print('buf len', len(buf))
print('buf w>', binascii.hexlify(buf))
while True:
print('Write to serial port')
ser_ms.write(buf)
print('Read from serial port')
data = ser_ms.read(len(buf))
print('buf r>', binascii.hexlify(data))
time.sleep(2)
|
[
"11855979+insc@users.noreply.github.com"
] |
11855979+insc@users.noreply.github.com
|
7e6d1a99bc62c0ba75bb7443ee7953270cc9acf7
|
ba02e988a9506342c7f354178261a95f89545c94
|
/version_downloader.py
|
1bc5e54fe91d5bcb7b2db0f6f8f81be22c3edcb0
|
[] |
no_license
|
Team-IF/MC_version_downloader
|
3ef37853f52482b9c13ef83a7b9b9e1ccfd327b1
|
7a7ee598e519525dc12150986272f4e333d806ab
|
refs/heads/master
| 2020-12-04T03:04:17.574635
| 2019-08-07T16:26:36
| 2019-08-07T16:26:36
| 231,583,560
| 0
| 0
| null | 2020-02-24T17:06:48
| 2020-01-03T12:26:42
| null |
UTF-8
|
Python
| false
| false
| 748
|
py
|
import wget
import json
import os
import requests
def L_Json_download(MC_version):
url = "https://launchermeta.mojang.com/mc/game/version_manifest.json"
jsons = requests.get(url).json()['versions']
for dic in jsons:
for value in dic.values():
if MC_version == value:
url = dic['url']
L_Jar_download(MC_version,requests.get(url).json())
def L_Jar_download(MC_version,mcjsons):
url = mcjsons['downloads']['client']['url']
wget.download(url)
os.rename("client.jar", MC_version+".jar")
def L_download(MC_version):
os.mkdir(MC_version)
os.chdir(MC_version)
L_Json_download(MC_version)
if __name__=="__main__":
L_download("1.12.2")
|
[
"noreply@github.com"
] |
Team-IF.noreply@github.com
|
2332a83c5e6252a6e781bde2cc4ce08d3f9390f1
|
d9b5f8745855806605e1d87ea92e7dd5a328d6d9
|
/scrapely/extraction/similarity.py
|
9346f160c4d69f163d1338a1ce3cd5992e6ee7be
|
[] |
no_license
|
lianghongjie/scrapely_extract
|
d6a5ce1d371b4a24b42f5b3d78b4f4c3099b9c4e
|
fe169243cc72660accd709f508b96ddbfd94db35
|
refs/heads/master
| 2020-03-13T12:14:55.708150
| 2018-04-26T07:19:06
| 2018-04-26T07:19:06
| 131,115,207
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,803
|
py
|
# -*- coding: utf-8 -*-
"""
Similarity calculation for Instance based extraction algorithm.
"""
from itertools import count
from six.moves import zip as izip, xrange
from operator import itemgetter
from heapq import nlargest
try:
# For typical use cases (small sequences and patterns) the naive approach
# actually runs faster than KMP algorithm
from . _similarity import naive_match_length
except ImportError:
def naive_match_length(to_search, subsequence, range_start, range_end):
startval = subsequence[0]
return ((i, common_prefix_length(to_search[i:], subsequence))
for i in xrange(range_start, range_end)
if startval == to_search[i])
def common_prefix_length(a, b):
"""Calculate the length of the common prefix in both sequences passed.
For example, the common prefix in this example is [1, 3]
>>> common_prefix_length([1, 3, 4], [1, 3, 5, 1])
2
If there is no common prefix, 0 is returned
>>> common_prefix_length([1], [])
0
"""
i = -1
for i, x, y in izip(count(), a, b):
if x != y:
return i
return i + 1
def common_prefix(*sequences):
"""determine the common prefix of all sequences passed
For example:
>>> common_prefix('abcdef', 'abc', 'abac')
['a', 'b']
"""
prefix = []
for sample in izip(*sequences):
first = sample[0]
if all(x == first for x in sample[1:]):
prefix.append(first)
else:
break
return prefix
def longest_unique_subsequence(to_search, subsequence, range_start=0,
range_end=None):
"""Find the longest unique subsequence of items in an array or string. This
searches to_search looking for the longest overlapping
match with subsequence. If the largest match is unique (there is no other
match of equivalent length), the index and length of match is returned. If
there is no match, (None, None) is returned.
Please see section 3.2 of Extracting Web Data Using Instance-Based
Learning by Yanhong Zhai and Bing Liu
For example, the longest match occurs at index 2 and has length 3
>>> import numpy as np
>>> to_search = np.array([6, 3, 2, 4, 3, 2, 5])
>>> longest_unique_subsequence(to_search, np.array([2, 4, 3]))
(2, 3)
When there are two equally long subsequences, it does not generate a match
>>> longest_unique_subsequence(to_search, np.array([3, 2]))
(None, None)
range_start and range_end specify a range in which the match must begin
>>> longest_unique_subsequence(to_search, np.array([3, 2]), 3)
(4, 2)
>>> longest_unique_subsequence(to_search, np.array([3, 2]), 0, 2)
(1, 2)
"""
if range_end is None:
range_end = len(to_search)
matches = naive_match_length(to_search, subsequence, range_start, range_end)
best2 = nlargest(2, matches, key=itemgetter(1))
# if there is a single unique best match, return that
if len(best2) == 1 or len(best2) == 2 and best2[0][1] != best2[1][1]:
return best2[0][0], best2[0][1]
return None, None
def first_longest_subsequence(to_search, subsequence, range_start=0, range_end=None):
"""Find the first longest subsequence of the items in a list or array.
range_start and range_end specify a range in which the match must begin.
For example, the longest match occurs at index 2 and has length 3
>>> to_search = [6, 3, 2, 4, 3, 2, 5]
>>> first_longest_subsequence(to_search, [2, 4, 3])
(2, 3)
    When there are two equally long subsequences, it returns the nearest one
>>> first_longest_subsequence(to_search, [3, 2])
(1, 2)
>>> first_longest_subsequence([], [3, 2])
(None, None)
"""
startval = subsequence[0]
if range_end is None:
range_end = len(to_search)
# the comparison to startval ensures only matches of length >= 1 and
# reduces the number of calls to the common_length function
matches = [(i, common_prefix_length(to_search[i:], subsequence))
for i in xrange(range_start, range_end) if startval == to_search[i]]
if not matches:
return None, None
# secondary sort on position and prefer the smaller one (near)
return max(matches, key=lambda x: (x[1], -x[0]))
def similar_region(extracted_tokens, template_tokens, labelled_region,
range_start=0, range_end=None, best_match=longest_unique_subsequence, **kwargs):
"""Given a labelled section in a template, identify a similar region
in the extracted tokens.
The start and end index of the similar region in the extracted tokens
is returned.
This will return a tuple containing:
(match score, start index, end index)
where match score is the sum of the length of the matching prefix and
suffix. If there is no unique match, (0, None, None) will be returned.
    range_start and range_end specify a range in which the match must begin
"""
data_length = len(extracted_tokens)
if range_end is None:
range_end = data_length
# calculate the prefix score by finding a longest subsequence in
# reverse order
reverse_prefix = template_tokens[labelled_region.start_index::-1]
reverse_tokens = extracted_tokens[::-1]
(rpi, pscore) = best_match(reverse_tokens, reverse_prefix,
data_length - range_end, data_length - range_start)
# None means nothing extracted. Index 0 means there cannot be a suffix.
if not rpi:
return 0, None, None
# convert to an index from the start instead of in reverse
prefix_index = len(extracted_tokens) - rpi - 1
if labelled_region.end_index is None:
return pscore, prefix_index, None
elif kwargs.get("suffix_max_length", None) == 0:
return pscore, prefix_index, range_start + 1
suffix = template_tokens[labelled_region.end_index:]
# if it's not a paired tag, use the best match between prefix & suffix
if labelled_region.start_index == labelled_region.end_index:
(match_index, sscore) = best_match(extracted_tokens,
suffix, prefix_index, range_end)
if match_index == prefix_index:
return (pscore + sscore, prefix_index, match_index)
elif pscore > sscore:
return pscore, prefix_index, prefix_index
elif sscore > pscore:
return sscore, match_index, match_index
return 0, None, None
# calculate the suffix match on the tokens following the prefix. We could
# consider the whole page and require a good match.
(match_index, sscore) = best_match(extracted_tokens,
suffix, prefix_index + 1, range_end)
if match_index is None:
return 0, None, None
return (pscore + sscore, prefix_index, match_index)
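# A minimal usage sketch (not part of the original module). The Region
# namedtuple stands in for scrapely's real labelled-region object; only its
# start_index/end_index attributes are used by similar_region.
if __name__ == '__main__':
    from collections import namedtuple
    import numpy as np
    Region = namedtuple('Region', 'start_index end_index')
    template = np.array([1, 2, 3, 9, 4, 5])   # 9 is the annotated token
    extracted = np.array([1, 2, 3, 7, 4, 5])  # 7 replaces it in the new page
    # prefix [1, 2, 3] and suffix [4, 5] both match uniquely, so the score is
    # 5 = 3 (prefix) + 2 (suffix) and the region is bounded by indices 2 and 4
    print(similar_region(extracted, template, Region(2, 4)))  # (5, 2, 4)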
|
[
"abc650301032@qq.com"
] |
abc650301032@qq.com
|
47ce6d1b721fe3f23abb7c8cfde7e7292e28bdb8
|
d4ff743ac22529f167e4bd59204ea41e825f5f6c
|
/MachineLearning.py
|
7cb7c4feef8e0997b563e105a66cc168286b95b8
|
[] |
no_license
|
sanjitmathew/Heart-Disease-Application
|
7b3fa5ffafbd82dcaed91e6c348ef78cf0348174
|
f7e7a677f75ff46753f1324fbec1c7079b963726
|
refs/heads/master
| 2020-05-05T13:20:45.886845
| 2019-04-30T08:41:44
| 2019-04-30T08:41:44
| 180,072,618
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,636
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 6 15:28:24 2019
@author: sanjith
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('heart.csv')
X = dataset.iloc[:,0:13].values
y = dataset.iloc[:, 13].values
#Splitting datasets
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=42)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Classification
from sklearn.svm import SVC
classifier = SVC(C=7, kernel='linear', random_state=0)  # use kernel='rbf' for a kernel SVM
classifier.fit(X_train,y_train)
y_pred = classifier.predict(X_test)
# Evaluation
from sklearn.metrics import confusion_matrix
cm=confusion_matrix(y_test,y_pred)
from sklearn.model_selection import cross_val_score
accuracy = cross_val_score(estimator=classifier, X=X_train, y=y_train, cv=10)
std = accuracy.std()
mean_accuracy = accuracy.mean()
from sklearn.model_selection import GridSearchCV
parameters=[
{ 'C' : [7,8,9,10,11,12] },
]
grid_search = GridSearchCV(estimator=classifier,
scoring='accuracy',
param_grid = parameters,
cv=10,
n_jobs=-1)
grid_search = grid_search.fit(X_train,y_train)
best_accuracy = grid_search.best_score_
best_params = grid_search.best_params_
#to save an object
from joblib import dump
dump(classifier,'heart.joblib')
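# Hypothetical round-trip check (not in the original script): reload the
# serialized classifier and score it on the held-out split from above.
from joblib import load
restored = load('heart.joblib')
print('reloaded test accuracy:', restored.score(X_test, y_test))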
|
[
"noreply@github.com"
] |
sanjitmathew.noreply@github.com
|
1cf3552063d04ba50348712d499056478ca00102
|
78835cac758e1a901b1a00843e20cf6a70d1867c
|
/plotting.py
|
c930d62b09876fa25d8292406d93d77c08960a74
|
[
"LicenseRef-scancode-public-domain-disclaimer"
] |
permissive
|
sverros/Correlation_parallel
|
d5d36b7372b2cf3b4ea094d27d2eb3cf4f2d2780
|
44dd23970e7826d8d64b1c5b1463771403458e6a
|
refs/heads/master
| 2016-09-16T12:16:03.082173
| 2015-08-10T17:58:01
| 2015-08-10T17:58:01
| 37,331,527
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,087
|
py
|
import cartopy
import matplotlib.pyplot as plt
import numpy as np
import scipy
from neicio.gmt import GMTGrid
from matplotlib import cm
WATER_COLOR = [.47,.60,.81]
def plot(out, variables, voi, shakemap, stationdata):
# maxdata = np.amax(out['data_new'])
attributes = shakemap.getAttributes()
intensity = stationdata['name']
SM = []
IN = []
    # enumerate() yields (index, name) pairs; unpack them so the string
    # comparison sees the station name rather than the tuple
    for idx, name in enumerate(intensity):
        if name in ('UNCERTAINTY', 'DYFI', 'MMI', 'CIIM'):
            IN.append(idx)
        else:
            SM.append(idx)
sm_station_lons = [stationdata['lon'][j] for j in SM]
sm_station_lats = [stationdata['lat'][j] for j in SM]
in_station_lats = [stationdata['lat'][j] for j in IN]
in_station_lons = [stationdata['lon'][j] for j in IN]
palette = cm.jet
fig = plt.figure(figsize=(10,10))
proj = cartopy.crs.PlateCarree()
ax = plt.axes(projection=proj)
cartopy.feature.COASTLINE.scale = '50m'
cartopy.feature.LAND.scale = '50m'
cartopy.feature.OCEAN.scale = '50m'
ax.add_feature(cartopy.feature.OCEAN,facecolor=WATER_COLOR)
ax.add_feature(cartopy.feature.COASTLINE)
ax.add_feature(cartopy.feature.BORDERS, linestyle=':',zorder=10)
ax.gridlines(crs=proj, draw_labels=True, linestyle='-')
ax.set_extent(shakemap.getRange())
    im = ax.imshow(out['cor'], extent=shakemap.getRange(), origin='upper', cmap=palette)
plt.plot(sm_station_lons, sm_station_lats, 'g>', markersize = 6)
plt.plot(in_station_lons, in_station_lats, 'r^', markersize = 6)
locstr = attributes['event']['event_description']
mag = attributes['event']['magnitude']
datestr = attributes['event']['event_timestamp'].strftime('%b %d, %Y %H:%M:%S')
th = plt.title('Correlation Matrix for %s - %s M%.1f, (epsilon)' % (locstr,datestr,mag), y = 1.08)
    ch = plt.colorbar(im, shrink=0.7)
    plt.show()
fig = plt.figure(figsize = (10,10))
proj = cartopy.crs.PlateCarree()
ax = plt.axes(projection=proj)
cartopy.feature.COASTLINE.scale = '50m'
cartopy.feature.LAND.scale = '50m'
cartopy.feature.OCEAN.scale = '50m'
ax.add_feature(cartopy.feature.OCEAN,facecolor=WATER_COLOR)
ax.add_feature(cartopy.feature.COASTLINE)
ax.add_feature(cartopy.feature.BORDERS, linestyle=':',zorder=10)
ax.gridlines(crs=proj, draw_labels=True, linestyle='-')
ax.set_extent(shakemap.getRange())
    im = ax.imshow(variables['data'], extent=shakemap.getRange(), origin='upper', cmap=palette)
plt.plot(sm_station_lons, sm_station_lats, 'g>', markersize = 6)
plt.plot(in_station_lons, in_station_lats, 'r^', markersize = 6)
locstr = attributes['event']['event_description']
mag = attributes['event']['magnitude']
datestr = attributes['event']['event_timestamp'].strftime('%b %d, %Y %H:%M:%S')
th = plt.title('ShakeMap for %s - %s M%.1f, (epsilon)' % (locstr,datestr,mag), y = 1.08)
    ch = plt.colorbar(im, shrink=0.7)
    plt.show()
fig = plt.figure(figsize = (10,10))
proj = cartopy.crs.PlateCarree()
ax = plt.axes(projection=proj)
cartopy.feature.COASTLINE.scale = '50m'
cartopy.feature.LAND.scale = '50m'
cartopy.feature.OCEAN.scale = '50m'
ax.add_feature(cartopy.feature.OCEAN,facecolor=WATER_COLOR)
ax.add_feature(cartopy.feature.COASTLINE)
ax.add_feature(cartopy.feature.BORDERS, linestyle=':',zorder=10)
ax.gridlines(crs=proj, draw_labels=True, linestyle='-')
ax.set_extent(shakemap.getRange())
    im = ax.imshow(out['data_new'], extent=shakemap.getRange(), origin='upper', cmap=palette)
plt.plot(sm_station_lons, sm_station_lats, 'g>', markersize = 6)
plt.plot(in_station_lons, in_station_lats, 'r^', markersize = 6)
locstr = attributes['event']['event_description']
mag = attributes['event']['magnitude']
datestr = attributes['event']['event_timestamp'].strftime('%b %d, %Y %H:%M:%S')
th = plt.title('Avg Adj Matrix for %s - %s M%.1f, (epsilon)' % (locstr,datestr,mag), y = 1.08)
    ch = plt.colorbar(im, shrink=0.7)
    plt.show()
return
|
[
"sverros@igskcicgwsgm051.cr.usgs.gov"
] |
sverros@igskcicgwsgm051.cr.usgs.gov
|
c065cd304063c2dd0848ba6db4fc36e777b981b5
|
f36f438c190cdd5aa66c8f23a4365af9391af22c
|
/hwk3.py
|
2eddf6b044dff594c661edf3b7944b9044894499
|
[] |
no_license
|
KyleYoung69/githubKyleYoung
|
1f500b8449526a699afd20e4272e38cf822b282e
|
2b50425ce09332662b944c254f76a6315759fbb8
|
refs/heads/main
| 2023-05-24T09:17:29.973251
| 2021-05-25T21:52:24
| 2021-05-25T21:52:24
| 367,767,014
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,176
|
py
|
#Kyle Young - ID#188
def same_first_digit(digit1, digit2, digit3):
    # a chained comparison checks all three first digits at once
    return str(digit1)[0] == str(digit2)[0] == str(digit3)[0]
def get_piece_value(piece):
chess = dict([
("pawn", 1),
("bishop", 3),
("knight", 3),
("rook", 5),
("queen", 9),
])
if piece == "pawn" or piece == "bishop" or piece == "knight" or piece == "rook" or piece == "queen":
return chess[piece]
else:
return None
def which_season(month, day):
if month == 1 :
return "winter"
elif month == 2 :
return "winter"
elif month == 3 and day < 20 :
return "winter"
elif month == 3 and day >= 20 :
return "spring"
elif month == 4 :
return "spring"
elif month == 5 :
return "spring"
elif month == 6 and day < 21 :
return "spring"
elif month == 6 and day >= 21 :
return "summer"
elif month == 7 :
return "summer"
elif month == 8 :
return "summer"
elif month == 9 and day < 22 :
return "summer"
elif month == 9 and day >= 22 :
return "fall"
elif month == 10 :
return "fall"
elif month == 11 :
return "fall"
elif month == 12 and day < 21 :
return "fall"
elif month == 12 and day >= 21 :
return "winter"
def number_to_word(num):
zero_to_nine = dict([
(0, "zero"),
(1, "one"),
(2, "two"),
(3, "three"),
(4, "four"),
(5, "five"),
(6, "six"),
(7, "seven"),
(8, "eight"),
(9, "nine")
])
ten_to_nineteen = dict([
(10, "ten"),
(11, "eleven"),
(12, "twelve"),
(13, "thirteen"),
(14, "fourteen"),
(15, "fifteen"),
(16, "sixteen"),
(17, "seventeen"),
(18, "eighteen"),
(19, "nineteen")
])
twenty_to_ninety = dict([
(20, "twenty"),
(30, "thirty"),
(40, "forty"),
(50, "fifty"),
(60, "sixty"),
(70, "seventy"),
(80, "eighty"),
(90, "ninety"),
])
if (num == 0):
return "zero"
elif (len(str(num)) == 1):
return zero_to_nine[num]
elif (len(str(num)) == 2):
not_twenty = (num - 20)
if(not_twenty < 0):
return ten_to_nineteen[num]
elif(not_twenty >= 0 and (not_twenty % 10 == 0)):
return twenty_to_ninety[num]
else:
first_number = num % 10
second_number = num - first_number
return (twenty_to_ninety[second_number]) + " " + (zero_to_nine[first_number])
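# Hypothetical spot-checks (not in the original homework file):
# number_to_word(15) -> "fifteen"; number_to_word(42) -> "forty two"
# which_season(3, 20) -> "spring"; get_piece_value("queen") -> 9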
|
[
"noreply@github.com"
] |
KyleYoung69.noreply@github.com
|
749a2a09910d28d903ab094e86fb56ee068b075f
|
44fb2fe531a0ff2144e4f0c2d4dd611f86f93cfb
|
/FuckAlgorithmAnalysis/luogu/P1554_梦中的统计.py
|
be3c1a4a5763767c50489fcd888cb3858c864a06
|
[] |
no_license
|
Alex-Beng/ojs
|
d9f1a49f76dc6b6429951330af526e0df2152be8
|
26a467dfe8acd8ae4be0cd2784d79eebf09c06ce
|
refs/heads/master
| 2023-07-12T05:36:36.176271
| 2023-07-07T15:09:39
| 2023-07-07T15:09:39
| 152,544,110
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 243
|
py
|
lo, hi = map(int, input().split())
nums = [str(i) for i in range(lo, hi + 1)]
nums_count = [0]*10
for i in nums:
for j in i:
nums_count[int(j)] += 1
nums_count = [str(i) for i in nums_count]
print(' '.join(nums_count))
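# Worked example (an assumption about the judge's I/O format): for input
# "1 13" the digits of 1..13 contain one 0, six 1s, two 2s, two 3s and one
# each of 4-9, so the program prints "1 6 2 2 1 1 1 1 1 1".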
|
[
"599267065@qq.com"
] |
599267065@qq.com
|
2d1f2608be74a36c2c5177a29fbfa746c861255c
|
c62a1badf38dfb381b7c1131087f72d8ec28729b
|
/main/utils.py
|
6b2496dea8f1ba7ef2080c651b9d71d9fa4334de
|
[] |
no_license
|
KenPet11/UnitedWay
|
7430330be52178f4d135d6b426187e5bc877ee4c
|
3edbd8f68edbf54c12e97cc135f37f9e93d8284d
|
refs/heads/master
| 2020-04-25T14:12:26.194428
| 2019-05-05T21:47:11
| 2019-05-05T21:47:11
| 172,833,494
| 0
| 0
| null | 2019-05-05T21:47:13
| 2019-02-27T03:09:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,289
|
py
|
from datetime import datetime, timedelta
from calendar import HTMLCalendar
from .models import Event
class Calendar(HTMLCalendar):
def __init__(self, year=None, month=None):
self.year = year
self.month = month
super(Calendar, self).__init__()
# formats a day as a td
# filter events by day
def formatday(self, day, events):
events_per_day = events.filter(event_start_time__day=day)
d = ''
for event in events_per_day:
d += f'<li> {event.get_html_url} </li>'
if day != 0:
return f"<td><span class='date'>{day}</span><ul> {d} </ul></td>"
return '<td></td>'
# formats a week as a tr
def formatweek(self, theweek, events):
week = ''
for d, weekday in theweek:
week += self.formatday(d, events)
return f'<tr> {week} </tr>'
# formats a month as a table
# filter events by year and month
    def formatmonth(self, withyear=True):
        events = Event.objects.filter(event_start_time__year=self.year, event_start_time__month=self.month)
        cal = '<table border="0" cellpadding="0" cellspacing="0" class="calendar">\n'
        cal += f'{self.formatmonthname(self.year, self.month, withyear=withyear)}\n'
        cal += f'{self.formatweekheader()}\n'
        for week in self.monthdays2calendar(self.year, self.month):
            cal += f'{self.formatweek(week, events)}\n'
        return cal + '</table>'
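# A minimal usage sketch (assumes a configured Django environment, e.g. a
# view or shell session where the Event table exists):
#   html = Calendar(2019, 5).formatmonth()
#   # wrap in mark_safe(...) before embedding the fragment in a template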
|
[
"kpettit@nd.edu"
] |
kpettit@nd.edu
|
6a425bc2043643c6f72bd9ec9d45dd83f78bb750
|
cf14275eb2ad7a50da0f482ead52e12168e7de6f
|
/CRUD_FunctionBased_1/CRUD_FunctionBased_1/settings.py
|
433157cbbc4c730f1cef6124f9af198bbc464fbb
|
[] |
no_license
|
balamurali1/Environment
|
319c4087de011949f405d78a43a15b45b04efb05
|
f5312d56f102423cfb11900cfa99775ffa4f67c5
|
refs/heads/master
| 2023-09-04T06:56:20.449830
| 2021-10-30T09:13:00
| 2021-10-30T09:13:00
| 420,183,269
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,315
|
py
|
"""
Django settings for CRUD_FunctionBased_1 project.
Generated by 'django-admin startproject' using Django 3.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-v)@v)pl**7=gkd7%mg&a$_sbyr=reov#epp#sr=m*12wr-f$6)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'Book',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'CRUD_FunctionBased_1.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'CRUD_FunctionBased_1.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"balamurali1@gmail.com"
] |
balamurali1@gmail.com
|
8c496a1bd3052881eb01521de7480ac48911b47f
|
db0d5970cf10febfb1028393567143aedabdceb8
|
/capstone/capstoneapi/models/Workflows.py
|
d1360cae7cb121f4b3848290cfcb2fb73cb95e0e
|
[] |
no_license
|
FORDBA/Final-Capstone-Server
|
b192dc63577128d1e4ad04f9e452b86833432164
|
9ac3745986dc3498aaab344d68cd647fa8205421
|
refs/heads/main
| 2023-02-15T21:43:32.037440
| 2021-01-08T19:24:09
| 2021-01-08T19:24:09
| 321,428,632
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 782
|
py
|
"""Database Comments module"""
from django.db import models
from django.contrib.auth.models import User
class Workflows(models.Model):
"""Database Comments model"""
due_date = models.DateField()
completion_date = models.DateField(null=True, blank=True)
preparer = models.ForeignKey(User, related_name='user_preparer', on_delete=models.CASCADE)
reviewer = models.ForeignKey(User, related_name='user_reviewer', on_delete=models.CASCADE)
processor = models.ForeignKey(User, related_name='user_processor', on_delete=models.CASCADE)
status = models.ForeignKey("Statuses", on_delete=models.DO_NOTHING)
state = models.ForeignKey("States", on_delete=models.DO_NOTHING)
company = models.ForeignKey("Companies", on_delete=models.DO_NOTHING)
|
[
"bfordcpa@gmail.com"
] |
bfordcpa@gmail.com
|
07f7f8508fd7439a74cbe94e74b385d752990391
|
ba38cfd3ffdcf59b81ce00ef3af10be579bb7da1
|
/Chapter 2 - Linked Lists/2.2.py
|
0fa206065b1c29ac7aed789f8d3dce8b88fc3e35
|
[] |
no_license
|
StephanieGreenberg/ctci6-solutions
|
67cf9a5fc856f8c85495799fb5981d905e039bbe
|
3f7366a131136895b0165c0cbdcf9c6d6ebe965e
|
refs/heads/master
| 2022-11-25T08:59:59.886119
| 2020-07-25T21:49:06
| 2020-07-25T21:49:06
| 281,834,618
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
def kth_to_last(head, k):
ctr = findLength(head) - 1
while head:
if k == ctr:
return head
ctr -= 1
head = head.next
return None
def findLength(head):
ctr = 0
while head:
ctr += 1
head = head.next
return ctr
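# Hypothetical demo (not in the original file): a throwaway node type to
# exercise kth_to_last; k counts from the end, so k=0 is the last node.
class _Node(object):
    def __init__(self, val, nxt=None):
        self.val, self.next = val, nxt

if __name__ == '__main__':
    head = _Node(1, _Node(2, _Node(3)))
    print(kth_to_last(head, 0).val)  # -> 3
    print(kth_to_last(head, 2).val)  # -> 1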
|
[
"sgreenberg53@gmail.com"
] |
sgreenberg53@gmail.com
|
2730756ed1bf0108a8937125d88e379ace3b62b6
|
06b882948132d5d501a3474a99e566f3875eef77
|
/project5/project5/kaizen/admin.py
|
20f390a6b7098d23e934569b6810881e1404280c
|
[] |
no_license
|
RidleyLarsen/cs4990
|
a71b43e720facc63952e1e214f8ff91270fa5f83
|
268ec2762683653be75345a5719f44d795abcb38
|
refs/heads/master
| 2016-09-06T16:36:43.342909
| 2015-12-16T22:03:04
| 2015-12-16T22:03:04
| 41,456,998
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 165
|
py
|
from django.contrib import admin
from .models import Suggestion, Category
# Register your models here.
admin.site.register(Category)
admin.site.register(Suggestion)
|
[
"ridley@velocitywebworks.com"
] |
ridley@velocitywebworks.com
|
ce2c19a1b1978beb9920466d1f9bce3e7326a1e6
|
dfb6a80dda5882a1c2be87b0b6e1e7a87a7b4c20
|
/test/test_acknowledgement.py
|
18d884199b5f0f2f999ca2e5924cdad313218b17
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
artikcloud/artikcloud-python
|
a090201bea9fadbdf5dd57d94d9085b03b34f927
|
c5489b2fca27fd9a8bcea99f309e02cb690dd349
|
refs/heads/master
| 2020-12-26T03:33:00.657575
| 2017-12-28T20:40:05
| 2017-12-28T20:40:05
| 55,102,598
| 13
| 11
| null | 2017-03-18T03:22:58
| 2016-03-30T22:38:07
|
Python
|
UTF-8
|
Python
| false
| false
| 831
|
py
|
# coding: utf-8
"""
ARTIK Cloud API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import artikcloud
from artikcloud.rest import ApiException
from artikcloud.models.acknowledgement import Acknowledgement
class TestAcknowledgement(unittest.TestCase):
""" Acknowledgement unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testAcknowledgement(self):
"""
Test Acknowledgement
"""
model = artikcloud.models.acknowledgement.Acknowledgement()
if __name__ == '__main__':
unittest.main()
|
[
"jswattonjue@gmail.com"
] |
jswattonjue@gmail.com
|
be0abde7011e4e965cd2998da82ee23ff5874981
|
4e74c4230322c013e036c9ab8562bd17dcb27bf4
|
/env/bin/pip2
|
71831be5277cc76f49e72ef0c435f698174f4d51
|
[] |
no_license
|
1871vinayak/django-rest-framework-tutorial
|
2dc2c3a4b5bb288dfd8e4f5e65704543d039c596
|
e4c2bdeef177f1c1ed24c628ab5e1790a7c1ca37
|
refs/heads/master
| 2020-04-05T09:44:14.421127
| 2018-11-08T21:32:44
| 2018-11-08T21:32:44
| 156,771,806
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 243
|
#!/home/vinayak-1871/tutorial/env/bin/python2
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"1871vinayak@gmail.com"
] |
1871vinayak@gmail.com
|
|
571c3c7cc675dc307e237ac80f5f2f03d5a55195
|
62e6e5ac5fd2f955c79a456e6cbbcf7f5d083b29
|
/label_studio/tests/data_manager/test_api_tasks.py
|
520667f75005f77bdff1672f6fb0d3144b522da1
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
naveengct/label-studio
|
9a8f75e55055ce3f0eed1e1e0cbbaa685b94154e
|
11aae8352d1acdf4f3d978a32934daad27779ed7
|
refs/heads/master
| 2023-08-21T09:38:52.514177
| 2021-10-23T20:04:09
| 2021-10-23T20:04:09
| 420,070,239
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,687
|
py
|
"""This file and its contents are licensed under the Apache License 2.0. Please see the included NOTICE for copyright information and LICENSE for a copy of the license.
"""
import pytest
import json
from ..utils import make_task, make_annotation, make_prediction, project_id
from projects.models import Project
@pytest.mark.django_db
def test_views_tasks_api(business_client, project_id):
# create
payload = dict(project=project_id, data={"test": 1})
response = business_client.post(
"/api/dm/views/",
data=json.dumps(payload),
content_type="application/json",
)
assert response.status_code == 201, response.content
view_id = response.json()["id"]
# no tasks
response = business_client.get(f"/api/dm/tasks?fields=all&view={view_id}")
assert response.status_code == 200, response.content
assert response.json()["total"] == 0
assert len(response.json()["tasks"]) == 0
project = Project.objects.get(pk=project_id)
task_data = {"text": "bbb"}
task_id = make_task({"data": task_data}, project).id
annotation_result = {"from_name": "my_class", "to_name": "text", "type": "choices", "value": {"choices": ["pos"]}}
make_annotation({"result": [annotation_result]}, task_id)
make_annotation(
{
"result": [annotation_result],
"was_cancelled": True,
},
task_id,
)
prediction_result = {"from_name": "my_class", "to_name": "text", "type": "choices", "value": {"choices": ["pos"]}}
make_prediction(
{
"result": [prediction_result],
},
task_id,
)
response = business_client.get(f"/api/dm/tasks?fields=all&view={view_id}")
assert response.status_code == 200, response.content
response_data = response.json()
assert response_data["total"] == 1
assert len(response_data["tasks"]) == 1
assert response_data["tasks"][0]["id"] == task_id
assert response_data["tasks"][0]["data"] == task_data
assert response_data["tasks"][0]["total_annotations"] == 1
assert "annotations_results" in response_data["tasks"][0]
assert response_data["tasks"][0]["cancelled_annotations"] == 1
assert response_data["tasks"][0]["total_predictions"] == 1
assert "predictions_results" in response_data["tasks"][0]
@pytest.mark.parametrize(
"tasks_count, annotations_count, predictions_count",
[
[0, 0, 0],
[1, 0, 0],
[1, 1, 1],
[2, 2, 2],
],
)
@pytest.mark.django_db
def test_views_total_counters(tasks_count, annotations_count, predictions_count, business_client, project_id):
# create
payload = dict(project=project_id, data={"test": 1})
response = business_client.post(
"/api/dm/views/",
data=json.dumps(payload),
content_type="application/json",
)
assert response.status_code == 201, response.content
view_id = response.json()["id"]
project = Project.objects.get(pk=project_id)
for _ in range(0, tasks_count):
task_id = make_task({"data": {}}, project).id
print('TASK_ID: %s' % task_id)
for _ in range(0, annotations_count):
make_annotation({"result": []}, task_id)
for _ in range(0, predictions_count):
make_prediction({"result": []}, task_id)
response = business_client.get(f"/api/dm/tasks?fields=all&view={view_id}")
response_data = response.json()
assert response_data["total"] == tasks_count, response_data
assert response_data["total_annotations"] == tasks_count * annotations_count, response_data
assert response_data["total_predictions"] == tasks_count * predictions_count, response_data
|
[
"noreply@github.com"
] |
naveengct.noreply@github.com
|
276ac678559bd74f99bf26ae835e5cfccf858be6
|
8669f31f708e81c40ede6557e7de606b6660cca4
|
/pyomt/decorators.py
|
cd606037953bd2a410110656b656e8f1c1bd8e25
|
[
"Apache-2.0"
] |
permissive
|
cespio/omt2mzn
|
8488496abe4c27e977e0b64d3b2cac7fc9cb7b9c
|
0088ed81272b63f86903a1bf5c5bb290cb8c4a85
|
refs/heads/master
| 2021-06-11T03:35:49.805111
| 2019-05-05T17:23:08
| 2019-05-05T17:23:08
| 128,271,872
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,965
|
py
|
#
# This file is part of pySMT.
#
# Copyright 2014 Andrea Micheli and Marco Gario
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from functools import wraps
import warnings
import pyomt.exceptions
class deprecated(object):
"""This is a decorator which can be used to mark functions
    as deprecated. It will result in a warning being emitted
when the function is used."""
def __init__(self, alternative=None):
self.alternative = alternative
def __call__(self, func):
def newFunc(*args, **kwargs):
alt = ""
if self.alternative is not None:
alt = " You should call %s() instead!" % self.alternative
warnings.warn("Call to deprecated function %s().%s" % \
(func.__name__, alt),
category=DeprecationWarning,
stacklevel=2)
return func(*args, **kwargs)
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__dict__.update(func.__dict__)
return newFunc
def clear_pending_pop(f):
"""Pop the solver stack (if necessary) before calling the function.
    Some functions (e.g., get_value) require the state of the solver
    to stay unchanged after a call to solve. Therefore, we can leave
    the solver in an intermediate state in which there is a formula
asserted in the stack that is not needed (e.g., when solving under
assumptions). In order to guarantee that methods operate on the
correct set of formulae, all methods of the solver that rely on
the assertion stack, need to be marked with this decorator.
"""
@wraps(f)
def clear_pending_pop_wrap(self, *args, **kwargs):
if self.pending_pop:
self.pending_pop = False
self.pop()
return f(self, *args, **kwargs)
return clear_pending_pop_wrap
def typecheck_result(f):
"""Performs type checking on the return value using the global environment"""
@wraps(f)
def typecheck_result_wrap(*args, **kwargs):
res = f(*args, **kwargs)
        res.get_type() # This raises an exception if an invalid type is found
        return res
return typecheck_result_wrap
def catch_conversion_error(f):
"""Catch unknown operators errors and converts them into conversion error."""
@wraps(f)
def catch_conversion_error_wrap(*args, **kwargs):
try:
res = f(*args, **kwargs)
except pyomt.exceptions.UnsupportedOperatorError as ex:
raise pyomt.exceptions.ConvertExpressionError(message=
"Could not convert the input expression. " +
"The formula contains unsupported operators. " +
"The error was: %s" % ex.message,
expression=ex.expression)
return res
return catch_conversion_error_wrap
def assert_infix_enabled(f):
"""Raise an exception if infix notation is not enabled."""
from functools import wraps
from pyomt.exceptions import PyomtModeError
INFIX_ERROR_MSG = """Infix notation is not enabled for the current environment.
Enable it by setting enable_infix_notation to True."""
@wraps(f)
def assert_infix_enabled_wrap(*args, **kwargs):
from pyomt.environment import get_env
if not get_env().enable_infix_notation:
raise PyomtModeError(INFIX_ERROR_MSG)
return f(*args, **kwargs)
return assert_infix_enabled_wrap
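# Hypothetical usage sketch (not part of pyomt): flagging an old helper so
# its callers get a DeprecationWarning pointing at the replacement.
@deprecated(alternative="new_helper")
def _old_helper(x):
    return x + 1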
|
[
"francio.194@gmail.com"
] |
francio.194@gmail.com
|
8e6dc82201b220746b19a2dc29e0dd3b44ca5738
|
a81c1492783e7cafcaf7da5f0402d2d283b7ce37
|
/google/ads/google_ads/v6/proto/enums/billing_setup_status_pb2.py
|
70c162bdab0373dbfcdf6c8f4b3f52cef85965b9
|
[
"Apache-2.0"
] |
permissive
|
VincentFritzsche/google-ads-python
|
6650cf426b34392d1f58fb912cb3fc25b848e766
|
969eff5b6c3cec59d21191fa178cffb6270074c3
|
refs/heads/master
| 2023-03-19T17:23:26.959021
| 2021-03-18T18:18:38
| 2021-03-18T18:18:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 4,619
|
py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v6/enums/billing_setup_status.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads/v6/enums/billing_setup_status.proto',
package='google.ads.googleads.v6.enums',
syntax='proto3',
serialized_options=b'\n!com.google.ads.googleads.v6.enumsB\027BillingSetupStatusProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v6/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V6.Enums\312\002\035Google\\Ads\\GoogleAds\\V6\\Enums\352\002!Google::Ads::GoogleAds::V6::Enums',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n8google/ads/googleads/v6/enums/billing_setup_status.proto\x12\x1dgoogle.ads.googleads.v6.enums\x1a\x1cgoogle/api/annotations.proto\"\x89\x01\n\x16\x42illingSetupStatusEnum\"o\n\x12\x42illingSetupStatus\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x0b\n\x07PENDING\x10\x02\x12\x11\n\rAPPROVED_HELD\x10\x03\x12\x0c\n\x08\x41PPROVED\x10\x04\x12\r\n\tCANCELLED\x10\x05\x42\xec\x01\n!com.google.ads.googleads.v6.enumsB\x17\x42illingSetupStatusProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v6/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V6.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V6\\Enums\xea\x02!Google::Ads::GoogleAds::V6::Enumsb\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_BILLINGSETUPSTATUSENUM_BILLINGSETUPSTATUS = _descriptor.EnumDescriptor(
name='BillingSetupStatus',
full_name='google.ads.googleads.v6.enums.BillingSetupStatusEnum.BillingSetupStatus',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PENDING', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='APPROVED_HELD', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='APPROVED', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='CANCELLED', index=5, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=148,
serialized_end=259,
)
_sym_db.RegisterEnumDescriptor(_BILLINGSETUPSTATUSENUM_BILLINGSETUPSTATUS)
_BILLINGSETUPSTATUSENUM = _descriptor.Descriptor(
name='BillingSetupStatusEnum',
full_name='google.ads.googleads.v6.enums.BillingSetupStatusEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_BILLINGSETUPSTATUSENUM_BILLINGSETUPSTATUS,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=122,
serialized_end=259,
)
_BILLINGSETUPSTATUSENUM_BILLINGSETUPSTATUS.containing_type = _BILLINGSETUPSTATUSENUM
DESCRIPTOR.message_types_by_name['BillingSetupStatusEnum'] = _BILLINGSETUPSTATUSENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
BillingSetupStatusEnum = _reflection.GeneratedProtocolMessageType('BillingSetupStatusEnum', (_message.Message,), {
'DESCRIPTOR' : _BILLINGSETUPSTATUSENUM,
'__module__' : 'google.ads.googleads.v6.enums.billing_setup_status_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.enums.BillingSetupStatusEnum)
})
_sym_db.RegisterMessage(BillingSetupStatusEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
[
"noreply@github.com"
] |
VincentFritzsche.noreply@github.com
|
c390f036a72316003dc86e7a10c585c72923d307
|
fa9e6008ef95d4868c998f153f395f2e8cd74bc1
|
/classes/school.py
|
78971a49d684c9009dd101479778b99feba7559b
|
[] |
no_license
|
JeremiahMauga/school-interface-one
|
25b6845c91bd52307248f1ffc5cee00a9578cdae
|
5a5b537656c15cc5a5593425bf41cbfdb78029f3
|
refs/heads/master
| 2023-05-09T07:35:26.172707
| 2021-06-02T16:37:40
| 2021-06-02T16:37:40
| 373,019,980
| 0
| 0
| null | 2021-06-02T02:47:21
| 2021-06-02T02:47:20
| null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
from classes.student import Student
from classes.staff import Staff
class School:
def __init__(self, name):
self.name = name
self.students = Student.all_students()
self.staff = Staff.all_Staff()
|
[
"miahmauga@gmail.com"
] |
miahmauga@gmail.com
|
2a35d32ccf95d01c08cc819c5e73fd1270e724b2
|
803b028d0dc7c0b6de1952f29de137e88e3a0def
|
/wordcount/urls.py
|
79c3c133da7083483ee33b7c822b40cff4ed0b1c
|
[] |
no_license
|
pprashantt/wordcount
|
49f259c7496cd1b124c099b6068d3c47359f7cc6
|
054b98ef1741a37a1f474e3b72dc3dfe7442a6dc
|
refs/heads/master
| 2020-04-13T20:41:58.840526
| 2018-12-28T17:44:20
| 2018-12-28T17:44:20
| 163,436,484
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 786
|
py
|
"""wordcount URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from . import views
urlpatterns = [
    path('', views.homepage),
    path('counts/', views.count, name='count')
]
|
[
"prashant.p@geitpl.com"
] |
prashant.p@geitpl.com
|
1c45a6ffb72a9be96cba5d013eb2e35b113e9c0d
|
486284c50c4058f9a3cffb8cd0f5f3dc23eb97e8
|
/feinman/20_11.py
|
6208afce1c707f75160178c00fd007ec8972add1
|
[] |
no_license
|
oleksiypr/physics
|
e8e31c29e6bd1201ab1d3909ebd34246a32cc7d6
|
5c31502934f30e5769f49acf933ef5d7dd3b0a1a
|
refs/heads/master
| 2021-11-19T18:05:30.806741
| 2021-09-06T19:13:27
| 2021-09-06T19:13:27
| 236,537,193
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,531
|
py
|
from sympy import *
init_printing()
# //@formatter:off
h = symbols('h') # the heights the body stated rolling from
I_0 = symbols('I_0') # moment of inertia relative (axis) center of gravity
M = symbols('M') # the mass
r = symbols('r', positive=True) # radius of the body surface in contact with the plane
g = symbols('g') # gravity of Earth
# //@formatter:on
print('1). Linear speed of the center of gravity at the end')
# //@formatter:off
omega = symbols('omega')
v = omega * r
# //@formatter:on
eq_energy_conservation = Eq(
M*g*h,
M * v**2 /2 + I_0 * omega**2 /2
)
pretty_print(eq_energy_conservation)
# //@formatter:off
omega = solve(eq_energy_conservation, omega)[1]
v = omega * r
# //@formatter:on
pretty_print(Eq(symbols('v'), v))
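# closed form of the above: v = sqrt(2*g*h / (1 + I_0/(M*r**2)))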
print('2). Apply above for cases:')
print('a) hollow sphere (thin spherical shell)')
I_1 = 2 * M*r**2 / 3  # thin spherical shell: I = (2/3) M r**2
v_1 = v.subs(I_0, I_1)
assert v_1 == sqrt(6 * g*h / 5)
pretty_print(Eq(symbols('v_1'), v_1))
print('b) disk')
I_2 = M*r**2 /2
v_2 = v.subs(I_0, I_2)
pretty_print(Eq(symbols('v_2'), v_2))
print('c) disk of mass M_1 and radius R_1 on the shaft with mass m_2 and radius r_2')
M_1, m_2 = symbols('M_1 m_2', positive=True)
R_1, r_2 = symbols('R_1 r_2', positive=True)
I_3 = (M_1 * R_1**2)/2 + (m_2 * r_2**2)/2
v_3 = v.subs({
I_0: I_3,
r: R_1,
M: M_1 + m_2
})
assert simplify(sqrt(2*(M_1 + m_2)*g*h/(3*M_1/2 + m_2*(1 + r_2**2/R_1**2/2))) - v_3) == 0
pretty_print(Eq(symbols('v_3'), simplify(v_3)))
|
[
"oleksii.prosianko@fedex.com"
] |
oleksii.prosianko@fedex.com
|
903d4982e43e8339cc5d5c2a3b419c51dfab23a0
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2620/60672/250069.py
|
fe55818dd7580982aee0fd8dd287ae5431a4959e
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 117
|
py
|
T = int(input())
for _ in range(T):
    N = int(input())
    total = 0
    for i in range(N + 1):
        total = total + pow(i, 5)
    print(total)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
d0693cbf31df34a171c9bb3fa87794665fd6bb38
|
a1bf8212283227b6746f30af10ee3f449e9f7cfa
|
/prj/Django/manager_project/manager_project/settings.py
|
a23d51c0092f4f758db2b2ee7238626fd3aab160
|
[] |
no_license
|
tadasi12/dev
|
33904891dbbfecd709e05b0c7c792481c26d39df
|
afc1d76c9677487e353afa7dfced4920c894ea83
|
refs/heads/master
| 2020-03-19T08:24:28.402504
| 2019-07-08T16:53:58
| 2019-07-08T16:53:58
| 136,201,328
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,351
|
py
|
"""
Django settings for manager_project project.
Generated by 'django-admin startproject' using Django 2.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&mh+r&im*uw37ie6p%+om#2bs2c4-x%1zf1r%h%u5ou^c39-s6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'manager', # 追加部分
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'manager_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'manager_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'ja-JP'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
# Static file settings
STATIC_ROOT = os.path.join(BASE_DIR, 'assets')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
MEDIA_ROOT = os.path.join(BASE_DIR, 'upload')
MEDIA_URL = '/upload/'
|
[
"tadasi12@yahoo.co.jp"
] |
tadasi12@yahoo.co.jp
|
097e95e15a39e5b93e54bbc9056cfb472ad55514
|
0dc44b18d3f087b4926016f63a9fc824b9f3606f
|
/feature_extractor.py
|
a549a0279deaebc987d3bc38b89c89baf581a7c6
|
[] |
no_license
|
vishalk9/Sentence_Semantic_Similarity
|
c14243f111107ca2de4634a0c316f75f0e790e5d
|
63e213e95c3a457f5cf0aa5ddf81ff1b509cdaf4
|
refs/heads/master
| 2020-03-22T18:37:33.011423
| 2018-04-29T08:36:15
| 2018-04-29T08:36:15
| 140,471,344
| 2
| 1
| null | 2018-07-10T18:16:13
| 2018-07-10T18:16:13
| null |
UTF-8
|
Python
| false
| false
| 6,841
|
py
|
from __future__ import division
import nltk
from nltk.corpus import wordnet as wn
from nltk.corpus import brown
import math
import numpy as np
import sys
from itertools import izip
ALPHA = 0.2
BETA = 0.45
ETA = 0.4
PHI = 0.2
DELTA = 0.85
brown_freqs = dict()
N = 0
def get_best_synset_pair(word_1, word_2):
max_sim = -1.0
synsets_1 = wn.synsets(word_1)
synsets_2 = wn.synsets(word_2)
if len(synsets_1) == 0 or len(synsets_2) == 0:
return None, None
else:
max_sim = -1.0
best_pair = None, None
for synset_1 in synsets_1:
for synset_2 in synsets_2:
sim = wn.path_similarity(synset_1, synset_2)
if sim > max_sim:
max_sim = sim
best_pair = synset_1, synset_2
return best_pair
def length_dist(synset_1, synset_2):
l_dist = sys.maxint
if synset_1 is None or synset_2 is None:
return 0.0
if synset_1 == synset_2:
l_dist = 0.0
else:
wset_1 = set([str(x.name()) for x in synset_1.lemmas()])
wset_2 = set([str(x.name()) for x in synset_2.lemmas()])
if len(wset_1.intersection(wset_2)) > 0:
l_dist = 1.0
else:
# just compute the shortest path between the two
l_dist = synset_1.shortest_path_distance(synset_2)
if l_dist is None:
l_dist = 0.0
return math.exp(-ALPHA * l_dist)
def hierarchy_dist(synset_1, synset_2):
h_dist = sys.maxint
if synset_1 is None or synset_2 is None:
return h_dist
if synset_1 == synset_2:
h_dist = max([x[1] for x in synset_1.hypernym_distances()])
else:
# find the max depth of least common subsumer
hypernyms_1 = {x[0]:x[1] for x in synset_1.hypernym_distances()}
hypernyms_2 = {x[0]:x[1] for x in synset_2.hypernym_distances()}
lcs_candidates = set(hypernyms_1.keys()).intersection(
set(hypernyms_2.keys()))
if len(lcs_candidates) > 0:
lcs_dists = []
for lcs_candidate in lcs_candidates:
lcs_d1 = 0
if hypernyms_1.has_key(lcs_candidate):
lcs_d1 = hypernyms_1[lcs_candidate]
lcs_d2 = 0
if hypernyms_2.has_key(lcs_candidate):
lcs_d2 = hypernyms_2[lcs_candidate]
lcs_dists.append(max([lcs_d1, lcs_d2]))
h_dist = max(lcs_dists)
else:
h_dist = 0
return ((math.exp(BETA * h_dist) - math.exp(-BETA * h_dist)) /
(math.exp(BETA * h_dist) + math.exp(-BETA * h_dist)))
def word_similarity(word_1, word_2):
synset_pair = get_best_synset_pair(word_1, word_2)
return (length_dist(synset_pair[0], synset_pair[1]) *
hierarchy_dist(synset_pair[0], synset_pair[1]))
def most_similar_word(word, word_set):
max_sim = -1.0
sim_word = ""
for ref_word in word_set:
sim = word_similarity(word, ref_word)
if sim > max_sim:
max_sim = sim
sim_word = ref_word
return sim_word, max_sim
def info_content(lookup_word):
global N
if N == 0:
for sent in brown.sents():
for word in sent:
word = word.lower()
if not brown_freqs.has_key(word):
brown_freqs[word] = 0
brown_freqs[word] = brown_freqs[word] + 1
N = N + 1
lookup_word = lookup_word.lower()
n = 0 if not brown_freqs.has_key(lookup_word) else brown_freqs[lookup_word]
return 1.0 - (math.log(n + 1) / math.log(N + 1))
def semantic_vector(words, joint_words, info_content_norm):
sent_set = set(words)
semvec = np.zeros(len(joint_words))
i = 0
for joint_word in joint_words:
if joint_word in sent_set:
semvec[i] = 1.0
if info_content_norm:
semvec[i] = semvec[i] * math.pow(info_content(joint_word), 2)
else:
sim_word, max_sim = most_similar_word(joint_word, sent_set)
semvec[i] = PHI if max_sim > PHI else 0.0
if info_content_norm:
semvec[i] = semvec[i] * info_content(joint_word) * info_content(sim_word)
i = i + 1
return semvec
def semantic_similarity(sentence_1, sentence_2, info_content_norm):
# word vector representing if word is present in sentence
words_1 = nltk.word_tokenize(sentence_1)
words_2 = nltk.word_tokenize(sentence_2)
joint_words = set(words_1).union(set(words_2))
vec_1 = semantic_vector(words_1, joint_words, info_content_norm)
vec_2 = semantic_vector(words_2, joint_words, info_content_norm)
return np.dot(vec_1, vec_2.T) / (np.linalg.norm(vec_1) * np.linalg.norm(vec_2))
#cosine similarity
def word_order_vector(words, joint_words, windex):
wovec = np.zeros(len(joint_words))
i = 0
wordset = set(words)
for joint_word in joint_words:
if joint_word in wordset:
wovec[i] = windex[joint_word]
else:
sim_word, max_sim = most_similar_word(joint_word, wordset)
if max_sim > ETA:
wovec[i] = windex[sim_word]
else:
wovec[i] = 0
i = i + 1
return wovec
def word_order_similarity(sentence_1, sentence_2):
    # build a word-order vector for each sentence over the joint word set
words_1 = nltk.word_tokenize(sentence_1)
words_2 = nltk.word_tokenize(sentence_2)
joint_words = list(set(words_1).union(set(words_2)))
    windex = {x[1]: x[0] for x in enumerate(joint_words)}  # map each joint word to its index
r1 = word_order_vector(words_1, joint_words, windex)
r2 = word_order_vector(words_2, joint_words, windex)
return 1.0 - (np.linalg.norm(r1 - r2) / np.linalg.norm(r1 + r2))
    # normalized Euclidean distance between the two word-order vectors
def similarity(sentence_1, sentence_2, info_content_norm):
# 85% weightage to semantic_similarity and rest 15% to word_order_similarity
return DELTA * semantic_similarity(sentence_1, sentence_2, info_content_norm) + \
(1.0 - DELTA) * word_order_similarity(sentence_1, sentence_2)
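# Minimal usage sketch (not part of the original evaluation loop below); the
# first call walks the Brown corpus once to build the frequency table:
#   print similarity("A cat sat on the mat", "A feline rested on a rug", True)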
X1 = []
X2 = []
Y = []
with open("Data/train/STS2012-test/STS.input.MSRpar.txt", "rb") as f1, open("Data/train/STS2012-test/STS.gs.MSRpar.txt", "rb") as f2:
    # Each input row holds a tab-separated sentence pair; the gs file holds
    # the gold-standard similarity score for the corresponding pair.
    for row in f1:
        l = row.strip().split('\t')
        X1.append(l[0].decode('utf-8'))
        X2.append(l[1].decode('utf-8'))
    for row in f2:
        Y.append(row.strip())

with open("Data/train/STS2012-test/model1_MSRpar.txt", "wb") as f:
    for s1, s2, y in izip(X1, X2, Y):
        pred_y = similarity(s1, s2, True) * 5.0  # scale to the 0-5 gold range
        print("%.3f\t%.3f" % (float(y), pred_y))
        f.write(str(pred_y) + "\n")
|
[
"vishal.ku86@gmail.com"
] |
vishal.ku86@gmail.com
|
53f6b166ff3157850e2d9b69c458ddb044823c82
|
b5af43e3253a6ed545291cdcdf42edcd7546bb6f
|
/game.py
|
279c4034ba8622cedc8cd8d01c41771da7e86dd1
|
[
"MIT"
] |
permissive
|
yulwin/python-exercises
|
9d24e06aa97ba1a544f822e6fa30dfd93626fdac
|
812e28e26845d4578d1294dfdd5f5b3e9de9f0b7
|
refs/heads/master
| 2020-03-19T10:05:40.459692
| 2018-06-26T08:54:21
| 2018-06-26T08:54:21
| 136,342,861
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 301
|
py
|
class Room(object):

    def __init__(self, name, description):
        self.name = name
        self.description = description
        self.paths = {}

    def go(self, direction):
        return self.paths.get(direction, None)

    def add_paths(self, paths):
        self.paths.update(paths)
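
# Hedged usage sketch (room names and layout are invented, not from the
# original exercise): wire two rooms together and walk between them.
def _demo_rooms():
    center = Room("Center", "A plain room.")
    hallway = Room("Hallway", "A long corridor.")
    center.add_paths({'north': hallway})
    hallway.add_paths({'south': center})
    assert center.go('north') is hallway
    assert center.go('west') is None  # no path in that direction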
|
[
"yulwin178@gmail.com"
] |
yulwin178@gmail.com
|
b38fec8341f41c637b4a772a7664070040e97cc6
|
145762063f36ed93b62219843e2d0b52438006c6
|
/assignment02.py
|
cbbb4799680ce5714a478a403bc3ca20f08f54d2
|
[] |
no_license
|
Szeretni/TTKS0300-Script-Programming
|
dce6c7ccbbfe6f47ee47165e3bef31e7b69aed66
|
3685b19161ed66bffda5fbbcf1d65ab756ffdb7e
|
refs/heads/master
| 2021-05-09T02:03:04.081567
| 2019-03-18T07:04:15
| 2019-03-18T07:04:15
| 119,195,025
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 72
|
py
|
greeting = "Hello World"
for character in greeting:
    print character
|
[
"hannu@muaddibs.net"
] |
hannu@muaddibs.net
|
d179386d14f850369e106bcfa247671821d4fd31
|
e786cda0fdc003eddd14466ed9b9b336559e58d6
|
/data/get-minute-data.py
|
4536efe756f76de33444e3cded021e1c35088356
|
[
"MIT"
] |
permissive
|
YA9/stock-market-prediction
|
6ac724db58ee52ebedc52dc83e02912aea738edb
|
1756187c75efd69657888e04c41f78526ad86c84
|
refs/heads/master
| 2023-02-01T01:20:58.956907
| 2020-08-21T20:55:40
| 2020-08-21T20:55:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,166
|
py
|
# Copyright (c) 2018 Benson Tran, MIT License
import requests
import pandas as pd
import arrow


def get_quote_data(symbol='AAPL', data_range='7d', data_interval='1m'):
    # Yahoo Finance chart endpoint: returns intraday quotes as JSON.
    res = requests.get(
        f'https://query1.finance.yahoo.com/v8/finance/chart/{symbol}?range={data_range}&interval={data_interval}')
    data = res.json()
    body = data['chart']['result'][0]
    # Convert the Unix timestamps to naive EST datetimes for the index.
    dt = pd.Series(map(lambda x: arrow.get(x).to('EST').datetime.replace(tzinfo=None), body['timestamp']), name='dt')
    df = pd.DataFrame(body['indicators']['quote'][0], index=dt)
    df = df.loc[:, ('close', 'volume')]
    df.dropna(inplace=True)  # removing NaN rows
    df.columns = ['Price', 'Volume']  # renaming columns in pandas
    start_date = df.index[0].strftime('%Y%m%d')
    out_filename = f"{symbol}{start_date}{data_range}{data_interval}.csv"
    df.to_csv(out_filename)
    return df


if __name__ == "__main__":
    data = get_quote_data(input('ticker (ex "JNUG"): '),
                          input('range (ex "7d"): '), input('interval (ex "1m"): '))
    print(data)
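
# Hedged usage sketch (ticker and post-processing are illustrative only):
# fetch a week of minute bars programmatically and summarise simple returns.
#   df = get_quote_data('AAPL', '7d', '1m')
#   returns = df['Price'].pct_change().dropna()
#   print(returns.describe())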
|
[
"amascillaro@gmail.com"
] |
amascillaro@gmail.com
|
4a1121a0cdf9f1fee3aa3a07d6155aa50db72c68
|
471ea669e21abdb4e4915610b4b5eb43ea3cffe9
|
/leetcode/390.消除游戏.py
|
e25d5de18c3f8e1f181c22f6906c3c30c596872b
|
[] |
no_license
|
JiahuaLink/nowcoder-leetcode
|
26aed099e215cfc1d8e8afffc62fafa26b26b06f
|
0155fc33511cbe892f58550d561d3aa3efcd56b9
|
refs/heads/master
| 2023-07-09T03:05:31.227720
| 2021-08-03T06:50:36
| 2021-08-03T06:50:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
# Straightforward simulation, following the problem statement.
class Solution(object):
    def lastRemaining(self, n):
        nums = [i + 1 for i in range(n)]
        res = []
        while len(nums) > 1:
            # Keep every second element, then reverse to flip direction.
            for i in range(1, len(nums), 2):
                res.append(nums[i])
            nums, res = res[::-1], []
        return nums[0]


# Pattern-based O(log n) solution: if input a yields b, then input 2a yields
# 2 * (a - b + 1). (This definition shadows the simulation class above.)
class Solution(object):
    def lastRemaining(self, n):
        if n == 1:
            return 1
        # Integer division keeps the recursion on whole numbers.
        return 2 * (n // 2 - self.lastRemaining(n // 2) + 1)
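
# Hedged sanity check (not part of the original submission): a brute-force
# simulation of the elimination game, compared against the recursive
# solution above for small n.
def _check_small_n(limit=32):
    def brute(n):
        nums, left_to_right = list(range(1, n + 1)), True
        while len(nums) > 1:
            # Survivors of one elimination pass in either direction.
            nums = nums[1::2] if left_to_right else nums[-2::-2][::-1]
            left_to_right = not left_to_right
        return nums[0]
    sol = Solution()  # resolves to the pattern-based class defined last
    for n in range(1, limit):
        assert brute(n) == sol.lastRemaining(n)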
|
[
"noreply@github.com"
] |
JiahuaLink.noreply@github.com
|
e4b483a764f9ab2efc9c6ffe2605fa6913727b13
|
68430b047879745c1b5791717e74f4f033d6b1a4
|
/mention_parse.py
|
3a762c82e11a1e6c4f49513fcd16167657117828
|
[] |
no_license
|
v-arora/ucsc-class-info-bot
|
6109f3d7c4300dcbf9396dc18712a6bf05c5ae53
|
c4c85febacef0d0cec831d099745ecbb0422857e
|
refs/heads/master
| 2021-01-15T20:29:29.295270
| 2016-03-10T06:36:25
| 2016-03-10T06:36:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,559
|
py
|
"""
Vastly superior version of find_mentions which is faster can can see:
* multi-mentions: mentions of same department with list of numbers, e.g. "Math 21, 23b, 24 and 100"
* letter-list mentions: mentions of same number with list of letters, e.g. "CE 129A/B/C"
* letter-list mentions in a multi mention, e.g. "CS 4a, 37a/b, 15, 163w/x/y/z"
"""
import re
# from build_database._all_departments, with build_database._lit_department_codes.values() and "CS" and "CE"
_pattern_depts = \
"acen|ams|anth|aplx|art|artg|astr|bioc|bme|ce|chem|chin|clei|clni|clte|cmmu|cmpe|cmpm|cmps|cowl|cres|" \
"crwn|cs|danm|eart|econ|educ|ee|bioe|envs|film|fmst|fren|game|germ|gree|havc|hebr|his|hisc|ital|japn|" \
"jwst|krsg|laad|lals|latn|lgst|ling|lit|ltcr|ltel|ltfr|ltge|ltgr|ltin|ltit|ltmo|ltpr|ltsp|ltwl|math|" \
"biol|merr|metx|musc|oaks|ocea|phil|phye|phys|poli|port|prtr|psyc|punj|russ|scic|socd|socy|span|sphs|" \
"stev|thea|tim|ucdc|writ|yidd"
# matches a letter-list mention: a mention of same number with list of letters, e.g. "CE 129A/B/C"
_pattern_mention_letter_list = "(?:\d+(?:[A-Za-z] ?/ ?)+[A-Za-z])"
# matches a normal mention: a mention with a course number and one optional letter, e.g. "12" or "12a"
_pattern_mention_normal = "(?:\d+[A-Za-z]?)"
# matches either a letter-list mention or a normal mention
_pattern_mention_any = "(?:" + _pattern_mention_letter_list + "|" + _pattern_mention_normal + ")"
# matches a delimiter in a multi-mention, e.g. "Math 21, 23b, 24 and 100"
_pattern_delimiter = "(?:[,/ &+]|or|and|with)*"
# matches a whole mention string - a department code then multiple course numbers and possibly multiple course letters.
# e.g. matches "CS 10, 15a, or 35a/b/c"
_pattern_final = \
"(?:^|\\b)(?:" + _pattern_depts + ") ?(?:" + _pattern_mention_any + _pattern_delimiter + ")*" + _pattern_mention_any
def _parse_letter_list(dept, list_letter_mention):
"""Given a string of one course number a list of letters, returns a list with one letter per number.
e.g. '129A/B/C' becomes ['129A', '129B', '129C']
:param dept: the department the mention is in
:type dept: str
:param list_letter_mention: a string with one course number and a list of letters, e.g. '129A/B/C'
:type list_letter_mention: str
:return: a list of normal mentions, e.g. ['129A', '129B', '129C']
:rtype: list
"""
m = re.match(" ?(\d+) ?((?:[A-Za-z] ?/ ?)+[A-Za-z])", list_letter_mention) # != _pattern_mention_letter_list
num = m.group(1)
letters = m.group(2).split('/')
return_list = []
for l in letters:
return_list.append(dept + " " + num + l.strip())
return return_list
def _parse_multi_mention(multi_mention):
"""Parses multi-mentions into normal mentions.
:param multi_mention: a multi-mention, e.g. "Math 21, 23b, 24 and 100"
:type multi_mention: str
:return: normal mentions from the multi-mention
:rtype: list
"""
mentions = []
# extract department code
match_dept = re.search(_pattern_depts, multi_mention, re.IGNORECASE)
dept = multi_mention[match_dept.start():match_dept.end()].lower()
if dept == 'cs':
dept = 'cmps'
if dept == 'ce':
dept = 'cmpe'
# the rest of the string, past department code
rest = multi_mention[match_dept.end():]
# look for letter-list mentions, like "129a/b/c"
mentions_letter_list = re.findall(_pattern_mention_letter_list, rest)
for m in mentions_letter_list:
mentions.extend(_parse_letter_list(dept, m))
# take out letter-list mentions, if any
rest = re.sub(_pattern_mention_letter_list, "", rest)
# look for normal mentions, like "12" or "12a"
men_normal = re.findall(_pattern_mention_normal, rest)
for m in men_normal:
mentions.append(dept + ' ' + m)
return mentions
def parse_string(str_):
"""Finds mentions in a string.
Can see...
* multi-mentions: mentions of same department with list of numbers, e.g. "Math 21, 23b, 24 and 100"
* letter-list mentions: mentions of same number with list of letters, e.g. "CE 129A/B/C"
* letter-list mentions in a multi mention, e.g. "CS 4a, 37a/b, 15, 163w/x/y/z"
:param str_: string to find mentions in
:type str_: string
:return: list of strings of mentions
:rtype: list
"""
if not str_:
return []
mentions = []
multi_mentions = re.findall(_pattern_final, str_, re.IGNORECASE | re.MULTILINE)
for m in multi_mentions:
mentions.extend(_parse_multi_mention(m))
return mentions
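
# Hedged usage sketch (the sentence is invented for illustration): letter-list
# mentions are expanded first, then the remaining plain course numbers; "cs"
# and "ce" are normalised to "cmps" and "cmpe".
def _demo_parse():
    print(parse_string("Take CS 10, 15a, or 35a/b before Math 21 and 23b."))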
|
[
"pfroud@gmail.com"
] |
pfroud@gmail.com
|
e7fdd285417b2b0a0bc0034520c25347072d82a9
|
3bade207becd7eaf2be98d1e6d3ced8b180d65e6
|
/chat_room/chat_server.py
|
47f88c2b90d54b23b064f5f7605288eaf7957f05
|
[] |
no_license
|
letitbedulllife/chat_room
|
69a1288364546f0c263ec6c2d5cc284ec911ca61
|
a5f41443fde9637508ac9e3fc2705b7a3b6d2e08
|
refs/heads/master
| 2020-07-27T18:14:58.713231
| 2019-09-18T00:37:17
| 2019-09-18T00:37:17
| 209,183,442
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,775
|
py
|
'''
chat room
env: python3.6
socket (UDP) & fork exercise
'''
import os
from socket import *

HOST = '0.0.0.0'
PORT = 9999
sockfd_addr = (HOST, PORT)
user_data = []


def controller_data(sockfd):
    # Parent process: read admin input and broadcast it as the user "Admin".
    while True:
        msg = input('Enter admin message: ')
        msg = 'C Admin ' + msg
        sockfd.sendto(msg.encode(), sockfd_addr)


def do_quit(sockfd, name):
    msg = '\n%s left the group chat' % name
    for item in user_data:
        if item[0] == name:
            sockfd.sendto(b'EXIT', item[1])
        else:
            sockfd.sendto(msg.encode(), item[1])
    for item in user_data:
        if item[0] == name:
            user_data.remove(item)
            break


def do_chat(sockfd, name, text):
    msg = '\n%s : %s' % (name, text)
    for i in user_data:
        if name != i[0]:
            sockfd.sendto(msg.encode(), i[1])


def do_login(sockfd, name, addr):
    for item in user_data:
        if name == item[0]:
            sockfd.sendto('Username already exists'.encode(), addr)
            return
    sockfd.sendto(b'OK', addr)
    msg = 'Welcome %s to the group chat' % name
    for item in user_data:
        sockfd.sendto(msg.encode(), item[1])
    user_data.append((name, addr))


def receive_data(sockfd):
    # Child process: dispatch incoming datagrams by their one-letter opcode.
    while True:
        data, addr = sockfd.recvfrom(1024)
        temp = data.decode().split(' ', 2)
        if temp[0] == 'L':
            do_login(sockfd, temp[1], addr)
        elif temp[0] == 'C':
            do_chat(sockfd, temp[1], temp[2])
        elif temp[0] == 'Q':
            do_quit(sockfd, temp[1])


def main():
    sockfd = socket(AF_INET, SOCK_DGRAM)
    sockfd.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    sockfd.bind(sockfd_addr)
    pid = os.fork()
    if pid == 0:
        receive_data(sockfd)
    else:
        controller_data(sockfd)


if __name__ == '__main__':
    main()
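
# Hedged companion sketch (not part of this file; name and address are
# placeholders): a minimal client for the "L name" / "C name text" /
# "Q name" protocol the server dispatches on.
#   from socket import socket, AF_INET, SOCK_DGRAM
#   s = socket(AF_INET, SOCK_DGRAM)
#   server = ('127.0.0.1', 9999)
#   s.sendto(b'L alice', server)                 # log in as "alice"
#   print(s.recvfrom(1024)[0])                   # b'OK' on success
#   s.sendto('C alice hello everyone'.encode(), server)
#   s.sendto(b'Q alice', server)                 # quit the chat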
|
[
"994854055@qq.com"
] |
994854055@qq.com
|
07ea3e284b935d07ab11e00124830db9bb27a6c7
|
512b388a53022f561e2375b4621f78572d3b4f04
|
/utils/models.py
|
14f07ebf726d38b7d946eb8638c9981ad0c2400d
|
[] |
no_license
|
Madoka09/Worker15
|
006d5ac44dc55c3ae7f72d3b8300f3567395cdff
|
181012d309052b2df3d4ef99a197e8acef73a185
|
refs/heads/master
| 2023-03-24T05:29:02.060796
| 2021-03-16T21:56:21
| 2021-03-16T21:56:21
| 336,394,683
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 865
|
py
|
"""
Utilities for created_at and modified_at fields
"""
# Django
from django.db import models
class TimestampsModel(models.Model):
"""
UtilsModel provides a base class from which other models will inherit.
This class provides the following attributes:
- created_at: timestamp that store th datetime the object was created
- modified_at: timestamp that store th datetime the object was modified
"""
created_at = models.DateTimeField(
'created at',
auto_now_add=True,
help_text='Date time on which the object was created'
)
modified_at = models.DateTimeField(
'modified at',
auto_now=True,
help_text='Date time on which the object was modified'
)
class Meta:
abstract = True
get_latest_by = 'created_at'
ordering = ['-created_at', '-modified_at']
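
# Hedged usage sketch (the Note model is invented): concrete models pick up
# both timestamp fields and the Meta ordering by subclassing TimestampsModel.
#   class Note(TimestampsModel):
#       body = models.TextField()
#   Note.objects.latest()  # uses get_latest_by = 'created_at'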
|
[
"personal.galvan.francisco@gmail.com"
] |
personal.galvan.francisco@gmail.com
|
6728b00d8785d466013431e65c1dbabfcf82983d
|
30fe7671b60825a909428a30e3793bdf16eaaf29
|
/.metadata/.plugins/org.eclipse.core.resources/.history/e5/20fadfd0c2de0016156ad23b6c415615
|
4e07c45103b7fff65595cef2312f6ca204789746
|
[] |
no_license
|
abigdream84/PythonStudy
|
0fc7a3b6b4a03a293b850d0ed12d5472483c4fb1
|
059274d3ba6f34b62ff111cda3fb263bd6ca8bcb
|
refs/heads/master
| 2021-01-13T04:42:04.306730
| 2017-03-03T14:54:16
| 2017-03-03T14:54:16
| 79,123,274
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 59
|
#!/usr/bin/env python
#coding:UTF-8
import pymysql
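
# Hedged sketch of typical pymysql usage (host, credentials and query are
# placeholders, not from the original file):
#   conn = pymysql.connect(host='127.0.0.1', user='root', password='***', db='test', charset='utf8')
#   with conn.cursor() as cur:
#       cur.execute('SELECT VERSION()')
#       print(cur.fetchone())
#   conn.close()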
|
[
"abigdream@hotmail.com"
] |
abigdream@hotmail.com
|
|
ac6bf41cbf3cc72ac865198b018b54cf9211913d
|
c051bafb897c69f3bdd0252709a0d63c0b9d1cca
|
/model/case.py
|
260938689c63e92dbf6fc22c044af0630ff31813
|
[] |
no_license
|
IronPanda0/pethos
|
bdf08f9f777a199b33cc63b5f50002985c7a1b5a
|
3f359560d68c60d14aba24c4c04f66158cea522c
|
refs/heads/master
| 2023-04-09T11:43:07.167992
| 2021-04-02T16:30:32
| 2021-04-02T16:30:32
| 352,262,287
| 0
| 1
| null | 2021-04-19T02:30:54
| 2021-03-28T06:56:29
|
Vue
|
UTF-8
|
Python
| false
| false
| 667
|
py
|
# coding: utf-8
from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()


class Case(db.Model):
    __tablename__ = 'case'

    caseId = db.Column(db.Integer, primary_key=True, info='case ID')
    caseName = db.Column(db.String(50, 'utf8_general_ci'), info='case name')
    animalName = db.Column(db.String(20), info='pet name')
    diseaseName = db.Column(db.String(50, 'utf8_general_ci'), info='disease name')
    diseaseInfo = db.Column(db.String(500, 'utf8_general_ci'), info='text description')
    videoUrl = db.Column(db.String(50, 'utf8_general_ci'), info='video info')
    imageUrl = db.Column(db.String(50, 'utf8_general_ci'), info='image info')
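
# Hedged usage sketch (filter value is illustrative; assumes db is bound to a
# configured Flask app): a typical query against the case table.
#   Case.query.filter_by(animalName='cat').all()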
|
[
"2632235311@qq.com"
] |
2632235311@qq.com
|
334199b4568464627f17b79b76c620b056d04749
|
9576ff34dea3373b3ffd47448490cad25276101b
|
/surveys/serializers/answer.py
|
4605dff6dfb78ad75d717d5c409bf40ad162cf8e
|
[] |
no_license
|
CheloVek0116/surveys_system
|
b24d334aa13da36a82d79198a6cbd2ca588b6b6f
|
d4c67f56dbf6ba0ce0adbd52e3456e7dcb8dd02a
|
refs/heads/master
| 2023-01-31T21:05:35.052397
| 2020-12-18T06:01:31
| 2020-12-18T06:58:31
| 322,407,958
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,236
|
py
|
from rest_framework import serializers

from surveys.models import (
    ChoiceQuestion,
    MultipleChoiceQuestion,
    TextAnswer,
    TextQuestion,
)
from surveys.serializers.answer_option import (
    ChoiceAnswerOptionSerializer,
    MultipleChoiceAnswerOptionSerializer,
)


class TextAnswerSerializer(serializers.ModelSerializer):

    class Meta:
        model = TextAnswer
        fields = ('pk', 'text')


class TextQuestionResponseSerializer(serializers.ModelSerializer):
    answer = TextAnswerSerializer()

    class Meta:
        model = TextQuestion
        fields = ('pk', 'answer', 'question_type',)
        extra_kwargs = {'pk': {'read_only': False}}


class ChoiceQuestionResponseSerializer(serializers.ModelSerializer):
    answer = ChoiceAnswerOptionSerializer()

    class Meta:
        model = ChoiceQuestion
        fields = ('pk', 'answer', 'question_type',)
        extra_kwargs = {'pk': {'read_only': False}}


class MultipleChoiceQuestionResponseSerializer(serializers.ModelSerializer):
    answer = MultipleChoiceAnswerOptionSerializer(many=True)

    class Meta:
        model = MultipleChoiceQuestion
        fields = ('pk', 'answer', 'question_type',)
        extra_kwargs = {'pk': {'read_only': False}}
|
[
"megabalabol@mail.ru"
] |
megabalabol@mail.ru
|
12aad8f53eae2ba82cffcd065f95e49e51f06c4e
|
926621c29eb55046f9f59750db09bdb24ed3078e
|
/lib/surface/compute/instances/set_scheduling.py
|
2ba983bd7fbe2cc02857892b32261fb66244365c
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
bopopescu/SDK
|
525d9b29fb2e901aa79697c9dcdf5ddd852859ab
|
e6d9aaee2456f706d1d86e8ec2a41d146e33550d
|
refs/heads/master
| 2022-11-22T18:24:13.464605
| 2016-05-18T16:53:30
| 2016-05-18T16:53:30
| 282,322,505
| 0
| 0
|
NOASSERTION
| 2020-07-24T21:52:25
| 2020-07-24T21:52:24
| null |
UTF-8
|
Python
| false
| false
| 3,914
|
py
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for setting scheduling for virtual machine instances."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.core import apis as core_apis
class SetSchedulingInstances(base_classes.NoOutputAsyncMutator):
  """Set scheduling options for Google Compute Engine virtual machine instances.
  """

  @staticmethod
  def Args(parser):
    restart_on_failure = parser.add_argument(
        '--restart-on-failure',
        action='store_true',
        default=None,  # Tri-valued: None => don't change the setting.
        help='Restart instances if they are terminated by Compute Engine.')
    restart_on_failure.detailed_help = """\
        The instances will be restarted if they are terminated by Compute
        Engine. This does not affect terminations performed by the user.
        """

    messages = core_apis.GetMessagesModule('compute', 'v1')
    migration_options = sorted(messages.Scheduling
                               .OnHostMaintenanceValueValuesEnum
                               .to_dict().keys())
    maintenance_policy = parser.add_argument(
        '--maintenance-policy',
        choices=migration_options,
        type=lambda x: x.upper(),
        help=('Specifies the behavior of the instances when their host '
              'machines undergo maintenance.'))
    maintenance_policy.detailed_help = """\
        Specifies the behavior of the instances when their host machines
        undergo maintenance. TERMINATE indicates that the instances should be
        terminated. MIGRATE indicates that the instances should be migrated
        to a new host. Choosing MIGRATE will temporarily impact the
        performance of instances during a migration event.
        """

    parser.add_argument(
        'name',
        metavar='INSTANCE',
        completion_resource='compute.instances',
        help='The name of the instance for which to change scheduling options.')

    flags.AddZoneFlag(
        parser,
        resource_type='instance',
        operation_type='set scheduling settings for')

  @property
  def service(self):
    return self.compute.instances

  @property
  def method(self):
    return 'SetScheduling'

  @property
  def resource_type(self):
    return 'instances'

  def CreateRequests(self, args):
    """Returns a list of requests necessary for setting scheduling options."""
    instance_ref = self.CreateZonalReference(args.name, args.zone)

    scheduling_options = self.messages.Scheduling()
    scheduling_options.automaticRestart = args.restart_on_failure

    if args.maintenance_policy:
      scheduling_options.onHostMaintenance = (
          self.messages.Scheduling.OnHostMaintenanceValueValuesEnum(
              args.maintenance_policy))

    request = self.messages.ComputeInstancesSetSchedulingRequest(
        instance=instance_ref.Name(),
        project=self.project,
        scheduling=scheduling_options,
        zone=instance_ref.zone)

    return [request]


SetSchedulingInstances.detailed_help = {
    'brief': ('Set scheduling options for Google Compute Engine virtual '
              'machines'),
    'DESCRIPTION': """\
        *{command}* is used to configure scheduling options for Google Compute
        Engine virtual machines.
        """,
}
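
# Hedged example invocation (instance and zone are placeholders):
#   gcloud compute instances set-scheduling example-instance \
#       --zone us-central1-a --maintenance-policy MIGRATE --restart-on-failure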
|
[
"richarddewalhalla@gmail.com"
] |
richarddewalhalla@gmail.com
|
c6710ec2ab5913f61c93dc69b575afb49412ee8d
|
d0e0235a85e383d80bb1a52e8ba4174c266b7c71
|
/amolf/numerical_data_analysis/infovsCrosscorrelation.py
|
9de48fbc250f1e44e386002b0b748aa13e5ac086
|
[
"BSD-2-Clause"
] |
permissive
|
Repythory/Libraries
|
388f829b8bb8bfa3d2ca0af1b04f40f40b436679
|
d5f01267a5f396bd0d74b6291f4552bcc6790777
|
refs/heads/master
| 2021-01-21T21:40:04.790975
| 2016-03-18T16:25:39
| 2016-03-18T16:25:39
| 26,526,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,198
|
py
|
# compute information vs cross correlation
from numpy import *
from scipy import *
from matplotlib import pyplot as plt

# Oscillation parameters: period T, angular frequency w, phase phi,
# baseline r, amplitude A and integration step dt.
T = 24.
w = 2 * pi / T
phi = pi / 2.
r = 200.
A = 50.
dt = 0.05


def infoCross(a):
    # Sweep phase (fi) and correlation (cor); for each pair, integrate the
    # joint Gaussian over the (x, y) grid and accumulate the information.
    fi = arange(0, pi, 0.1)
    t = arange(0, T, dt)
    XM = arange(150, 320)
    YM = arange(150, 320)
    c = a
    infotot = []
    cor = arange(-0.99, 1, 0.05)
    for b in cor:
        infoA = []
        for phi in fi:
            integ = []
            XYind = 0
            pxy = []
            for x in XM:
                pxy.append([])
                for y in YM:
                    C = a * c - b ** 2
                    Z = 1 / (sqrt((2 * pi) ** 2 * C) * T)
                    pxy[XYind].append(dt*sum(Z*exp(-0.5*(1/C)*(a*(x-A*sin(w*t)-r)**2+c*(y-A*sin(w*t+phi)-r)**2+2*b*((x-A*sin(w*t)-r)*(y-A*sin(w*t+phi)-r))))))
                    integ.append(sum(Z*(exp(-0.5*(1/C)*(a*(x-A*sin(w*t)-r)**2+c*(y-A*sin(w*t+phi)-r)**2+2*b*((x-A*sin(w*t)-r)*(y-A*sin(w*t+phi)-r))))) * (-0.5*(1/C)*(a*(x-A*sin(w*t)-r)**2+c*(y-A*sin(w*t+phi)-r)**2+2*b*((x-A*sin(w*t)-r)*(y-A*sin(w*t+phi)-r)))+0.5* log((2*pi)**2*C))))
                XYind = XYind + 1
            f_pxy = array(pxy)
            fl_pxy = log(f_pxy)
            ent = -f_pxy * fl_pxy
            ent[isnan(ent)] = 0  # 0*log(0) terms contribute nothing
            infoA.append(ent.sum() - (sum(integ) * dt))
        infotot.append(infoA)
    return infotot
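
# Hedged usage sketch (the argument value is illustrative; the full sweep is
# slow). pyplot is imported above but unused; a natural follow-up is a
# heatmap of information over (correlation, phase):
#   info = array(infoCross(1.0))
#   plt.imshow(info, aspect='auto', origin='lower')
#   plt.xlabel('phase index'); plt.ylabel('correlation index')
#   plt.colorbar(); plt.show()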
|
[
"monti@bionet2"
] |
monti@bionet2
|