blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5b4c0437db6470f56799c36a888f998b93d784ab | f28bb4392681912679b41b7e2d1994404e68f772 | /blogpy_articles/migrations/0001_initial.py | 06f1ab5a22fd1e720c396dfc7a2916eaa90fb772 | [] | no_license | erfanmorsali/my_blogpy | 7385f3c15f02ff1c4fcfce11792b29b643d7df6f | 15bb670fe866612ee86dbe2968c708263a24a69e | refs/heads/master | 2023-02-22T08:06:24.704781 | 2021-01-30T09:08:45 | 2021-01-30T09:08:45 | 307,731,993 | 0 | 2 | null | 2020-11-03T15:50:10 | 2020-10-27T14:45:59 | Python | UTF-8 | Python | false | false | 1,118 | py | # Generated by Django 3.1.2 on 2020-10-27 19:43
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema migration for the articles app: creates ``Article``."""

    # This is the first migration of the app.
    initial = True

    dependencies = [
        # The author FK below targets AUTH_USER_MODEL, which may be a
        # swapped-in custom user model, hence the swappable dependency.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200, verbose_name='عنوان مقاله')),
                ('description', models.TextField(verbose_name='توضیحات')),
                ('image', models.ImageField(upload_to='articles/', verbose_name='تصویر مقاله')),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='تاریخ ثبت مقاله')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='نویسنده')),
            ],
        ),
    ]
| [
"erfanmorsalidev@gmail.com"
] | erfanmorsalidev@gmail.com |
edfb5453073a6d9575cdaf11a8e4117f7ae0ec0d | 5e05c6ec892d9a6bc33c0c0a9b6ce4c7135a83f4 | /cristianoronaldoyopmailcom_299/settings.py | d5a0c910720d8dd82153b4b4433f70e3d17e090e | [] | no_license | payush/cristianoronaldoyopmailcom-299 | 54eb5118840ea7ea68f077ffd7032a62a79880f3 | 52e5bb6ad599605b8cdf1088f9d7cdcf7c1a0265 | refs/heads/master | 2020-03-23T14:23:17.476546 | 2018-07-20T06:30:07 | 2018-07-20T06:30:07 | 141,672,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,157 | py | """
Django settings for cristianoronaldoyopmailcom_299 project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before deploying.
SECRET_KEY = 't!!vo0zfzvwkp-_r@$vuqjc=hanbxi^#jl1w9*^z8m(q)mlke8'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# NOTE: overridden further down with ['*'] after django-environ is set up.
ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites'
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'cristianoronaldoyopmailcom_299.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'cristianoronaldoyopmailcom_299.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# NOTE: this sqlite default is replaced below by env.db().

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/

STATIC_URL = '/static/'

# --- Deployment overrides (django-environ / whitenoise / allauth) ---
import environ

env = environ.Env()

# Accepts any host; acceptable only behind a trusted proxy.
ALLOWED_HOSTS = ['*']
SITE_ID = 1
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
# Database configuration is read from the DATABASE_URL environment variable.
DATABASES = {
    'default': env.db()
}
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
LOCAL_APPS = [
    'home',
]
THIRD_PARTY_APPS = [
    'rest_framework',
    'rest_framework.authtoken',
    'bootstrap4',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'allauth.socialaccount.providers.google',
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
# allauth
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
# NOTE(review): django-allauth documents the string 'none' here, not the
# Python None object -- confirm this behaves as intended.
ACCOUNT_EMAIL_VERIFICATION = None
LOGIN_REDIRECT_URL = '/'
| [
"ayushpuroheet@gmail.com"
] | ayushpuroheet@gmail.com |
7312d5425046c75f8b0c000b41197500ca3427cf | cbfaff10128807a84a91dfb52b720781bc07c29b | /airflow/dags/polygonetl_airflow/build_parse_dag.py | 4eb599cffa79ff6e4a4d8d3715a5496109d1fc11 | [
"MIT"
] | permissive | hansolavhilsen/polygon-etl | 7128516029bac7e112fe79c54e0508c086fa0f47 | ba45f5996a336f39864f6d29fbb8d7d7b40e76bb | refs/heads/main | 2023-07-03T11:25:39.039512 | 2021-08-09T11:21:47 | 2021-08-09T11:21:47 | 393,390,827 | 0 | 0 | MIT | 2021-08-06T13:48:54 | 2021-08-06T13:48:53 | null | UTF-8 | Python | false | false | 8,860 | py | from __future__ import print_function
import collections
import logging
import os
from datetime import datetime, timedelta
from glob import glob
from airflow import models
from airflow.operators.bash_operator import BashOperator
from airflow.operators.email_operator import EmailOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.sensors import ExternalTaskSensor
from google.cloud import bigquery
from polygonetl_airflow.bigquery_utils import create_view
from polygonetl_airflow.common import read_json_file, read_file
from polygonetl_airflow.parse.parse_logic import ref_regex, parse, create_dataset
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
dags_folder = os.environ.get('DAGS_FOLDER', '/home/airflow/gcs/dags')
def build_parse_dag(
        dag_id,
        dataset_folder,
        parse_destination_dataset_project_id,
        notification_emails=None,
        parse_start_date=datetime(2020, 5, 30),
        schedule_interval='0 0 * * *',
        parse_all_partitions=None,
        send_success_email=False
):
    """Build an Airflow DAG that parses Polygon tables defined in *dataset_folder*.

    Each ``*.json`` file in the folder becomes one parse task (run after the
    partition DAG finishes); each ``*.sql`` file becomes a BigQuery view
    created after all parse tasks succeed.

    Args:
        dag_id: Base DAG id; '_FULL' is appended when parse_all_partitions is truthy.
        dataset_folder: Folder holding the table definitions and view SQL files.
        parse_destination_dataset_project_id: GCP project that receives parsed tables.
        notification_emails: Comma-separated recipients for failure/success mail.
        parse_start_date: DAG start date.
        schedule_interval: Cron schedule for the DAG.
        parse_all_partitions: When truthy, reparse every partition instead of one day.
        send_success_email: When True (and emails given), add a success-mail task.

    Returns:
        The configured ``airflow.models.DAG`` instance.
    """
    logging.info('parse_all_partitions is {}'.format(parse_all_partitions))
    if parse_all_partitions:
        dag_id = dag_id + '_FULL'
    SOURCE_PROJECT_ID = 'public-data-finance'
    SOURCE_DATASET_NAME = 'crypto_polygon'
    PARTITION_DAG_ID = 'polygon_partition_dag'
    default_dag_args = {
        'depends_on_past': False,
        'start_date': parse_start_date,
        'email_on_failure': True,
        'email_on_retry': False,
        'retries': 5,
        'retry_delay': timedelta(minutes=5)
    }
    if notification_emails and len(notification_emails) > 0:
        default_dag_args['email'] = [email.strip() for email in notification_emails.split(',')]
    dag = models.DAG(
        dag_id,
        catchup=False,
        schedule_interval=schedule_interval,
        default_args=default_dag_args)
    # Validate definitions eagerly but defer the failure into a task.
    validation_error = None
    try:
        validate_definition_files(dataset_folder)
    except ValueError as e:
        validation_error = e
    # This prevents failing all dags as they are constructed in a loop in ethereum_parse_dag.py
    if validation_error is not None:
        def raise_validation_error(ds, **kwargs):
            raise validation_error
        validation_error_operator = PythonOperator(
            task_id='validation_error',
            python_callable=raise_validation_error,
            provide_context=True,
            execution_timeout=timedelta(minutes=10),
            dag=dag
        )
        return dag

    def create_parse_task(table_definition):
        # Returns (operator, list of ref() dependency table names).
        def parse_task(ds, **kwargs):
            client = bigquery.Client()
            parse(
                bigquery_client=client,
                table_definition=table_definition,
                ds=ds,
                source_project_id=SOURCE_PROJECT_ID,
                source_dataset_name=SOURCE_DATASET_NAME,
                destination_project_id=parse_destination_dataset_project_id,
                sqls_folder=os.path.join(dags_folder, 'resources/stages/parse/sqls'),
                parse_all_partitions=parse_all_partitions
            )
        table_name = table_definition['table']['table_name']
        parsing_operator = PythonOperator(
            task_id=table_name,
            python_callable=parse_task,
            provide_context=True,
            execution_timeout=timedelta(minutes=60),
            dag=dag
        )
        # ref(...) markers in contract_address express parse-order dependencies.
        contract_address = table_definition['parser']['contract_address']
        if contract_address is not None:
            ref_dependencies = ref_regex.findall(table_definition['parser']['contract_address'])
        else:
            ref_dependencies = []
        return parsing_operator, ref_dependencies

    def create_add_view_task(dataset_name, view_name, sql):
        # Builds the operator that (re)creates one BigQuery view.
        def create_view_task(ds, **kwargs):
            client = bigquery.Client()
            dest_table_name = view_name
            dest_table_ref = create_dataset(client, dataset_name, parse_destination_dataset_project_id).table(dest_table_name)
            print('View sql: \n' + sql)
            create_view(client, sql, dest_table_ref)
        create_view_operator = PythonOperator(
            task_id=f'create_view_{view_name}',
            python_callable=create_view_task,
            provide_context=True,
            execution_timeout=timedelta(minutes=10),
            dag=dag
        )
        return create_view_operator

    # Gate on the upstream partition DAG completing for the same period.
    wait_for_ethereum_load_dag_task = ExternalTaskSensor(
        task_id='wait_for_polygon_partition_dag',
        external_dag_id=PARTITION_DAG_ID,
        external_task_id='done',
        execution_delta=timedelta(minutes=30),
        priority_weight=0,
        mode='reschedule',
        poke_interval=5 * 60,
        timeout=60 * 60 * 12,
        dag=dag)
    json_files = get_list_of_files(dataset_folder, '*.json')
    logging.info(json_files)
    all_parse_tasks = {}
    task_dependencies = {}
    for json_file in json_files:
        table_definition = read_json_file(json_file)
        task, dependencies = create_parse_task(table_definition)
        wait_for_ethereum_load_dag_task >> task
        all_parse_tasks[task.task_id] = task
        task_dependencies[task.task_id] = dependencies
    checkpoint_task = BashOperator(
        task_id='parse_all_checkpoint',
        bash_command='echo parse_all_checkpoint',
        priority_weight=1000,
        dag=dag
    )
    # Wire ref() dependencies between parse tasks, then funnel into checkpoint.
    for task, dependencies in task_dependencies.items():
        for dependency in dependencies:
            if dependency not in all_parse_tasks:
                raise ValueError(
                    'Table {} is not found in the the dataset. Check your ref() in contract_address field.'.format(
                        dependency))
            all_parse_tasks[dependency] >> all_parse_tasks[task]
        all_parse_tasks[task] >> checkpoint_task
    final_tasks = [checkpoint_task]
    sql_files = get_list_of_files(dataset_folder, '*.sql')
    logging.info(sql_files)
    # TODO: Use folder name as dataset name and remove dataset_name in JSON definitions.
    dataset_name = os.path.basename(dataset_folder)
    full_dataset_name = 'polygon_' + dataset_name
    for sql_file in sql_files:
        sql = read_file(sql_file)
        base_name = os.path.basename(sql_file)
        view_name = os.path.splitext(base_name)[0]
        create_view_task = create_add_view_task(full_dataset_name, view_name, sql)
        checkpoint_task >> create_view_task
        final_tasks.append(create_view_task)
    if notification_emails and len(notification_emails) > 0 and send_success_email:
        send_email_task = EmailOperator(
            task_id='send_email',
            to=[email.strip() for email in notification_emails.split(',')],
            subject='Polygon ETL Airflow Parse DAG Succeeded',
            html_content='Polygon ETL Airflow Parse DAG Succeeded for {}'.format(dag_id),
            dag=dag
        )
        for final_task in final_tasks:
            final_task >> send_email_task
    return dag
def get_list_of_files(dataset_folder, filter='*.json'):
    """Return the paths in *dataset_folder* matching the *filter* glob pattern."""
    pattern = os.path.join(dataset_folder, filter)
    logging.info('get_list_of_files')
    logging.info(dataset_folder)
    logging.info(pattern)
    return list(glob(pattern))
def validate_definition_files(dataset_folder):
json_files = get_list_of_files(dataset_folder, '*.json')
dataset_folder_name = dataset_folder.split('/')[-1]
all_lowercase_table_names = []
for json_file in json_files:
file_name = json_file.split('/')[-1].replace('.json', '')
table_definition = read_json_file(json_file)
table = table_definition.get('table')
if not table:
raise ValueError(f'table is empty in file {json_file}')
dataset_name = table.get('dataset_name')
if not dataset_name:
raise ValueError(f'dataset_name is empty in file {json_file}')
if dataset_folder_name != dataset_name:
raise ValueError(f'dataset_name {dataset_name} is not equal to dataset_folder_name {dataset_folder_name}')
table_name = table.get('table_name')
if not table_name:
raise ValueError(f'table_name is empty in file {json_file}')
if file_name != table_name:
raise ValueError(f'file_name {file_name} doest match the table_name {table_name}')
all_lowercase_table_names.append(table_name.lower())
table_name_counts = collections.defaultdict(lambda: 0)
for table_name in all_lowercase_table_names:
table_name_counts[table_name] += 1
non_unique_table_names = [name for name, count in table_name_counts.items() if count > 1]
if len(non_unique_table_names) > 0:
raise ValueError(f'The following table names are not unique {",".join(non_unique_table_names)}') | [
"araa@connect.ust.hk"
] | araa@connect.ust.hk |
2d5c82c21a0762d88be5c748f3c0444cf149e6ba | e61467cee2ace448cd138cfe3891865d149e18aa | /freelancer/settings.py | 7c1b74b623f180adfcb49a58e06f760c01365f68 | [] | no_license | leonardo1909/freelancer | 159958c035717682a8b9a7487194d54748f32347 | 171615c957c9d9fcf25a2a5a37d0986b06c6608b | refs/heads/master | 2022-12-05T03:53:41.037631 | 2020-08-23T21:50:31 | 2020-08-23T21:50:31 | 289,768,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,185 | py | """
Django settings for freelancer project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'lw$thitde!4mc*a=(g8nul6-6k5bl!n#4*-e9rphq^nt9jzxsg'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'habilidades',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'freelancer.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'freelancer.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATIC_ROOT = 'vol/web/static'
MEDIA_ROOT = 'vol/web/media'
| [
"leonardo.jose94@outlook.com"
] | leonardo.jose94@outlook.com |
6a1f1091be991369d46e65e58cffcef7aa13c248 | 2d80f83b1687a890c0016c677e5941472a618ba8 | /hanbitco/__init__.py | 022ef4c5e728a5d8b3cd5e4968ed8af39c9d06ab | [
"MIT"
] | permissive | plutusds/hanbitco-api-python | d1ac616b853093069b1a99d1cf168b018e8c9ac7 | 0783d5f9be0668b42eba295a736cea08191806b3 | refs/heads/main | 2023-04-03T12:48:09.629754 | 2021-03-24T03:22:28 | 2021-03-24T03:22:28 | 348,755,381 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | from hanbitco.constants import OrderType, OrderSide, OrderStatus
from hanbitco.utils import create_order_payload
from hanbitco.api import HanbitcoAPI
| [
"kevink1103@gmail.com"
] | kevink1103@gmail.com |
77eaad8fff2a0f72251107e65f2fd3b4fb38914c | 4dc4edd6cb8a6e895cce46ac0c4ed7f9655cc0fd | /Array.py | 616ee4b264052741e83c7d02f25da045ce5e28e5 | [
"MIT"
] | permissive | KaloyanDragiev/Python_Jenkins_Splunk | 98380a68bb5002dd83c568ced18b371e6c1f77c7 | 8d0c6139fc4121baed0f0208f56ad6933a728db8 | refs/heads/main | 2023-04-10T16:49:15.387056 | 2021-04-27T12:44:49 | 2021-04-27T12:44:49 | 360,524,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | cars = ["Mercedes-Benz", "Audi", "BMW"]
for x in cars:
print(x)
| [
"noreply@github.com"
] | KaloyanDragiev.noreply@github.com |
bc9f514336676ddc97d1fc533448cc2a989f0a2d | 7dcbd6f01d33429925918afd3a7daff04e5b8346 | /rust_webrcon/oxide_commands.py | 6cb48c0a80a748d41c029cc7204d92a9602bad64 | [
"MIT"
] | permissive | thegreatstorm/rust_webrcon | 25514e92499672f462840c0711f11e7962bad1b0 | 2d0b670c57b30cceb5386334ed31e97e1a1f234e | refs/heads/master | 2022-11-30T01:42:26.948539 | 2020-08-13T23:05:14 | 2020-08-13T23:05:14 | 286,509,793 | 4 | 1 | null | 2020-08-10T23:30:22 | 2020-08-10T15:19:09 | Python | UTF-8 | Python | false | false | 2,951 | py | from rust_webrcon.utils.connection import connect_rust_rcon
def oxide_version(server_info):
    """Return the server response for the ``oxide.version`` console command."""
    return connect_rust_rcon(server_info, 'oxide.version')
def oxide_plugin_list(server_info):
    """Return the list of loaded Oxide plugins.

    Bug fix: this previously sent ``oxide.version`` (a copy/paste of
    :func:`oxide_version`); the console command that lists plugins is
    ``oxide.plugins``.
    """
    command = 'oxide.plugins'
    response = connect_rust_rcon(server_info, command)
    return response
def oxide_get_user_info(server_info, user):
    """Show permission info for *user* via ``oxide.show user``."""
    return connect_rust_rcon(server_info, 'oxide.show user {}'.format(user))


def oxide_get_group_info(server_info, group):
    """Show permission info for *group* via ``oxide.show group``."""
    return connect_rust_rcon(server_info, 'oxide.show group {}'.format(group))


def oxide_create_group(server_info, group):
    """Create permission group *group*."""
    return connect_rust_rcon(server_info, 'oxide.group add {}'.format(group))


def oxide_remove_group(server_info, group):
    """Delete permission group *group*."""
    return connect_rust_rcon(server_info, 'oxide.group remove {}'.format(group))


def oxide_add_user_group(server_info, steam_id, group):
    """Add the player *steam_id* to *group*."""
    return connect_rust_rcon(server_info, 'oxide.usergroup add {} {}'.format(steam_id, group))


def oxide_remove_user_group(server_info, steam_id, group):
    """Remove the player *steam_id* from *group*."""
    return connect_rust_rcon(server_info, 'oxide.usergroup remove {} {}'.format(steam_id, group))


def oxide_load_plugin(server_info, plugin_name):
    """Load the plugin *plugin_name*."""
    return connect_rust_rcon(server_info, 'oxide.load {}'.format(plugin_name))


def oxide_unload_plugin(server_info, plugin_name):
    """Unload the plugin *plugin_name*."""
    return connect_rust_rcon(server_info, 'oxide.unload {}'.format(plugin_name))


def oxide_reload_plugin(server_info, plugin_name):
    """Reload the plugin *plugin_name*."""
    return connect_rust_rcon(server_info, 'oxide.reload {}'.format(plugin_name))


def oxide_reload_all_plugins(server_info):
    """Reload every loaded plugin (``oxide.reload *``)."""
    return connect_rust_rcon(server_info, 'oxide.reload *')


def oxide_grant_user_perm(server_info, steam_id, permission):
    """Grant *permission* to the player *steam_id*."""
    return connect_rust_rcon(server_info, 'oxide.grant user {} {}'.format(steam_id, permission))


def oxide_revoke_user_perm(server_info, steam_id, permission):
    """Revoke *permission* from the player *steam_id*."""
    return connect_rust_rcon(server_info, 'oxide.revoke user {} {}'.format(steam_id, permission))


def oxide_grant_group_perm(server_info, steam_id, permission):
    """Grant *permission* to a group.

    NOTE(review): the first argument is sent as a group name despite being
    called ``steam_id`` -- confirm callers pass a group here.
    """
    return connect_rust_rcon(server_info, 'oxide.grant group {} {}'.format(steam_id, permission))
def oxide_revoke_group_perm(server_info, steam_id, permission):
command = 'oxide.revoke group {} {}'.format(steam_id, permission)
response = connect_rust_rcon(server_info, command)
return response | [
"djasso@splunk.com"
] | djasso@splunk.com |
95c6dc627b9d0477a4aace75cad812e43a612748 | abc6015c3c4bc7b15de81d8bdc920674bfd63cd6 | /TGNDA_fog.py | 160e9f1c131c685fd17c41a5c7fffd3987c47d48 | [] | no_license | tnda-har/tda4fog | 2ff7c33efd8d557ad6a095e3b6b52e28c815724c | 3849757da45308cbb63b9bd5e4fbd80ea2d17f5a | refs/heads/main | 2023-07-17T08:15:41.513970 | 2021-09-03T07:08:12 | 2021-09-03T07:08:12 | 368,104,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,906 | py | # %load test.py
import math
import os
import random
import time
from numpy import interp
import pandas as pd
import numpy as np
from gtda.diagrams import Scaler, BettiCurve, PersistenceLandscape, Silhouette
from gtda.homology import VietorisRipsPersistence
from gtda.pipeline import make_pipeline
from gtda.time_series import TakensEmbedding
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn import metrics
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
from sklearn.metrics import roc_curve, auc
import warnings
def get_random_list(start, stop, n):
    """Return *n* distinct values sampled from the inclusive range [start, stop]."""
    pool = list(range(start, stop + 1))
    shuffle_n(pool, n)
    return pool[-n:]
def shuffle_n(arr, n):
    """Partially shuffle *arr* in place so its last *n* slots hold a random sample."""
    random.seed(time.time())
    last = len(arr) - 1
    for i in range(last, last - n, -1):
        j = random.randint(0, i)
        arr[i], arr[j] = arr[j], arr[i]
def create_window(act, window_length, dataframe):
    """Select the rows of *dataframe* whose ``Action`` equals *act*, trimmed so
    each contiguous run of rows is a whole number of windows long.

    A "group" is a run of consecutive row indices sharing the action label.
    Each group is truncated to a multiple of ``window_length * fs`` samples
    and the kept rows are concatenated into one frame.
    """
    indices = list(dataframe[dataframe.Action == act].index)
    groups = []
    temp = []
    group_count = 0
    for i in range(len(indices)):
        # The final index always closes the current group.
        if i == len(indices) - 1:
            temp.append(indices[i])
            groups.append(temp)
            temp = []
            break
        temp.append(indices[i])
        # A gap in the index sequence ends the current group.
        if indices[i] + 1 != indices[i + 1]:
            group_count += 1
            groups.append(temp)
            temp = []
    fs = 64  # samples per second -- presumably the sensor rate; TODO confirm
    final_dataframe = pd.DataFrame()
    for i in groups:
        # Keep only as many leading samples as fill whole windows.
        required = math.floor(len(i) / (window_length * fs))
        req_index = i[0:(required * window_length * fs)]
        # NOTE(review): index labels are used with .iloc, which assumes the
        # dataframe has a default RangeIndex -- confirm for other inputs.
        final_dataframe = pd.concat([final_dataframe, dataframe.iloc[req_index, :]], axis=0)
    return final_dataframe
def sbj_df(df, sbj='S01'):
    """Return the windowed frames for actions 0, 1 and 2 of one subject."""
    frames = []
    for action in (0, 1, 2):
        windowed = create_window(action, 2, df)
        frames.append(windowed[windowed['name'] == sbj])
    return tuple(frames)
def gtda(dataframe, w=128):
    """Compute topological features for non-overlapping windows of *dataframe*.

    Drops the metadata columns, cuts the signal into windows of *w* rows
    (channels become the first axis), runs a Takens embedding +
    Vietoris-Rips persistence pipeline, and summarises the diagrams.

    Returns:
        Tuple of (mean Betti curve, summed persistence landscape,
        mean silhouette), each aggregated over the homology-dimension axis.
    """
    all_data = []
    dataframe = dataframe.drop(columns=['time', 'Action', 'name'])
    for i in range(0, len(dataframe), w):
        data = dataframe.iloc[i:i + w]
        data = data.to_numpy().transpose()
        # Drop the trailing partial window, if any.
        if data.shape[1] == w:
            all_data.append(data)
    all_data = np.array(all_data)
    steps = [TakensEmbedding(time_delay=5, dimension=3),
             VietorisRipsPersistence(),
             Scaler()
             ]
    tda_pipe = make_pipeline(*steps)
    diagrams = tda_pipe.fit_transform(all_data)
    BC = BettiCurve(n_bins=50).fit_transform(diagrams)
    PL = PersistenceLandscape(n_bins=50).fit_transform(diagrams)
    SL = Silhouette(n_bins=50).fit_transform(diagrams)
    return np.mean(BC, axis=1), np.sum(PL, axis=1), np.mean(SL, axis=1)
def training(x, y, sbj_name):
    """Train a random forest on (x, y), print metrics, and plot per-fold ROC.

    First evaluates on a 70/30 holdout split (classification report + AUC),
    then runs 5-fold cross-validation plotting each fold's ROC curve and
    the mean curve for subject *sbj_name*.
    """
    y = np.array(y)
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, shuffle=True)
    model = RandomForestClassifier(random_state=1)
    model.fit(x_train, y_train)
    y_pre = model.predict(x_test)
    print(classification_report(y_test, y_pre, digits=4))
    pre_y = model.predict_proba(x_test)[:, 1]
    fpr, tpr, threshold = metrics.roc_curve(y_test, pre_y)
    roc_auc = metrics.auc(fpr, tpr)
    print('AUC:', roc_auc)
    print('\n')
    cv = KFold(n_splits=5, shuffle=True, random_state=None)
    tprs = []
    aucs = []
    mean_fpr = np.linspace(0, 1, 100)
    i = 0
    for train_index, test_index in cv.split(x):
        X_train, X_test = x[train_index], x[test_index]
        Y_train, Y_test = y[train_index], y[test_index]
        # NOTE(review): build_model aliases model, so each fold refits the
        # same estimator object rather than a fresh clone -- confirm intended.
        build_model = model
        build_model.fit(X_train, Y_train.astype("int"))
        Y_pre = build_model.predict_proba(X_test)[:, 1]
        fpr, tpr, thresholds = roc_curve(Y_test, Y_pre)
        # Interpolate each fold's TPR onto the common FPR grid.
        tprs.append(interp(mean_fpr, fpr, tpr))
        tprs[-1][0] = 0.0
        roc_auc = auc(fpr, tpr)
        aucs.append(roc_auc)
        plt.plot(fpr, tpr, lw=1, alpha=.6, label='ROC fold %d(AUC=%0.2f)' % (i, roc_auc))
        i += 1
    plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Luck', alpha=.8)
    mean_tpr = np.mean(tprs, axis=0)
    mean_tpr[-1] = 1.0
    mean_auc = auc(mean_fpr, mean_tpr)  # mean AUC over the folds
    plt.plot(mean_fpr, mean_tpr, color='b', label=r'Mean ROC (AUC=%0.2f)' % mean_auc, lw=2, alpha=.8)
    std_tpr = np.std(tprs, axis=0)
    tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
    tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
    print('mean auc', mean_auc)
    # NOTE(review): the x argument here is mean_tpr; mean_fpr was probably
    # intended for the shaded std band -- confirm.
    plt.fill_between(mean_tpr, tprs_lower, tprs_upper, color='gray', alpha=.2)
    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title(sbj_name + 'FOG_ROC')
    plt.legend(loc='lower right')
    plt.show()
    return
def training_plot(x, y, m):
    """Train/evaluate like :func:`training` but only draw one mean ROC line.

    Intended for overlaying several feature sets (label *m*) on the current
    matplotlib figure; the caller is responsible for figure/legend/show.
    """
    y = np.array(y)
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, shuffle=True)
    model = RandomForestClassifier(random_state=1)
    model.fit(x_train, y_train)
    y_pre = model.predict(x_test)
    print(classification_report(y_test, y_pre, digits=4))
    pre_y = model.predict_proba(x_test)[:, 1]
    fpr, tpr, threshold = metrics.roc_curve(y_test, pre_y)
    roc_auc = metrics.auc(fpr, tpr)
    print('AUC:', roc_auc)
    print('\n')
    cv = KFold(n_splits=5, shuffle=True, random_state=None)
    tprs = []
    aucs = []
    mean_fpr = np.linspace(0, 1, 100)
    i = 0
    for train_index, test_index in cv.split(x):
        X_train, X_test = x[train_index], x[test_index]
        Y_train, Y_test = y[train_index], y[test_index]
        build_model = model
        build_model.fit(X_train, Y_train.astype("int"))
        Y_pre = build_model.predict_proba(X_test)[:, 1]
        fpr, tpr, thresholds = roc_curve(Y_test, Y_pre)
        tprs.append(interp(mean_fpr, fpr, tpr))
        tprs[-1][0] = 0.0
        roc_auc = auc(fpr, tpr)
        aucs.append(roc_auc)
        i += 1
    mean_tpr = np.mean(tprs, axis=0)
    mean_tpr[-1] = 1.0
    mean_auc = auc(mean_fpr, mean_tpr)
    print('mean auc', mean_auc)
    # NOTE(review): fpr/tpr here are from the LAST fold, while the label
    # reports the mean AUC -- confirm whether mean_fpr/mean_tpr were intended.
    plt.plot(fpr, tpr, lw=1, alpha=.6, label='FOG AUC of %s = %0.3f' % (m, mean_auc))
def clean_nan(x):
    """Return *x* with NaN replaced by 0.0 and +/-inf clamped to 0."""
    return np.nan_to_num(x, posinf=0, neginf=0)
warnings.filterwarnings("ignore")

# Load the pre-extracted FOG dataset (one row per sample, columns include
# time, Action, name plus signal channels).
df = pd.read_csv('TGNDA_for_FOG2.csv')
subject_list = ['S01', 'S02', 'S03', 'S05', 'S06', 'S07', 'S08', 'S09']

# Accumulators for the per-feature and fused datasets across all subjects.
whole_x = []
whole_y = []
whole_bc_x = []
whole_bc_y = []
whole_pl_x = []
whole_pl_y = []
whole_sl_x = []
whole_sl_y = []
for sbj in subject_list:
    # Actions 0/2 are treated as "normal", action 1 as FOG.
    DF0, DF1, DF2 = sbj_df(df, sbj=sbj)
    NOR_DF = pd.concat([DF0, DF2])
    nor_bc, nor_pl, nor_sl = gtda(NOR_DF)
    fog_bc, fog_pl, fog_sl = gtda(DF1)
    print(len(fog_bc))
    fog = [fog_bc, fog_pl, fog_sl]
    normal = [nor_bc, nor_pl, nor_sl]
    # Subsample normal windows to match the FOG window count (balance classes).
    idx = get_random_list(0, len(fog_bc) - 1, int(len(fog_bc) * 1))
    for length, nor_feature in enumerate(normal):
        fog_feature = fog[length]
        nor_feature = nor_feature[idx]
        X = np.concatenate([nor_feature, fog_feature], axis=0)
        Y = []
        Y += len(nor_feature) * [0]
        Y += len(fog_feature) * [1]
        if length == 0:
            whole_bc_x.append(X)
            whole_bc_y += Y
        elif length == 1:
            whole_pl_x.append(X)
            whole_pl_y += Y
        elif length == 2:
            whole_sl_x.append(X)
            whole_sl_y += Y
        # Prints "feature #<n>" (Chinese) for the current subject.
        print(sbj, '第{}个特征'.format(length + 1))
        training(X, Y, sbj)
    # Fuse the three feature families along the feature axis.
    all_fog = np.concatenate(fog, axis=1)
    all_nor = np.concatenate(normal, axis=1)
    all_nor = all_nor[idx]
    all_X = np.concatenate([all_nor, all_fog], axis=0)
    all_Y = []
    all_Y += len(all_nor) * [0]
    all_Y += len(all_fog) * [1]
    # Prints "feature fusion" (Chinese).
    print(sbj, '特征融合')
    training(all_X, all_Y, sbj)
    whole_x.append(all_X)
    whole_y += all_Y

# All-subject evaluation: one overlaid ROC plot per feature set + fusion.
whole_bc_x = np.concatenate(whole_bc_x, axis=0)
whole_pl_x = np.concatenate(whole_pl_x, axis=0)
whole_sl_x = np.concatenate(whole_sl_x, axis=0)
whole_x = np.concatenate(whole_x, axis=0)
whole = 'All Person'
plt.figure()
print('all person BC')
training_plot(whole_bc_x, whole_bc_y, 'BC')
print('all person PL')
training_plot(whole_pl_x, whole_pl_y, 'PL')
print('all person SL')
training_plot(whole_sl_x, whole_sl_y, 'SL')
print('all person whole')
training_plot(whole_x, whole_y, 'Fusion')
plt.title(whole + ' FOG ROC')
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Luck', alpha=.8)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
| [
"noreply@github.com"
] | tnda-har.noreply@github.com |
4334d50846ce9d51cd46cd5202f5b2cccab0c0b3 | 64806194523cf79e3d948f2e3e4bf28d1002f6d3 | /mkb/evaluation/evaluation.py | b6c655ad1573d8f4bb6183232e9ee1bbe7d415a8 | [
"MIT"
] | permissive | EmanuelaBoros/mkb | 6cfef3797db9d19d5e93c0694f6c344dc9a6899d | 3a75f10a5cf86fb74bb349530982130b6e35740a | refs/heads/master | 2023-06-27T12:46:50.041189 | 2021-08-06T17:23:05 | 2021-08-06T17:23:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,019 | py | from creme import stats
import pandas as pd
from torch.utils import data
import torch
import collections
from ..datasets import base
from ..utils import Bar
__all__ = ['Evaluation']
class Evaluation:
    """Evaluate model on selected dataset.
    Evaluate metrics Hits@1, Hits@3, Hits@10, MRR, MR on entities, relations and tails.
    Returns distincts metrics for link prediction ie, entities or relations and relation
    predictions.
    Parameters:
        entities (dict): Entities of the dataset.
        relations (dict): Relations of the dataset.
        batch_size (int): Size of the batch.
        true_triples (list): Available triplets to filter_bias metrics. If not specified, `Evaluation`
            will mesure raw metrics. Usually we filter_bias triplets based on train, validation and test
            datasets.
        device (str): cpu or cuda.
        num_workers (str): Number of workers for pytorch dataset.
    Example:
        >>> from mkb import datasets
        >>> from mkb import evaluation
        >>> from mkb import models
        >>> from mkb import losses
        >>> from mkb import sampling
        >>> import torch
        >>> _ = torch.manual_seed(42)
        >>> train = [
        ...     (0, 0, 1),
        ...     (0, 1, 1),
        ...     (2, 0, 3),
        ...     (2, 1, 3),
        ... ]
        >>> valid = [
        ...     (0, 0, 1),
        ...     (2, 1, 3),
        ... ]
        >>> test = [
        ...     (0, 0, 1),
        ...     (2, 1, 3),
        ... ]
        >>> entities = {
        ...    'e0': 0,
        ...    'e1': 1,
        ...    'e2': 2,
        ...    'e3': 3,
        ... }
        >>> relations = {
        ...     'r0': 0,
        ...     'r1': 1,
        ... }
        >>> dataset = datasets.Dataset(
        ...     train = train,
        ...     valid = valid,
        ...     test = test,
        ...     entities = entities,
        ...     relations = relations,
        ...     batch_size = 2,
        ...     seed = 42,
        ...     shuffle = False,
        ... )
        >>> negative_sampling = sampling.NegativeSampling(
        ...     size = 2,
        ...     train_triples = dataset.train,
        ...     entities = dataset.entities,
        ...     relations = dataset.relations,
        ...     seed = 42,
        ... )
        >>> model = models.RotatE(hidden_dim=3, entities=dataset.entities,
        ...    relations=dataset.relations, gamma=1)
        >>> optimizer = torch.optim.Adam(
        ...    filter(lambda p: p.requires_grad, model.parameters()),
        ...    lr = 0.5,
        ... )
        >>> loss = losses.Adversarial(alpha=0.5)
        >>> for _ in range(5):
        ...     for data in dataset:
        ...         sample, weight, mode = data['sample'], data['weight'], data['mode']
        ...         positive_score = model(sample)
        ...         negative_sample = negative_sampling.generate(sample=sample, mode=mode)
        ...         negative_score = model(sample, negative_sample, mode)
        ...         loss(positive_score, negative_score, weight).backward()
        ...         _ = optimizer.step()
        >>> model = model.eval()
        >>> validation = evaluation.Evaluation(true_triples=train + valid + test,
        ...     entities=entities, relations=relations, batch_size=2)
        >>> validation.eval(model=model, dataset=test)
        {'MRR': 0.5417, 'MR': 2.25, 'HITS@1': 0.25, 'HITS@3': 1.0, 'HITS@10': 1.0}
        >>> validation.eval_relations(model=model, dataset=test)
        {'MRR_relations': 1.0, 'MR_relations': 1.0, 'HITS@1_relations': 1.0, 'HITS@3_relations': 1.0, 'HITS@10_relations': 1.0}
        >>> validation.detail_eval(model=model, dataset=test, threshold=1.5)
                   head                                tail                           metadata
                    MRR   MR HITS@1 HITS@3 HITS@10     MRR   MR HITS@1 HITS@3 HITS@10 frequency
        relation
        1_1      0.0000  0.0    0.0    0.0     0.0  0.0000  0.0    0.0    0.0     0.0       0.0
        1_M      0.0000  0.0    0.0    0.0     0.0  0.0000  0.0    0.0    0.0     0.0       0.0
        M_1      0.0000  0.0    0.0    0.0     0.0  0.0000  0.0    0.0    0.0     0.0       0.0
        M_M      0.6667  2.0    0.5    1.0     1.0  0.4167  2.5    0.0    1.0     1.0       1.0
        >>> validation.types_relations(model = model, dataset=test, threshold=1.5)
        {'r0': 'M_M', 'r1': 'M_M'}
    References:
        1. [RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space](https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding)
    """

    def __init__(self, entities, relations, batch_size, true_triples=None, device='cpu', num_workers=1):
        self.entities = entities
        self.relations = relations
        # `true_triples` previously defaulted to a mutable `[]`, which is shared
        # across instances; use a None sentinel and create a fresh list instead.
        self.true_triples = true_triples if true_triples is not None else []
        self.batch_size = batch_size
        self.device = device
        self.num_workers = num_workers

    def _get_test_loader(self, triples, mode):
        """Build a DataLoader over `triples` for the given corruption `mode`
        ('head-batch' or 'tail-batch'), filtering bias with `self.true_triples`.
        """
        test_dataset = base.TestDataset(
            triples=triples, true_triples=self.true_triples, entities=self.entities,
            relations=self.relations, mode=mode)
        return data.DataLoader(
            dataset=test_dataset, batch_size=self.batch_size, num_workers=self.num_workers,
            collate_fn=base.TestDataset.collate_fn)

    def get_entity_stream(self, dataset):
        """Get stream dedicated to link prediction."""
        head_loader = self._get_test_loader(triples=dataset, mode='head-batch')
        tail_loader = self._get_test_loader(triples=dataset, mode='tail-batch')
        return [head_loader, tail_loader]

    def get_relation_stream(self, dataset):
        """Get stream dedicated to relation prediction."""
        test_dataset = base.TestDatasetRelation(
            triples=dataset, true_triples=self.true_triples, entities=self.entities,
            relations=self.relations)
        return data.DataLoader(
            dataset=test_dataset, batch_size=self.batch_size, num_workers=self.num_workers,
            collate_fn=base.TestDatasetRelation.collate_fn)

    def eval(self, model, dataset):
        """Evaluate selected model with the metrics: MRR, MR, HITS@1, HITS@3, HITS@10"""
        metrics = collections.OrderedDict({
            metric: stats.Mean()
            for metric in ['MRR', 'MR', 'HITS@1', 'HITS@3', 'HITS@10']
        })
        with torch.no_grad():
            # Average the metrics over both corruption directions (head & tail).
            for test_set in self.get_entity_stream(dataset):
                metrics = self.compute_score(
                    model=model,
                    test_set=test_set,
                    metrics=metrics,
                    device=self.device
                )
        return {name: round(metric.get(), 4) for name, metric in metrics.items()}

    def eval_relations(self, model, dataset):
        """Evaluate relation prediction with MRR, MR, HITS@1, HITS@3, HITS@10.

        Metric names are suffixed with ``_relations`` to distinguish them from
        the link-prediction metrics returned by :meth:`eval`.
        """
        metrics = collections.OrderedDict({
            f'{metric}': stats.Mean()
            for metric in ['MRR', 'MR', 'HITS@1', 'HITS@3', 'HITS@10']
        })
        with torch.no_grad():
            metrics = self.compute_score(
                model=model,
                test_set=self.get_relation_stream(dataset),
                metrics=metrics,
                device=self.device
            )
        return {f'{name}_relations': round(metric.get(), 4) for name, metric in metrics.items()}

    @classmethod
    def compute_score(cls, model, test_set, metrics, device):
        """Accumulate filtered ranking metrics for `model` over `test_set`.

        Temporarily switches the model to eval mode (restored afterwards).
        Returns the updated `metrics` mapping.
        """
        training = False
        if model.training:
            model = model.eval()
            training = True
        bar = Bar(dataset=test_set, update_every=1)
        bar.set_description('Evaluation')
        for data in bar:
            sample = data['sample'].to(device)
            negative_sample = data['negative_sample'].to(device)
            filter_bias = data['filter_bias'].to(device)
            mode = data['mode']
            if mode == 'head-batch' or mode == 'tail-batch':
                score = model(
                    sample=sample,
                    negative_sample=negative_sample,
                    mode=mode
                )
            elif mode == 'relation-batch':
                score = model(negative_sample)
            # filter_bias pushes already-known true triples out of the ranking.
            score += filter_bias
            argsort = torch.argsort(score, dim=1, descending=True)
            # Pick the column of `sample` holding the entity/relation to rank
            # (was an `if/if/elif` chain; now a single exclusive chain).
            if mode == 'head-batch':
                positive_arg = sample[:, 0]
            elif mode == 'relation-batch':
                positive_arg = sample[:, 1]
            elif mode == 'tail-batch':
                positive_arg = sample[:, 2]
            batch_size = sample.size(0)
            for i in range(batch_size):
                # Notice that argsort is not ranking
                ranking = (argsort[i, :] == positive_arg[i]).nonzero()
                assert ranking.size(0) == 1
                ranking = 1 + ranking.item()
                # ranking + 1 is the true ranking used in evaluation metrics
                metrics['MRR'].update(1.0/ranking)
                metrics['MR'].update(ranking)
                metrics['HITS@1'].update(
                    1.0 if ranking <= 1 else 0.0)
                metrics['HITS@3'].update(
                    1.0 if ranking <= 3 else 0.0)
                metrics['HITS@10'].update(
                    1.0 if ranking <= 10 else 0.0)
        if training:
            model = model.train()
        return metrics

    @classmethod
    def compute_detailled_score(cls, model, test_set, metrics, types_relations, device):
        """Like :meth:`compute_score`, but bucket metrics per relation category
        (1_1 / 1_M / M_1 / M_M, looked up in `types_relations`) and per mode.
        """
        training = False
        if model.training:
            model = model.eval()
            training = True
        bar = Bar(dataset=test_set, update_every=1)
        bar.set_description('Evaluation')
        for data in bar:
            sample = data['sample'].to(device)
            negative_sample = data['negative_sample'].to(device)
            filter_bias = data['filter_bias'].to(device)
            mode = data['mode']
            score = model(
                sample=sample,
                negative_sample=negative_sample,
                mode=mode,
            )
            score += filter_bias
            argsort = torch.argsort(score, dim=1, descending=True)
            if mode == 'head-batch':
                positive_arg = sample[:, 0]
            elif mode == 'tail-batch':
                positive_arg = sample[:, 2]
            batch_size = sample.size(0)
            for i in range(batch_size):
                # Notice that argsort is not ranking
                ranking = (argsort[i, :] == positive_arg[i]).nonzero()
                assert ranking.size(0) == 1
                ranking = 1 + ranking.item()
                type_relation = types_relations[
                    sample[:, 1][i].item()
                ]
                # ranking + 1 is the true ranking used in evaluation metrics
                metrics[mode][type_relation]['MRR'].update(1.0/ranking)
                metrics[mode][type_relation]['MR'].update(ranking)
                metrics[mode][type_relation]['HITS@1'].update(
                    1.0 if ranking <= 1 else 0.0)
                metrics[mode][type_relation]['HITS@3'].update(
                    1.0 if ranking <= 3 else 0.0)
                metrics[mode][type_relation]['HITS@10'].update(
                    1.0 if ranking <= 10 else 0.0)
        if training:
            model = model.train()
        return metrics

    def types_relations(self, model, dataset, threshold=1.5):
        """
        Divide input dataset relations into different categories (i.e. ONE-TO-ONE, ONE-TO-MANY,
        MANY-TO-ONE and MANY-TO-MANY) according to the mapping properties of relationships.
        """
        stat_df = pd.DataFrame(self.true_triples)
        stat_df.columns = ['head', 'relation', 'tail']
        # Average number of heads per (tail, relation) and tails per (head, relation).
        mean_head = stat_df[['head', 'relation', 'tail']].groupby(
            ['tail', 'relation']).count().groupby('relation').mean()
        mean_tail = stat_df[['head', 'relation', 'tail']].groupby(
            ['head', 'relation']).count().groupby('relation').mean()
        mean_relations = pd.concat(
            [mean_head, mean_tail], axis='columns').reset_index()
        mean_relations['head'] = mean_relations['head'].apply(
            lambda x: '1' if x <= threshold else 'M')
        mean_relations['tail'] = mean_relations['tail'].apply(
            lambda x: '1' if x <= threshold else 'M')
        mean_relations['type'] = mean_relations['head'] + \
            '_' + mean_relations['tail']
        # NOTE(review): to_dict() keys here are the DataFrame's positional index
        # (0..n-1) — this assumes relation ids form the contiguous sorted range
        # 0..n-1 and all appear in true_triples; confirm for sparse id spaces.
        mapping_type_relations = mean_relations.to_dict()['type']
        relations_id = {value: key for key, value in self.relations.items()}
        return {
            relations_id[key]: value for key, value in mapping_type_relations.items()
        }

    def detail_eval(self, model, dataset, threshold=1.5):
        """
        Divide input dataset relations into different categories (i.e. ONE-TO-ONE, ONE-TO-MANY,
        MANY-TO-ONE and MANY-TO-MANY) according to the mapping properties of relationships.
        Reference:
            1. [Bordes, Antoine, et al. "Translating embeddings for modeling multi-relational data." Advances in neural information processing systems. 2013.](http://papers.nips.cc/paper/5071-translating-embeddings-for-modeling-multi-relational-data.pdf)
        """
        mapping_type_relations = self.types_relations(
            model=model,
            dataset=dataset,
            threshold=threshold
        )
        mapping_type_relations = {
            self.relations[key]: value for key, value in mapping_type_relations.items()
        }
        types_relations = ['1_1', '1_M', 'M_1', 'M_M']
        # One metric accumulator per (mode, relation category, metric name).
        metrics = collections.OrderedDict({
            'head-batch': collections.OrderedDict({}),
            'tail-batch': collections.OrderedDict({})
        })
        for mode in ['head-batch', 'tail-batch']:
            for type_relation in types_relations:
                metrics[mode][type_relation] = collections.OrderedDict({
                    f'{metric}': stats.Mean()
                    for metric in ['MRR', 'MR', 'HITS@1', 'HITS@3', 'HITS@10']
                })
        with torch.no_grad():
            for test_set in self.get_entity_stream(dataset):
                metrics = self.compute_detailled_score(
                    model=model,
                    test_set=test_set,
                    metrics=metrics,
                    types_relations=mapping_type_relations,
                    device=self.device
                )
        for mode in ['head-batch', 'tail-batch']:
            for type_relation in types_relations:
                for metric in ['MRR', 'MR', 'HITS@1', 'HITS@3', 'HITS@10']:
                    metrics[mode][type_relation][metric] = round(
                        metrics[mode][type_relation][metric].get(), 4)
        # Pivot the nested dict into a (relation-category x metric) table with a
        # two-level column index: head / tail / metadata.
        results = pd.DataFrame(metrics)
        head = pd.DataFrame(results['head-batch'].values.tolist())
        tail = pd.DataFrame(results['tail-batch'].values.tolist())
        head.columns = pd.MultiIndex.from_product([["head"], head.columns])
        tail.columns = pd.MultiIndex.from_product([["tail"], tail.columns])
        results = pd.concat([head, tail], axis='columns')
        results = results.set_index(pd.Series(['1_1', '1_M', 'M_1', 'M_M']))
        results.index.name = 'relation'
        # Add frequency of each type of relation:
        frequency = collections.OrderedDict()
        for type_relation in types_relations:
            frequency[type_relation] = 0
        for _, type_relation in mapping_type_relations.items():
            frequency[type_relation] += 1
        for type_relation in types_relations:
            frequency[type_relation] /= len(mapping_type_relations)
        frequency = pd.DataFrame.from_dict(
            frequency,
            orient='index',
            columns=['frequency']
        )
        frequency.columns = pd.MultiIndex.from_product(
            [["metadata"], frequency.columns]
        )
        results = pd.concat([results, frequency], axis='columns')
        return results
| [
"raphael.sourty@gmail.com"
] | raphael.sourty@gmail.com |
7175e51e0ffb40cc2db8f90702cd73f0d831deeb | 415a614a54eae170e926154593bab34aac4695fb | /python-dictionary/small1.py | 9b530d8d8c1ce6e84a971309d11e75a936d06b27 | [] | no_license | DasomAnH/python | 434f8d16ad5a437c80efc62c0ae16ea6437a299b | 14b248f71bc0784f433e5e8e38703f97c5d98cb5 | refs/heads/master | 2022-12-12T22:35:56.726214 | 2020-08-31T16:32:13 | 2020-08-31T16:32:13 | 289,390,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | phonebook_dict = {
'Alice': '703-493-1834',
'Bob': '857-384-1234',
'Elizabeth': '484-584-2923'
}
# retrieve value
person = phonebook_dict['Elizabeth']
print(person)

# add a value
phonebook_dict['kareem'] = '938-489-1234'

# delete a contact. (A previous assignment of 'number hot found' to 'Alice'
# was a dead store — it was immediately overwritten by the del below.)
del phonebook_dict['Alice']
# alternative that also returns the removed value:
# removed_contact = phonebook_dict.pop('Alice')

# edit an existing number
phonebook_dict['Bob'] = "968-345-2345"
print(phonebook_dict)
| [
"ektha116@gmail.com"
] | ektha116@gmail.com |
33c9ea501c3e9f897d843332978df996557de4fc | 905a2ac66149377da7832c7457201af66da4d846 | /order/tests.py | 3f1ae1045eb4c8558a88cb3219794371ee1ad788 | [] | no_license | no0xgold/one-page-ecommerce-web-app | d185ad0ba2369c56e9482471328f1432a90a65e2 | ab56f63e59e68365753b4b5c4f3a0faf95a21a8e | refs/heads/main | 2023-04-17T20:39:20.845733 | 2021-04-30T13:21:35 | 2021-04-30T13:21:35 | 342,781,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py |
from django.conf import settings
from django.contrib.auth import get_user_model
from django.test import TestCase
# Resolve the active user model once at import time (supports custom AUTH_USER_MODEL).
User=get_user_model()
# Create your tests here.
class OrderTestCase(TestCase):
    """Tests around order creation.

    NOTE(review): ``test_create_order`` references ``Order`` and ``product_a``,
    neither of which is defined or imported in this module, so it will raise
    NameError when run — confirm the intended imports/fixtures.
    """
    def setUp(self):
        # Create a staff superuser that the tests act as.
        user_a_pass ='amira12234'
        self.user_a_pass=user_a_pass
        user_a = User(username='amira', email='amira@amira.com')
        user_a.is_staff= True
        user_a.is_superuser = True
        user_a.set_password(user_a_pass)
        user_a.save()
        self.user_a = user_a
    def test_create_order(self):
        # NOTE(review): Order and product_a are undefined here — see class docstring.
        obj = Order.objects.create(user=self.user_a, product=product_a)
| [
"noxraktor@gmail.com"
] | noxraktor@gmail.com |
21905c5fbd4025d75f10237b226ebf37b75e9435 | f4d1492bf4e01dabcac68f2244f9fd540c21ca29 | /Card.py | 127c772f1389660dbb333434d7f7ac87d723f7df | [] | no_license | jordi2105/LabyrinthBot | 9c346e70e9f98f1d5d79509b907bfccf72b3a187 | 57124118b7ae2997b0f157c7fe75fec3caaf875d | refs/heads/master | 2022-12-03T20:55:56.944994 | 2020-09-02T10:56:29 | 2020-09-02T10:56:29 | 264,283,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | import Objective
import pygame as p
import copy
class Card:
def __init__(self, objective: Objective=None, image_file_url: str=None):
self.objective = objective
if image_file_url is not None:
image = p.image.load(image_file_url)
self.image = p.transform.scale(image, (100, 150))
def copy(self):
copy_obj = Card()
for name, attr in self.__dict__.items():
if hasattr(attr, 'copy') and callable(getattr(attr, 'copy')):
copy_obj.__dict__[name] = attr.copy()
else:
copy_obj.__dict__[name] = copy.deepcopy(attr)
return copy_obj
| [
"jordi.verheul@hotmail.com"
] | jordi.verheul@hotmail.com |
ab2312766b10746a33edee87aae7a0185bc0508e | 70ce903a7b835e4e960abe405158513790d37426 | /django-bloggy/bloggy_project/blog/models.py | 6e45b65afb50888a69149b4da6bd875560586d7b | [] | no_license | lpatmo/book2-exercises | 29af718d74732a5bbe287ab60a67b0d84d4e0abd | 9524bc58997ff4eda10177abf70805f3691e247c | refs/heads/master | 2020-12-25T22:29:09.391501 | 2014-10-26T03:15:35 | 2014-10-26T03:15:35 | 25,755,186 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | from django.db import models
from uuslug import uuslug
class Post(models.Model):
    """Blog post; ``slug`` is regenerated from ``title`` on every save via uuslug."""
    created_at = models.DateTimeField(auto_now_add=True)
    title = models.CharField(max_length=100)
    content = models.TextField()
    tag = models.CharField(max_length=20, blank=True, null=True)
    image = models.ImageField(upload_to="images", blank=True, null=True)
    views = models.IntegerField(default=0)
    slug = models.CharField(max_length=100, unique=True)
    def __unicode__(self):
        # Python 2-style display name (this codebase predates __str__ usage).
        return self.title
    def save(self, *args, **kwargs):
        # Derive a unique slug from the title before persisting.
        self.slug = uuslug(self.title, instance=self, max_length=100)
        super(Post, self).save(*args, **kwargs)
| [
"hermanmu@gmail.com"
] | hermanmu@gmail.com |
0f56d4ad2504d3fc8c7bc698669c0f95e8b2b0c0 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02701/s368667639.py | 8111538afd640004a5f4d8e54edf3e76da2a6571 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43 | py | print(len(set(open(0).read().split())) - 1) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
a5abed5427045f5e23f936aa700050d060c48ab1 | f5b340223a3dbde19889aaef01b9b18b6aab429a | /tasks_request.py | 3a2620d4c2131971893b8621a2e9e5b38cbf1eb0 | [] | no_license | jtpolo14/T0002 | a4b0399ce418d1607d2e4efa6e27f90eb12444bf | f099b62d92835bba600ce12d4a2a7d32a01156f6 | refs/heads/master | 2021-01-10T09:28:54.746517 | 2016-12-17T23:54:40 | 2016-12-17T23:54:40 | 53,239,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py | from celery import Celery
import time
from urllib.request import urlopen
import uuid
# Celery application wired to a Redis broker and result backend.
# NOTE(review): the Redis host is hard-coded — consider moving it to configuration.
app = Celery('tasks', broker='redis://35.165.238.44', backend='redis://35.165.238.44')
@app.task
def check_input_file(file_path):
    """Echo *file_path* back unchanged; appears to be a placeholder for real validation."""
    return file_path
@app.task
def move_input_file(file_path_start, file_path_end):
    """Return the (start, end) path pair; appears to be a stub — no file is actually moved."""
    return (file_path_start, file_path_end)
@app.task
def word_count_python(infile='readme.txt'):
    """Count whitespace-separated words in *infile*.

    Returns a ``(word_count, elapsed_seconds)`` tuple; the elapsed time
    includes a deliberate 5-second sleep (simulated work).
    """
    started = time.time()
    with open(infile) as handle:
        total_words = sum(len(line.split()) for line in handle)
        time.sleep(5)
    return (total_words, time.time() - started)
@app.task
def a001_get_file(file_url):
    """Download *file_url* via get_from_url().

    Returns ``(local_file_name, elapsed_seconds)`` on success, or an error
    message in place of the file name when the download failed.
    """
    started = time.time()
    result = get_from_url(file_url)
    if result['status'] != 0:
        return ('a001 - error downloading url', time.time() - started)
    return (result['file'], time.time() - started)
def get_from_url(url, prefix=None):
    """Download *url* into a uniquely named local file, streaming in 16 KiB chunks.

    Parameters:
        url (str): URL to fetch.
        prefix (str): Optional prefix for the generated file name.

    Returns:
        dict: ``{'file': <local file name>, 'status': 0}``.
    """
    file_name = get_unique_file_name(prefix)
    CHUNK = 16 * 1024
    # Use context managers so both the HTTP response and the output file are
    # closed even on error (the response was previously never closed).
    with urlopen(url) as response, open(file_name, 'wb') as f:
        while True:
            chunk = response.read(CHUNK)
            if not chunk:
                break
            f.write(chunk)
    return {'file': file_name, 'status': 0}
def get_unique_file_name(prefix=None):
    """Return a random unique file name (a UUID4 string).

    Parameters:
        prefix (str): Optional prefix; prepended only when it is a non-empty
            string. Any other value is silently ignored.
    """
    # isinstance() instead of `type(prefix) == str` — also accepts str subclasses.
    if prefix and isinstance(prefix, str):
        return prefix + str(uuid.uuid4())
    return str(uuid.uuid4())
| [
"noreply@github.com"
] | jtpolo14.noreply@github.com |
42f319f1590604485a6f3d6007b2a3d6c014d924 | bfb5051ae8de82ec9ebfd329336e6aa6e7cea3d9 | /src/entities/difficulty.py | 4da58572326508ea2a53998c5c2206c083771a6d | [] | no_license | J-Uhero/ot-harjoitustyo | 400cadf132eaa78a5e106ab9ae409c804ef35745 | dc04dd58d910093ae4c9fb3ac581e3a99b72513b | refs/heads/master | 2023-04-29T07:55:47.081100 | 2021-05-19T23:39:33 | 2021-05-19T23:39:33 | 350,096,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,028 | py | class Difficulty:
"""Luokka, joka kuvaa pelin vaikeustasoa ja sisältää vaikeustasoon
vaikuttavia muuttujia ja niiden parametreja.
"""
def __init__(self):
"""Konstruktori, joka alustaa vaikeustasoa ilmentävät muuttujat
asettaa oletusarvoisesti vaikeustason helpoksi.
"""
self._height = None
self._width = None
self._mines = None
self._degree = None
self.easy()
def height(self):
return self._height
def width(self):
return self._width
def mines(self):
return self._mines
def degree(self):
return self._degree
def easy(self):
self._height = 9
self._width = 9
self._mines = 10
self._degree = "easy"
def medium(self):
self._height = 16
self._width = 16
self._mines = 40
self._degree = "medium"
def hard(self):
self._height = 16
self._width = 30
self._mines = 99
self._degree = "hard"
| [
"juho.herranen@gmail.com"
] | juho.herranen@gmail.com |
5361f7d7c94132ad1719f0f21b8ce0657580b815 | f8bdc85e59cc703ec3fa771ea2abac9950b9ea83 | /server/pathgenerator.py | 6cc53fa91731d2f4d19e2b1b4c6ff76ba1053b8c | [] | no_license | subash-a/protosketch | 7b31fec37fa2246a5cccb4a17a8febb094b4c7a6 | f4d933e7e609bc6a1296cead8fd0c30e1aa0e22a | refs/heads/master | 2020-03-30T12:35:37.976140 | 2014-07-22T17:24:28 | 2014-07-22T17:24:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | import uuid
def getNewPrototypeId():
    """Return a fresh random UUID4 string used to identify a prototype."""
    new_id = uuid.uuid4()
    return str(new_id)
| [
"s7subash@gmail.com"
] | s7subash@gmail.com |
0d838535c8dd3ab286128f9df2c55d083a319f7d | cec8af4a2e2459d92db3b217ef729dcf0779c2dd | /website/urls.py | 0f382aa400a3d7654b0ac42e77a7c55ce7b38087 | [] | no_license | aashishnepal/pi-project | 1ee5db0345b42718ea172f2e6bd26f102b951c81 | dc76136d2bec2468537b75409fed121e3e10905a | refs/heads/master | 2020-11-25T18:32:36.785369 | 2019-12-27T08:49:52 | 2019-12-27T08:49:52 | 228,794,508 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | from django.urls import path
from . import views
# URL routes: login page at the site root, plus home and about pages.
urlpatterns = [
    path('', views.LoginPageView.as_view(), name='login'),
    path('home/', views.HomePageView.as_view(), name='home'),
    path('about/', views.AboutPageView.as_view(), name='about'),
]
| [
"aashishnepal008@gmail.com"
] | aashishnepal008@gmail.com |
1ab588834c32ac5271741da9e08ff13e07c2fdcf | 0c4b4c8c13f6ef1e26e20172e36cabd33851a17a | /radius_input_peri&cone.py | 5e428611e6005056ec0bc15b4ba985796f4c300e | [] | no_license | masankar666/Day1_26052021_pub | c1540c0660fd700daa190c8943b1e86af2de82ee | 312dc8206aefdb88172ff01cd860c766a6b1859f | refs/heads/main | 2023-06-04T10:32:43.362485 | 2021-06-26T14:28:23 | 2021-06-26T14:28:23 | 371,361,979 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | '''
#Task 2
radius = 20
calculate perimeter of circle : 2 pi radius
'''
# Task 2: read a radius and a height, then report the circle's perimeter
# (circumference) and the cone's volume.

# dynamic radius input from user
r = float(input("Enter your radius as input:"))
h = float(input("Enter your height:"))
pi = 3.14
# circumference of a circle: 2 * pi * r
peri = 2 * pi * r
# (1/3) * pi * r^2 * h is the cone's VOLUME — the original mislabeled it "area".
volume = 1 / 3 * pi * (r ** 2) * h
print("Task 2 pgm 1 :")
print("radius = ", r)
print("height = ", h)
print("calculate perimeter of circle = ", peri)
# volume of cone, truncated to an integer for display
print("volume of cone = ", (int(volume)))
| [
"noreply@github.com"
] | masankar666.noreply@github.com |
77bf35076a85c2afd353540ed90cac6cc4258fa7 | f1ab02aa2593867c695d290bd06153c14cf0f098 | /npf/core/xmin/util/admin.py | d37563c3504757c9badc8a1b05e42f3aa296c801 | [] | no_license | 1npf/EDISSON-FAZA-1 | 0d3fa9c4836ae0bd4b405de646f3220687506cc5 | 81d78a8a0df4ba88cb4a32c6d4a616ca9a9e6ae1 | refs/heads/master | 2016-09-01T16:23:59.131240 | 2015-12-24T12:16:55 | 2015-12-24T12:16:55 | 48,541,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,065 | py | from django.contrib import admin
from django.core.urlresolvers import reverse, NoReverseMatch
from django.db import models
from django.http import Http404
from django.utils.html import escape
from django.contrib.contenttypes.models import ContentType
from npf.core.printform.models import Template
try:
from django.utils.encoding import force_unicode
except ImportError:
from django.utils.encoding import force_text as force_unicode
def get_admin_for_model(model):
    """Return the ``(model, model_admin)`` pair registered for *model*, or None."""
    meta = model._meta
    return get_model_and_admin(meta.app_label, meta.model_name)
def get_model_and_admin(app_label, model_name):
    """Look up an admin registration by app label and model name.

    Returns the ``(model, model_admin)`` pair, or None when nothing matches.
    """
    for registered_model, registered_admin in admin.site._registry.items():
        meta = registered_model._meta
        if (meta.app_label, meta.model_name) == (app_label, model_name):
            return registered_model, registered_admin
    return None
def get_model_and_admin_or_404(app_label, module_name):
    """Like get_model_and_admin(), but raise Http404 on a missing registration."""
    result = get_model_and_admin(app_label, module_name)
    if result is None:
        raise Http404
    return result
def get_admin_urls_for_model(request, model):
    """Return admin changelist/add URLs for *model*, filtered by the
    requesting user's permissions.

    Keys (present only when permitted AND resolvable):
        'admin_url' -- the model's changelist URL (requires 'change' perm),
        'add_url'   -- the model's add-form URL (requires 'add' perm).
    """
    admin_urls = {}
    model_and_admin = get_admin_for_model(model)
    # Model not registered with the admin site: nothing to link to.
    if model_and_admin is None:
        return admin_urls
    model_admin = model_and_admin[1]
    app_label = model._meta.app_label
    has_module_perms = request.user.has_module_perms(app_label)
    if has_module_perms:
        perms = model_admin.get_model_perms(request)
        info = (app_label, model._meta.model_name)
        if perms.get('change', False):
            try:
                admin_urls['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=admin.site.name)
            except NoReverseMatch:
                # URL not wired up for this model — omit the key rather than fail.
                pass
        if perms.get('add', False):
            try:
                admin_urls['add_url'] = reverse('admin:%s_%s_add' % info, current_app=admin.site.name)
            except NoReverseMatch:
                pass
    return admin_urls
def flatten_choices(choices):
    """Flatten Django-style ``choices`` into a list of dicts.

    Grouped choices produce a ``header`` entry for the group label followed by
    one ``item`` entry per option; ungrouped options become ``header_item``
    entries. Labels are forced to text and HTML-escaped.
    """
    # Normalize to strings.
    flat = []
    for value, label in choices:
        if isinstance(label, (list, tuple)):
            flat.append({'id': '', 'type': 'header', 'description': escape(force_unicode(value))})
            flat.extend(
                {'id': sub[0], 'type': 'item', 'description': escape(force_unicode(sub[1]))}
                for sub in label
            )
        else:
            flat.append({'id': value, 'type': 'header_item',
                         'description': escape(force_unicode(label))})
    return flat
def get_app_list(request):
    """Build a serializable description of every admin-registered app/model
    the requesting user may see: per-model permissions, field metadata,
    admin URLs and whether any actions (incl. print-form templates) exist.

    Returns a list of app dicts sorted by app name, each with a 'models' list.
    """
    def _actions_exists(model_admin, model):
        # True when the admin exposes any action, or a print-form Template
        # is registered for the model's content type.
        if bool(model_admin.get_action_choices(request)):
            return True
        ct = ContentType.objects.get_for_model(model, not model._meta.proxy)
        if Template.objects.filter(model=ct).exists():
            return True
        return False
    app_dict = {}
    user = request.user
    for model, model_admin in admin.site._registry.items():
        app_label = model._meta.app_label
        app_name = model._meta.app_config.verbose_name
        has_module_perms = user.has_module_perms(app_label)
        if has_module_perms:
            perms = model_admin.get_model_perms(request)
            # Check whether user has any perm for this module.
            # If so, add the module to the model_list.
            if True in perms.values():
                # Describe each concrete field so the client can render forms/grids.
                fields = []
                for field in model._meta.fields:
                    field_config = {
                        'name': field.name,
                        'verbose_name': field.verbose_name,
                        'app': app_label,
                        'model': model._meta.model_name,
                        'field_class': "%s.%s" % (field.__class__.__module__, field.__class__.__name__),
                        'default': field.get_default(),
                        'editable': field.editable,
                        'allow_blank': field.blank,
                        'help_text': field.help_text,
                        'max_length': field.max_length,
                        'choices': flatten_choices(field.choices)
                    }
                    if isinstance(field, models.ForeignKey):
                        # NOTE(review): field.rel.to is the pre-Django-2.0 API —
                        # confirm the Django version pinned by this project.
                        field_config['related'] = {'class': "%s.%s" % (field.rel.to.__module__, field.rel.to.__name__)}
                        field_config['related'].update(get_admin_urls_for_model(request, field.rel.to))
                    fields.append(field_config)
                model_dict = {
                    'app': app_label,
                    'model': model._meta.model_name,
                    'model_name': model._meta.object_name,
                    'verbose_name': model._meta.verbose_name,
                    'verbose_name_plural': model._meta.verbose_name_plural,
                    'perms': perms,
                    'list_display': model_admin.get_list_display(request),
                    'list_editable': model_admin.list_editable,
                    'list_per_page': model_admin.list_per_page,
                    'search_fields': model_admin.get_search_fields(request),
                    'fields': fields,
                    'actions': _actions_exists(model_admin, model)
                }
                model_dict.update(get_admin_urls_for_model(request, model))
                # Optional admin extensions: custom column config and tree flag.
                if hasattr(model_admin, 'columns'):
                    model_dict['columns'] = model_admin.get_columns(request)
                else:
                    model_dict['columns'] = []
                if 'is_tree' in dir(model_admin):
                    model_dict.update({'is_tree': True})
                if app_label in app_dict:
                    app_dict[app_label]['models'].append(model_dict)
                else:
                    app_dict[app_label] = {
                        'app': app_label,
                        'name': app_name,
                        'app_url': reverse('admin:app_list', kwargs={'app_label': app_label},
                                           current_app=admin.site.name),
                        'has_module_perms': has_module_perms,
                        'models': [model_dict],
                    }
    # Sort the apps alphabetically.
    app_list = list(app_dict.values())
    app_list.sort(key=lambda x: x['name'])
    return app_list
def get_actions(request, model, model_admin):
    """Return the admin action choices for *model*, extended with one
    'create document' sub-action per print-form Template registered for it."""
    actions = model_admin.get_action_choices(request)
    content_type = ContentType.objects.get_for_model(model, not model._meta.proxy)
    template_actions = [
        {'text': template.name, 'name': 'create_doc_' + template.name}
        for template in Template.objects.filter(model=content_type)
    ]
    if template_actions:
        actions.append(('create_doc', 'Сделать документ', template_actions))
    return actions
| [
"maxim.sorokin@gmail.com"
] | maxim.sorokin@gmail.com |
808afd2c166dd88286794b21c33a75891fcad75a | eb0bb5267035c0222da0c072c5dcd85b46099904 | /test/bug.986.t | 7d7e22538d6c69ad56a124722cd5c465bf5b6fda | [
"MIT"
] | permissive | bjornreppen/task | 6d96f578eec7b9cceeb4d728caeda87e7a446949 | a9eac8bb715ac8f51073c080ac439bf5c09493e8 | refs/heads/master | 2021-05-30T07:48:39.263967 | 2015-10-21T20:50:42 | 2015-10-21T20:50:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,329 | t | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright 2006 - 2015, Paul Beckingham, Federico Hernandez.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# http://www.opensource.org/licenses/mit-license.php
#
###############################################################################
import sys
import os
import unittest
# Ensure python finds the local simpletap module
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from basetest import Task, TestCase
from basetest import Taskd, ServerTestCase
class TestBug986(TestCase):
    """Regression test for bug 986: the report-specific override
    rc.dateformat.info must win over the generic rc.dateformat."""
    def setUp(self):
        """Executed before each test in the class"""
        self.t = Task()
    def test_dateformat_precedence(self):
        """Verify rc.dateformat.info takes precedence over rc.dateformat"""
        # A started task gives the info report a date to format.
        self.t('add test')
        self.t('1 start')
        # The override ('__') should appear; the generic format ('XX') should not.
        code, out, err = self.t('1 info rc.dateformat:XX rc.dateformat.info:__')
        self.assertIn('__', out)
        self.assertNotIn('XX', out)
        # Even an empty override must not fall back to a different format.
        code, out, err = self.t('1 info rc.dateformat:__ rc.dateformat.info:')
        self.assertIn('__', out)
if __name__ == "__main__":
    # Emit TAP output so the suite integrates with the project's test harness.
    from simpletap import TAPTestRunner
    unittest.main(testRunner=TAPTestRunner())
# vim: ai sts=4 et sw=4 ft=python
| [
"paul@beckingham.net"
] | paul@beckingham.net |
f3d46fb64540cba1c12c46e08db3cba2eb1aa2f2 | f60f3ca2ea77cfaf2d12b949241252c42a0950c8 | /src/simulator/workflows/container.py | bceec745f29237b65de38323661bcadd15b7e136 | [] | no_license | belyakov-am/cloud-simulator | 39b1d84f4f0edba1098bd8d85f35d08106d4db04 | d2516adab27888a21e60651d536821c0a61092d8 | refs/heads/master | 2023-04-28T10:49:49.048826 | 2021-05-23T14:49:15 | 2021-05-23T14:49:15 | 344,893,767 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | class Container:
"""Representation of (Docker) container with required libraries for
workflow execution.
"""
def __init__(self, workflow_uuid: str, provision_time: int) -> None:
self.workflow_uuid = workflow_uuid
self.provision_time = provision_time
def __repr__(self) -> str:
return (f"Container("
f"workflow_uuid = {self.workflow_uuid}, "
f"provision_time = {self.provision_time})")
def __eq__(self, other: "Container") -> bool:
return (self.workflow_uuid == other.workflow_uuid
and self.provision_time == other.provision_time)
def __hash__(self) -> int:
return hash(self.workflow_uuid) ^ hash(self.provision_time)
| [
"pochtaforfkn@gmail.com"
] | pochtaforfkn@gmail.com |
c835bb059b3de560e44f8f5470abdf3e721dc9fe | 3d8c0fddfa020c3c29fc244bba5c89aa9e8d5f15 | /lab_5/naive_bayes_model.py | 51cab803438b30b81db702bd6c8c6e32c4aa88d1 | [] | no_license | Max-1892/machine_learning | 43c24ea03ccc26d67ef015447b7e5525401cc0bd | ec363911fc913e8271e629657ffb0a2377c9571f | refs/heads/master | 2020-01-27T10:04:41.455509 | 2019-07-07T18:49:13 | 2019-07-07T18:49:13 | 67,073,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,793 | py | import numpy as np
class NaiveBayes:
    """Binary naive Bayes classifier over binary (0/1) features.

    Each training instance is a sequence of 0/1 feature values followed by
    a 0/1 class label in the last position.

    Note: if a conditional probability of 0 comes up during prediction,
    a very small constant (_ZERO_PROB) is used in its place so a single
    unseen feature/class combination does not zero out the whole product.
    """

    # Stand-in for zero conditional probabilities during prediction.
    _ZERO_PROB = 0.00000000000000000000001

    def __init__(self):
        # P(class), keyed by class label.
        self.class_priors = {0: 0, 1: 0}
        # Conditional probability table:
        # feature_given_class_prob[class][feature_value][feature_idx]
        #   = P(feature at feature_idx == feature_value | class)
        self.feature_given_class_prob = {0: {0: {}, 1: {}}, 1: {0: {}, 1: {}}}

    def build_model(self, data_instances):
        """Estimate class priors and per-feature conditional probabilities.

        Assumes all instances have the same length (features + label).
        """
        # Split on class
        negative_instances = []
        positive_instances = []
        for instance in data_instances:
            if instance[-1] == 0:
                negative_instances.append(instance[:-1])
            elif instance[-1] == 1:
                positive_instances.append(instance[:-1])

        # Calculate class priors
        self.class_priors[0] = float(len(negative_instances)) / len(data_instances)
        self.class_priors[1] = float(len(positive_instances)) / len(data_instances)

        negative_instances = np.array(negative_instances)
        positive_instances = np.array(positive_instances)

        # Number of features (class label excluded). Previously this read the
        # leaked loop variable `instance`, which raised a NameError when the
        # training set was empty.
        num_features = len(data_instances[0]) - 1 if len(data_instances) else 0

        # Calculate P(feature | class) for every (class, value, index) cell.
        instances_by_class = {0: negative_instances, 1: positive_instances}
        for class_value in range(2):
            class_instances = instances_by_class[class_value]
            for feature_value in range(2):
                for feature_idx in range(num_features):
                    if len(class_instances) > 0:
                        matches = (class_instances[:, feature_idx] == feature_value).sum()
                        prob = float(matches) / len(class_instances)
                    else:
                        prob = 0.0
                    self.feature_given_class_prob[class_value][feature_value][feature_idx] = prob

    def predict(self, data_instance):
        """Return the class label (0 or 1) maximizing the naive Bayes score
        for the given feature vector (no class label in the input)."""
        argmax_class_probability = -1.0
        argmax_class = -1
        for class_value in range(2):
            probability = self.class_priors[class_value]
            for attr_idx, instance_attr in enumerate(data_instance):
                conditional = self.feature_given_class_prob[class_value][instance_attr][attr_idx]
                # Substitute a tiny constant for zero probabilities so the
                # product never collapses to exactly 0.
                probability *= conditional if conditional != 0 else self._ZERO_PROB
            if probability > argmax_class_probability:
                argmax_class_probability = probability
                argmax_class = class_value
        return argmax_class

    def print_model(self):
        """Return a human-readable dump of the priors and the conditional
        probability table."""
        model = "P(class value = {}) = {}\n".format(0, self.class_priors[0])
        model += "P(class value = {}) = {}\n".format(1, self.class_priors[1])
        # .items() (not the Python-2-only .iteritems()) so this also runs
        # under Python 3; on Python 2 it is equivalent here.
        for class_key, inst_val_map in self.feature_given_class_prob.items():
            for instance_val_key, value in inst_val_map.items():
                for attr_key, probability in value.items():
                    model += "P(f_{} = {} | Class value = {}) = {}\n".format( \
                        attr_key, instance_val_key, class_key, probability)
        return model
| [
"somethingfunny14@gmail.com"
] | somethingfunny14@gmail.com |
29dcc6918e6e732253f2ff0f81d976da35166127 | 744e72e6293c4cf93e1d97dc22c1d31851348056 | /resizePDF.py | a26bd81a5ec08659f044cac572a1f888ef3618db | [] | no_license | blueskycorner/resizePDF | b89fc58c380e4d795cf8dfa9ece4bf73220d38c4 | ddd479ad5e732141fe816a4ce1e188cb75e3dc42 | refs/heads/master | 2022-12-15T22:09:26.808343 | 2019-03-18T13:20:08 | 2019-03-18T13:20:08 | 173,715,753 | 0 | 0 | null | 2022-11-22T03:27:38 | 2019-03-04T09:36:43 | Dockerfile | UTF-8 | Python | false | false | 5,505 | py | import json
import os
import boto3
from fpdf import FPDF
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from botocore.exceptions import ClientError
bucketNameParamName = "bucketName"
tmpPathParamName = "tmpPath"
bucketName = os.getenv(bucketNameParamName)
s3 = boto3.client('s3')
def buildPDF(filename, imagesList):
    """Assemble the images in imagesList into a single PDF at filename.

    Each image gets its own page, stretched to a full A4 page (210 x 297 mm).
    Any failure is logged and re-raised to the caller.
    """
    try:
        pdf = FPDF()
        pdf.compress = False
        print("Nb images to add: " + str(len(imagesList)))
        for image in imagesList:
            pdf.add_page()
            # 210 x 297 mm = full A4 page.
            pdf.image(image, 0, 0, 210, 297)
        pdf.output(filename)
    except Exception as e:
        print(e)
        # Bare raise preserves the original traceback; `raise e` would
        # re-anchor it at this line.
        raise
def sendEmail(emailFrom, emailTo, downloadUrl):
    """Send a download-link notification email via Amazon SES.

    :param emailFrom: sender address (must be SES-verified -- TODO confirm
                      the account is out of the SES sandbox)
    :param emailTo: recipient address
    :param downloadUrl: pre-signed S3 URL embedded in the message body
    """
    print("address: " + emailTo)
    print("downloadUrl: " + downloadUrl)
    SENDER = emailFrom
    RECIPIENT = emailTo
    # The subject line for the email.
    SUBJECT = "Your PDF file is ready !"
    # The email body for recipients with non-HTML email clients.
    BODY_TEXT = "Hi,\n\nHere is a link to download your file:\n" + downloadUrl + "\n\nHave a nice day."
    # The HTML body of the email (the download URL is spliced between the
    # two literal fragments below).
    BODY_HTML = """\
<html>
<head></head>
<body>
Hi,<br><br>
Here is a link to download your document: <a href="
"""
    BODY_HTML += downloadUrl
    BODY_HTML += """\
">Click here</a>
<br><br>
Have a nice day.<br>
</body>
</html>
"""
    # The character encoding for the email.
    CHARSET = "utf-8"
    # Create a new SES resource and specify a region.
    client = boto3.client('ses')
    # Create a multipart/mixed parent container.
    msg = MIMEMultipart('mixed')
    # Add subject, from and to lines.
    msg['Subject'] = SUBJECT
    msg['From'] = SENDER
    msg['To'] = RECIPIENT
    # Create a multipart/alternative child container.
    msg_body = MIMEMultipart('alternative')
    # Encode the text and HTML content and set the character encoding. This step is
    # necessary if you're sending a message with characters outside the ASCII range.
    textpart = MIMEText(BODY_TEXT.encode(CHARSET), 'plain', CHARSET)
    htmlpart = MIMEText(BODY_HTML.encode(CHARSET), 'html', CHARSET)
    # Add the text and HTML parts to the child container.
    msg_body.attach(textpart)
    msg_body.attach(htmlpart)
    # Attach the multipart/alternative child container to the multipart/mixed
    # parent container.
    msg.attach(msg_body)
    #print(msg)
    try:
        #Provide the contents of the email.
        response = client.send_raw_email(
            Source=SENDER,
            Destinations=[
                RECIPIENT
            ],
            RawMessage={
                'Data':msg.as_string(),
            }
            # ConfigurationSetName=CONFIGURATION_SET
        )
    # Display an error if something goes wrong.
    except ClientError as e:
        print(e.response['Error']['Message'])
    else:
        print("Email sent! Message ID:")
        print(response['MessageId'])
def resizePDF(event, context):
    """AWS Lambda handler behind API Gateway.

    Downloads every image stored under `prefix` in the configured S3 bucket,
    assembles them into one PDF, uploads the PDF back to the bucket, and
    emails the requester a pre-signed download link.

    Query-string parameters: emailAddress, prefix, compression
    (`compression` is read but not otherwise used below).
    :return: API Gateway proxy response dict -- 200 with the signed URL on
             success, 500 with the error text otherwise.
    """
    response = None
    try:
        emailAddress = event['queryStringParameters']['emailAddress']
        prefix = event['queryStringParameters']['prefix']
        compression = event['queryStringParameters']['compression']
        tmpPath = os.getenv(tmpPathParamName)
        print("prefix: " + prefix)
        print("compression: " + compression)
        print("bucketName: " + bucketName)

        # Download files and build
        s3Ressource = boto3.resource('s3')
        bucket = s3Ressource.Bucket(bucketName)
        extensionsAllowed = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG']
        imagesList = []
        for object in bucket.objects.filter(Prefix=prefix):
            print(object)
            # size > 0 filters out the "directory" placeholder keys.
            if (object.size > 0):
                path, filename = os.path.split(object.key)
                filenameshort, extension = os.path.splitext(filename)
                print("path: " + path)
                print("filename: " + filename)
                print("filenameshort: " + filenameshort)
                print("extension: " + extension)
                if (extension in extensionsAllowed):
                    print("path: " + path)
                    print("filename: " + filename)
                    dest = tmpPath + filename
                    print("dest: " + dest)
                    bucket.download_file(object.key, dest)
                    imagesList.append(dest)

        # Build the PDF
        doc = tmpPath + prefix + ".pdf"
        buildPDF(doc, imagesList)
        destKey = prefix + "/document.pdf"
        bucket.upload_file(doc, destKey)
        # Pre-signed link expires after one hour.
        signedUrlDownload = s3.generate_presigned_url(
            ClientMethod='get_object',
            Params={
                'Bucket': bucketName,
                'Key': destKey
            },
            ExpiresIn=3600
        )

        # Send the email
        sendEmail(emailAddress, emailAddress, signedUrlDownload)

        responseBody = {"signedUrlDownload": signedUrlDownload}
        response = {
            "statusCode": 200,
            "body": json.dumps(responseBody),
            "headers": {"Access-Control-Allow-Origin": "*"},
            "isBase64Encoded": "false"
        }
    except Exception as e:
        responseBody = {"error": str(e)}
        response = {
            "statusCode": 500,
            "body": json.dumps(responseBody)
        }
    return response
| [
"ec2-user@ip-172-31-1-156.ec2.internal"
] | ec2-user@ip-172-31-1-156.ec2.internal |
25d08dc751e8d64a9112ba62276617681a6e5d78 | 42e0305c8cc9e20fee14d359ec3d466fb4608607 | /进程和线程/信号量.py | 96d8b44d2b620d8c9a64ba0f291d7ef73961073b | [] | no_license | wuxvsuizhong/Li_pro | 976159583927823464d4576efb59aaf86ef65e13 | 7facd87e67f767412917d9b8668746f1d87ec28f | refs/heads/master | 2023-08-08T23:13:08.226873 | 2023-07-22T10:09:25 | 2023-07-22T10:09:25 | 107,368,788 | 0 | 0 | null | 2017-10-18T06:50:33 | 2017-10-18T06:42:18 | null | UTF-8 | Python | false | false | 551 | py | import random
import time
from threading import Thread,Semaphore
# A semaphore limits how many threads may access a particular resource at
# once; it is typically used where a resource has a hard concurrency cap --
# in short, for throttling/rate limiting.
# Parking-lot example:
# allow at most 5 cars ("threads") to hold a parking spot at the same time
sp = Semaphore(5)


def task(name):
    # Block until one of the 5 spots is free.
    sp.acquire()
    print(f"{name}抢到了车位")
    time.sleep(random.randint(3,5))
    # Free the spot before announcing departure.
    sp.release()
    print(f"{name}开走了")


if "__main__" == __name__:
    # Start 10 "cars" competing for the 5 spots.
    for i in range(10):
        t = Thread(target=task,args=(f"宝马{i}",))
        t.start()
"devuser01@123.com"
] | devuser01@123.com |
3d12d6287b41ff9445a633794470f5b2b34fd4ee | c45306676df2fe733007ff12acd88a8b7d700c42 | /DrawLossCurve.py | 3f600ae3be6c244da132e775e4fcbcf478a1c218 | [] | no_license | huanhsu/Caffe_Tool | 83863ea351da4d3006f2ad4961b91e5b0c758239 | bc9d7205f26ed3e071b1f2d3fac4ab14d64b82e4 | refs/heads/master | 2021-01-16T17:55:23.472297 | 2017-08-16T07:20:59 | 2017-08-16T07:20:59 | 100,025,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,751 | py | import numpy as np
import re
import matplotlib.patches as mpatches
from argparse import ArgumentParser
from matplotlib import pylab as plt
import pdb
#Draw loss curves with many output in a file
#This example has 3 loss curves
#an example on log
#I0728 11:05:30.162691 1057 solver.cpp:219] Iteration 20 (0.0501322 iter/s, 398.945s/20 iters), loss = 29.1468
#I0728 11:05:30.162755 1057 solver.cpp:238] Train net output #0: prob = 7.30188 (* 1 = 7.30188 loss)
#I0728 11:05:30.162770 1057 solver.cpp:238] Train net output #1: prob_2c_4f = 7.30041 (* 1 = 7.30041 loss)
#I0728 11:05:30.162781 1057 solver.cpp:238] Train net output #2: prob_2c_5c = 7.29855 (* 1 = 7.29855 loss)
#I0728 11:05:30.162792 1057 solver.cpp:238] Train net output #3: prob_3d_5c = 7.29898 (* 1 = 7.29898 loss)
#explaining for pattern
#([+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?) is matching number with point or number with e
#.*\n.* means no matter string after, new line, no matter string before
def main(files):
    """Plot the four training-loss curves parsed from the given Caffe log.

    :param files: argparse namespace; `files.files1` is the log file path.
    """
    plt.style.use('ggplot')
    fig, ax1 = plt.subplots()
    ax1.set_ylim([0.002,8])
    ax1.set_xlabel('Iteration')
    ax1.set_ylabel('Training Loss')
    # Wrapped in a list so several logs could be overlaid with distinct
    # color offsets (color_ind) if more files were appended.
    temp=[]
    temp.append(files.files1)
    for i, log_file in enumerate(temp):
        loss_iterations, losses, losses2, losses3, losses4 = parse_log(log_file)
        disp_results(fig, ax1, loss_iterations, losses, losses2, losses3, losses4, color_ind=i)
    # Legend entries reuse the colors recorded by disp_results in the
    # module-level `colors` list.
    patch1 = mpatches.Patch(color=colors[0], label=' loss')
    patch2 = mpatches.Patch(color=colors[1], label='2c_4f loss')
    patch3 = mpatches.Patch(color=colors[2], label='2c_5c loss')
    patch4 = mpatches.Patch(color=colors[3], label='3d_5c loss')
    plt.legend(handles=[patch1, patch2, patch3, patch4])
    plt.show()
def parse_log(log_file):
    """Extract iteration numbers and the four training-loss series from a
    Caffe training log.

    Returns five parallel numpy arrays:
    (iterations, prob, prob_2c_4f, prob_2c_5c, prob_3d_5c).
    """
    with open(log_file, 'r') as handle:
        log = handle.read()

    # One match per "Iteration ..." record. The numeric sub-pattern creates
    # three extra groups per captured value, hence the 0/1/5/9/13 offsets.
    loss_pattern = r"Iteration (\d+) \(.*\), loss = .*\n.* Train net output #0: prob = ([+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?) .*\n.* Train net output #1: prob_2c_4f = ([+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?) .*\n.* Train net output #2: prob_2c_5c = ([+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?) .*\n.* Train net output #3: prob_3d_5c = ([+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?)"

    matches = re.findall(loss_pattern, log)
    loss_iterations = np.array([int(m[0]) for m in matches])
    losses = np.array([float(m[1]) for m in matches])
    losses2 = np.array([float(m[5]) for m in matches])
    losses3 = np.array([float(m[9]) for m in matches])
    losses4 = np.array([float(m[13]) for m in matches])

    return loss_iterations, losses, losses2, losses3, losses4
def disp_results(fig, ax1, loss_iterations, losses, losses2, losses3, losses4, color_ind=0):
    """Plot the four loss series on ax1, cycling through the active style's
    color cycle, and record the colors used in the module-level `colors`
    list (consumed by main() for the legend).

    NOTE(review): rcParams['axes.color_cycle'] was removed in matplotlib 2.0
    ('axes.prop_cycle' replaced it) -- confirm the targeted matplotlib version.
    """
    # color_ind * 4 offsets each log file's 4 curves within the cycle.
    modula = len(plt.rcParams['axes.color_cycle'])
    color1=plt.rcParams['axes.color_cycle'][(color_ind * 4 + 0) % modula]
    ax1.plot(loss_iterations, losses,color=color1)
    color2=plt.rcParams['axes.color_cycle'][(color_ind * 4 + 1) % modula]
    ax1.plot(loss_iterations, losses2,color=color2)
    color3=plt.rcParams['axes.color_cycle'][(color_ind * 4 + 2) % modula]
    ax1.plot(loss_iterations, losses3,color=color3)
    color4=plt.rcParams['axes.color_cycle'][(color_ind * 4 + 3) % modula]
    ax1.plot(loss_iterations, losses4,color=color4)
    colors.append(color1)
    colors.append(color2)
    colors.append(color3)
    colors.append(color4)
if __name__ == '__main__':
    # NOTE(review): `global` at module scope is a no-op; the assignment on
    # the next line already creates the module-level `colors` list.
    global colors
    colors=[]
    parser = ArgumentParser(description="Draw loss curve")
    parser.add_argument('files1')
    args = parser.parse_args()
    main(args)
| [
"noreply@github.com"
] | huanhsu.noreply@github.com |
787ab026083a3a4bd73e4ea8f22df850b46410bd | b7e72138d624d21fbeb877d0dd5613389fb70503 | /rifaqui/core/migrations/0009_raffle_draw_date.py | c5592c5f1dc6a7f21ce2c8dda2c8a003361de9e7 | [] | no_license | ConTTudOweb/RifaquiProject | f4f2667e7185df517b8919013f505ccd68d8d901 | 301b4d881a1e820774f9a74359dcbd7eff3d7e45 | refs/heads/master | 2020-03-15T06:33:34.094190 | 2018-06-22T03:25:43 | 2018-06-22T03:25:43 | 132,010,143 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | # Generated by Django 2.0.4 on 2018-05-02 21:32
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.0.4: adds the nullable `draw_date` field
    # ("data do sorteio" = draw date) to the Raffle model.

    dependencies = [
        ('core', '0008_auto_20180502_2122'),
    ]

    operations = [
        migrations.AddField(
            model_name='raffle',
            name='draw_date',
            field=models.DateField(blank=True, null=True, verbose_name='data do sorteio'),
        ),
    ]
| [
"alessandro@kminformatica.com.br"
] | alessandro@kminformatica.com.br |
ae2eade74f9f078d1840f1f5df750227c8959659 | ce6e91fb9a5a9049d817d020ca0018b7f4008b9b | /runtests.py | ef35cd877b6d81a7ad6d506365c6d7dfbe0e8cb7 | [] | no_license | ccnmtl/django-pagetimer | b98536273b38c64f10d6832b7b74833099e68436 | 2844b3c702df2952deffdf6cd75c9e47e6f35284 | refs/heads/master | 2021-01-09T20:53:18.627185 | 2017-08-30T19:32:23 | 2017-08-30T19:32:23 | 58,394,973 | 0 | 0 | null | 2017-08-30T19:32:23 | 2016-05-09T17:25:37 | Python | UTF-8 | Python | false | false | 2,149 | py | """ run tests for pagetimer
$ virtualenv ve
$ ./ve/bin/pip install Django==1.8
$ ./ve/bin/pip install .
$ ./ve/bin/python runtests.py
"""
import django
from django.conf import settings
from django.core.management import call_command
def main():
    """Configure a minimal in-memory Django environment, then run the
    `pagetimer` app's test suite via Django's `test` management command."""
    # Dynamically configure the Django settings with the minimum necessary to
    # get Django running tests
    settings.configure(
        MIDDLEWARE_CLASSES=(
            'django.contrib.sessions.middleware.SessionMiddleware',
            'django.contrib.auth.middleware.AuthenticationMiddleware',
        ),
        INSTALLED_APPS=(
            'django.contrib.auth',
            'django.contrib.sessions',
            'django.contrib.contenttypes',
            'pagetimer',
        ),
        TEST_RUNNER='django.test.runner.DiscoverRunner',
        TEMPLATES=[
            {
                'BACKEND': 'django.template.backends.django.DjangoTemplates',
                'DIRS': [],
                'APP_DIRS': True,
                'OPTIONS': {
                    'context_processors': [
                        'django.contrib.auth.context_processors.auth',
                        'django.template.context_processors.debug',
                        'django.template.context_processors.i18n',
                        'django.template.context_processors.media',
                        'django.template.context_processors.static',
                        'django.template.context_processors.tz',
                        'django.contrib.messages.context_processors.messages',
                    ],
                },
            },
        ],
        COVERAGE_EXCLUDES_FOLDERS=['migrations'],
        ROOT_URLCONF='pagetimer.urls',
        # Django replaces this, but it still wants it. *shrugs*
        DATABASES={
            'default': {
                'ENGINE': 'django.db.backends.sqlite3',
                'NAME': ':memory:',
                'HOST': '',
                'PORT': '',
                'USER': '',
                'PASSWORD': '',
            }
        },
    )

    django.setup()

    # Fire off the tests
    call_command('test')


if __name__ == '__main__':
    main()
| [
"anders@columbia.edu"
] | anders@columbia.edu |
752a144606ef7beeca941dda8a76f1980182e009 | 3a6fb0ef104d07491cbb56d95f7a6140dff29eaa | /2b.py | a97428ac04428c13b7ffa2cfada1e457d2e9693e | [] | no_license | audiodude/advent2019 | 5903e38f87d095c8b2bd30be7a7a18bbc8b25310 | 4c725a732c5691df60f99d4eda562b0cf81020fc | refs/heads/master | 2020-12-01T02:03:07.433484 | 2019-12-30T18:55:22 | 2019-12-30T18:55:22 | 230,537,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,953 | py | # First attempt: reverse the program and find the combination of initial values
# that produce the output. Unfortunately, we of course end up with output like this:
# (+ (+ -2 (+ 4 (+ 2 (+ 1 (+ (+ 3 (* (+ 5 (+ (+ 2 (* (+ (+ 3 (* 5 (+ 2 (* (+ 2
# (+ (* 2 (* (* (+ (* (+ 1 (* 4 (+ (+ 1 (* (+ 2 (* 4 -1)) 2)) 3))) 4) 2) 2) 5
# )) 1)) 3)))) 5) 4)) 5)) 4)) 4))))) 4)
#
# Which is a linear equation with two unknowns (-1 and -2 in this case) which
# of course can't be solved.
#
# import fileinput
# codes = [int(c) for c in fileinput.input()[0].split(',')]
# codes[1] = -1
# codes[2] = -2
# stop_pos = 0
# for pos in range(len(codes) - 1, 0, -1):
# if codes[pos] == 99:
# stop_pos = pos
# break
# assert stop_pos, 'stop_pos not found'
# def value_of(idx, stop_pos):
# for pos in range(stop_pos, 0, -4):
# opcode, p1, p2, out = codes[pos-4:pos]
# if out != idx:
# continue
# if opcode == 1:
# return '(+ %s %s)' % (value_of(p1, pos-4), value_of(p2, pos-4))
# elif opcode == 2:
# return '(* %s %s)' % (value_of(p1, pos-4), value_of(p2, pos-4))
# else:
# return str(codes[idx])
# print(value_of(0, stop_pos))
# Second attempt, brute force guessing:
import fileinput
def process(codes):
    """Run an Advent-of-Code day-2 intcode program in place.

    Opcode 1 adds, opcode 2 multiplies (operands and destination are
    positional references); opcode 99 halts. `codes` is mutated.
    """
    ip = 0
    while ip < len(codes):
        opcode = codes[ip]
        if opcode == 99:
            break
        lhs = codes[codes[ip + 1]]
        rhs = codes[codes[ip + 2]]
        dest = codes[ip + 3]
        if opcode == 1:
            codes[dest] = lhs + rhs
        elif opcode == 2:
            codes[dest] = lhs * rhs
        ip += 4
# Read the comma-separated intcode program from the first line of input.
# next(iter(...)) instead of the original [0]: fileinput.FileInput is not
# indexable under Python 3, while iteration works on both Python 2 and 3.
codes = [int(c) for c in next(iter(fileinput.input())).split(',')]
def attempt(tries):
    """Brute-force search for the noun/verb pair in [0, tries) such that
    running the program with codes[1]=noun, codes[2]=verb leaves 19690720
    at position 0.

    :return: (noun, verb) on success
    :raises AssertionError: if no pair in range produces the target value
    """
    for noun in range(tries):
        for verb in range(tries):
            # Work on a copy so each trial starts from the pristine program.
            new_codes = codes[:]
            new_codes[1] = noun
            new_codes[2] = verb
            process(new_codes)
            if new_codes[0] == 19690720:
                return (noun, verb)
    # Explicit raise (not `assert False`) so the failure still fires when
    # Python runs with optimizations (-O) that strip assert statements.
    raise AssertionError('Could not find answer')
# Puzzle answer encoding per the problem statement: 100 * noun + verb.
noun, verb = attempt(100)
print(noun * 100 + verb)
| [
"audiodude@gmail.com"
] | audiodude@gmail.com |
1f9f53be7d85b393f7c0638c796d8ddc9f14b72f | 77090c3eaf15342505edc228ea19769ab219e0f7 | /CNVbenchmarkeR/output/manta3-datasetall/results17316/runWorkflow.py | 8ecfbc9ac2eb16a453983e3a063bca3a9ffd2a6b | [
"MIT"
] | permissive | robinwijngaard/TFM_code | 046c983a8eee7630de50753cff1b15ca3f7b1bd5 | d18b3e0b100cfb5bdd9c47c91b01718cc9e96232 | refs/heads/main | 2023-06-20T02:55:52.071899 | 2021-07-13T13:18:09 | 2021-07-13T13:18:09 | 345,280,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,090 | py | #!/usr/bin/env python2
# Workflow run script auto-generated by command: '/home/robin/Documents/Project/manta/Install/bin/configManta.py --bam=/home/robin/Documents/Project/Samples/bam/all/17316.bam --referenceFasta=/home/robin/Documents/Project/Samples/hg38/hg38.fa --config=/home/robin/Documents/Project/TFM_code/CNVbenchmarkeR/output/manta2-datasetall/configManta.py.ini --exome --runDir=/home/robin/Documents/Project/TFM_code/CNVbenchmarkeR/output/manta2-datasetall/results17316'
#
import os, sys
if sys.version_info >= (3,0):
import platform
raise Exception("Manta does not currently support python3 (version %s detected)" % (platform.python_version()))
if sys.version_info < (2,6):
import platform
raise Exception("Manta requires python2 version 2.6+ (version %s detected)" % (platform.python_version()))
scriptDir=os.path.abspath(os.path.dirname(__file__))
sys.path.append(r'/home/robin/Documents/Project/manta/Install/lib/python')
from mantaWorkflow import MantaWorkflow
def get_run_options(workflowClassName) :
    """Parse and validate the command line for this auto-generated Manta
    run script: execution mode, job count, memory budget, notification
    addresses and debug/portability flags. Missing jobs/memory values are
    estimated from the local node's hardware."""
    from optparse import OptionGroup, SUPPRESS_HELP

    from configBuildTimeInfo import workflowVersion
    from configureUtil import EpilogOptionParser
    from estimateHardware import EstException, getNodeHyperthreadCoreCount, getNodeMemMb

    epilog="""Note this script can be re-run to continue the workflow run in case of interruption.
Also note that dryRun option has limited utility when task definition depends on upstream task
results -- in this case the dry run will not cover the full 'live' run task set."""

    parser = EpilogOptionParser(description="Version: %s" % (workflowVersion), epilog=epilog, version=workflowVersion)

    parser.add_option("-m", "--mode", type="string",dest="mode",
                      help=SUPPRESS_HELP)
    parser.add_option("-j", "--jobs", type="string",dest="jobs",
                      help="number of jobs, must be an integer or 'unlimited' (default: Estimate total cores on this node)")
    parser.add_option("-g","--memGb", type="string",dest="memGb",
                      help="gigabytes of memory available to run workflow, must be an integer (default: Estimate the total memory for this node)")
    parser.add_option("-d","--dryRun", dest="isDryRun",action="store_true",default=False,
                      help="dryRun workflow code without actually running command-tasks")
    parser.add_option("--quiet", dest="isQuiet",action="store_true",default=False,
                      help="Don't write any log output to stderr (but still write to workspace/pyflow.data/logs/pyflow_log.txt)")

    def isLocalSmtp() :
        # Probe for a local SMTP server; email options are only offered
        # when one is reachable.
        import smtplib
        try :
            smtplib.SMTP('localhost')
        except :
            return False
        return True

    isEmail = isLocalSmtp()
    emailHelp = SUPPRESS_HELP
    if isEmail :
        emailHelp="send email notification of job completion status to this address (may be provided multiple times for more than one email address)"

    parser.add_option("-e","--mailTo", type="string",dest="mailTo",action="append",help=emailHelp)

    debug_group = OptionGroup(parser,"development debug options")
    debug_group.add_option("--rescore", dest="isRescore",action="store_true",default=False,
                           help="Reset task list to re-run hypothesis generation and scoring without resetting graph generation.")
    parser.add_option_group(debug_group)

    ext_group = OptionGroup(parser,"extended portability options (should not be needed by most users)")
    ext_group.add_option("--maxTaskRuntime", type="string", metavar="hh:mm:ss",
                         help="Specify max runtime per task (no default)")
    parser.add_option_group(ext_group)

    (options,args) = parser.parse_args()

    if not isEmail : options.mailTo = None

    if len(args) :
        parser.print_help()
        sys.exit(2)

    if options.mode is None :
        options.mode = "local"
    elif options.mode not in ["local"] :
        parser.error("Invalid mode. Available modes are: local")

    if options.jobs is None :
        try :
            options.jobs = getNodeHyperthreadCoreCount()
        except EstException:
            parser.error("Failed to estimate cores on this node. Please provide job count argument (-j).")
    if options.jobs != "unlimited" :
        options.jobs=int(options.jobs)
        if options.jobs <= 0 :
            parser.error("Jobs must be 'unlimited' or an integer greater than 1")

    # note that the user sees gigs, but we set megs
    if options.memGb is None :
        try :
            options.memMb = getNodeMemMb()
        except EstException:
            parser.error("Failed to estimate available memory on this node. Please provide available gigabyte argument (-g).")
    elif options.memGb != "unlimited" :
        options.memGb=int(options.memGb)
        if options.memGb <= 0 :
            parser.error("memGb must be 'unlimited' or an integer greater than 1")
        options.memMb = 1024*options.memGb
    else :
        options.memMb = options.memGb

    options.resetTasks=[]
    if options.isRescore :
        options.resetTasks.append("makeHyGenDir")

    return options
def main(pickleConfigFile, primaryConfigSection, workflowClassName) :
    """Load the pickled workflow configuration, run the workflow, and record
    completion status in marker files under the run directory.

    The process exits with the workflow's return code; the same code is
    always written to workflow.exitcode.txt (via the finally block), which
    automated monitors use to detect completion."""
    from configureUtil import getConfigWithPrimaryOptions

    runOptions=get_run_options(workflowClassName)
    flowOptions,configSections=getConfigWithPrimaryOptions(pickleConfigFile,primaryConfigSection)

    # new logs and marker files to assist automated workflow monitoring:
    warningpath=os.path.join(flowOptions.runDir,"workflow.warning.log.txt")
    errorpath=os.path.join(flowOptions.runDir,"workflow.error.log.txt")
    exitpath=os.path.join(flowOptions.runDir,"workflow.exitcode.txt")

    # the exit path should only exist once the workflow completes:
    if os.path.exists(exitpath) :
        if not os.path.isfile(exitpath) :
            raise Exception("Unexpected filesystem item: '%s'" % (exitpath))
        os.unlink(exitpath)

    wflow = workflowClassName(flowOptions)

    retval=1
    try:
        retval=wflow.run(mode=runOptions.mode,
                         nCores=runOptions.jobs,
                         memMb=runOptions.memMb,
                         dataDirRoot=flowOptions.workDir,
                         mailTo=runOptions.mailTo,
                         isContinue="Auto",
                         isForceContinue=True,
                         isDryRun=runOptions.isDryRun,
                         isQuiet=runOptions.isQuiet,
                         resetTasks=runOptions.resetTasks,
                         successMsg=wflow.getSuccessMessage(),
                         retryWindow=0,
                         retryMode='all',
                         warningLogFile=warningpath,
                         errorLogFile=errorpath)
    finally:
        exitfp=open(exitpath,"w")
        exitfp.write("%i\n" % (retval))
        exitfp.close()

    sys.exit(retval)
main(r"/home/robin/Documents/Project/TFM_code/CNVbenchmarkeR/output/manta2-datasetall/results17316/runWorkflow.py.config.pickle","manta",MantaWorkflow)
| [
"robinwijngaard@gmail.com"
] | robinwijngaard@gmail.com |
743cc0818768c373bc08f9acf81e567aacb3a69b | d528d21d32a2a7f299e8365d0a935b8718f9c07f | /cogs/utils/checks.py | 7f0962fe5b0e94c665e2849f9eb198a293c99c7d | [] | no_license | sizumita/Aegis | 53b3f3db4d88b8ffdbc0d44781f55251081a32fc | 2c9684695a32481583fd214fa63deaddea3d5ebc | refs/heads/master | 2020-09-11T00:05:48.629459 | 2020-06-23T14:04:41 | 2020-06-23T14:04:41 | 221,874,644 | 6 | 4 | null | 2019-12-10T10:58:34 | 2019-11-15T08:04:23 | Python | UTF-8 | Python | false | false | 2,758 | py | from .database import CommandPermission
from discord.ext.commands import check
import discord
async def check_command_permission(context):
    """
    Permission rules:
        DMs are always allowed.
        If no CommandPermission row exists, the command has not been enabled
        (unless the cog is marked `already_on`, in which case a row is created).
        If a row exists with empty roles, users and permissions, anyone may
        use the command.
    :param context: commands.Context
    :return: bool
    """
    # DM case
    if not context.guild:
        return True
    # Manage cog commands and the help command are always allowed.
    if context.command.name == 'help':
        return True
    elif context.cog:
        if context.cog.qualified_name == 'Manage':
            return True
    p: CommandPermission = await CommandPermission.query.where(CommandPermission.id == context.guild.id) \
        .where(CommandPermission.name == context.bot.get_command_full_name(context.command)).gino.first()
    # No row case
    if not p:
        if getattr(context.cog, 'already_on', False):
            p = await CommandPermission.create(id=context.guild.id,
                                               name=context.bot.get_command_full_name(context.command))
        else:
            return False
    if context.author.guild_permissions.administrator:
        return True
    # No restrictions configured case
    if not p.roles and not p.users:
        return True
    checks = []
    if p.roles:
        is_id_in = any(True for i in context.author.roles if str(i.id) in p.roles)
        checks.append(is_id_in)
    if p.users:
        checks.append(True if str(context.author.id) in p.users else False)
    return any(checks)
def admin_only():
    """Command check: allow only members with the administrator permission."""
    def predicate(ctx):
        perms: discord.Permissions = ctx.author.guild_permissions
        return bool(perms.administrator)

    return check(predicate)
def safety():
    """When a CommandPermission row exists but nothing is configured on it
    (no users and no roles), restrict execution to administrators only."""
    async def predicate(ctx):
        p: CommandPermission = await CommandPermission.query.where(CommandPermission.id == ctx.guild.id) \
            .where(CommandPermission.name == ctx.bot.get_command_full_name(ctx.command)).gino.first()
        if not p:
            return False
        if not p.users and not p.roles:
            # Unconfigured row: fall back to admin-only.
            permissions: discord.Permissions = ctx.author.guild_permissions
            if not permissions.administrator:
                return False
        return True
    return check(predicate)
def prefix_in(prefixes):
    """Command check: allow the command only when it was invoked with one
    of the given prefixes."""
    async def predicate(ctx):
        return ctx.prefix in prefixes

    return check(predicate)
| [
"sumito@izumita.com"
] | sumito@izumita.com |
f837b00ff86d2477efe671f6b6412d0ad0150621 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/0/d1.py | 02a7c272c8bcf2a81b93b768bcfc238aa6155369 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | import sys
def printFunction(lineRemaining):
    # Print a D1 string literal. The token list must start and end with a
    # bare double-quote token; everything between them is the text.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            #data to print: strip the surrounding quote tokens and re-join
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            # Empty literal (just the two quote tokens): print a blank line.
            print
def main(fileName):
    # Interpret each line of the source file. Only 'D1' print statements
    # are recognized; any other first token prints ERROR and stops.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'D1':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
8c8a9ad367a45afbe6c7fd1fd503d6ef9dc05db6 | 901562de637c44a0f0c43aeebe56f5ebd949571e | /analysis/repo-statistics/project_counters_jarsize_tab.py | 79e19b8a948f5b16eeae9fe84647de8d5cf8bf77 | [] | no_license | istlab/evol_security_publication_2012 | f41608dd7d054aa890fa6d011dd3a44407b3a598 | daa64b60641a82fb5ed9a5b4e8f08c75f700138c | refs/heads/master | 2020-12-11T09:31:03.511279 | 2013-11-10T23:02:44 | 2013-11-10T23:02:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,744 | py | # Extracts the bug counters data from the JSON representation
# and outputs them in CSV format so that it can be read by R.
import ijson
import json
import sys
import csv
# Bug categories exported as CSV columns, in output order
# (presumably FindBugs category names -- verify against the producer).
bug_types = [
    'SECURITY_HIGH',
    'SECURITY_LOW',
    'STYLE',
    'CORRECTNESS',
    'BAD_PRACTICE',
    'MT_CORRECTNESS',
    'I18N',
    'PERFORMANCE',
    'EXPERIMENTAL',
]

with open("data/project_counters_jarsize.csv", "w") as csv_output:
    csvwriter = csv.writer(csv_output)
    project_counts = {}  # NOTE(review): unused -- candidate for removal
    project_key = ""  # NOTE(review): unused -- candidate for removal
    # Header row: project id, version order, jar size, then one column
    # per bug category.
    row = ['project', 'version', 'jarsize']
    for bug_type in bug_types:
        row.append(bug_type)
    csvwriter.writerow(row)
    # NOTE(review): iteritems() is Python-2-only; this script will not run
    # under Python 3 as written.
    with open("data/project_counters.json", "r") as json_file:
        json_input = json.load(json_file)
        for project, data in json_input.iteritems():
            for version in data['versions']:
                meta_data = version['JarMetadata']
                row = [project, meta_data['version_order'],
                       meta_data['jar_size']]
                counters = version['Counters']
                counters = version['Counters']  # NOTE(review): duplicate of the line above
                # Fold MALICIOUS_CODE counts into SECURITY_LOW.
                if 'MALICIOUS_CODE' in counters:
                    malicious_code = counters.pop('MALICIOUS_CODE')
                    if 'SECURITY_LOW' in counters:
                        counters['SECURITY_LOW'] += malicious_code
                    else:
                        counters['SECURITY_LOW'] = malicious_code
                security_low = 0  # NOTE(review): unused
                # Missing categories are emitted as 'NA' (R's missing value).
                for bug_type in bug_types:
                    if bug_type in counters:
                        row.append(counters[bug_type])
                    else:
                        row.append('NA')
                csvwriter.writerow(row)
| [
"louridas@aueb.gr"
] | louridas@aueb.gr |
880bc21c593bf7e8364d6d0ac67964c6954c756b | 754a22e0156af095d224075cfa2a1a460014756f | /mysite/settings.py | 70dd40e6c9fc7427dc6d17bc4823dd416a4d8cf6 | [] | no_license | LucieGal/my-own-blog | 3a7fa4a7bcaf80db489777325bb36df2cee5bfd8 | f0f07f24e118e6e9e8c97474526c8ea5eb843f60 | refs/heads/master | 2023-06-15T00:52:30.462831 | 2021-07-10T14:03:59 | 2021-07-10T14:03:59 | 379,363,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,205 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.24.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%ufdj@-rtsl93rv1o8xvev=+b(cvm$5tbaayzt7$*(2dsr-#ds'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog.apps.BlogConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'fr-fr'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"galea.lucie@gmail.com"
] | galea.lucie@gmail.com |
8a20be67dc8ff938517418a5e77c313ae3bb79ea | 318289d6d338b5aa7d87df18095839fa679ec588 | /ksusta/apps.py | 75b5219e7f210a93577efe0f443c56064e3e132d | [
"MIT"
] | permissive | ayubaezekiel/eExams | b491f36649d24ad6d87bc30017d09117c52ad926 | 5c9920cf55b9cb258c7c732bffceb10e5e9188b5 | refs/heads/main | 2023-09-05T14:05:11.134918 | 2021-11-08T11:41:17 | 2021-11-08T11:41:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | from django.apps import AppConfig
class KsustaConfig(AppConfig):
    """Django application configuration for the 'ksusta' app."""
    # Use 64-bit auto-incrementing primary keys for models in this app.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'ksusta'
| [
"ezekielayoba@yahoo.com"
] | ezekielayoba@yahoo.com |
94ec5975940892096bc5b805de5af3e9c66312a3 | 6b8960551ee4be37c46f6c5f28257845fcb871ed | /task1.py | 2105ae960977b9acf3bde10337df6d46c5ad633f | [] | no_license | htrueman/db2_limited_test | 10e9e574fe52b2346c33f4485f8b1dec00c30ac8 | 489379a952ad5c1ecb5123e9e3d41ec28206dc01 | refs/heads/master | 2022-12-09T06:32:27.709446 | 2017-06-12T01:40:08 | 2017-06-12T01:40:08 | 93,772,542 | 0 | 0 | null | 2022-11-22T01:46:27 | 2017-06-08T16:56:17 | Python | UTF-8 | Python | false | false | 649 | py | test_num1 = 1
test_num2 = 10
test_num3 = 2
def handle_numbers(number1, number2, number3):
    """Return a report of how many numbers in [number1, number2] are
    divisible by number3, and which ones they are."""
    divisible = [str(value) for value in range(number1, number2 + 1)
                 if value % number3 == 0]
    if not divisible:
        return "Result:\nThere are no divisible numbers by {} in given range".\
            format(number3)
    return "Result:\n{}, because {} are divisible by {}".\
        format(len(divisible), ', '.join(divisible), number3)
print (handle_numbers(test_num1, test_num2, test_num3))
| [
"vege1wgw@gmail.com"
] | vege1wgw@gmail.com |
9dab3cde69c85a1c6fe735d132871bf0bf28607e | c9f0975de0e1bfe9043ef04d43df00e6c7fcbe56 | /src/bandits/__init__.py | b318a2379fd7830e1500ce648db25ce5d8531d14 | [] | no_license | kbantoec/bandits | dbaf89d4c29a14a97e2d39539410874d3ff582bc | 4585d0c1b60c19cb2511cabfb1ce211499e72ebe | refs/heads/master | 2023-01-19T04:41:32.855694 | 2020-11-10T18:11:25 | 2020-11-10T18:11:25 | 306,442,333 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | from .agent import Bandit
from .one_armed_bandit import OneArmedBandit, Experiment | [
"y.banto@outlook.com"
] | y.banto@outlook.com |
15203c190344a87445df55a4a7f5f3c15e73d4b5 | 03da0660db7c833e476dd4cba44ba2c300d3fb58 | /.PyCharmCE2017.3/system/python_stubs/-762174762/_hashlib.py | eb91b77cf05268f82a8144bddd9ef684816d9226 | [] | no_license | Shreyaskulkarni98/MQTTBroker-Beta | c9717d3e9d559f9f7d6eb46738dac68f8c4f346c | 22a15cac4755adf522d76e15129c0ef4b2da797d | refs/heads/master | 2022-11-01T20:58:40.690542 | 2019-01-20T08:51:26 | 2019-01-20T08:51:26 | 161,118,749 | 0 | 1 | null | 2022-10-22T14:32:03 | 2018-12-10T04:53:34 | Python | UTF-8 | Python | false | false | 1,710 | py | # encoding: utf-8
# module _hashlib
# from (pre-generated)
# by generator 1.145
# no doc
# no imports
# functions
def new(*args, **kwargs): # real signature unknown
"""
Return a new hash object using the named algorithm.
An optional string argument may be provided and will be
automatically hashed.
The MD5 and SHA1 algorithms are always supported.
"""
pass
def openssl_md5(*args, **kwargs): # real signature unknown
""" Returns a md5 hash object; optionally initialized with a string """
pass
def openssl_sha1(*args, **kwargs): # real signature unknown
""" Returns a sha1 hash object; optionally initialized with a string """
pass
def openssl_sha224(*args, **kwargs): # real signature unknown
""" Returns a sha224 hash object; optionally initialized with a string """
pass
def openssl_sha256(*args, **kwargs): # real signature unknown
""" Returns a sha256 hash object; optionally initialized with a string """
pass
def openssl_sha384(*args, **kwargs): # real signature unknown
""" Returns a sha384 hash object; optionally initialized with a string """
pass
def openssl_sha512(*args, **kwargs): # real signature unknown
""" Returns a sha512 hash object; optionally initialized with a string """
pass
def pbkdf2_hmac(hash_name, password, salt, iterations, dklen=None): # real signature unknown; restored from __doc__
"""
pbkdf2_hmac(hash_name, password, salt, iterations, dklen=None) -> key
Password based key derivation function 2 (PKCS #5 v2.0) with HMAC as
pseudorandom function.
"""
pass
# no classes
# variables with complex values
openssl_md_meth_names = None # (!) real value is ''
| [
"Shreyasrameshkulkarni@gmail.com"
] | Shreyasrameshkulkarni@gmail.com |
fe01b307a0814fd473a553ad5bfd3a7ad7f22547 | 245a3f8cea6f232bf3142706c11188b51eb21774 | /python/hetu/onnx/onnx_opset/Where.py | 6da3b659d9f9858f398695ae791903a6f8c2c8b5 | [
"Apache-2.0"
] | permissive | initzhang/Hetu | 5bfcb07e62962fbc83def14148f8367fab02625a | 447111a358e4dc6df5db9c216bdb3590fff05f84 | refs/heads/main | 2023-06-20T18:37:21.760083 | 2021-07-27T04:37:48 | 2021-07-27T04:37:48 | 389,848,768 | 0 | 0 | Apache-2.0 | 2021-07-27T04:32:57 | 2021-07-27T04:32:57 | null | UTF-8 | Python | false | false | 610 | py | from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from onnx import onnx_pb
from hetu.onnx import constants, util, graph
from hetu.onnx.handler import hetu_op
from hetu.onnx.onnx_opset import general
@hetu_op(["WhereOp"], onnx_op=["Where"])
class Where():
    """Conversion handlers mapping Hetu's WhereOp to the ONNX Where operator.

    The version_N classmethods are opset-specific handlers; presumably the
    exporter dispatches to one based on the target opset (see
    hetu.onnx.handler) — TODO confirm.
    """
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # Per the message below, ONNX has no Where operator before opset 9,
        # so exporting at opset 1 always fails.
        assert False, "This version of the operator has been available since version 9 of the default ONNX operator set"
        pass
    @classmethod
    def version_9(cls, ctx, node, **kwargs):
        # No graph rewriting is performed here; apparently a direct 1:1
        # mapping onto ONNX Where suffices at opset >= 9 — confirm.
        pass
| [
"swordonline@foxmail.com"
] | swordonline@foxmail.com |
07ebc735e3d7c3b9475a58511601718c8e2eb1b3 | 3cd0623135f5005eeec232f508f7f3f523e156f3 | /fiddles/_08/test_fiddle.py | 5cfdac2ab8ec333514e8bcb737073bb706cd94ec | [] | no_license | tompascall/adventofcode2019 | 4f287ec7cbe0f97caed68fb88ed54c552766f56c | 8b2eebf902e833029aecd508343b3d8b3a086460 | refs/heads/master | 2020-09-28T08:25:37.828312 | 2020-03-22T09:19:15 | 2020-03-22T09:19:15 | 226,733,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,487 | py | import unittest
from fiddles._08.fiddle import (
get_layers,
count_digit_in_layer,
merge_layers,
)
class TestFiddle(unittest.TestCase):
    """Unit tests for the _08 fiddle's image-layer helpers."""
    def test_get_layers(self):
        # 12 digits with width=3, height=2 must split into two 3x2 layers.
        digits = '123456789012'
        layers = get_layers(digits, width=3, height=2)
        self.assertEqual(
            layers,
            [
                [
                    ['1','2','3'],
                    ['4','5','6']
                ],
                [
                    ['7','8','9'],
                    ['0','1','2']
                ]
            ]
        )
    def test_count_digit_in_layer(self):
        digits = '121456789012'
        layers = get_layers(digits, width=3, height=2)
        # The first layer of '121456789012' contains the digit '1' twice.
        self.assertEqual(
            count_digit_in_layer(layers[0], '1'),
            2
        )
        # NOTE(review): the assertions below exercise merge_layers, not
        # count_digit_in_layer — consider moving them to a dedicated test.
        # '2' appears to act as the transparent value: each merged pixel
        # equals the first non-'2' value across layers (consistent with
        # the expected output below).
        layers = [
            [
                ['0','2'],
                ['2','2'],
                ['2','2']
            ],
            [
                ['1','1'],
                ['2','2'],
                ['2','2']
            ],
            [
                ['2','2'],
                ['1','2'],
                ['1','0']
            ],
            [
                ['0','0'],
                ['0','0'],
                ['2','2']
            ],
        ]
        self.assertEqual(
            merge_layers(layers, 3),
            [
                ['0','1'],
                ['1','0'],
                ['1','0']
            ]
        )
if __name__ == '__main__':
unittest.main()
| [
"tamas.gulacsy.toth@lensa.com"
] | tamas.gulacsy.toth@lensa.com |
daf9960e7287238bf5dffe7767564296ba29dcd1 | f46fee7ac51bc459f6309d5dafed1a277cc52772 | /demo16.py | 2990ce342a9f5eda4c54f8c5d24ecbf8d9ebe394 | [] | no_license | wu840407/PYKT-Python-for-Keras-and-TensorFlow | d7e0637640b669246bc7f94e32ee32af1a23d375 | 90c5305f7167def16202626c99242978a829d2e0 | refs/heads/master | 2023-05-01T05:47:47.772704 | 2021-05-14T09:02:17 | 2021-05-14T09:02:17 | 366,560,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | import matplotlib.pyplot as plt
from sklearn import datasets
# Load the iris dataset and scatter-plot every pairwise combination of
# its four features, colouring points by species.
iris = datasets.load_iris()
print(dir(iris))
labels = iris.feature_names
print(labels)
X = iris.data
species = iris.target
counter = 1  # running figure number: one figure per feature pair
for i in range(0, 4):
    for j in range(i + 1, 4):
        plt.figure(counter, figsize=(12, 9))
        counter += 1
        xData = X[:, i]
        yData = X[:, j]
        # Pad the axis limits by 0.5 so points do not sit on the frame.
        x_min, x_max = xData.min() - 0.5, xData.max() + 0.5
        y_min, y_max = yData.min() - 0.5, yData.max() + 0.5
        plt.scatter(xData, yData, c=species, cmap=plt.cm.Paired)
        plt.xlabel(labels[i])
        plt.ylabel(labels[j])
        plt.xlim(x_min, x_max)
        plt.ylim(y_min, y_max)
        # Hide the tick marks; only the axis labels matter here.
        plt.xticks([])
        plt.yticks([])
plt.show() | [
"wu@uuu.com.tw"
] | wu@uuu.com.tw |
a92e09e86e4b6a1367be78bdfb2c56bb263cd6dc | ed702ade0903ae74709da52fca3b3279c45877c7 | /Processors/translation.py | 113548142dae0d1829316206661dc96f7a83fbc6 | [] | no_license | aaskorohodov/pythonProject3 | 1611f0846a4f9af3ee12a5b7b62b56beb5d22791 | 5257c41770a61d0351b65bfe25bca0a2c52ac3c6 | refs/heads/master | 2023-07-30T22:51:45.839766 | 2021-08-27T20:52:49 | 2021-08-27T20:55:01 | 393,996,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,023 | py | import requests
def _translate(word, src_lang, dst_lang):
    """Translate *word* via the Lingvo Live API.

    Authenticates first, then queries the Minicard endpoint.  Returns the
    translation text, a "translation not found" message when the response
    lacks one, or a "service unavailable" message when authentication fails.
    """
    URL_AUTH = 'https://developers.lingvolive.com/api/v1.1/authenticate'
    URL_TRANSLATE = 'https://developers.lingvolive.com/api/v1/Minicard'
    KEY = 'ODRlNGU0NDctOWU2ZS00ZTcxLTk5ZWYtNjI2ZTA0MzYwOGU3OmQwN2E5YjVhMDg2MjQwYTI5ZDU3NjA1Y2NiODI3ZjRj'
    headers_auth = {'Authorization': 'Basic ' + KEY}
    auth = requests.post(URL_AUTH, headers=headers_auth)
    if auth.status_code != 200:
        return 'Сервис перевода сейчас недоступен. Попробуйте позднее'
    # A successful auth response body is the bearer token itself.
    headers_translate = {'Authorization': 'Bearer ' + auth.text}
    params = {'text': word,
              'srcLang': src_lang,
              'dstLang': dst_lang}
    r = requests.get(URL_TRANSLATE, headers=headers_translate, params=params)
    r = r.json()
    try:
        return r['Translation']['Translation']
    except (KeyError, TypeError):
        # Narrowed from a bare `except:`: only missing/odd response shapes
        # should map to "not found".
        return 'Перевод не найден'


def ru_eng(message):
    """Translate message.text from Russian (lang id 1049) to English (1033)."""
    return _translate(message.text, 1049, 1033)


def eng_ru(message):
    """Translate message.text from English (lang id 1033) to Russian (1049)."""
    return _translate(message.text, 1033, 1049)
"aaskorohodov@gmail.com"
] | aaskorohodov@gmail.com |
2ca77983524514c47a936a1f296297e5ba1c4456 | 7b1b4ed8bd4c887362b367625a833c28aa919dd8 | /wpaudit/providers/aliyun/resources/ram/policies.py | 09ac9427cfcba323da87129ef7e60ece906a9935 | [] | no_license | wperic/wpaudit | 6bbd557c803ce9bceb764c1451daeb5e440a3d9c | ed69c1eabcf85e80ed8fe5397d2d369fd3ff35d8 | refs/heads/main | 2023-07-16T21:36:57.528548 | 2021-09-03T10:35:43 | 2021-09-03T10:35:43 | 402,716,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,794 | py | from wpaudit.providers.aliyun.resources.base import AliyunResources
from wpaudit.providers.aliyun.facade.base import AliyunFacade
import json
class Policies(AliyunResources):
    """Collection of Aliyun RAM policies, keyed by policy name.

    Only policies attached to at least one user, group, or role are kept.
    """
    def __init__(self, facade: AliyunFacade):
        super().__init__(facade)
    async def fetch_all(self):
        # Fetch every policy and keep only those _parse_policy accepts.
        # NOTE(review): the local name 'id' shadows the builtin.
        for raw_policy in await self.facade.ram.get_policies():
            id, policy = await self._parse_policy(raw_policy)
            if id:
                self[id] = policy
    async def _parse_policy(self, raw_policy):
        """
        Build the normalized policy dict for one raw RAM policy.

        Only policies with AttachmentCount > 0 are processed; anything else
        yields (None, None).

        :param raw_policy: raw policy description as returned by the facade
        :return: (policy name, policy dict) or (None, None) when skipped
        """
        if raw_policy.get('AttachmentCount') > 0:
            policy_dict = {}
            # The policy name doubles as the collection key ('id').
            policy_dict['id'] = policy_dict['name'] = raw_policy.get('PolicyName')
            policy_dict['description'] = raw_policy.get('Description')
            policy_dict['create_date'] = raw_policy.get('CreateDate')
            policy_dict['update_date'] = raw_policy.get('UpdateDate')
            policy_dict['attachment_count'] = raw_policy.get('AttachmentCount')
            policy_dict['type'] = raw_policy.get('PolicyType')
            policy_dict['default_version'] = raw_policy.get('DefaultVersion')
            policy_version = await self.facade.ram.get_policy_version(policy_dict['name'],
                                                                      policy_dict['type'],
                                                                      policy_dict['default_version'])
            # The policy document arrives as a JSON string; decode it in place.
            policy_version['PolicyDocument'] = json.loads(policy_version['PolicyDocument'])
            # policy_dict['policy_document'] = policy_version['PolicyDocument']
            # NOTE(review): the whole version object (not just the decoded
            # document) is stored, per the line below — confirm intended.
            policy_dict['policy_document'] = policy_version
            policy_entities = await self.facade.ram.get_policy_entities(policy_dict['name'],
                                                                        policy_dict['type'])
            # Record which users/groups/roles the policy is attached to;
            # each key is only created when the corresponding list is non-empty.
            policy_dict['entities'] = {}
            if policy_entities['Users']['User']:
                policy_dict['entities']['users'] = []
                for user in policy_entities['Users']['User']:
                    policy_dict['entities']['users'].append(user['UserName'])
            if policy_entities['Groups']['Group']:
                policy_dict['entities']['groups'] = []
                for group in policy_entities['Groups']['Group']:
                    policy_dict['entities']['groups'].append(group['GroupName'])
            if policy_entities['Roles']['Role']:
                policy_dict['entities']['roles'] = []
                for role in policy_entities['Roles']['Role']:
                    policy_dict['entities']['roles'].append(role['RoleName'])
            return policy_dict['id'], policy_dict
        else:
            return None, None
| [
"90035639+wperic@users.noreply.github.com"
] | 90035639+wperic@users.noreply.github.com |
9f350befb965c94227bb57cfedbbedd959044200 | 6567a6d0b648300cc5c3fa264a925602f61ab8c4 | /guvi5.py | e5e6aefb6421834b1423ab36aa25c4ca25a15943 | [] | no_license | AnanthiD/codekata | 6ee948ca2aea9a052a1b4604e4fc28fb91b18cda | 533e2d0b9b3ca14c37eac936a927d9933eb35374 | refs/heads/master | 2020-05-23T01:05:28.676110 | 2019-07-20T09:45:59 | 2019-07-20T09:45:59 | 186,580,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68 | py | a=input()
if(a.isalpha()):
print("Alphabet")
else:
print("no") | [
"noreply@github.com"
] | AnanthiD.noreply@github.com |
4d19c858d848ed41fa60d015b55f3e9820b42186 | 926ec284a08a72964fb81a68e342d09f33fa6d3f | /omaha_server/omaha/parser.py | f2a84365bee435000ca942451c8a13819cc0f089 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | yukels/omaha-server | a2395431978e17174fe76513b6cada99cf5317eb | 48406bef1e77e393031e410454a6ee10e8c47925 | refs/heads/master | 2021-01-17T23:53:03.781282 | 2016-07-25T14:00:55 | 2016-07-25T14:00:55 | 55,139,490 | 0 | 0 | null | 2016-03-31T09:53:58 | 2016-03-31T09:53:58 | null | UTF-8 | Python | false | false | 3,031 | py | # coding: utf8
"""
This software is licensed under the Apache 2 license, quoted below.
Copyright 2014 Crystalnix Limited
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
"""
import os
from lxml import etree, objectify
__all__ = ['parser', 'parse_request']
BASE_DIR = os.path.dirname(__file__)
with open(os.path.join(BASE_DIR, 'request.xsd')) as f:
schema = etree.XMLSchema(file=f)
parser = objectify.makeparser(schema=schema)
def parse_request(request):
"""
>>> request = b'''<?xml version="1.0" encoding="UTF-8"?>
... <request protocol="3.0"
... version="1.3.23.0"
... ismachine="0"
... sessionid="{5FAD27D4-6BFA-4daa-A1B3-5A1F821FEE0F}"
... userid="{D0BBD725-742D-44ae-8D46-0231E881D58E}"
... installsource="scheduler"
... testsource="ossdev"
... requestid="{C8F6EDF3-B623-4ee6-B2DA-1D08A0B4C665}">
... <os platform="win" version="6.1" sp="" arch="x64"/>
... <app appid="{430FD4D0-B729-4F61-AA34-91526481799D}" version="1.2.23.0" nextversion="" lang="en" brand="GGLS"
... client="someclientid" installage="39">
... <updatecheck/>
... <ping r="1"/>
... </app>
... <app appid="{D0AB2EBC-931B-4013-9FEB-C9C4C2225C8C}" version="2.2.2.0" nextversion="" lang="en" brand="GGLS"
... client="" installage="6">
... <updatecheck/>
... <ping r="1"/>
... </app>
... </request>'''
>>> request_obj = parse_request(request)
>>> request_obj.get('version')
'1.3.23.0'
>>> request_obj.os.get('platform')
'win'
>>> request_obj.app.get('appid')
'{430FD4D0-B729-4F61-AA34-91526481799D}'
>>> request_obj.app.find('updatecheck')
''
>>> request_obj.keys()
['protocol', 'version', 'ismachine', 'sessionid', 'userid', 'installsource', 'testsource', 'requestid']
>>> request_obj.values()
['3.0', '1.3.23.0', '0', '{5FAD27D4-6BFA-4daa-A1B3-5A1F821FEE0F}', '{D0BBD725-742D-44ae-8D46-0231E881D58E}', 'scheduler', 'ossdev', '{C8F6EDF3-B623-4ee6-B2DA-1D08A0B4C665}']
>>> request_obj.tag
'request'
>>> for app in request_obj.find('app'):
... app.get('appid')
...
'{430FD4D0-B729-4F61-AA34-91526481799D}'
'{D0AB2EBC-931B-4013-9FEB-C9C4C2225C8C}'
"""
return objectify.fromstring(request, parser)
| [
"yurtaev.egor@gmail.com"
] | yurtaev.egor@gmail.com |
0c124ba03429697b2bab6c819eb50dc44460c5dc | 84846fa2c35819f386a7b6f55f32227ff9d34838 | /work/random_likes.py | bd17d65524fbfdc4cba7a3c7253f6c373a27d032 | [
"Apache-2.0"
] | permissive | deniskolokol/universalrec | 43efcfc0c3b9d44e5e4a16bd27b9710228fa1085 | 55169f718777bb557d90c709c9a505a0a36f5478 | refs/heads/master | 2023-01-12T12:32:44.590950 | 2016-03-07T10:19:36 | 2016-03-07T10:19:36 | 51,755,830 | 0 | 0 | null | 2022-12-26T20:01:17 | 2016-02-15T13:03:23 | Scala | UTF-8 | Python | false | false | 791 | py | # run this first in flickthru virtualenv!
import os
import random
from core.models import Like
from django.db.models import Count
# Export like/dislike events to CSV.
# NOTE: Python 2 script — it uses the print statement below and relies on
# range() returning a list (img_index.pop further down).
filename = 'likes.csv'
fobj = open(filename, 'w+')
fobj.write('entity_id,event,target_entity_id,event_time\n')
img_index = range(499) # indexes of images to pick from
registered = {}  # maps Like.image id (as str) -> randomly assigned image index
for rec in Like.objects.order_by('image', 'user', 'liked', 'created_at'):
    try:
        image = registered[str(rec.image.id)]
    except KeyError:
        # First time this image is seen: draw a random, not-yet-used index.
        image = img_index.pop(random.randrange(len(img_index)))
        registered[str(rec.image.id)] = image
    event = 'like' if rec.liked else 'dislike'
    fobj.write('%s,%s,%s,%s\n' % (
        rec.user.id, event, image, rec.created_at.isoformat()
    ))
fobj.close()
print 'Done: %s' % os.path.abspath(filename)
| [
"dkolokol@gmail.com"
] | dkolokol@gmail.com |
04d46f70d2543594d36fc9d340ad9c2da9f9cd7b | 7eb8bf846dc7021751019debf91925139203bed2 | /Django_Clases/tercer_proyecto/populate_modelos_aplicacion.py | 348b1e00929e50d9b01698e636df06708a4c9001 | [] | no_license | rpparada/python-and-django-full-stack-web-developer-bootcamp | 5c384dc1c19557097c893cf6149c1831984b1946 | 7b91f16cfb49d7de71901857b4e4c8f447db5e6f | refs/heads/master | 2021-09-08T22:40:44.737431 | 2018-03-12T15:12:06 | 2018-03-12T15:12:06 | 116,153,519 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE','tercer_proyecto.settings')
import django
django.setup()
import random
from modelos_aplicacion.models import Usuarios
from faker import Faker
generaFake = Faker()
def popular(N=10):
    """Populate the Usuarios table with N fake users (Faker-generated data).

    Uses get_or_create, so re-running with colliding fake data does not
    produce duplicate rows.
    """
    for entrada in range(N):
        nombre_falso = generaFake.first_name()
        apellido_falso = generaFake.last_name()
        email_falso = generaFake.email()
        # email_falso = generaFake.email(*args, **kwargs)
        # get_or_create returns (object, created); only the object is kept.
        usuario = Usuarios.objects.get_or_create(nombre=nombre_falso,apellido=apellido_falso,email=email_falso)[0]
if __name__ == '__main__':
    print('Cargando tabla(s)... ')
    popular(20)
    # Fixed typo in the completion message: 'Rabla(s)' -> 'Tabla(s)'.
    print('Tabla(s) cargada(s)!')
| [
"rpparada@gmail.com"
] | rpparada@gmail.com |
6bbe246fd9bd6d0eb23ccd5e2f43f5280487874c | d29c2dea4afbb21de0b1e508e501ee6711805451 | /__main__.py | e084aa8b11fab88e422d61a1e430451cb2602f83 | [
"MIT"
] | permissive | cdeitrick/workflows | ef69003cbd6030bc828815b7c898128327da129a | 8edd2a08078144a2445af3903eb13b71abb96538 | refs/heads/master | 2020-03-18T07:04:20.554986 | 2019-12-18T21:16:39 | 2019-12-18T21:16:39 | 134,430,686 | 0 | 0 | MIT | 2019-07-11T03:29:48 | 2018-05-22T14:50:28 | Python | UTF-8 | Python | false | false | 333 | py | from pipelines import main
import argparse
def create_parser() -> argparse.Namespace:
    """Parse command-line arguments; requires a 'which' pipeline choice."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "which",
        help="assembly or variants.",
        type=str,
        choices=['assembly', 'variants'],
    )
    return arg_parser.parse_args()
if __name__ == "__main__":
main.main_shelly() | [
"cld100@pitt.edu"
] | cld100@pitt.edu |
f254c37980b3283334624c8365ad413025cf7c98 | 3cafdf8967c5e1c331a312684e550353cf10fffd | /Older_versions/NCBI.py | 5d0dea2e726821c3cf8f724948ef928a8cd3cbc4 | [] | no_license | TiagoS16/SARS-CoV-2-e-genes | 27a516a48dd84f4b0b5cf28a7f5ed417c92da2a2 | f301595abcc1258639d9ecf85df778d6f9cb7bee | refs/heads/master | 2023-07-19T21:41:53.431125 | 2021-08-25T15:59:29 | 2021-08-25T15:59:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | from Bio import Entrez
from Bio import SeqIO
# Search NCBI's nucleotide database for the FGB gene, then fetch records.
word = 'FGB[Gene]'
res= 1
email= 'pg42877@alunos.uminho.pt'
Entrez.email= email
handle = Entrez.esearch(db = 'nucleotide', term=word, retmax= res)
record = Entrez.read(handle)
gi_list = record['IdList']
print(gi_list)
for a in gi_list:
    # NOTE(review): the loop variable is immediately overwritten with a
    # hard-coded accession, so every iteration fetches NG_008833.1 —
    # looks like a debugging leftover; confirm the intent.
    a = 'NG_008833.1'
    handle = Entrez.efetch(db="nucleotide", id=a, rettype="gb", retmode="text")
    record = SeqIO.read(handle, "genbank")
    save_file = open('my_blast.xml', 'w')
    # NOTE(review): SeqIO.read has likely consumed the handle already, so
    # handle.read() may return '' and leave my_blast.xml empty — verify.
    # Also note save_file is never explicitly closed.
    save_file.write(handle.read())
    handle.close()
    print(record.id)
| [
"74024284+MiguelBarros99@users.noreply.github.com"
] | 74024284+MiguelBarros99@users.noreply.github.com |
6479a595ec5e5e6a86e7178104d6df7763bfa983 | 5f58a50d7c44d0cf612b9076df40da89302b5ba6 | /geeadd/batch_copy.py | ff05da1fea9836b073de8a843dc1faa2c53b45c2 | [
"Apache-2.0"
] | permissive | jkrizan/gee_asset_manager_addon | 386a2a5b96e31bdb5e40a08ad12545e11a376764 | 884793185ef5641f0b53349feb5f4c3be272fd28 | refs/heads/master | 2020-05-19T12:58:15.830923 | 2019-01-01T16:46:16 | 2019-01-01T16:46:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 716 | py | from __future__ import print_function
import ee
import os
ee.Initialize()
def copy(collection_path, final_path):
    """Copy every asset under *collection_path* to *final_path* (Earth Engine).

    Failures on individual assets are reported and skipped so one bad asset
    does not abort the whole batch.
    """
    assets_list = ee.data.getList(params={'id': collection_path})
    assets_names = [os.path.basename(asset['id']) for asset in assets_list]
    print('Copying a total of ' + str(len(assets_names)) + '.....')
    for count, items in enumerate(assets_names):
        print('Copying ' + str(count + 1) + ' of ' + str(len(assets_names)), end='\r')
        init = collection_path + '/' + items
        final = final_path + '/' + items
        try:
            ee.data.copyAsset(init, final)
        except Exception as e:
            # Bug fix: report the failure instead of silently swallowing it
            # (`pass`), while still continuing with the remaining assets.
            print('Failed to copy ' + items + ': ' + str(e))
#batchcopy(collection_path='users/samapriya/Belem/BelemRE',final_path='users/samapriya/bl')
| [
"samapriya.roy@gmail.com"
] | samapriya.roy@gmail.com |
82d31d3429aec8f65ca302385483553480dfd265 | a57fa788fc8d4ace67c42c4d4ea7083bf139493a | /test/win_test_disk_drive.py | d4d6fa43f1088c427bc714d9087f573a0a740c29 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | venanciofire/py_win_unc | 9a221b175f0f4e0ba66e2f9f632bee8aaf5ff6b6 | e46256560cb6acfdbfbe8992d454e7ea492cb6ba | refs/heads/master | 2023-03-22T07:09:04.640066 | 2013-09-30T15:58:23 | 2013-09-30T15:58:23 | 588,721,538 | 1 | 0 | MIT | 2023-01-13T20:34:25 | 2023-01-13T20:34:25 | null | UTF-8 | Python | false | false | 354 | py | from unittest import TestCase
from win_unc.errors import NoDrivesAvailableError
from win_unc.disk_drive import get_available_disk_drive
class TestAvailableDiskDrive(TestCase):
    """Smoke test for get_available_disk_drive()."""
    def test_get_available_disk_drive(self):
        # A free drive letter should yield a non-None result.  On a machine
        # where every letter is taken, NoDrivesAvailableError is expected
        # and deliberately treated as a pass.
        try:
            self.assertIsNotNone(get_available_disk_drive())
        except NoDrivesAvailableError:
            pass
| [
"elliot@3noch.com"
] | elliot@3noch.com |
e55b057232bdc2f6da6ef749bc1f2f138a2fdb19 | d3e7b50efb9ddfa329eab0411d0f4cf058d4aabe | /tasks/task_c.py | 22a377b7e2147a94a834d1b3c7c12399925dbe2f | [] | no_license | joelbispo/luigi-demo-blackrock | 1a6f774930d71dd9554e453dfc8cc9ab03f656f8 | fdb55df581e79044aced601a96de6edfe0c86382 | refs/heads/master | 2022-11-16T06:49:31.967588 | 2020-07-16T08:38:26 | 2020-07-16T08:38:26 | 280,096,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | import luigi
from util.cronometro import cronometro
class TarefaC(luigi.Task):
    """Demo Luigi task: runs cronometro(5), then writes a completion marker."""
    def run(self):
        # cronometro(5) presumably waits/times for 5 units — see util.cronometro.
        cronometro(5)
        with self.output().open('w') as log:
            log.write('Oi, Arena, eu terminei')
    def output(self):
        # Hard-coded Windows path; the marker file is named after the class
        # (e.g. TarefaC.txt).
        return luigi.LocalTarget(f'C:\\temp-luigi\\{self.__class__.__name__}.txt')
"joelbisponeto@gmail.com"
] | joelbisponeto@gmail.com |
0f49a3e164a082564a9962af1e266ec0ba9b7b33 | 83b7dddc0ff247aae41e332c6c41a5b3dbbcf529 | /src/brewv/bottle.py | 28c0a9ee2d66d00691a21779dd5176affda6b47b | [
"MIT"
] | permissive | lime-green/brew-versions | a453e2891a5b787a0f331e2264020497f78f94dc | 502f4efe56b516df9646533ead745e44bc509ac5 | refs/heads/main | 2023-04-05T14:19:59.854486 | 2021-04-20T16:00:35 | 2021-04-20T16:00:35 | 350,513,815 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,312 | py | import os
import platform
from requests.exceptions import HTTPError
from .constants import BOTTLE_FILE_SUFFIX, IS_MAC_OS, MAC_VER_TO_CODENAME, SYSTEM
from .util import (
check_output,
download_file,
is_supported_mac_ver,
logger,
mac_ver,
make_request,
)
class BottleNotFound(Exception):
    """Raised when no bottle could be downloaded for a formula/version.

    Carries an optional source revision associated with the missing bottle.
    """

    def __init__(self, source_revision=None):
        # Revision reported by the registry, or None when unknown.
        self.source_revision = source_revision
class BottleClient:
    """Base class for bottle providers: holds the formula name and OS tag."""
    def __init__(self, formula_name):
        self.formula_name = formula_name
        # Platform segment used in bottle file names / manifest ref tags,
        # computed once per client.
        self.bottle_os_identifier = build_bottle_os_identifier()
class GithubBottleClient(BottleClient):
    """Bottle provider backed by the GitHub container registry (ghcr.io).

    Bottles are published as OCI artifacts: a per-version manifest lists
    one entry per OS/arch ref tag, and the bottle itself is fetched as a
    blob addressed by its sha256 digest.
    """
    # Static bearer token sent with every request; presumably the anonymous
    # token ghcr.io accepts for public Homebrew images — TODO confirm.
    GITHUB_AUTH = {"Authorization": "Bearer QQ=="}
    # Registry namespace differs between macOS (homebrew) and Linux (linuxbrew).
    base_url = dict(
        Darwin="https://ghcr.io/v2/homebrew/core",
        Linux="https://ghcr.io/v2/linuxbrew/core",
    )[SYSTEM]
    def _parse_manifest(self, manifest_response, ref_name):
        """Return (bottle_digest, source_revision) for *ref_name*.

        bottle_digest is None when no manifest entry's
        org.opencontainers.image.ref.name annotation matches ref_name;
        source_revision comes from the top-level annotations (may be None).
        """
        source_revision = manifest_response["annotations"].get(
            "org.opencontainers.image.revision"
        )
        for manifest in manifest_response["manifests"]:
            annotations = manifest["annotations"]
            if annotations["org.opencontainers.image.ref.name"] == ref_name:
                return annotations["sh.brew.bottle.digest"], source_revision
        return None, source_revision
    def download_bottle(self, version, destination_path):
        """Download the bottle for *version* into *destination_path*.

        Raises BottleNotFound (carrying the source revision when known)
        if the digest is missing or any request fails with an HTTP error.
        """
        manifest_url = f"{self.base_url}/{self.formula_name}/manifests/{version}"
        # Ref tag format: "<version>.<os identifier>".
        ref_name = f"{version}.{self.bottle_os_identifier}"
        source_revision = None
        try:
            manifest = make_request(
                manifest_url,
                headers={
                    "Accept": "application/vnd.oci.image.index.v1+json",
                    **self.GITHUB_AUTH,
                },
            )
            bottle_digest, source_revision = self._parse_manifest(manifest, ref_name)
            if not bottle_digest:
                logger.info("No digest found in manifest")
                raise BottleNotFound(source_revision)
            bottle_url = (
                f"{self.base_url}/{self.formula_name}/blobs/sha256:{bottle_digest}"
            )
            # The digest doubles as the integrity checksum for the download.
            download_file(
                bottle_url,
                destination_path,
                headers=self.GITHUB_AUTH,
                sha256_verification=bottle_digest,
            )
        except HTTPError:
            logger.warning("Got HTTP error when attempting to download bottle")
            raise BottleNotFound(source_revision)
class BintrayBottleClient(BottleClient):
    """Bottle provider backed by Bintray's bottle repositories."""
    base_url = dict(
        Darwin="https://homebrew.bintray.com/bottles",
        Linux="https://linuxbrew.bintray.com/bottles",
    )[SYSTEM]
    # NOTE(review): 'subject' is not referenced anywhere in this file chunk —
    # possibly used by callers elsewhere; confirm before removing.
    subject = dict(Darwin="homebrew", Linux="linuxbrew")[SYSTEM]
    def download_bottle(self, version, destination_path):
        """Download the bottle for *version* into *destination_path*.

        Raises BottleNotFound (without a source revision) on any HTTP error.
        """
        bottle_file_name = (
            f"{self.formula_name}-{version}"
            f".{self.bottle_os_identifier}.{BOTTLE_FILE_SUFFIX}"
        )
        url = f"{self.base_url}/{bottle_file_name}"
        try:
            download_file(url, destination_path)
        except HTTPError:
            logger.warning("Got HTTP error when attempting to download bottle")
            raise BottleNotFound
def download_bottle(formula_name, version, bottle_cache_file):
    """Try each bottle provider in turn; return on the first success.

    Raises BottleNotFound when every provider fails, carrying the last
    source revision any provider reported (or None).
    """
    clients = [GithubBottleClient(formula_name), BintrayBottleClient(formula_name)]
    source_revision = None
    for client in clients:
        logger.info(f"Trying bottle download with {client.__class__.__name__}")
        try:
            return client.download_bottle(version, bottle_cache_file)
        except BottleNotFound as e:
            # Bug fix: only overwrite when a revision is present, so a later
            # provider raising without one (e.g. Bintray) cannot erase the
            # revision captured from an earlier provider.
            if e.source_revision is not None:
                source_revision = e.source_revision
    raise BottleNotFound(source_revision)
def build_bottle_os_identifier():
    """Return the OS/arch tag used in bottle names.

    Linux: "<machine>_linux".  macOS: the codename for the running macOS
    version (lowercased, spaces replaced with underscores), prefixed with
    "arm64_" on Apple Silicon.  Only supported macOS versions are accepted.
    """
    machine = platform.machine()
    if not IS_MAC_OS:
        return f"{machine}_linux"
    assert is_supported_mac_ver()
    codename = MAC_VER_TO_CODENAME[mac_ver()].lower().replace(" ", "_")
    return f"arm64_{codename}" if machine == "arm64" else codename
def get_bottle_cache_location(formula_name, version):
    """Return the path inside Homebrew's download cache for this bottle."""
    cache_dir = check_output(["brew", "--cache"])
    file_name = "{}--{}.{}.{}".format(
        formula_name, version, build_bottle_os_identifier(), BOTTLE_FILE_SUFFIX
    )
    return os.path.join(cache_dir, file_name)
| [
"josh.doncastermarsiglio@tophatmonocle.com"
] | josh.doncastermarsiglio@tophatmonocle.com |
99d16f620ac24b74834e13c63e09b6196c038fb0 | 7f4fb112bc9ab2b90f5f2248f43285ce9ac2e0a0 | /src/igem/neutronics/air/bare/borosilicate-glass-backfill/0wt/plot_all.in.one_cask.thickness_dose.rate_t4045_plug.py | c763b91a79e02074300d90606bbccfa7b9fb3d2b | [] | no_license | TheDoctorRAB/plot | dd3b5134c91c8fa7032fcc077c5427b26a80e49d | ed6746d511222c03e79f93548fe3ecd4286bf7b1 | refs/heads/master | 2021-07-11T10:21:19.347531 | 2020-07-16T17:13:15 | 2020-07-16T17:13:15 | 20,462,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,119 | py | ########################################################################
# R.A.Borrelli
# @TheDoctorRAB
# rev.11.March.2015
########################################################################
#
# Plot routine
# All in one file, with no separate control input, lib files
# Plot data is contained in a separate data file, read on command line
# Set up for a secondary y axis if needed
#
########################################################################
#
#
#
#######
#
# imports
#
# plot
#
import numpy
import matplotlib
import matplotlib.pyplot as plot
from matplotlib.ticker import MultipleLocator
#
#######
#
# command line
#
from sys import argv
# unpack the command line: argv[0] is this script, argv[1] the plot data file
script,plot_datafile=argv #column 0 is the x values then odd columns contain dose/flux
#
#######
#
# screen resolution
#
import Tkinter
# NOTE(review): ``Tkinter`` (capital T) is the Python 2 module name, and
# instantiating Tk() requires a display server to be available.
root=Tkinter.Tk()
#
########################################################################
#
#
#
#######
#
# screen resolution
#
###
#
# pixels
#
width=root.winfo_screenwidth()
height=root.winfo_screenheight()
#
###
#
# mm
#
width_mm=root.winfo_screenmmwidth()
height_mm=root.winfo_screenmmheight()
#
###
#
# in
# 25.4 mm per inch
#
width_in=width_mm/25.4
height_in=height_mm/25.4
#
###
#
# dpi
#
width_dpi=width/width_in
height_dpi=height/height_in
#
dpi_values=(96,120,144,168,192)
current_dpi=width_dpi
minimum=1000
#
# snap the measured horizontal DPI to the closest standard DPI value
for dval in dpi_values:
    difference=abs(dval-width_dpi)
    if difference<minimum:
        minimum=difference
        current_dpi=dval
#
#######
#
# output to screen
#
print('width: %i px, height: %i px'%(width,height))
print('width: %i mm, height: %i mm'%(width_mm,height_mm))
print('width: %0.f in, height: %0.f in'%(width_in,height_in))
print('width: %0.f dpi, height: %0.f dpi'%(width_dpi,height_dpi))
print('size is %0.f %0.f'%(width,height))
print('current DPI is %0.f' % (current_dpi))
#
#######
#
# open the plot data file(s)
# add plot_dataN for each plot_datafileN
#
# load whitespace-delimited numeric plot data; column 0 is x, odd columns are series
plot_data=numpy.loadtxt(plot_datafile,dtype=float)
#
#######
#
# graph parameters
#
###
#
# font sizes
#
matplotlib.rcParams.update({'font.size': 48}) #axis numbers
#
title_fontsize=54 #plot title
axis_fontsize=48 #axis labels
annotate_fontsize=48 #annotation
#
###
#
# set up for two y axis
#
fig,left_axis=plot.subplots()
# right_axis=left_axis.twinx()
#
###
#
# plot text
#
title='Dose rate - Bottom plate'
xtitle='Wall thickness [cm]'
ytitle='Dose rate [$\mu$Sv/h]'
#
###
#
# legend
# add linecolorN for each plot_dataN
# add curve_textN for each plot_dataN
#
line_color0='blue' #color
line_color1='orange' #color
line_color2='red' #color
line_color3='green' #color
line_color4='cyan' #color
#
curve_text0='10 wt% $B_4C$' #legend text
curve_text1='30 wt% $B_4C$' #legend text
curve_text2='50 wt% $B_4C$' #legend text
curve_text3='70 wt% $B_4C$' #legend text
curve_text4='90 wt% $B_4C$' #legend text
#
legend_location='lower left' #location of legend on grid
legend_font=42
#
###
#
# annotate
# position of the annotation dependent on axis domain and range
#
annotate_title='T-4045'
annotate_x=23
annotate_y=10000
#
annotate_title2='Air-Glass backfill'
annotate_x2=23
annotate_y2=7000
#
annotate_title3='0 wt% $^{10}B$'
annotate_x3=23
annotate_y3=3000
#
###
#
# axis domain and range
# (both minima are nonzero so the log y-scale below is valid)
#
xmin=1
xmax=31
#
ymin=1
ymax=15000
#
###
#
# axis ticks
#
xmajortick=5
ymajortick=5000
#
xminortick=1
yminortick=1000
#
###
#
# grid linewidth
#
major_grid_linewidth=2.5
minor_grid_linewidth=2.1
#
major_grid_tick_length=7
minor_grid_tick_length=5
#
###
#
# curve linewidth
#
curve_linewidth=4.0
#
#######
#
# set plot diagnostics
#
###
#
# titles
#
plot.title(title,fontsize=title_fontsize)
left_axis.set_xlabel(xtitle,fontsize=axis_fontsize)
left_axis.set_ylabel(ytitle,fontsize=axis_fontsize)
# right_axis.set_ylabel()
#
###
#
# grid
#
left_axis.grid(which='major',axis='both',linewidth=major_grid_linewidth)
left_axis.grid(which='minor',axis='both',linewidth=minor_grid_linewidth)
#
left_axis.tick_params(axis='both',which='major',direction='inout',length=major_grid_tick_length)
left_axis.tick_params(axis='both',which='minor',direction='inout',length=minor_grid_tick_length)
#
###
#
# axis domain and range
#
plot.xlim(xmin,xmax)
left_axis.axis(ymin=ymin,ymax=ymax)
###
#
# axis ticks
#
left_axis.xaxis.set_major_locator(MultipleLocator(xmajortick))
left_axis.xaxis.set_minor_locator(MultipleLocator(xminortick))
left_axis.yaxis.set_major_locator(MultipleLocator(ymajortick))
left_axis.yaxis.set_minor_locator(MultipleLocator(yminortick))
#
###
#
# log scale option
# xmin,ymin !=0 for log scale
#
#left_axis.set_xscale('log')
left_axis.set_yscale('log')
#
###
#
# annotation
# comment out if not needed
#
left_axis.annotate(annotate_title,xy=(annotate_x,annotate_y),xytext=(annotate_x,annotate_y),fontsize=annotate_fontsize)
left_axis.annotate(annotate_title2,xy=(annotate_x2,annotate_y2),xytext=(annotate_x2,annotate_y2),fontsize=annotate_fontsize)
left_axis.annotate(annotate_title3,xy=(annotate_x3,annotate_y3),xytext=(annotate_x3,annotate_y3),fontsize=annotate_fontsize)
#
#######
#
# plot data
# column 0 is x; odd columns 1,3,5,7,9 are the five dose-rate series
#
left_axis.plot(plot_data[:,0],plot_data[:,1],marker='o',color=line_color0,label=curve_text0,linewidth=curve_linewidth,markersize=20)
left_axis.plot(plot_data[:,0],plot_data[:,3],marker='o',color=line_color1,label=curve_text1,linewidth=curve_linewidth,markersize=20)
left_axis.plot(plot_data[:,0],plot_data[:,5],marker='o',color=line_color2,label=curve_text2,linewidth=curve_linewidth,markersize=20)
left_axis.plot(plot_data[:,0],plot_data[:,7],marker='o',color=line_color3,label=curve_text3,linewidth=curve_linewidth,markersize=20)
left_axis.plot(plot_data[:,0],plot_data[:,9],marker='o',color=line_color4,label=curve_text4,linewidth=curve_linewidth,markersize=20)
left_axis.legend(loc=legend_location,fontsize=legend_font) #legend needs to be after all the plot data
plot.get_current_fig_manager().resize(width,height)
plot.gcf().set_size_inches((0.01*width),(0.01*height))
#
#######
#
# save
# note: the plot title (with spaces) is used as the output file name
#
plot.savefig(title,dpi=current_dpi)
#
#######
#
# plot to screen
#
# plot.show()
#
########################################################################
#
# EOF
#
########################################################################
| [
"borrelli@localhost.localdomain"
] | borrelli@localhost.localdomain |
89c371ab6dc5ad908b6e19a063cb752e11d707e5 | fe8f10e3333954a09d135e159efbf161365033df | /myAssembler | d9cf64bbc2b2d6cc17ce8d5d18b1e2f1d4c29440 | [] | no_license | akachigerry/gerry-repo | b02973d3030cfbbe1f7144c6a6ddfa3b1b136095 | 91b9963c61b7aa97a4aa21f5186390e273e6cfe9 | refs/heads/master | 2021-01-15T14:50:09.511895 | 2018-08-12T05:34:52 | 2018-08-12T05:34:52 | 99,695,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,063 | #!/usr/bin/python
import sys, getopt
import itertools
from itertools import imap, starmap, izip
from multiprocessing import Pool
from multiprocessing import Process, Queue, cpu_count, sharedctypes, Manager
from multiprocessing.dummy import Pool as ThreadPool
import inspect
import random
import counter
import re
import operator
from ctypes import *
from multiprocessing.sharedctypes import Array
from collections import deque
def input_arg(argv):
   # Parse command-line options for the assembler (Python 2 syntax).
   # NOTE(review): several problems here, flagged for follow-up:
   #  - getopt short options are single characters, so "hf1:f2:k:rl:"
   #    defines -h, -f, -1:, -2:, -k:, -r, -l: -- NOT -f1/-f2.
   #  - ``usage()`` is not defined anywhere in this file (NameError on a
   #    getopt error).
   #  - ``opt in ("-f1")`` is substring membership on a plain string, not
   #    tuple membership.
   #  - the parsed values are bound to locals only and never returned; the
   #    rest of the script reads sys.argv positionally instead.
   FastqFile = ''
   WordSize = ''
   try:
      opts, args = getopt.getopt(argv,"hf1:f2:k:rl:")
   except getopt.GetoptError, err:
      print str(err)
      usage()
      sys.exit(2)
   for opt, arg in opts:
      if opt == '-h':
         print 'my_Assembler.py -f1 <FastqFile1> -f2 <FastqFile2> -k <WordSize>'
         sys.exit()
      elif opt in ("-f1"):
         FastqFile1 = arg
      elif opt in ("-f2"):
         FastqFile2 = arg
      elif opt in ("-k"):
         WordSize = arg
input_arg(sys.argv[1:])
linecnt = 0
qualst = []
# NOTE(review): this handle is opened but never read or closed here; the
# file is re-opened later as fastq_inp1.
fastq_inp = open(sys.argv[2])
readcnt = 0
#define a dictionary with reads as keys and count of read occurences as values
reads=dict()
# word size k taken positionally from the command line (value after -k)
k=int(sys.argv[6])
#define a node class which holds the sink(s) of a source.
#there are two label attributes for this class because no source should
#have more than two sinks. This requirement is necessary in order to
#simplify the task of finding the Eulerian path.
class Node:
   """Sink record for one source word of the de Bruijn graph.

   A source may point at no more than two distinct sinks: ``label1`` holds
   the first sink seen, ``label2`` the optional second one, and
   ``indegree1``/``indegree2`` count the observed edges into each.
   """

   def __init__(self, par):
      """Create a node whose first sink is *par*; the second slot is empty."""
      self.label1 = par
      self.label2 = ''
      self.indegree1 = self.indegree2 = 0
def prep_reads(reads):
   """Clean raw read strings in preparation for k-mer extraction.

   Strips trailing newlines and keeps only reads whose alphabet is
   exactly {A,C,G,T} or exactly {A,C,G,T,N}; in the latter case every N
   is rewritten to A. Reads over any other alphabet are discarded.
   """
   cleaned = []
   for raw in reads:
      seq = raw.rstrip('\n')
      alphabet = ''.join(sorted(set(seq)))
      if alphabet == 'ACGT':
         cleaned.append(seq)
      elif alphabet == 'ACGNT':
         # ambiguous bases are arbitrarily resolved to A
         cleaned.append(seq.replace('N', 'A'))
   return cleaned
def read_index_pair(read):
   """Pair *read* with each start offset of a (k+1)-length window in it.

   Uses the module-level word size ``k``; returns zipped (read, offset)
   pairs for offsets 0 .. len(read)-k-2.
   """
   n_windows = len(read) - (k + 1)
   return zip([read] * n_windows, range(n_windows))
def create_words(read):
   """List the consecutive (source, sink) k-mer pairs of *read*.

   Each element is (read[x:x+k], read[x+1:x+k+1]) -- two k-mers offset by
   one base -- for every (k+1)-length window, using the module-level word
   size ``k``.
   """
   pairs = []
   for start in range(len(read) - (k + 1)):
      pairs.append((read[start:start + k], read[start + 1:start + 1 + k]))
   return pairs
#round count of outgoing edges per source word to the nearest multiple of 5.
def round_mult5(value):
   """Round *value* to the nearest multiple of 5, returned as an int."""
   nearest = round(float(value) / 5)
   return int(5 * nearest)
#generate freq of use numbers of source-to-sink outword connections, counted above.
#This is necessary to estimate the least connection size which is most frequent.
def edge_freq(x):
   # Key function intended to return a node's edge count.
   # NOTE(review): Node instances define ``indegree1``/``indegree2`` but no
   # ``indegree`` attribute, so calling this on a Node raises
   # AttributeError -- likely meant ``indegree1``; confirm. Appears unused.
   return x.indegree;
def class2list(x):
   # Wrap the graph entry at key ``x`` in a list if it is not one already.
   # NOTE(review): ``entire_graph`` is never defined in this module, so any
   # call raises NameError; the function appears unused.
   if isinstance(entire_graph[x], list) == False:
      entire_graph[x] = [entire_graph[x]]
def suffixArray(s):
   """Return the suffix array of *s*: offsets of its sorted suffixes.

   The empty suffix stands in for the terminal '$', so the result holds
   len(s)+1 offsets.
   """
   ordered = sorted((s[off:], off) for off in range(len(s) + 1))
   return [off for _suffix, off in ordered]
def bwt(t):
   """Return BWT(t) as a list of characters, computed via the suffix array.

   Offset 0 maps to the sentinel '$'; every other offset contributes the
   character immediately preceding that suffix in *t*.
   """
   out = []
   for off in suffixArray(t):
      out.append('$' if off == 0 else t[off - 1])
   return out
def rankBwt(bw):
   #Given BWT string bw, returns a parallel list of B-ranks. Also
   #returns tots, a mapping from characters to # times the
   #character appears in BWT.
   # NOTE(review): the docstring above does not match the code -- only the
   # ``tots`` items are returned, no rank list. Also, a first-seen
   # character is counted twice: the first branch sets its count to 1 and,
   # because ``lastchar`` was just set to c, the second branch immediately
   # increments it again. Confirm intended semantics before relying on the
   # counts.
   tots = dict()
   seen = dict()
   grp = 0
   lastchar = ''
   for c in iter(bw):
      if c not in seen.keys():
         grp=1
         seen[c] = grp
         tots[c+str(grp)] = 1
         lastchar = c
      if c in seen.keys() and c == lastchar:
         tots[c+str(grp)]+=1
         lastchar = c
      if c in seen.keys() and c != lastchar:
         grp = seen[c]+1
         tots[c+str(grp)] = 1
         lastchar = c
      seen[c]+=1
   return tuple(tots.items());
def firstCol(tots):
   """Map each character to the first row of its run in the BWT first column.

   *tots* is an iterable of (char+groupnumber, count) pairs as produced by
   rankBwt(); pairs are visited in sorted order and counts accumulate into
   running start offsets, keyed by the bare character (first char of the
   tag).
   """
   starts = {}
   offset = 0
   for tagged_char, count in sorted(tots):
      char = tagged_char[0]
      if char not in starts:
         starts[char] = offset
      offset += count
   return starts
def reverseBwt(bw, first):
   # Intended to invert a BWT string back to the original text using the
   # first-column map. NOTE(review): several latent bugs, flagged:
   #  - ``seen+c`` (twice) is a no-op expression; ``seen += c`` was surely
   #    intended, so every rank computed stays 0.
   #  - ``bw[rowi][1]`` indexes the second character of a string element;
   #    for single-character elements this raises IndexError.
   #  - ``first[c]+bwranks[rowi][0]`` adds an int to a str (TypeError);
   #    bwranks[rowi][1] (the rank) was probably intended.
   # Confirm intended algorithm before use.
   rowi = 0
   t = "$"
   seen = ''
   bwranks = []
   for c in iter(bw):
      if c in seen:
         rank = seen.count(c)
         bwranks.append((c, rank))
         seen+c
      if c not in seen:
         rank = 0
         bwranks.append((c, rank))
         seen+c
      while bw[rowi][1] != '$':
         c = bw[rowi]
         t = c + t
         rowi = first[c]+bwranks[rowi][0]
   return t;
def uncompress(tup):
   """Rebuild the original string from a compressed BWT representation.

   *tup* is an iterable of ((char+groupnumber, ...), count) pairs: step 1
   re-expands it into the raw BWT string, step 2 inverts the BWT via
   firstCol() and reverseBwt().
   """
   #step1:reproduce a bwt string from the compressed dictionary form
   src_bw = ''
   for innertup in iter(tup):
      c = innertup[0][0]*innertup[1]
      src_bw += c
   #step2: reproduce original sting from bwt string
   first_dic = firstCol(tup)
   # bugfix: previously referenced the misspelled name ``fisrt_dic``,
   # which raised NameError on every call.
   string = reverseBwt(src_bw, first_dic)
   return string;
#next, generate contig
def contig_gen(source):
   # Walk the de Bruijn graph from *source*, extending a contig one base at
   # a time along label1 edges, with special handling for suspected tandem
   # repeats. NOTE(review): problems flagged for follow-up:
   #  - ``graph1`` (repeat test below) is never defined in this module, so
   #    that branch raises NameError at runtime.
   #  - ``nxtlst[i]`` inside the repeat loop is likewise undefined.
   #  - the ``break`` directly under ``looptest+=1`` exits the OUTER for-x
   #    loop, so the ``if looptest > 0`` repeat-handling code can never be
   #    reached from that path -- confirm intended control flow.
   #start building contigs from source strings with only outgoing edges
   contig = source
   current = source
   for x in xrange(len(graph.keys())):
      if current in graph.keys():
         #pick next node(s) to be traversed (label1, then label2)
         if len(graph[current].label1) > 0:
            nxt = graph[current].label1
         else:
            print contig
            return contig;
         #test if current could be start of a repeat loop by checking if it has
         #greater than or equal to two edge unit connections to its label1 sink
         looptest = 0
         if int(round(graph1[current].indegree1/depth)) >= 2:
            looptest+=1
            break
         if looptest == 0:
            #check label1 (i.e nxt) of the source 'current' for potential of being a source.
            #This potential depends on the sink having outgoing connection to sink(s).
            newcurrent = 0
            if nxt in graph.keys():
               contig+=nxt[-1]
               graph[current].label1 = ''
               current = nxt
               nxt = graph[current].label1
               continue
            else:
               print contig
               return contig;
         if looptest > 0:
            #check if the source word named 'current'
            #is the start of a repeat loop block. Do this by checking if number
            #of its connection to its first sink is greater than double the depth.
            loopstart = current
            cnt = 0
            repeatcnt = 0
            #create a loop through source-sink edges in order to confirm the repeat sequence.
            #The number of passes through the loop should be determined from the
            #max number of repeat monomers(N) obtainable in a plant or animal genome
            #and the k-word length (k) as follows: N-(k+2). The result is approximated to the
            #closest higher integer.
            yesRepeat = 0
            for x in xrange(400):
               if nxt in graph.keys():
                  #if loopstart is equal to nxt of next sink
                  if loopstart == graph[nxt].label1:
                     yesRepeat+=1
                     contig+=nxtlst[i][-1]
                     source = nxt
                     graph[current].label1=''
                     numRepeat = int(round(graph[source].indegree1/depth))+1
                     contig = contig*numRepeat
                     #delete the 'loopstart' sink of nxt before continuing contig extension from main loop.
                     #this will prevent potential of unnecessarily trasversing the repeat monomer again given that
                     #the entire repeat sequence has already been estimated from its monomer.
                     current = source
                     nxt = graph[current].label1
                     break
               else:
                  print contig
                  return contig;
               if yesRepeat == 0:
                  #else, make the first sink which is in turn a source of some other sink(s),
                  #the new current and continue through the (400X) loop.
                  if nxt in graph.keys():
                     if len(graph[nxt].label1) > 0:
                        contig+=nxt[-1]
                        graph[current].label1=''
                        current = nxt
                     else:
                        print contig
                        return contig;
                  else:
                     print contig
                     return contig;
   return contig;
def tuple2dict(item):
   """Convert a ((source, sink), count) tuple into a nested dict.

   Returns {source: {sink: count}}.
   """
   (source, sink), count = item[0], item[1]
   return {source: {sink: count}}
def wordCompress(tup):
   # Store the BWT-compressed form of a (source, sink) k-mer pair, keyed by
   # the pair of rank tuples, with the pair's count as the value.
   # NOTE(review): relies on module-level ``compdict`` and ``wordlst``;
   # ``compdict`` is never defined in this file, so any call raises
   # NameError. Appears unused.
   source_bwt = bwt(tup[0])
   source_bwt = rankBwt(source_bwt)
   sink_bwt = bwt(tup[1])
   sink_bwt = rankBwt(sink_bwt)
   compdict[(source_bwt, sink_bwt)] = wordlst[tup]
#define function to get input from queue and put output to it.
def fun(f, X, q_in, q_out):
   """Worker loop: apply *f* to items pulled from *q_in*, push results to
   *q_out*, and stop on the ``None`` sentinel.

   *X* is accepted only for signature compatibility with parmap() and is
   not used here.
   """
   for item in iter(q_in.get, None):
      q_out.put(f(item))
def parmap(f, X, nprocs=cpu_count()):
   # Parallel map over *X* using ``nprocs`` daemon worker processes that
   # each run fun() against a shared input/output queue pair.
   # NOTE(review): results are collected in queue order, NOT input order;
   # ``f`` must be picklable (a lambda would fail under the spawn start
   # method); the nprocs default is evaluated once at import time.
   q_in = Queue()
   q_out = Queue()
   proc = [Process(target=fun, args=(f, X, q_in, q_out)) for n in xrange(nprocs)]
   for n in xrange(nprocs):
      proc[n].daemon = True
      proc[n].start()
   sent = list(q_in.put(itm) for itm in X)
   # one None sentinel per worker terminates fun()'s loop
   [q_in.put(None) for i in xrange(nprocs)]
   res = list(q_out.get() for i in xrange(len(sent)))
   [proc[n].join() for n in xrange(nprocs)]
   print len(res)
   return res;
#instance of class takes in a generator (which yields k, v tuples) and builds a dictionary-like class object on the fly.
#This is a more memory-efficient way of building a dictionary
class LazyDict():
"""A dictionary built on demand from an iterator."""
def __init__(self, iterator):
self._dict = dict(iterator)
self._iterator = iterator
def __getitem__(self, key):
if key in self:
return self._dict[key]
else:
raise KeyError(key)
def __setitem__(self, key, value):
return self._dict__setitem__(key, value)
def keys(self):
return self._dict.keys()
def iterkeys(self):
return self._dict.iterkeys()
def values(self):
return self._dict.values()
def itervalues(self):
return self._dict.itervalues()
def items(self):
return self._dict.items()
def iteritems(self):
return self._dict.iteritems()
def update(self, iterator):
return self._dict.update(self._iterator)
#__contain__ is an abstract base class implementation of
#'in' used in __getitem__ method above
def __contains__(self, key):
if key in self._dict:
return True
else:
return False
fastq_inp1 = open(sys.argv[2])
fastq_inp2 = open(sys.argv[4])
# islice(handle, 1, None, 4) yields every 4th line starting at line 1,
# i.e. the sequence line of each 4-line FASTQ record
fastq1 = itertools.islice(fastq_inp1, 1, None, 4)
fastq2 = itertools.islice(fastq_inp2, 1, None, 4)
# NOTE(review): deleting the names does not close the file handles
del fastq_inp1, fastq_inp2
fastqall = []
fastqall.extend(fastq1)
fastqall.extend(fastq2)
print len(fastqall)
del fastq2, fastq1
# target chunk size so each CPU gets roughly one chunk of reads
chunksize = int(round(len(fastqall)/cpu_count()))
print chunksize
print cpu_count()
def chunking(lst):
   # Split *lst* into roughly cpu_count()+1 chunks of the module-level
   # ``chunksize``, appending any remainder as a final short chunk.
   # NOTE(review): the early-return logic depends on where z lands relative
   # to len(lst); for some list/chunk size combinations trailing items may
   # be appended twice or the loop may exit early -- verify against the
   # expected chunk layout.
   z=0
   chunklst = []
   for i in xrange(cpu_count()+1):
      chunk = list(itertools.islice(lst[z:], chunksize))
      chunklst.append(chunk)
      z+=chunksize
      if z >= len(lst) - chunksize:
         if z >= len(lst):
            return chunklst;
         else:
            chunklst.append(lst[z:])
            return chunklst;
   return chunklst;
fastqall_chunk = chunking(fastqall)
print len(fastqall_chunk[0])
print len(fastqall_chunk)
print sys.getsizeof(fastqall_chunk)
# clean reads in parallel, then expand each cleaned read into its
# (source, sink) k-mer pairs in the parent process
pool = Pool(cpu_count())
wordlst=[]
for result in pool.imap(prep_reads, fastqall_chunk):
   wordGen = (create_words(read) for read in result)
   resultLen=len(result)
   print resultLen
   for i in xrange(resultLen):
      wordlst.extend(wordGen.next())
pool.close()
pool.join()
del fastqall_chunk
#define the function 'trim' to remove source words having total of 2 or less edge connections to a sink
def trim(itm):
   """Keep a (word-pair, count) item only when its count exceeds 2.

   Returns *itm* unchanged for counts > 2, otherwise None, which the
   downstream graph-building loop filters out; this drops edges that are
   likely sequencing errors.
   """
   return itm if itm[1] > 2 else None
print "size of wordlst is", sys.getsizeof(wordlst)
#break fastq files into chunks, then loop throuugh list of chunks creating words for each loop.
#a dynamic chunking is utilised to maximize available system memory and processes for speedy execution.
# ``counter`` is a local module providing a Counter implementation;
# most_common() yields (pair, count) sorted by descending count
wordlst=counter.Counter(wordlst).most_common()
print wordlst[0:3]
pool = Pool(20)
wordlst1 = pool.map(trim, wordlst)
pool.close()
pool.join()
print len(wordlst1)
del wordlst
# Build the graph: each source word maps to a Node holding at most two
# distinct sinks with their edge counts; allsinks records every sink seen.
src2sink_cnt = []
allsinks = []
graph = {}
looptracker=0
for i in xrange(len(wordlst1)):
   looptracker+=1
   item = wordlst1[i]
   if item != None:
      source = item[0][0]
      sink = Node(item[0][1])
      if source not in graph.keys():
         allsinks.append(item[0][1])
         sink.indegree1 += int(item[1])
         graph[source] = sink
         src2sink_cnt.append(int(item[1]))
         continue
      else:
         if graph[source].label2 == '':
            allsinks.append(item[0][1])
            graph[source].label2 += item[0][1]
            graph[source].indegree2 += int(item[1])
            src2sink_cnt.append(int(item[1]))
         else:
            print 'maximum number(2) of unique sinks allowed for each source has been reached'
print graph.items()[0:3]
print len(src2sink_cnt)
def merge_dic(diclst):
   # Intended to merge a list of nested {source: {sink: count}} dicts.
   # NOTE(review): this function looks unfinished and cannot run as-is:
   #  - ``cnt`` is never incremented, so only the first branch ever runs
   #    and every dict is merged by plain update().
   #  - ``common_dictkeys`` is referenced but the variable defined above is
   #    ``common_dickeys`` (NameError if the else branch were reached).
   #  - ``diff_dickeys[key]`` indexes a list with a string key (TypeError).
   #  - ``next_value``, ``graph2`` and ``tupl`` are undefined.
   # Appears unused; flagged rather than rewritten.
   dic=dict()
   cnt=0
   for d in diclst:
      if cnt == 0:
         dic.update(d)
      else:
         dickeys = dic.keys()
         next_dickeys = d.keys()
         common_dickeys = list(set(dickeys).intersection(set(next_dickeys)))
         diff_dickeys = list(set(dickeys).difference(set(next_dickeys)))
         if len(common_dictkeys) == 0:
            dic.update(d)
         else:
            for key in iter(diff_dickeys):
               unitdic = {key:diff_dickeys[key]}
               dic.update(unitdic)
            #for set of keys common between new dictionary and current merged dictionary
            for key in iter(common_dictkeys):
               next_val_of_val = d[key].values()
               next_key_of_val = d[key].keys()
               if next_value != dic[key]:
                  graph2[tupl]+=next_value
#generate a dictionary from the list of source-sink tuples(wordlst)
#the list should have source strings as key. Values for each of these
#must hold all unique sinks of a source
#To fulfill the above, define a function holds the tuple in
#a dictionary as follows:
#source as main dictionary key
#all sinks of the source as a sub-dictionary consisting of each unique
#sink of the source as key and a number, reflecting the sink's frequeny
#as value.
chunksize = int(round(len(src2sink_cnt)/cpu_count()))
pool = Pool(cpu_count())
# round every edge count to the nearest multiple of 5 in parallel
src2sink_cnt = pool.imap_unordered(round_mult5, src2sink_cnt, chunksize)
pool.close()
pool.join()
src2sink_cnt = counter.Counter(src2sink_cnt)
print len(src2sink_cnt)
kmatchFreq_dict = LazyDict(src2sink_cnt)
#sort above dictionary
kmatchFreq_dict = sorted(kmatchFreq_dict.items(), key=operator.itemgetter(1), reverse=True)
print kmatchFreq_dict
#The most frequent kword matches (number of source-to-sink connections)
#should represent the approximate sequencing depth.
# NOTE(review): the sorted list is immediately overwritten below, and
# ``LazyDict(...)[0][0]`` indexes by key 0 (then subscripts the count),
# which does not pick the most frequent entry -- the sorted list's first
# element was probably intended; confirm.
kmatchFreq_dict = LazyDict(src2sink_cnt)
depth = kmatchFreq_dict[0][0]
del src2sink_cnt
print depth
#Now to the next stage: trace a Eularian path through the graph
#first off, generate list of contig start strings which will be used as input.
#after removing edges potentially arising from seq error (trim function),
#source words with more than one unique sink could result from one of two situations:
#1. existence of alleles of same locus
#2. existence of genome segment copies or paralogs on different loci of same chromosome
#the first case is not tolerable for contig generation as it could result in contigs
#containing homologous haplotype segments in adjacent positions. For this reason,
#the graph is prunned to remove instances of the first case.
#the second case is tolerable iff the difference between indegree and outdegree for the node is not greater than unity (1).
cnt = 0
cntmin = -1
prevkeylst = []
# startsources collects contig start words; appendleft keeps the genuine
# genome start (a source never seen as a sink) at the front
startsources = deque()
secondBranchStartList = []
graphkeys=graph.iterkeys()
for key in graphkeys:
   #define a 'futre key i.e the sink to the current source or the next source after the current (futkey)'
   #to allow a more accurate check of whether node split points (outgree - indgree =1)
   #indicate potential heterozygotes or not
   futkey = graph[key].label1
   src_in_sinkcnt = allsinks.count(futkey)
   src_in_sinkcnt1 = allsinks.count(key)
   if futkey in graph.keys():
      if graph[futkey].label2 != '':
         currentEdge = graph[key].indegree1
         futureEdge1 = graph[futkey].indegree1
         futureEdge2 = graph[futkey].indegree2
         #to check if a source connection to multiple sinks is due to het alleles:
         #1. their must be 2 sinks connected to the source
         #2. number of edges (connections) to the source must be approximately equal to sum for both sinks
         #3. number of edges must be approximately half of the estimated depth
         # NOTE(review): ``int(currentEdge/futureEdge1) >= 1.5`` compares an
         # int against 1.5, which effectively means >= 2; under Python 2 the
         # division itself already floors -- confirm the intended ratio test.
         if int(round(futureEdge1/graph[futkey].indegree2)) == 1 and \
         int(currentEdge/futureEdge1) >= 1.5 and \
         int(currentEdge/futureEdge1) <= 2.5:
            #the seconnd sink is eliminated if above test for heterozygosity is confirmed. While not useful for strict genome assembly as here,
            #the above heterozygosity test would be invaluable for assemmbly of haplotypes and for reference-free variant extraction.
            graph[futkey].label2 = ''
   #Define potential contig starts. These would be source words not found in sinklist and
   #label2 of branch nodes. This label2 must however be a source itself
   if (src_in_sinkcnt1 == 0 and graph[key].label2 == '') or (src_in_sinkcnt1 == 1 and graph[key].label2 != ''):
      #use appendleft property of deque to ensure that the starting word for the genome (source not found in sink list)
      if (src_in_sinkcnt1 == 0 and graph[key].label2 == ''):
         startsources.appendleft(key)
      if (src_in_sinkcnt1 == 1 and graph[key].label2 != ''):
         if graph[key].label2 in graph.keys():
            startsources.append(graph[key].label2)
            secondBranchStartList.append(graph[key].label2)
del graphkeys
print len(startsources)
print len(startsources)
#define function to put input data into queue, define specific number
#of processes for analysing each component of this data
#(according to a pre-defined function) and output the result in queue
contiglst = []
pool=Pool(cpu_count())
# run contig_gen over every start word, choosing a multiprocessing chunk
# size based on how many starts there are relative to the CPU count
if cpu_count() >= len(startsources) or int(round(len(startsources)/cpu_count())) < 2:
   for result in pool.imap_unordered(contig_gen, startsources):
      contigs =[result, len(result)]
      contiglst.append(result)
   pool.close()
   pool.join()
if int(round(len(startsources)/cpu_count())) >= 2:
   for result in pool.imap_unordered(contig_gen, startsources, int(round(len(startsources)/cpu_count()))):
      contigs = [result, len(result)]
      contiglst.append(result)
   pool.close()
   pool.join()
print "length of contiglst before contig lengthning", len(contiglst)
#go through the contig list and search for branching nodes(sources) and use these as
#inter-contig connections to lengthen contigs
newcontiglst=[]
allcontig = '_'.join(contiglst)
for i in xrange(len(secondBranchStartList)):
   longcontig = ''
   #branchnode will be the entire length of a secondBranchStartList item except for last base.
   #this strategy will ensure that the source word for the branching label2 sinks, from which secondBranchStartList items
   #were populated, can be found and the branch contig extended toward the 5' direction.
   branchnode = secondBranchStartList[i][0:-1]
   if branchnode in allcontig:
      branchstart = allcontig.find(branchnode)
      #avoid matches at start of contig as they are branch node. interest is
      #in longer contigs that incorporate source of branch node start (label2)
      #somewhere along its length.
      if branchstart == 0:
         newcontiglst.append(secondBranchStartList[i])
         continue
      addcontig = allcontig[0:branchstart-1].split('_')[-1]
      longcontig = addcontig + secondBranchStartList[i]
      newcontiglst.append(longcontig)
   else:
      newcontiglst.append(secondBranchStartList[i])
# NOTE(review): ``'\t'.join([itm, len(itm)])`` joins a str with an int and
# raises TypeError -- str(len(itm)) was presumably intended; no newline is
# written between contigs either.
with open("contig_out2", 'w') as outfile:
   contiglen=[]
   for itm in newcontiglst:
      outfile.write('\t'.join([itm, len(itm)]))
      contiglen.append(len(itm))
#calculate N50
# NOTE(review): this prints the cumulative length at which half the
# assembly is passed, not the conventional N50 (the length of the contig
# that crosses the halfway point) -- confirm intended metric.
halfassem = int(round(sum(contiglen)/2))
n50 = 0
contiglist = sorted(contiglen, reverse=True)
for num in iter(contiglist):
   if n50 < halfassem:
      n50+=num
   else:
      break
print "N50 =", n50
| [
"noreply@github.com"
] | akachigerry.noreply@github.com | |
9c7e3a54253670e920ac976c5c6efa681e7cb585 | 37f19a6a4369b5527253ffe350b9e8263f151f1e | /cDCheck/cDCheck.py | 6c7e01372209eecbe43ba5f234de7c671abf15e8 | [
"MIT"
] | permissive | csm10495/cDCheck | a63897a5665605e5da7622e6ec9d96d31059cdc6 | 5e19c938ad78a79dfad6f4a78fdff8c557dc49ce | refs/heads/master | 2021-01-23T06:54:20.623379 | 2015-05-03T20:13:37 | 2015-05-03T20:13:37 | 34,648,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,304 | py | #cDCheck: A Python script to check for, and delete duplicate files in a directory
#(C) Charles Machalow - MIT License
import os #for directory access
import sys #for args
import threading #for threading
import re #for regex matching
#processes the files in range
def processRange(r1, r2, file_dict, dup_file_dict, files):
    """Hash files[r1:r2] by content and record duplicates.

    file_dict maps content-hash -> first path seen with that content;
    dup_file_dict maps content-hash -> list of all paths sharing it (the
    newly found path first, then the originally recorded one). Both dicts
    are mutated in place so several worker threads can share them.
    """
    for path in files[r1:r2]:
        # bugfix: the file handle was previously never closed (leaked one
        # descriptor per file); use a context manager.
        with open(path, "rb") as fh:
            h = hash(fh.read())
        if h not in file_dict:
            file_dict[h] = path
        elif h in dup_file_dict:
            dup_file_dict[h].append(path)
        else:
            dup_file_dict[h] = [path, file_dict[h]]
#alerts the user to duplicates
#alerts the user to duplicates
def callOutDups(dup_file_dict):
    # Interactively walk every duplicate group, letting the user keep one
    # file by number, skip the group ('s'), or keep only regex matches ('r').
    # NOTE(review): the code below relies on the enumerate loop variable
    # ``j`` leaking out of its loop (always safe here because every dup
    # list has >= 2 entries); the 'r' option deletes ALL copies if the
    # regex matches none of them.
    for i in dup_file_dict:
        print("Duplicate file detected with hash: " + str(i))
        print("Instances:")
        for j, k in enumerate(dup_file_dict[i]):
            print(str(j) + ": " + str(k))
        #keep going to valid input
        while True:
            c = input("Choose a number for the file you would like to maintain. Other options are:\ns to skip this file\nr to delete all files that DON'T contain a regex match\n")
            #break character
            if str(c).lower() == "s":
                break
            #regex character
            if str(c).lower() == "r":
                r = input("Regex: ")
                try:
                    reg = re.compile(str(r))
                except Exception:
                    print("Unable to compile regex. Please try again.")
                    continue
                for z in range(0, j + 1):
                    #delete all that don't match regex
                    if not reg.search(dup_file_dict[i][z]):
                        os.remove(dup_file_dict[i][z])
                print("Deleted files that didn't match regex: " + str(r))
                break
            try:
                c = int(c)
            except ValueError:
                print("Invalid input, choose one file (by number) to maintain")
                continue
            #make sure given int is valid
            if c >= 0 and c <= j:
                print("Performing requested action. Maintaining file " + str(c) + ". Deleting others.")
                for z in range(0, j + 1):
                    if z != c:
                        os.remove(dup_file_dict[i][z])
                break
            else:
                print("Invalid input, choose one file (by number) to maintain")
#does the iteration work
def checkPath(path, thread_count=4):
file_dict = {}
dup_file_dict = {}
file_count = 0
files = []
print("Processing files in directory: " + path)
ldir = os.listdir(path)
for i in ldir:
f_path = os.path.join(path, i)
if os.path.isfile(f_path):
file_count+=1
files.append(f_path)
print("Files found: " + str(file_count))
threads = []
f_slice = []
#handle if we can do more threads than files
if (thread_count > file_count):
thread_count = file_count
#starting per thread
per_thread = int(file_count / thread_count)
#set all threads
for i in range(thread_count):
f_slice.append(per_thread)
#remainder number of files that haven't been distributed to threads
extra_files = file_count - (per_thread * thread_count)
#add remainder to threads as equally as possible
for i in range(len(f_slice)):
if extra_files == 0:
break
f_slice[i]+=1
extra_files -= 1
#starts a thread_count threads
#fill threads list with threads that we can start
#f_slice is the number of files each thread should hash
counter = 0
for i in range(len(f_slice)):
s1 = counter
counter = counter + f_slice[i]
t = threading.Thread(target=processRange, args=(s1, counter, file_dict, dup_file_dict, files))
threads.append(t)
#start all threads
for i in threads:
i.start()
#join all threads
for i in threads:
i.join()
print("Done Processing Directory\n")
print("Found " + str(len(dup_file_dict)) + " files with duplicates")
callOutDups(dup_file_dict)
#entrant function
def main():
#make sure we have enough args
if len(sys.argv) >= 2:
print("Please do not remove files from the given directory while this is running")
path = sys.argv[1]
#make sure path exists
if os.path.exists(path):
if (len(sys.argv) == 3):
try:
t = int(sys.argv[2])
checkPath(path, t)
print("cDCheck Completed Successfully!")
except ValueError:
print("Number of threads is not an integer, please make it one and try again")
if (len(sys.argv) == 2):
checkPath(path)
print("cDCheck Completed Successfully!")
else:
print("Given path does not exist, please check and try again")
else:
print("Usage: python cDCheck.py folderpath <number of threads, defaults to 4>")
# Run the CLI entry point only when executed as a script, not on import.
if __name__ == '__main__':
    main()
"csm10495@gmail.com"
] | csm10495@gmail.com |
8b54cfc2b902f28ab710f11c95b3372f1a69a143 | bbd6af23c1c56bf090ddaa46ae639d128f91c5eb | /ourproj/ourproj/spiders/ourspider.py | 9965ea9c65a6dfbd60a27ecba67c4e360d3d92d2 | [
"BSD-3-Clause"
] | permissive | nichgaun/scrapy | 8f75bdfc713ec845e6f7d5666f28f4756fa1f819 | 493918742d8359583bdba950ad1aad43123fc394 | refs/heads/master | 2020-03-09T07:13:56.572918 | 2018-04-16T22:37:38 | 2018-04-16T22:37:38 | 128,659,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 818 | py | # -*- coding: utf-8 -*-
import scrapy
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.lxmlhtml import LxmlLinkExtractor
class OurspiderSpider(CrawlSpider):
    """Crawl English Wikipedia from one start article, printing every link
    extracted from each crawled page.

    A single rule follows all links and routes every response to
    parse_obj().
    """
    name = 'ourspider'
    # bugfix: allowed_domains entries must be bare domain names, not URLs.
    # With 'https://en.wikipedia.org' the offsite middleware never matches
    # any host, so no followed request is allowed through.
    allowed_domains = ['en.wikipedia.org']
    start_urls = ['https://en.wikipedia.org/wiki/Lewis_Tappan_Barney']
    rules = (Rule(LxmlLinkExtractor(allow=()), callback='parse_obj', follow=True),)
    #def start_requests(self):
    #    yield scrapy.Request('https://en.wikipedia.org', meta={'bindaddress': ('1234:5678:111::0a', 0)})
    # def parse(self, response):
    #     pass
    def parse_obj(self, response):
        """Print every link found in *response* by the link extractor."""
        for link in LxmlLinkExtractor(allow=()).extract_links(response):
            print(link)
        # item = someItem()
        # item['url'] = link.url
| [
"paulctroost@gmail.com"
] | paulctroost@gmail.com |
b6979f9099ddfa64a87e7398c649f7d22aafcedc | 603954bafbe9331e5f07b447b1147887ed8c9c39 | /polls/serializers.py | bb29f2e597cdcad26f56d70e2a169702e5c71bac | [] | no_license | tanmayawasekar/python-practice | 8bc8990e03805a532bed9939edbf4d65cbd106de | 19ed39a23f4fff64a53c66a0580791e0bebf894e | refs/heads/master | 2021-04-26T23:57:07.090753 | 2018-03-29T06:46:56 | 2018-03-29T06:46:56 | 123,884,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | from .models import Choice
from rest_framework import serializers
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Choice
fields = ('choice_text', 'integer_field', 'votes')
| [
"tanmay.awasekar@coverfoxmail.com"
] | tanmay.awasekar@coverfoxmail.com |
96c01056fd75ce8b7db7ce7ccb777c86f47c27d1 | fcf73ee47db9df86631753a7faaa8c31ba7def16 | /hawkentracker/alembic/versions/55a540177a3_explict_opt_out_status.py | 2936d1ced640074545157dcbcd57a9882a3bc569 | [
"MIT"
] | permissive | ashfire908/hawken-tracker | 4079ea3df9603bc4216b5a1fdf95bf82fdbe1987 | 4db02a63237100da43f3b8f44a6ebd862cf5d8cc | refs/heads/master | 2021-03-22T04:55:57.822803 | 2018-01-02T20:48:40 | 2018-01-02T20:48:40 | 42,843,654 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 971 | py | """Explict opt out status
Revision ID: 55a540177a3
Revises: 276a9c91812
Create Date: 2015-03-30 13:21:10.374065
"""
# revision identifiers, used by Alembic.
revision = "55a540177a3"
down_revision = "276a9c91812"
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Make players.opt_out nullable, then normalise: False becomes NULL so
    # only an explicit True remains stored.
    # NOTE(review): alter_column() is called without existing_type; some
    # backends (notably MySQL) require it -- confirm the target database.
    # Allow for nullable opt out
    op.alter_column("players", "opt_out", nullable=True)
    # Convert false opt out to null
    # lightweight table construct so we can issue a plain UPDATE
    players = sa.sql.table("players",
        sa.Column("opt_out", sa.Boolean, nullable=True)
    )
    op.execute(
        players.update().where(players.c.opt_out == False).values({"opt_out": None})
    )
def downgrade():
    # Reverse of upgrade(): NULL becomes False again, then the column is
    # made NOT NULL once no NULLs remain.
    # Convert null opt out to false
    players = sa.sql.table("players",
        sa.Column("opt_out", sa.Boolean, nullable=True)
    )
    op.execute(
        players.update().where(players.c.opt_out == None).values({"opt_out": False})
    )
    # Disallow for nullable opt out
    op.alter_column("players", "opt_out", nullable=False)
"andrew.hampe@gmail.com"
] | andrew.hampe@gmail.com |
eef17bf33c968687f21b8f25b314038cbe056676 | c9923e7aef1ee6df8684a0604114033d0cef6d78 | /cnn3d/training/cutmix.py | 0eb91e5a83c6b1075e0f0c42e984706c39149d1e | [
"MIT"
] | permissive | poissonyzr/DFDC | 4ca1fb936829ddfa411e8d92e36dac3c27f3d75b | b08c31781406e303394642c2af1ab59222115683 | refs/heads/master | 2023-05-13T15:37:58.927522 | 2021-06-01T06:55:00 | 2021-06-01T06:55:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,532 | py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def rand_bbox_2d(size, lam):
    """Sample one random CutMix box per batch element for 4D inputs.

    Args:
        size: tensor shape tuple (B, C, W, H) -- size[2]/size[3] are used
            as the spatial extents.
        lam: array of shape (B,); each box covers roughly (1 - lam) of the
            image area before clipping.

    Returns:
        Four int arrays of shape (B,): x1, y1, x2, y2 corners clipped to the
        image bounds (so the realized area can be smaller than requested).
    """
    # lam is a vector
    B = size[0]
    assert B == lam.shape[0]
    W = size[2]
    H = size[3]
    # Side ratio so that the box area ratio is (1 - lam).
    cut_rat = np.sqrt(1. - lam)
    # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    cut_w = (W * cut_rat).astype(int)
    cut_h = (H * cut_rat).astype(int)
    # uniform random box centers
    cx = np.random.randint(0, W, B)
    cy = np.random.randint(0, H, B)
    bbx1 = np.clip(cx - cut_w // 2, 0, W)
    bby1 = np.clip(cy - cut_h // 2, 0, H)
    bbx2 = np.clip(cx + cut_w // 2, 0, W)
    bby2 = np.clip(cy + cut_h // 2, 0, H)
    return bbx1, bby1, bbx2, bby2
def rand_bbox_3d(size, lam):
    """Sample one random CutMix box per batch element for 5D (clip) inputs.

    Args:
        size: tensor shape tuple (B, C, T, W, H).
        lam: array of shape (B,); each box covers roughly (1 - lam) of the
            clip volume before clipping to the bounds.

    Returns:
        Six int arrays of shape (B,): t1, x1, y1, t2, x2, y2 corners clipped
        to the clip bounds.
    """
    # lam is a vector
    B = size[0]
    assert B == lam.shape[0]
    T = size[2]
    W = size[3]
    H = size[4]
    # Cube-root so the box *volume* ratio is (1 - lam).
    cut_rat = (1. - lam) ** (1/3.)
    # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    cut_t = (T * cut_rat).astype(int)
    cut_w = (W * cut_rat).astype(int)
    cut_h = (H * cut_rat).astype(int)
    # uniform random box centers
    ct = np.random.randint(0, T, B)
    cx = np.random.randint(0, W, B)
    cy = np.random.randint(0, H, B)
    bbt1 = np.clip(ct - cut_t // 2, 0, T)
    bbx1 = np.clip(cx - cut_w // 2, 0, W)
    bby1 = np.clip(cy - cut_h // 2, 0, H)
    bbt2 = np.clip(ct + cut_t // 2, 0, T)
    bbx2 = np.clip(cx + cut_w // 2, 0, W)
    bby2 = np.clip(cy + cut_h // 2, 0, H)
    return bbt1, bbx1, bby1, bbt2, bbx2, bby2
def cutmix_apply(batch, alpha):
    """Apply CutMix in-place to ``batch`` (4D images or 5D clips).

    A Beta(alpha, alpha) mixing ratio is drawn per sample (folded so that
    lam >= 0.5), a matching box is cut from a randomly permuted partner
    sample and pasted in, and ``lam`` is recomputed from the realized
    (clipped) box area.

    Returns:
        (batch, index, lam): the mutated batch, the partner permutation,
        and the per-sample effective mixing weights.
    """
    batch_size = batch.size(0)
    lam = np.random.beta(alpha, alpha, batch_size)
    # Fold to the upper half: each sample keeps the majority of itself.
    lam = np.max((lam, 1.-lam), axis=0)
    index = torch.randperm(batch_size)
    if batch.ndim == 5:
        # 3D
        # NOTE(review): the in-place paste means later samples may copy from
        # partners that were already mixed -- presumably acceptable for
        # augmentation; confirm if exact pairwise mixing is required.
        t1, x1, y1, t2, x2, y2 = rand_bbox_3d(batch.size(), lam)
        for b in range(batch.size(0)):
            batch[b, :, t1[b]:t2[b], x1[b]:x2[b], y1[b]:y2[b]] = batch[index[b], :, t1[b]:t2[b], x1[b]:x2[b], y1[b]:y2[b]]
        # Effective lam from the clipped box volume over H*W*T.
        lam = 1. - ((t2 - t1) * (x2 - x1) * (y2 - y1) / float((batch.size()[-1] * batch.size()[-2] * batch.size()[-3])))
    elif batch.ndim == 4:
        # 2D
        x1, y1, x2, y2 = rand_bbox_2d(batch.size(), lam)
        for b in range(batch.size(0)):
            batch[b, :, x1[b]:x2[b], y1[b]:y2[b]] = batch[index[b], :, x1[b]:x2[b], y1[b]:y2[b]]
        # Effective lam from the clipped box area over H*W.
        lam = 1. - ((x2 - x1) * (y2 - y1) / float((batch.size()[-1] * batch.size()[-2])))
    return batch, index, lam
def cutmix_double_apply(batch, labels, alpha):
    """CutMix that also mixes a dense segmentation target in ``labels['seg']``.

    Both ``batch`` and ``labels['seg']`` are modified in place with the same
    per-sample boxes. 2D inputs only (see ``cutmix_apply`` for the 3D path).

    Returns:
        (batch, index, lam). BUGFIX: the original computed ``lam`` and then
        implicitly returned None, unlike its sibling ``cutmix_apply``; it now
        returns the same triple so callers can weight their losses. The
        change is backward compatible -- the in-place mutation is unchanged
        and callers that ignored the (None) return are unaffected.
    """
    batch_size = batch.size(0)
    lam = np.random.beta(alpha, alpha, batch_size)
    # Fold so lam >= 0.5: each sample keeps the majority of itself.
    lam = np.max((lam, 1.-lam), axis=0)
    index = torch.randperm(batch_size)
    # 2D - does not support 3D right now
    x1, y1, x2, y2 = rand_bbox_2d(batch.size(), lam)
    for b in range(batch.size(0)):
        batch[b, :, x1[b]:x2[b], y1[b]:y2[b]] = batch[index[b], :, x1[b]:x2[b], y1[b]:y2[b]]
        labels['seg'][b, :, x1[b]:x2[b], y1[b]:y2[b]] = labels['seg'][index[b], :, x1[b]:x2[b], y1[b]:y2[b]]
    # Effective lam from the clipped box area over H*W.
    lam = 1. - ((x2 - x1) * (y2 - y1) / float((batch.size()[-1] * batch.size()[-2])))
    return batch, index, lam
class MixupBCELoss(nn.Module):
    """Loss module aware of mixup/cutmix targets.

    During training ``y_true`` is a dict carrying the two mixed label sets
    (``y_true1``/``y_true2``) plus per-sample mixing weights ``lam``; the two
    cross-entropy terms are blended with those weights. During validation a
    plain tensor target is scored with binary cross-entropy on logits.
    """

    def __init__(self):
        super().__init__()

    def forward(self, y_pred, y_true):
        if type(y_true) != dict:
            # Validation: plain tensor target.
            return F.binary_cross_entropy_with_logits(y_pred, y_true)
        # Training: per-sample blend of the losses for the two mixed targets.
        lam = y_true['lam']
        loss_a = F.cross_entropy(y_pred, y_true['y_true1'], reduction='none')
        loss_b = F.cross_entropy(y_pred, y_true['y_true2'], reduction='none')
        blended = lam * loss_a + (1. - lam) * loss_b
        return blended.mean()
| [
"github@jph.am"
] | github@jph.am |
9114ad8bbda5e123afc7de169cc2028aa55c0ce6 | 9ee5c351e72afafbcb2e85ab2180151789b27624 | /pwa_store_backend/pwas/signals.py | 6c3c4c6a1fcfa90b35f2395406864f245833ce99 | [
"MIT"
] | permissive | nathanhfoster/pwa-store-backend | 7d331343c2a6e5d53bf790569c4718a0961459f7 | 93a4bb1d347a9076b40600a2ec2fac92cada03cf | refs/heads/master | 2023-09-01T06:02:09.885737 | 2021-10-17T19:11:05 | 2021-10-17T19:11:05 | 391,519,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | from django.db.models.signals import post_save
from pwa_store_backend.pwas.models import Pwa, PwaAnalytics
def pwa_post_save_handler(sender, **kwargs):
    """Signal receiver: create the PwaAnalytics row for a newly created Pwa.

    Intended for Django's post_save signal; ``created`` is only True on the
    initial insert, so updates to existing Pwa rows are ignored.
    """
    instance = kwargs.get('instance')
    created = kwargs.get('created', False)
    if created:
        PwaAnalytics.objects.create(pwa=instance)
| [
"nabinbhusal80@gmail.com"
] | nabinbhusal80@gmail.com |
5147440f426f86123ef2afa942129888aa4c2655 | 50ee312e98af4531330b66d396013cafefae87e7 | /softlearning/environments/gym/mujoco/walker2d_env.py | 3ed2d86cc402c67b34f62fd22a2c1f2eec171a56 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | richardrl/softlearning | 5f38fd136f7bcb95797818ae5261d4058bcfbb13 | 125ee6ee137145947703018e9980d064c94b1666 | refs/heads/master | 2020-04-21T06:27:46.778091 | 2019-02-02T20:36:38 | 2019-02-02T20:36:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,899 | py | import numpy as np
from gym.envs.mujoco import mujoco_env
from gym import utils
DEFAULT_CAMERA_CONFIG = {
'trackbodyid': 2,
'distance': 4.0,
'lookat': (None, None, 1.15),
'elevation': -20.0,
}
class Walker2dEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """2D walker MuJoCo environment with configurable reward shaping.

    Per step: reward = forward_reward_weight * x_velocity + healthy_reward
    - ctrl_cost_weight * ||action||^2.  The episode may terminate once the
    torso height or angle leaves the configured "healthy" ranges.
    """
    def __init__(self,
                 forward_reward_weight=1.0,
                 ctrl_cost_weight=1e-3,
                 healthy_reward=1.0,
                 terminate_when_unhealthy=True,
                 healthy_z_range=(0.8, 2.0),
                 healthy_angle_range=(-1.0, 1.0),
                 reset_noise_scale=5e-3,
                 exclude_current_positions_from_observation=True):
        # EzPickle stores all constructor args so the env can be pickled.
        utils.EzPickle.__init__(**locals())
        self._forward_reward_weight = forward_reward_weight
        self._ctrl_cost_weight = ctrl_cost_weight
        self._healthy_reward = healthy_reward
        self._terminate_when_unhealthy = terminate_when_unhealthy
        self._healthy_z_range = healthy_z_range
        self._healthy_angle_range = healthy_angle_range
        self._reset_noise_scale = reset_noise_scale
        self._exclude_current_positions_from_observation = (
            exclude_current_positions_from_observation)
        # frame_skip=4: each step() advances the simulation 4 frames.
        mujoco_env.MujocoEnv.__init__(self, "walker2d.xml", 4)
    @property
    def healthy_reward(self):
        # Paid while healthy; when termination is disabled it is always paid.
        return float(
            self.is_healthy
            or self._terminate_when_unhealthy
        ) * self._healthy_reward
    def control_cost(self, action):
        # Quadratic penalty on the action magnitude.
        control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
        return control_cost
    @property
    def is_healthy(self):
        # qpos[1] is torso height (z), qpos[2] its pitch angle.
        z, angle = self.sim.data.qpos[1:3]
        min_z, max_z = self._healthy_z_range
        min_angle, max_angle = self._healthy_angle_range
        healthy_z = min_z < z < max_z
        healthy_angle = min_angle < angle < max_angle
        is_healthy = healthy_z and healthy_angle
        return is_healthy
    @property
    def done(self):
        done = (not self.is_healthy
                if self._terminate_when_unhealthy
                else False)
        return done
    def _get_obs(self):
        position = self.sim.data.qpos.flat.copy()
        # Velocities are clipped to [-10, 10] before being observed.
        velocity = np.clip(
            self.sim.data.qvel.flat.copy(), -10, 10)
        if self._exclude_current_positions_from_observation:
            # Drop the absolute x position so the policy is translation-invariant.
            position = position[1:]
        observation = np.concatenate((position, velocity)).ravel()
        return observation
    def step(self, action):
        x_position_before = self.sim.data.qpos[0]
        self.do_simulation(action, self.frame_skip)
        x_position_after = self.sim.data.qpos[0]
        # Finite-difference forward velocity over the (frame-skipped) step.
        x_velocity = ((x_position_after - x_position_before)
                      / self.dt)
        ctrl_cost = self.control_cost(action)
        forward_reward = self._forward_reward_weight * x_velocity
        healthy_reward = self.healthy_reward
        rewards = forward_reward + healthy_reward
        costs = ctrl_cost
        observation = self._get_obs()
        reward = rewards - costs
        done = self.done
        info = {
            'x_position': x_position_after,
            'x_velocity': x_velocity,
        }
        return observation, reward, done, info
    def reset_model(self):
        # Re-initialize state with small uniform noise around the defaults.
        noise_low = -self._reset_noise_scale
        noise_high = self._reset_noise_scale
        qpos = self.init_qpos + self.np_random.uniform(
            low=noise_low, high=noise_high, size=self.model.nq)
        qvel = self.init_qvel + self.np_random.uniform(
            low=noise_low, high=noise_high, size=self.model.nv)
        self.set_state(qpos, qvel)
        observation = self._get_obs()
        return observation
    def viewer_setup(self):
        # NOTE(review): partially duplicates DEFAULT_CAMERA_CONFIG above;
        # keep the two in sync if camera settings change.
        self.viewer.cam.trackbodyid = 2
        self.viewer.cam.distance = self.model.stat.extent * 0.5
        self.viewer.cam.lookat[2] = 1.15
        self.viewer.cam.elevation = -20
| [
"hartikainen@berkeley.edu"
] | hartikainen@berkeley.edu |
83e185e53ee41e521bdd311be71ebf8b7318349e | 05b8143f004c6531a1d24a66888e2b02a41616cf | /mainApp/apis/cinemas_api.py | 905d41498de23e6efa289decd85035190b6c01d9 | [] | no_license | cangmingssir/flask_tpp | 1b0d8f40fd3298789beffca877874dd45d734987 | e6903a47aa2658a105f79c37a30ef5f44a4d1fab | refs/heads/master | 2020-03-19T12:04:37.056215 | 2018-06-17T08:07:48 | 2018-06-17T08:07:48 | 136,493,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,864 | py | # coding:utf-8
from flask import request, session
from flask_restful import Resource, reqparse, fields, marshal_with
from mainApp import dao
from mainApp.models import Cinemas, User, Qx
from mainApp.settings import QX
def check_login(qx):
    """Decorator factory: require a logged-in user holding permission bit ``qx``.

    The session token is read from the query string (falling back to form
    data) and resolved to a User.  The wrapped view runs only when the user's
    ``rights`` bitmask contains every bit of ``qx``; otherwise a ``{'msg': ...}``
    dict is returned instead of calling the view.
    """
    import functools

    def check(fn):
        # functools.wraps preserves fn's name/docstring, which otherwise get
        # clobbered by the wrapper (confusing flask-restful introspection and
        # debugging output).
        @functools.wraps(fn)
        def wrapper(*args,**kwargs):
            token = request.args.get('token')
            if not token:
                token = request.form.get('token')
            user_id = session.get(token)
            loginUser = dao.getById(User,user_id)
            if not loginUser:
                return {'msg':'请先登录!'}
            # Bitmask check: the user must hold all bits in qx.
            if loginUser.rights & qx == qx:
                return fn(*args,**kwargs)
            qxObj = dao.queryOne(Qx).filter(Qx.right==qx).first()
            return {'msg':'您没有 {} 权限'.format(qxObj.name)}
        return wrapper
    return check
class CinemasApi(Resource):
    """Flask-RESTful resource for listing and deleting cinemas.

    GET supports three modes selected by ``opt``:
      * 'cityAndDistrict' -- cinemas of one district of one city
      * 'city'            -- cinemas of one city
      * anything else     -- all cinemas
    Results are sorted and paginated by ``selectCinemas`` and marshalled
    through ``out_fields``.
    """
    # Request-argument schema (shared by GET/POST).
    parser = reqparse.RequestParser()
    parser.add_argument('token')
    parser.add_argument('opt',required=True)
    parser.add_argument('name',help='电影院名称')
    parser.add_argument('city',help='影院城市不能为空')
    parser.add_argument('district',help='城市区域不能为空')
    parser.add_argument('sort',type=int,default=1)
    parser.add_argument('orderby',default='hallnum')
    parser.add_argument('limit',type=int,default=10)
    parser.add_argument('page',type=int,default=1)
    # Output marshalling schema.
    cinemas_fields = {
        'id':fields.Integer,
        'name':fields.String,
        'city':fields.String,
        'district':fields.String,
        'address':fields.String,
        'phone':fields.String,
        'score':fields.Float,
        'hallnum':fields.Integer,
        'servicecharge':fields.Float,
        'astrict':fields.Integer,
        'flag':fields.Boolean,
        'isdelete':fields.Boolean
    }
    out_fields={
        'returnValue':fields.Nested(cinemas_fields)
    }
    def selectCinemas(self,cinemas):
        """Sort (sort==1 means descending) and paginate a cinemas query,
        returning the marshallable payload dict."""
        args=self.parser.parse_args()
        sort = args.get('sort')
        cinemas = cinemas.order_by(('-' if sort ==1 else '')+args.get('orderby'))
        pager = cinemas.paginate(args.get('page'),args.get('limit'))
        return {'returnValue':pager.items}
    @marshal_with(out_fields)
    def get(self):
        """List cinemas according to ``opt``.

        BUGFIX: the success branches previously *discarded* the result of
        ``selectCinemas`` -- the cityAndDistrict branch fell through to the
        "empty parameter" error message and the catch-all branch returned
        None.  Each branch now returns the selected page.
        """
        args=self.parser.parse_args()
        opt =args.get('opt')
        city = args.get('city')
        district = args.get('district')
        # Cinemas of one district of one city.
        if opt == 'cityAndDistrict':
            if city and district:
                cinemas=dao.queryOne(Cinemas).filter(Cinemas.city==city,
                                                     Cinemas.district==district)
                if not cinemas.count():
                    return {'msg':'该地区没有电影院'}
                return self.selectCinemas(cinemas)
            return {'msg':'城市和城区区域不能为空'}
        # Cinemas of one city.
        elif opt == 'city':
            if city:
                cinemas=dao.queryOne(Cinemas).filter(Cinemas.city==city)
                if not cinemas.count():
                    return {'msg':'该城市没有电影院'}
                return self.selectCinemas(cinemas)
            return {'msg':'搜索城市不能为空'}
        # All cinemas.
        else:
            cinemas=dao.queryAll(Cinemas)
            return self.selectCinemas(cinemas)
    @check_login(QX.DELETE_QX)
    def delete(self):
        """Delete one cinema by its ``cid`` query parameter (requires the
        DELETE permission via check_login)."""
        cid = request.args.get('cid')
        cinemas = dao.getById(Cinemas,cid)
        if not cinemas:
            return {'msg':'您删除的影院不存在'}
        if not dao.delete(cinemas):
            return {'msg':'删除失败'}
        return {'msg':'删除成功'}
    def post(self):
        # Not implemented yet.
        pass
| [
"mu_tongwu@163.com"
] | mu_tongwu@163.com |
6918cee393ce45548af443bf75a05328fc982ad2 | 96c3ca6ad097a024c6d7eabb79d715aca0d1ab66 | /Auth/migrations/0003_auto_20210626_1422.py | 1027129046e4682023e1972313d566602b210a8c | [] | no_license | aimperatori/RFID-API2 | 937cd4e29787688da962f21d95bae67fcad9ca07 | addf00fd1c9efcbea62910a4126e678e00d4cbdd | refs/heads/main | 2023-06-04T15:54:53.661517 | 2021-06-26T16:59:16 | 2021-06-26T16:59:16 | 380,553,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | # Generated by Django 3.2.4 on 2021-06-26 14:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Auth', '0002_alter_auth_created_datetime'),
]
operations = [
migrations.RemoveField(
model_name='auth',
name='id',
),
migrations.AlterField(
model_name='auth',
name='rdifCode',
field=models.CharField(editable=False, max_length=16, primary_key=True, serialize=False),
),
]
| [
"aimperatori@ucs.br"
] | aimperatori@ucs.br |
9cd9d566ee77d3702c5658462da130d13dcc8e87 | 2789c64d06b08722528c4ee22169020dc7c22c4d | /karalamalar_src_idi/ilkmodul.py | d56052a0ccf400f71faa2c896129d55074e74a1d | [] | no_license | kadirkoyici/pythonkursu | d209f992a50c650cc6c79132a9ec41f0e4bd8f81 | f425a908353749791480624cbb26e1a409e805bb | refs/heads/master | 2021-01-19T10:10:58.906420 | 2015-03-09T06:50:32 | 2015-03-09T06:50:32 | 28,480,514 | 0 | 1 | null | 2015-01-06T08:44:51 | 2014-12-25T11:57:39 | Python | UTF-8 | Python | false | false | 179 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
program = "Python"
surum = "2.x"
def faktoriyel(sayi):
    """Return the factorial of ``sayi`` (sayi!); 1 for sayi <= 0."""
    sonuc = 1
    for carpan in range(2, sayi + 1):
        sonuc *= carpan
    return sonuc
"kadirkoyici@hotmail.com"
] | kadirkoyici@hotmail.com |
ab69c6dd82887d2be098cbc3924977fc48cae5df | b367245af0a5e4284afab9f7177c5b136b41283c | /swa-paddle/models/vgg.py | a6858c389dafae27dc7b8100769bfad2f5e111f5 | [
"BSD-2-Clause"
] | permissive | ncpaddle/SWA-paddle | 0544c38a1eb50c9bcf0db460b91a4f0c7beec48c | 48c2642294062e2cf6f0d21c5a1cd30e1002b1f1 | refs/heads/master | 2023-08-24T02:09:06.385258 | 2021-11-03T02:05:35 | 2021-11-03T02:05:35 | 419,941,948 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,386 | py | """
VGG model definition
ported from https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
"""
import math
import paddle
import paddle.nn as nn
from paddle.vision import transforms
__all__ = ['VGG16', 'VGG16BN', 'VGG19', 'VGG19BN']
def make_layers(cfg, batch_norm=False):
    """Build the VGG feature extractor from a config list.

    Each 'M' entry adds a 2x2 max-pool; each int entry adds a 3x3 conv with
    that many output channels (Kaiming-normal init), optionally followed by
    BatchNorm, then ReLU.  Input is assumed to have 3 channels.
    """
    layers = list()
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2D(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2D(in_channels, v, kernel_size=3, padding=1, weight_attr=nn.initializer.KaimingNormal())
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2D(v), nn.ReLU()]
            else:
                layers += [conv2d, nn.ReLU()]
            in_channels = v
    return nn.Sequential(*layers)
cfg = {
16: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
19: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M',
512, 512, 512, 512, 'M'],
}
class VGG(nn.Layer):
    """VGG classifier (Paddle port) for 32x32 inputs: conv/pool features
    followed by a 512-512-num_classes MLP head with dropout."""
    def __init__(self, num_classes=10, depth=16, batch_norm=False):
        super(VGG, self).__init__()
        self.features = make_layers(cfg[depth], batch_norm)
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(512, 512, bias_attr=True),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(512, 512, bias_attr=True),
            nn.ReLU(True),
            nn.Linear(512, num_classes, bias_attr=True),
        )
    def forward(self, x):
        x = self.features(x)
        # Flatten to (batch, features) before the fully connected head.
        x = paddle.reshape(x, [x.shape[0], -1])
        # x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x
class Base:
    # Model "recipe" consumed by the training script: the class to build,
    # its constructor args/kwargs, and the train/test input pipelines
    # (ImageNet-style normalization; random flip + crop at train time).
    base = VGG
    args = list()
    kwargs = dict()
    transform_train = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(32, padding=4),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    ])
# Named model configurations: callers instantiate cls.base(*cls.args, **cls.kwargs).
class VGG16(Base):
    pass
class VGG16BN(Base):
    kwargs = {'batch_norm': True}
class VGG19(Base):
    kwargs = {'depth': 19}
class VGG19BN(Base):
    kwargs = {'depth': 19, 'batch_norm': True}
| [
"845749146@qq.com"
] | 845749146@qq.com |
25b944adffbb62adf37669bd50cacc9ad4b63b47 | c6c61add7e33535e16bc2d6c53cc482ecf11262e | /manage.py | b7a8e3f0aadadd79d752e4aa271f02f5fabce82c | [] | no_license | dfreidin/DjangoGameNight | 300e95eb849f3588a72e415837fffa5091ca8ba3 | 2880c09f3d45a74089823d1abde1401ffa775781 | refs/heads/master | 2020-03-07T03:09:36.094097 | 2018-04-14T21:28:44 | 2018-04-14T21:28:44 | 127,227,841 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 807 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Django's standard command-line entry point for the GameNight project.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "GameNight.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
| [
"36039896+dfreidin@users.noreply.github.com"
] | 36039896+dfreidin@users.noreply.github.com |
140384afde407034a54ba2db872c23687b2803b5 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /exeY2wDuEW4rFeYvL_18.py | df232bc3446da8ba44e538db19a12468c1434bda | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 854 | py | """
Create an ordered 2D list (matrix). A matrix is ordered if its (0, 0) element
is 1, its (0, 1) element is 2, and so on. Your function needs to create an a ×
b matrix. `a` is the first argument and `b` is the second.
### Examples
ordered_matrix(5, 5) ➞ [
[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15],
[16, 17, 18, 19, 20],
[21, 22, 23, 24, 25]
]
ordered_matrix(1, 1) ➞ [[1]]
ordered_matrix(1, 5) ➞ [[1, 2, 3, 4, 5]]
### Notes
* `a` is the height of the matrix (y coordinate), and `b` is the width (x coordinate).
* `a` and `b` will always be positive, and the matrix will always be square shaped (in each row are the same amount of columns).
* `a` and `b` are integers.
"""
def ordered_matrix(a, b):
    """Return an a x b matrix filled row-by-row with 1, 2, 3, ..., a*b."""
    matrix = []
    value = 1
    for _ in range(a):
        matrix.append(list(range(value, value + b)))
        value += b
    return matrix
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
64e20e5645269edc49ca0d18451cd287e6d86f65 | 7ff9de453f53b658d13896bb0c376d67489145a7 | /logics/exercise.py | 2a82386f9fc82b4d34d67b3a2bfb7ac928f3b22d | [
"MIT"
] | permissive | danielkpodo/python-zero-to-mastery | aa4851fd0bfe1f0bfa4191fa141fa9551fd7c7a9 | d39468f48211bc82e4e2613745d9107d433e05af | refs/heads/master | 2022-11-21T10:55:54.776490 | 2020-07-19T15:07:35 | 2020-07-19T15:07:35 | 275,909,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | # sum up the total of this list using looping
from functools import reduce
items = [1, 3, 4, 5, 6, 7, 7, 5, 7, 6, 6, 3]
total = 0
for num in items:
total += num
print(total)
print(sum(items))
# Higher order functions
def summer(accumulator, initial):
    """Reduce step: fold the next element ``initial`` into ``accumulator``."""
    total = accumulator + initial
    return total
result = reduce(summer, items)
print(result)
| [
"kpododanielnarh@gmail.com"
] | kpododanielnarh@gmail.com |
ea2c22d2bcc968840f2546a7797fd481f4baee63 | ccbfc7818c0b75929a1dfae41dc061d5e0b78519 | /aliyun-openapi-python-sdk-master/aliyun-python-sdk-polardb/aliyunsdkpolardb/request/v20170801/ModifyAccountPasswordRequest.py | f043c7559542a8e3d3c3580f0a4b31e7c654201e | [
"Apache-2.0"
] | permissive | P79N6A/dysms_python | 44b634ffb2856b81d5f79f65889bfd5232a9b546 | f44877b35817e103eed469a637813efffa1be3e4 | refs/heads/master | 2020-04-28T15:25:00.368913 | 2019-03-13T07:52:34 | 2019-03-13T07:52:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,300 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ModifyAccountPasswordRequest(RpcRequest):
	"""RPC request wrapper for the PolarDB ``ModifyAccountPassword`` action
	(API version 2017-08-01).  Each getter/setter pair below maps one query
	parameter of the API call."""

	def __init__(self):
		RpcRequest.__init__(self, 'polardb', '2017-08-01', 'ModifyAccountPassword','polardb')

	def get_ResourceOwnerId(self):
		return self.get_query_params().get('ResourceOwnerId')

	def set_ResourceOwnerId(self,ResourceOwnerId):
		self.add_query_param('ResourceOwnerId',ResourceOwnerId)

	def get_AccountName(self):
		return self.get_query_params().get('AccountName')

	def set_AccountName(self,AccountName):
		self.add_query_param('AccountName',AccountName)

	def get_NewAccountPassword(self):
		return self.get_query_params().get('NewAccountPassword')

	def set_NewAccountPassword(self,NewAccountPassword):
		self.add_query_param('NewAccountPassword',NewAccountPassword)

	def get_ResourceOwnerAccount(self):
		return self.get_query_params().get('ResourceOwnerAccount')

	def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
		self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)

	def get_DBClusterId(self):
		return self.get_query_params().get('DBClusterId')

	def set_DBClusterId(self,DBClusterId):
		self.add_query_param('DBClusterId',DBClusterId)

	def get_OwnerAccount(self):
		return self.get_query_params().get('OwnerAccount')

	def set_OwnerAccount(self,OwnerAccount):
		self.add_query_param('OwnerAccount',OwnerAccount)

	def get_OwnerId(self):
		return self.get_query_params().get('OwnerId')

	def set_OwnerId(self,OwnerId):
		self.add_query_param('OwnerId',OwnerId)
"1478458905@qq.com"
] | 1478458905@qq.com |
20a2640e2ad54b344e5be1bcbd8dfe4f8745ed6b | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Games/Chess-py/gui/gui_functions.py | ee41fde1220a24a6a79a27e9b11f9b5729a73a9c | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:920dea71adf194f81da15c63d5ab5246c6637ed6329661630abdf4d56b12f7a6
size 9635
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
5e5ce0df1b1faf85f26ec4a9c54d6ac980b61e5a | 542f898adea1b36d627d4bf437731022f242d2dd | /projects/TridentNet/tridentnet/trident_backbone.py | 7789bd219b01d452e876ad2ad7f811502719465c | [
"Apache-2.0"
] | permissive | facebookresearch/detectron2 | 24bf508e374a98a5e5d1bd4cc96556d5914215f4 | 80307d2d5e06f06a8a677cc2653f23a4c56402ac | refs/heads/main | 2023-08-30T17:00:01.293772 | 2023-08-25T22:10:24 | 2023-08-25T22:10:24 | 206,660,580 | 27,469 | 8,047 | Apache-2.0 | 2023-09-13T09:25:57 | 2019-09-05T21:30:20 | Python | UTF-8 | Python | false | false | 7,846 | py | # Copyright (c) Facebook, Inc. and its affiliates.
import fvcore.nn.weight_init as weight_init
import torch
import torch.nn.functional as F
from detectron2.layers import Conv2d, FrozenBatchNorm2d, get_norm
from detectron2.modeling import BACKBONE_REGISTRY, ResNet, ResNetBlockBase
from detectron2.modeling.backbone.resnet import BasicStem, BottleneckBlock, DeformBottleneckBlock
from .trident_conv import TridentConv
__all__ = ["TridentBottleneckBlock", "make_trident_stage", "build_trident_resnet_backbone"]
class TridentBottleneckBlock(ResNetBlockBase):
    """ResNet bottleneck block whose 3x3 conv is a multi-branch TridentConv:
    the branches share weights but use different dilations, each processing
    its own copy of the input."""
    def __init__(
        self,
        in_channels,
        out_channels,
        *,
        bottleneck_channels,
        stride=1,
        num_groups=1,
        norm="BN",
        stride_in_1x1=False,
        num_branch=3,
        dilations=(1, 2, 3),
        concat_output=False,
        test_branch_idx=-1,
    ):
        """
        Args:
            num_branch (int): the number of branches in TridentNet.
            dilations (tuple): the dilations of multiple branches in TridentNet.
            concat_output (bool): if concatenate outputs of multiple branches in TridentNet.
                Use 'True' for the last trident block.
        """
        super().__init__(in_channels, out_channels, stride)
        assert num_branch == len(dilations)
        self.num_branch = num_branch
        self.concat_output = concat_output
        self.test_branch_idx = test_branch_idx
        # Projection shortcut only when the channel count changes.
        if in_channels != out_channels:
            self.shortcut = Conv2d(
                in_channels,
                out_channels,
                kernel_size=1,
                stride=stride,
                bias=False,
                norm=get_norm(norm, out_channels),
            )
        else:
            self.shortcut = None
        # Place the stride on the 1x1 or the 3x3 conv depending on the variant.
        stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)
        self.conv1 = Conv2d(
            in_channels,
            bottleneck_channels,
            kernel_size=1,
            stride=stride_1x1,
            bias=False,
            norm=get_norm(norm, bottleneck_channels),
        )
        # The weight-shared multi-dilation 3x3 conv is what makes this a
        # "trident" block.
        self.conv2 = TridentConv(
            bottleneck_channels,
            bottleneck_channels,
            kernel_size=3,
            stride=stride_3x3,
            paddings=dilations,
            bias=False,
            groups=num_groups,
            dilations=dilations,
            num_branch=num_branch,
            test_branch_idx=test_branch_idx,
            norm=get_norm(norm, bottleneck_channels),
        )
        self.conv3 = Conv2d(
            bottleneck_channels,
            out_channels,
            kernel_size=1,
            bias=False,
            norm=get_norm(norm, out_channels),
        )
        for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]:
            if layer is not None: # shortcut can be None
                weight_init.c2_msra_fill(layer)
    def forward(self, x):
        # At test time only one branch runs unless test_branch_idx == -1.
        num_branch = self.num_branch if self.training or self.test_branch_idx == -1 else 1
        # Accept a single tensor (first trident block) or a list of branch tensors.
        if not isinstance(x, list):
            x = [x] * num_branch
        out = [self.conv1(b) for b in x]
        out = [F.relu_(b) for b in out]
        out = self.conv2(out)
        out = [F.relu_(b) for b in out]
        out = [self.conv3(b) for b in out]
        if self.shortcut is not None:
            shortcut = [self.shortcut(b) for b in x]
        else:
            shortcut = x
        # Residual add per branch.
        out = [out_b + shortcut_b for out_b, shortcut_b in zip(out, shortcut)]
        out = [F.relu_(b) for b in out]
        if self.concat_output:
            # Last trident block: merge branches along the batch dimension.
            out = torch.cat(out)
        return out
def make_trident_stage(block_class, num_blocks, **kwargs):
    """
    Build one ResNet stage of ``num_blocks`` TridentNet blocks.

    Only the final block concatenates its branch outputs, so the stage
    emits a single tensor downstream.
    """
    kwargs["concat_output_per_block"] = [False] * (num_blocks - 1) + [True]
    return ResNet.make_stage(block_class, num_blocks, **kwargs)
@BACKBONE_REGISTRY.register()
def build_trident_resnet_backbone(cfg, input_shape):
    """
    Create a ResNet instance from config for TridentNet.

    Mirrors detectron2's standard ResNet builder, except that the stage named
    by cfg.MODEL.TRIDENT.TRIDENT_STAGE is built from TridentBottleneckBlock
    (multi-branch, shared-weight, multi-dilation) instead of plain bottlenecks.

    Returns:
        ResNet: a :class:`ResNet` instance.
    """
    # need registration of new blocks/stems?
    norm = cfg.MODEL.RESNETS.NORM
    stem = BasicStem(
        in_channels=input_shape.channels,
        out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS,
        norm=norm,
    )
    freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT
    if freeze_at >= 1:
        # Freeze the stem: stop gradients and fix its batch-norm statistics.
        for p in stem.parameters():
            p.requires_grad = False
        stem = FrozenBatchNorm2d.convert_frozen_batchnorm(stem)
    # fmt: off
    out_features = cfg.MODEL.RESNETS.OUT_FEATURES
    depth = cfg.MODEL.RESNETS.DEPTH
    num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
    width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
    bottleneck_channels = num_groups * width_per_group
    in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
    out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
    stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1
    res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION
    deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE
    deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED
    deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS
    num_branch = cfg.MODEL.TRIDENT.NUM_BRANCH
    branch_dilations = cfg.MODEL.TRIDENT.BRANCH_DILATIONS
    trident_stage = cfg.MODEL.TRIDENT.TRIDENT_STAGE
    test_branch_idx = cfg.MODEL.TRIDENT.TEST_BRANCH_IDX
    # fmt: on
    assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation)
    num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth]
    stages = []
    res_stage_idx = {"res2": 2, "res3": 3, "res4": 4, "res5": 5}
    out_stage_idx = [res_stage_idx[f] for f in out_features]
    trident_stage_idx = res_stage_idx[trident_stage]
    max_stage_idx = max(out_stage_idx)
    for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)):
        # Dilated res5 keeps stride 1 to preserve spatial resolution.
        dilation = res5_dilation if stage_idx == 5 else 1
        first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2
        stage_kargs = {
            "num_blocks": num_blocks_per_stage[idx],
            "stride_per_block": [first_stride] + [1] * (num_blocks_per_stage[idx] - 1),
            "in_channels": in_channels,
            "bottleneck_channels": bottleneck_channels,
            "out_channels": out_channels,
            "num_groups": num_groups,
            "norm": norm,
            "stride_in_1x1": stride_in_1x1,
            "dilation": dilation,
        }
        if stage_idx == trident_stage_idx:
            assert not deform_on_per_stage[
                idx
            ], "Not support deformable conv in Trident blocks yet."
            stage_kargs["block_class"] = TridentBottleneckBlock
            stage_kargs["num_branch"] = num_branch
            stage_kargs["dilations"] = branch_dilations
            stage_kargs["test_branch_idx"] = test_branch_idx
            # Trident blocks carry per-branch dilations instead.
            stage_kargs.pop("dilation")
        elif deform_on_per_stage[idx]:
            stage_kargs["block_class"] = DeformBottleneckBlock
            stage_kargs["deform_modulated"] = deform_modulated
            stage_kargs["deform_num_groups"] = deform_num_groups
        else:
            stage_kargs["block_class"] = BottleneckBlock
        blocks = (
            make_trident_stage(**stage_kargs)
            if stage_idx == trident_stage_idx
            else ResNet.make_stage(**stage_kargs)
        )
        # Standard ResNet channel doubling between stages.
        in_channels = out_channels
        out_channels *= 2
        bottleneck_channels *= 2
        if freeze_at >= stage_idx:
            for block in blocks:
                block.freeze()
        stages.append(blocks)
    return ResNet(stem, stages, out_features=out_features)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
3686504c9548d45692d16c538ab9fac95904d340 | 23cb0778282544457b76cc5d90501a3e142b89b9 | /simplified_scrapy/core/xml_helper.py | bc0c20ac8fd6f940b2f2b1ad7363f02924c72cb3 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | ra2003/simplified-scrapy | bcc4bba8e6a07cceebe0017a5538cb2909b6a6b1 | 4a9b1913b716685e57baa5e8032bf4367060fc98 | refs/heads/master | 2022-04-17T07:11:12.087373 | 2020-04-03T00:10:28 | 2020-04-03T00:10:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,529 | py | #!/usr/bin/python
#coding=utf-8
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
import sys,re
from simplified_scrapy.core.utils import printInfo
from simplified_scrapy.core.dictex import Dict
class XmlDictConfig(Dict):
  """Flatten an ElementTree element into a Dict.

  Root attributes become top-level keys; each child tag maps to a list of
  child dicts.  A leaf element (no children, no attributes handled here)
  only records its tag name under 'tag'.
  """
  def __init__(self, parent_element):
    # Root attributes -> top-level entries.
    if parent_element.items():
      self.update(Dict(parent_element.items()))
    flag = False
    for element in parent_element:
      flag = True
      # First occurrence of this tag: start a list of children.
      if(not self.get(element.tag)):
        self.update({element.tag: []})
      # NOTE(review): getDic/ele2arr are presumably inherited from the
      # Dict base class (not visible here) -- confirm in dictex.
      dic = self.getDic(element)
      self[element.tag].append(dic)
      count = len(element)
      if(count>0):
        self.ele2arr(dic,element)
    if(not flag):
      # No children at all: remember only the tag name.
      self.update({'tag':parent_element.tag})
def convert2Dic(html):
  """Parse a single (possibly unclosed) tag or XML fragment into a Dict.

  First tries a strict ElementTree parse, auto-appending a closing tag for
  bare ``<tag ...>`` fragments.  On failure it falls back to a hand-rolled
  scanner: the tag text is normalized (whitespace,  , quote style),
  missing quotes around attribute values are inserted by a small quote
  state machine, and the result is split into attribute key/value pairs.
  Returns None if both attempts fail.
  """
  try:
    tag=''
    # Fragment without a closing tag and not self-closed: synthesize one.
    if(html.find('</')<0 and html.find('/>')<0):
      start = html.find('<')
      end = html.find(' ',start+1)
      tag = '</'+html[start+1:end]+'>'
    tree = ET.XML(html+tag)
    return XmlDictConfig(tree)
  except Exception as err:
    try:
      # Fallback: work only on the text between '<' and '>'.
      start = html.find('<')
      end = html.find('>')
      html = html[start+1:end].strip('/').strip()
      html = re.sub('(\\s| )+', ' ', html, 0)
      html = re.sub('(\')+', '"', html, 0)
      html = re.sub('(=\s*")+', '="', html, 0)
      lstC = []#list(html)
      N=len(html)
      i=0
      # first: inside a quoted value; flag: inside an unquoted value.
      first = False
      flag = False
      while i<N:
        if html[i]=='"':
          lstC.append(html[i])
          first = not first
        elif not first and html[i]=='=' and html[i+1]!='"':
          # Unquoted value starts: open a synthetic quote.
          lstC.append(html[i])
          lstC.append('"')
          flag=True
        elif not first and flag and html[i]==' ':
          # Unquoted value ends at whitespace: close the synthetic quote.
          flag=False
          lstC.append('"')
          lstC.append(html[i])
        else:
          lstC.append(html[i])
        i+=1
      html = ''.join(lstC)
      # Splitting on '"' alternates between key text and value text.
      paras = html.split('"')
      dic = Dict()
      lastP=None
      first = True
      for para in paras:
        if(first):
          # First chunk holds the tag name and possibly the first key.
          first=False
          tmp=para.split()
          dic['tag']=tmp[0]
          if(len(tmp)>1):
            lastP=tmp[1].strip().strip('=').strip()
          continue
        if(lastP):
          # NOTE(review): Dict appears to return a falsy value for missing
          # keys (no KeyError here) -- confirm in dictex.
          if(not dic[lastP]):
            dic[lastP]=para
          else:
            dic[lastP]+=' '
            dic[lastP]+=para
          lastP=None
        elif para:
          if(para.find('=')>0):
            lastP=para.strip().strip('=').strip()
          else:
            # Bare attribute without a value.
            dic[para]=''
      return dic
    except Exception as err:
      printInfo(err)
      return None
| [
"3095069599@qq.com"
] | 3095069599@qq.com |
55fbd18c40598e0709acb94e5d35fafdbae90167 | afc8d5a9b1c2dd476ea59a7211b455732806fdfd | /Configurations/ggH_SF/Full2017_v7/DYMVA_SYS/samples_recoil.py | dff83e4862c9124922705ab5e015b0c636e3c2f2 | [] | no_license | latinos/PlotsConfigurations | 6d88a5ad828dde4a7f45c68765081ed182fcda21 | 02417839021e2112e740607b0fb78e09b58c930f | refs/heads/master | 2023-08-18T20:39:31.954943 | 2023-08-18T09:23:34 | 2023-08-18T09:23:34 | 39,819,875 | 10 | 63 | null | 2023-08-10T14:08:04 | 2015-07-28T07:36:50 | Python | UTF-8 | Python | false | false | 16,488 | py | import inspect
configurations = os.path.realpath(inspect.getfile(inspect.currentframe())) # this file
configurations = os.path.dirname(configurations) # DYMVA_SYS
configurations = os.path.dirname(configurations) # Full2017_v7
configurations = os.path.dirname(configurations) # ggH_SF
configurations = os.path.dirname(configurations) # Configurations
from LatinoAnalysis.Tools.commonTools import getSampleFiles, getBaseW, addSampleWeight
def nanoGetSampleFiles(inputDir, sample):
    """Resolve a sample name to its list of nanoLatino_* files.

    If the module-level flag ``_samples_noload`` exists and is truthy
    (set by tooling that only needs the sample names, not the files),
    return just ``[sample]`` without touching the filesystem.
    """
    try:
        noload = _samples_noload
    except NameError:
        noload = False
    if noload:
        return [sample]
    return getSampleFiles(inputDir, sample, True, 'nanoLatino_')
# samples
try:
len(samples)
except NameError:
import collections
samples = collections.OrderedDict()
################################################
################# SKIMS ########################
################################################
dataReco = 'Run2017_102X_nAODv7_Full2017v7'
fakeReco = dataReco
mcProduction = 'Fall2017_102X_nAODv7_Full2017v7'
mcSteps = 'MCl1loose2017v7__MCCorr2017v7__l2loose__l2tightOR2017v7{var}'
mcSteps_met = 'MCl1loose2017v7__MCCorr2017v7__l2loose__l2tightOR2017v7__recoilDY' #with MET recoil
fakeSteps = 'DATAl1loose2017v7__l2loose__fakeW'
dataSteps = 'DATAl1loose2017v7__l2loose__l2tightOR2017v7'
##############################################
###### Tree base directory for the site ######
##############################################
SITE=os.uname()[1]
if 'iihe' in SITE:
#treeBaseDir = '/pnfs/iihe/cms/store/user/xjanssen/HWW2015'
treeBaseDir = '/pnfs/iihe/cms/store/group/phys_higgs/cmshww/amassiro/HWWNano'
elif 'cern' in SITE:
treeBaseDir = '/eos/cms/store/group/phys_higgs/cmshww/amassiro/HWWNano'
def makeMCDirectory(var=''):
    """Build the MC tree directory path.

    A non-empty *var* is appended to the production-steps string as a
    '__<var>' suffix; an empty *var* yields the plain steps directory.
    """
    suffix = '__' + var if var else ''
    return os.path.join(treeBaseDir, mcProduction, mcSteps.format(var=suffix))
mcDirectory = makeMCDirectory()
metmcDirectory = os.path.join(treeBaseDir, mcProduction, mcSteps_met)
fakeDirectory = os.path.join(treeBaseDir, fakeReco, fakeSteps)
dataDirectory = os.path.join(treeBaseDir, dataReco, dataSteps)
################################################
############ DATA DECLARATION ##################
################################################
DataRun = [
['B','Run2017B-02Apr2020-v1'],
['C','Run2017C-02Apr2020-v1'],
['D','Run2017D-02Apr2020-v1'],
['E','Run2017E-02Apr2020-v1'],
['F','Run2017F-02Apr2020-v1']
]
DataSets = ['MuonEG','SingleMuon','SingleElectron','DoubleMuon', 'DoubleEG']
DataTrig = {
'MuonEG' : ' Trigger_ElMu' ,
'SingleMuon' : '!Trigger_ElMu && Trigger_sngMu' ,
'SingleElectron' : '!Trigger_ElMu && !Trigger_sngMu && Trigger_sngEl',
'DoubleMuon' : '!Trigger_ElMu && !Trigger_sngMu && !Trigger_sngEl && Trigger_dblMu',
'DoubleEG' : '!Trigger_ElMu && !Trigger_sngMu && !Trigger_sngEl && !Trigger_dblMu && Trigger_dblEl'
}
#########################################
############ MC COMMON ##################
#########################################
# SFweight does not include btag weights
mcCommonWeightNoMatch = 'XSWeight*SFweight*METFilter_MC'
mcCommonWeight = 'XSWeight*SFweight*PromptGenLepMatch2l*METFilter_MC'
###########################################
############# BACKGROUNDS ###############
###########################################
###### DY #######
useDYtt = False
useDYHT = True
# ptllDYW_NLO = '(((0.623108 + 0.0722934*gen_ptll - 0.00364918*gen_ptll*gen_ptll + 6.97227e-05*gen_ptll*gen_ptll*gen_ptll - 4.52903e-07*gen_ptll*gen_ptll*gen_ptll*gen_ptll)*(gen_ptll<45)*(gen_ptll>0) + 1*(gen_ptll>=45))*(abs(gen_mll-90)<3) + (abs(gen_mll-90)>3))'
# ptllDYW_LO = '((0.632927+0.0456956*gen_ptll-0.00154485*gen_ptll*gen_ptll+2.64397e-05*gen_ptll*gen_ptll*gen_ptll-2.19374e-07*gen_ptll*gen_ptll*gen_ptll*gen_ptll+6.99751e-10*gen_ptll*gen_ptll*gen_ptll*gen_ptll*gen_ptll)*(gen_ptll>0)*(gen_ptll<100)+(1.41713-0.00165342*gen_ptll)*(gen_ptll>=100)*(gen_ptll<300)+1*(gen_ptll>=300))'
if useDYtt:
files = nanoGetSampleFiles(mcDirectory, 'DYJetsToTT_MuEle_M-50') + \
nanoGetSampleFiles(mcDirectory, 'DYJetsToLL_M-10to50-LO_ext1')
samples['DY'] = {
'name': files,
'weight': mcCommonWeight + "*( !(Sum$(PhotonGen_isPrompt==1 && PhotonGen_pt>15 && abs(PhotonGen_eta)<2.6) > 0 &&\
Sum$(LeptonGen_isPrompt==1 && LeptonGen_pt>15)>=2) )",
'FilesPerJob': 5,
'suppressNegative' :['all'],
'suppressNegativeNuisances' :['all'],
}
addSampleWeight(samples,'DY','DYJetsToTT_MuEle_M-50','DY_NLO_pTllrw')
addSampleWeight(samples,'DY','DYJetsToLL_M-10to50-LO_ext1','DY_LO_pTllrw')
else:
files = nanoGetSampleFiles(metmcDirectory, 'DYJetsToLL_M-50') + \
nanoGetSampleFiles(metmcDirectory, 'DYJetsToLL_M-10to50-LO_ext1')
samples['DY'] = {
'name': files,
'weight': mcCommonWeight + "*( !(Sum$(PhotonGen_isPrompt==1 && PhotonGen_pt>15 && abs(PhotonGen_eta)<2.6) > 0 &&\
Sum$(LeptonGen_isPrompt==1 && LeptonGen_pt>15)>=2) )",
'FilesPerJob': 8,
'suppressNegative' :['all'],
'suppressNegativeNuisances' :['all'],
}
# Add DY HT Samples
if useDYHT :
samples['DY']['name'] += nanoGetSampleFiles(metmcDirectory, 'DYJetsToLL_M-4to50_HT-100to200_ext1') \
+ nanoGetSampleFiles(metmcDirectory, 'DYJetsToLL_M-4to50_HT-200to400_newpmx') \
+ nanoGetSampleFiles(metmcDirectory, 'DYJetsToLL_M-4to50_HT-400to600') \
+ nanoGetSampleFiles(metmcDirectory, 'DYJetsToLL_M-4to50_HT-600toInf') \
+ nanoGetSampleFiles(metmcDirectory, 'DYJetsToLL_M-50_HT-100to200') \
+ nanoGetSampleFiles(metmcDirectory, 'DYJetsToLL_M-50_HT-200to400') \
+ nanoGetSampleFiles(metmcDirectory, 'DYJetsToLL_M-50_HT-400to600_ext1') \
+ nanoGetSampleFiles(metmcDirectory, 'DYJetsToLL_M-50_HT-600to800') \
+ nanoGetSampleFiles(metmcDirectory, 'DYJetsToLL_M-50_HT-800to1200') \
+ nanoGetSampleFiles(metmcDirectory, 'DYJetsToLL_M-50_HT-1200to2500') \
+ nanoGetSampleFiles(metmcDirectory, 'DYJetsToLL_M-50_HT-2500toInf')
addSampleWeight(samples,'DY','DYJetsToLL_M-50','DY_NLO_pTllrw')
addSampleWeight(samples,'DY','DYJetsToLL_M-10to50-LO_ext1','DY_LO_pTllrw')
if useDYHT :
# Remove high HT from inclusive samples
addSampleWeight(samples,'DY','DYJetsToLL_M-50' , 'LHE_HT<100.0')
addSampleWeight(samples,'DY','DYJetsToLL_M-10to50-LO_ext1', 'LHE_HT<100.0')
# pt_ll weight
addSampleWeight(samples,'DY','DYJetsToLL_M-4to50_HT-100to200_ext1' ,'DY_LO_pTllrw')
addSampleWeight(samples,'DY','DYJetsToLL_M-4to50_HT-200to400_newpmx' ,'DY_LO_pTllrw')
addSampleWeight(samples,'DY','DYJetsToLL_M-4to50_HT-400to600' ,'DY_LO_pTllrw')
addSampleWeight(samples,'DY','DYJetsToLL_M-4to50_HT-600toInf' ,'DY_LO_pTllrw')
addSampleWeight(samples,'DY','DYJetsToLL_M-50_HT-100to200' ,'DY_LO_pTllrw')
addSampleWeight(samples,'DY','DYJetsToLL_M-50_HT-200to400' ,'DY_LO_pTllrw')
addSampleWeight(samples,'DY','DYJetsToLL_M-50_HT-400to600_ext1' ,'DY_LO_pTllrw')
addSampleWeight(samples,'DY','DYJetsToLL_M-50_HT-600to800' ,'DY_LO_pTllrw')
addSampleWeight(samples,'DY','DYJetsToLL_M-50_HT-800to1200' ,'DY_LO_pTllrw')
addSampleWeight(samples,'DY','DYJetsToLL_M-50_HT-1200to2500' ,'DY_LO_pTllrw')
addSampleWeight(samples,'DY','DYJetsToLL_M-50_HT-2500toInf' ,'DY_LO_pTllrw')
###### Top #######
files = nanoGetSampleFiles(mcDirectory, 'TTTo2L2Nu') + \
nanoGetSampleFiles(mcDirectory, 'ST_s-channel') + \
nanoGetSampleFiles(mcDirectory, 'ST_t-channel_antitop') + \
nanoGetSampleFiles(mcDirectory, 'ST_t-channel_top') + \
nanoGetSampleFiles(mcDirectory, 'ST_tW_antitop') + \
nanoGetSampleFiles(mcDirectory, 'ST_tW_top')
samples['top'] = {
'name': files,
'weight': mcCommonWeight,
'suppressNegative' :['all'],
'suppressNegativeNuisances' :['all'],
'FilesPerJob': 1,
}
addSampleWeight(samples,'top','TTTo2L2Nu','Top_pTrw')
###### WW ########
samples['WW'] = {
'name': nanoGetSampleFiles(mcDirectory, 'WWTo2L2Nu'),
'weight': mcCommonWeight + '*nllW',
'suppressNegative' :['all'],
'suppressNegativeNuisances' :['all'],
'FilesPerJob': 1
}
samples['WWewk'] = {
'name': nanoGetSampleFiles(mcDirectory, 'WpWmJJ_EWK_noTop'),
'weight': mcCommonWeight + '*(Sum$(abs(GenPart_pdgId)==6 || GenPart_pdgId==25)==0)', #filter tops and Higgs
'suppressNegative' :['all'],
'suppressNegativeNuisances' :['all'],
'FilesPerJob': 2
}
# k-factor 1.4 already taken into account in XSWeight
files = nanoGetSampleFiles(mcDirectory, 'GluGluToWWToENEN') + \
nanoGetSampleFiles(mcDirectory, 'GluGluToWWToENMN') + \
nanoGetSampleFiles(mcDirectory, 'GluGluToWWToENTN') + \
nanoGetSampleFiles(mcDirectory, 'GluGluToWWToMNEN') + \
nanoGetSampleFiles(mcDirectory, 'GluGluToWWToMNMN') + \
nanoGetSampleFiles(mcDirectory, 'GluGluToWWToMNTN') + \
nanoGetSampleFiles(mcDirectory, 'GluGluToWWToTNEN') + \
nanoGetSampleFiles(mcDirectory, 'GluGluToWWToTNMN') + \
nanoGetSampleFiles(mcDirectory, 'GluGluToWWToTNTN')
samples['ggWW'] = {
'name': files,
'weight': mcCommonWeight + '*1.53/1.4', # updating k-factor
'suppressNegative' :['all'],
'suppressNegativeNuisances' :['all'],
'FilesPerJob': 10
}
######## Vg ########
files = nanoGetSampleFiles(mcDirectory, 'Wg_MADGRAPHMLM') + \
nanoGetSampleFiles(mcDirectory, 'ZGToLLG')
samples['Vg'] = {
'name': files,
'weight': mcCommonWeightNoMatch + '*!(Gen_ZGstar_mass > 0)',
'suppressNegative' :['all'],
'suppressNegativeNuisances' :['all'],
'FilesPerJob': 10
}
######## VgS ########
files = nanoGetSampleFiles(mcDirectory, 'Wg_MADGRAPHMLM') + \
nanoGetSampleFiles(mcDirectory, 'ZGToLLG') + \
nanoGetSampleFiles(mcDirectory, 'WZTo3LNu_mllmin01')
samples['VgS'] = {
'name': files,
'weight': mcCommonWeight + ' * (gstarLow * 0.94 + gstarHigh * 1.14)',
'suppressNegative' :['all'],
'suppressNegativeNuisances' :['all'],
'FilesPerJob': 15,
'subsamples': {
'L': 'gstarLow',
'H': 'gstarHigh'
}
}
addSampleWeight(samples, 'VgS', 'Wg_MADGRAPHMLM', '(Gen_ZGstar_mass > 0 && Gen_ZGstar_mass < 0.1)')
addSampleWeight(samples, 'VgS', 'ZGToLLG', '(Gen_ZGstar_mass > 0)')
addSampleWeight(samples, 'VgS', 'WZTo3LNu_mllmin01', '(Gen_ZGstar_mass > 0.1)')
############ VZ ############
files = nanoGetSampleFiles(mcDirectory, 'ZZTo2L2Nu') + \
nanoGetSampleFiles(mcDirectory, 'ZZTo2L2Q') + \
nanoGetSampleFiles(mcDirectory, 'ZZTo4L') + \
nanoGetSampleFiles(mcDirectory, 'WZTo2L2Q')
samples['VZ'] = {
'name': files,
'weight': mcCommonWeight + '*1.11',
'suppressNegative' :['all'],
'suppressNegativeNuisances' :['all'],
'FilesPerJob': 2
}
########## VVV #########
files = nanoGetSampleFiles(mcDirectory, 'ZZZ') + \
nanoGetSampleFiles(mcDirectory, 'WZZ') + \
nanoGetSampleFiles(mcDirectory, 'WWZ') + \
nanoGetSampleFiles(mcDirectory, 'WWW')
#+ nanoGetSampleFiles(mcDirectory, 'WWG'), #should this be included? or is it already taken into account in the WW sample?
samples['VVV'] = {
'name': files,
'suppressNegative' :['all'],
'suppressNegativeNuisances' :['all'],
'weight': mcCommonWeight
}
###########################################
############# SIGNALS ##################
###########################################
signals = []
#### ggH -> WW
samples['ggH_hww'] = {
'name': nanoGetSampleFiles(mcDirectory, 'GluGluHToWWTo2L2Nu_Powheg_M125')+nanoGetSampleFiles(mcDirectory, 'GGHjjToWWTo2L2Nu_minloHJJ_M125'),
'weight': mcCommonWeight,
'suppressNegative' :['all'],
'suppressNegativeNuisances' :['all'],
'FilesPerJob': 4,
}
addSampleWeight(samples, 'ggH_hww', 'GluGluHToWWTo2L2Nu_Powheg_M125', '(HTXS_stage1_1_cat_pTjet30GeV<107)*Weight2MINLO*1093.8199/1073.9094') #only non GE2J categories with the weight to NNLOPS and renormalize integral
addSampleWeight(samples, 'ggH_hww', 'GGHjjToWWTo2L2Nu_minloHJJ_M125', '(HTXS_stage1_1_cat_pTjet30GeV>106)*1093.8199/1073.9094')
signals.append('ggH_hww')
############ VBF H->WW ############
samples['qqH_hww'] = {
'name': nanoGetSampleFiles(mcDirectory, 'VBFHToWWTo2L2Nu_M125'),
'weight': mcCommonWeight,
'suppressNegative' :['all'],
'suppressNegativeNuisances' :['all'],
'FilesPerJob': 3
}
signals.append('qqH_hww')
############# ZH H->WW ############
samples['ZH_hww'] = {
'name': nanoGetSampleFiles(mcDirectory, 'HZJ_HToWWTo2L2Nu_M125'),
'weight': mcCommonWeight,
'suppressNegative' :['all'],
'suppressNegativeNuisances' :['all'],
'FilesPerJob': 1
}
signals.append('ZH_hww')
samples['ggZH_hww'] = {
'name': nanoGetSampleFiles(mcDirectory, 'GluGluZH_HToWWTo2L2Nu_M125'),
'weight': mcCommonWeight,
'suppressNegative' :['all'],
'suppressNegativeNuisances' :['all'],
'FilesPerJob': 2
}
signals.append('ggZH_hww')
############ WH H->WW ############
samples['WH_hww'] = {
'name': nanoGetSampleFiles(mcDirectory, 'HWplusJ_HToWW_M125') + nanoGetSampleFiles(mcDirectory, 'HWminusJ_HToWW_M125'),
'weight': mcCommonWeight,
'suppressNegative' :['all'],
'suppressNegativeNuisances' :['all'],
'FilesPerJob': 2
}
signals.append('WH_hww')
############ ttH ############
samples['ttH_hww'] = {
'name': nanoGetSampleFiles(mcDirectory, 'ttHToNonbb_M125'),
'weight': mcCommonWeight,
'suppressNegative' :['all'],
'suppressNegativeNuisances' :['all'],
'FilesPerJob': 1
}
signals.append('ttH_hww')
############ H->TauTau ############
samples['ggH_htt'] = {
'name': nanoGetSampleFiles(mcDirectory, 'GluGluHToTauTau_M125_ext1'),
'weight': mcCommonWeight,
'suppressNegative' :['all'],
'suppressNegativeNuisances' :['all'],
'FilesPerJob': 1
}
#signals.append('ggH_htt')
samples['qqH_htt'] = {
'name': nanoGetSampleFiles(mcDirectory, 'VBFHToTauTau_M125'),
'weight': mcCommonWeight,
'suppressNegative' :['all'],
'suppressNegativeNuisances' :['all'],
'FilesPerJob': 2
}
#signals.append('qqH_htt')
samples['ZH_htt'] = {
'name': nanoGetSampleFiles(mcDirectory, 'HZJ_HToTauTau_M125'),
'weight': mcCommonWeight,
'suppressNegative' :['all'],
'suppressNegativeNuisances' :['all'],
'FilesPerJob': 2
}
#signals.append('ZH_htt')
samples['WH_htt'] = {
'name': nanoGetSampleFiles(mcDirectory, 'HWplusJ_HToTauTau_M125') + nanoGetSampleFiles(mcDirectory, 'HWminusJ_HToTauTau_M125'),
'weight': mcCommonWeight,
'suppressNegative' :['all'],
'suppressNegativeNuisances' :['all'],
'FilesPerJob': 2
}
#signals.append('WH_htt')
###########################################
################## FAKE ###################
###########################################
samples['Fake'] = {
'name': [],
'weight': 'METFilter_DATA*fakeW',
'weights': [],
'isData': ['all'],
'suppressNegative' :['all'],
'suppressNegativeNuisances' :['all'],
'FilesPerJob': 30
}
for _, sd in DataRun:
for pd in DataSets:
files = nanoGetSampleFiles(fakeDirectory, pd + '_' + sd)
samples['Fake']['name'].extend(files)
samples['Fake']['weights'].extend([DataTrig[pd]] * len(files))
samples['Fake']['subsamples'] = {
'ee': 'abs(Lepton_pdgId[0]) == 11 && abs(Lepton_pdgId[1]) == 11',
'mm': 'abs(Lepton_pdgId[0]) == 13 && abs(Lepton_pdgId[1]) == 13',
'df': '(Lepton_pdgId[0]*Lepton_pdgId[1] == -11*13)'
}
###########################################
################## DATA ###################
###########################################
samples['DATA'] = {
'name': [],
'weight': 'METFilter_DATA*LepWPCut',
'weights': [],
'isData': ['all'],
'FilesPerJob': 40
}
for _, sd in DataRun:
for pd in DataSets:
files = nanoGetSampleFiles(dataDirectory, pd + '_' + sd)
samples['DATA']['name'].extend(files)
samples['DATA']['weights'].extend([DataTrig[pd]] * len(files))
| [
"calderon@cern.ch"
] | calderon@cern.ch |
4539fad165238fc206bb83ea657ad5f85e84cb86 | 642911284dff300708f9a777c9792eae2bd4c256 | /orgCodes/test.py | 9e5bab8c012a36aa0276473a0a3457f3b0981563 | [] | no_license | keyman9848/bncgit | 3b6aea4bb3bd229e0ae96d15becb9170be134b41 | 915972fe2012024b6e87aaa48a5350dfe815e6e3 | refs/heads/master | 2021-01-25T16:53:45.188797 | 2017-08-24T08:23:54 | 2017-08-24T08:23:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,007 | py | #-*_coding:utf8-*-
from multiprocessing.dummy import Pool as ThreadPool
import re,requests
import urllib.request
class spider(object):
    """Scraper for tianyancha.com: fetches company search pages and extracts
    registration details for each listed company."""

    def get_source(self, url):
        """Fetch *url* and return the page source decoded to text.

        Browser-like headers (plus a captcha-verify Referer) are sent to get
        past the site's anti-robot check.
        """
        hds = {
            'Connection': 'Keep-Alive',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6',
            'User-Agent': 'Googlebot/2.1 (+http://www.googlebot.com/bot.html)',
            'Host': 'www.tianyancha.com',
            'Referer': 'http://antirobot.tianyancha.com/captcha/verify?return_url=http://www.tianyancha.com/search/%E6%B1%9F%E8%A5%BF%20%20%20%E4%BA%BA%E5%8A%9B%E8%B5%84%E6%BA%90/11'
        }
        req = urllib.request.Request(url, data=None, headers=hds)
        # Context manager guarantees the connection is closed; decode because
        # urlopen().read() returns bytes on Python 3 while the str regexes in
        # get_companyurl / get_companyinfo would raise TypeError on bytes.
        with urllib.request.urlopen(req) as response:
            return response.read().decode('utf-8', errors='replace')

    def get_companyurl(self, source):
        """Return absolute company-page URLs scraped from a search-result page."""
        links = re.findall('<a href="(.*)" ng-click', source)
        return ['http://www.tianyancha.com' + link for link in links]

    def get_companyinfo(self, source):
        """Extract one company's registration details from its detail page.

        Prints each field as it is found and returns a dict with name,
        registered capital, registration date, shareholder info, business
        scope and registered address; returns None when the page does not
        contain the expected fields.
        """
        try:
            info = {}
            company_baseinfo = re.findall('class="ng-binding">(.*?)</p>', source)
            print(company_baseinfo[0])
            info['company_name'] = company_baseinfo[0]
            print('公司名称:' + company_baseinfo[1])
            info['Registered_Capital'] = company_baseinfo[1].replace(' ', '')
            print('注册资本:' + info['Registered_Capital'])
            info['register_date'] = company_baseinfo[2]
            print('注册时间:' + info['register_date'])
            info['shareholder_info'] = re.search(
                '<meta name="description" content="(.*?)"', source, re.S).group(1)
            print('股东信息:' + info['shareholder_info'])
            info['scope_of_business'] = re.search(
                r'经营范围:</span>([\s\S]*)</p><!-- end ngIf: company.baseInfo.businessScope -->',
                source).group(1)
            print('经营范围:' + info['scope_of_business'])
            info['register_place'] = re.search(
                r'注册地址:</span>([\s\S]*)</p><!-- end ngIf: company.baseInfo.regLocation -->',
                source, re.S).group(1)
            print('注册地址:' + info['register_place'])
            return info
        except (IndexError, AttributeError):
            # AttributeError covers a failed re.search (None.group); the old
            # message was a format string whose '{}' was never filled in.
            print('No organization match')

    def saveinfo(self, companyinfo):
        """Append every scraped record to jiangxiHr.txt (UTF-8)."""
        # 'with' closes the file even if a record is malformed; None entries
        # (failed parses from get_companyinfo) are skipped instead of crashing.
        with open('jiangxiHr.txt', 'a', encoding='utf-8') as f:
            for each in companyinfo:
                if not each:
                    continue
                f.write('公司名称:' + each['company_name'] + '\n')
                f.write('注册资本:' + each['Registered_Capital'] + '\n')
                f.write('注册时间:' + each['register_date'] + '\n')
                f.write('股东信息:' + each['shareholder_info'] + '\n')
                f.write('经营范围:' + each['scope_of_business'] + '\n')
                f.write('注册地址:' + each['register_place'] + '\n')
if __name__ == '__main__':
    # NOTE(review): the thread pool is created but never given any work
    # (no map/apply calls) -- the scraping below runs sequentially.
    pool = ThreadPool(4)
    classinfo = []
    HRspider = spider()
    # Walk search-result pages 1..14 (URL-encoded query is "江西   人力资源").
    for i in range(1,15):
        url='http://www.tianyancha.com/search/%E6%B1%9F%E8%A5%BF%20%20%20%E4%BA%BA%E5%8A%9B%E8%B5%84%E6%BA%90/'+ str(i)
        print( u'正在处理页面:' + url)
        html=HRspider.get_source(url)
        # NOTE(review): get_source returns bytes under Python 3 while
        # get_companyurl applies a str regex -- confirm a decode happens
        # somewhere before relying on this path.
        get_companylink=HRspider.get_companyurl(html)
        # Slice [1:19] drops the first match and caps at 18 links per page --
        # presumably skipping a non-company link; verify against the page HTML.
        for eachlink in get_companylink[1:19]:
            companysource=HRspider.get_source(eachlink)
            companyinfo=HRspider.get_companyinfo(companysource)
            classinfo.append(companyinfo)
    HRspider.saveinfo(classinfo)
    pool.close()
    pool.join()
"1938036263@qq.com"
] | 1938036263@qq.com |
72b950570c7fa0784332ef7fa8b80fda93f2945a | 104d097ee850d8643b97d6bc17722e322400c358 | /desafio6.py | 16e05c478f7c8d95ac97577e322d925cc1717659 | [] | no_license | rodolfocr92/desafio | 8ba1b031b1f8b3e266af62a7d0b2678ffa0dda14 | 65cc9570797e33b885dc0d21252ecfd674774428 | refs/heads/master | 2022-11-17T22:24:48.713745 | 2020-07-12T19:01:08 | 2020-07-12T19:01:08 | 277,664,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py |
# Sinking-fund problem: compute the fixed monthly deposit needed to reach a
# target amount between two ages, under monthly compound interest.
# NOTE(review): 'io' shadows the stdlib module name (harmless in this script).
io = float(input("Insira sua idade atual: "))
ia = float(input("Insira a idade que deseja sacar o investimento: "))
vf = float(input("Insira o montante final do investimento: "))
i = float(input("Insira a taxa de juros mensais em número decimal: "))
# Number of monthly deposits between the current age and the withdrawal age.
meses = (ia - io)*12
# Future value of an ordinary annuity solved for the payment:
# dep = FV * i / ((1 + i)^n - 1)
dep = (vf*i)/((1+i)**meses - 1)
depr = round(dep, 2)
print("Valor do depósito mensal: R$" + str(depr))
| [
"noreply@github.com"
] | rodolfocr92.noreply@github.com |
050d720d2e8a80dea21839c64f966f5092d892a2 | 1f2a5ca2f1631d30cd1a6b7916a13ec1299352d7 | /treasure/my_progress/apps.py | ae53c090de3372e80b757046c0b91d6a33592d6b | [] | no_license | danyyoo3/Personal-Website | 5449349e27e38865a10f06143c8c384c4be5a63b | b24ffd0c7d4fa311105fd158f459f98b3b8ef14d | refs/heads/main | 2023-03-03T13:33:07.408336 | 2021-02-15T04:03:47 | 2021-02-15T04:03:47 | 338,951,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | from django.apps import AppConfig
class MyProgressConfig(AppConfig):
    """Django application configuration for the my_progress app."""
    # Dotted label Django uses to register and refer to this application.
    name = 'my_progress'
| [
"noreply@github.com"
] | danyyoo3.noreply@github.com |
b001375d4646517af6ed057a18134e3829056b8a | ffbf665d491cafdfd88e10cea3628375ac7e7dbe | /mysite/env/bin/easy_install | 4651492f793dc04ad5c5479b0c61d850ad7c5a19 | [] | no_license | mnbqwe10/blackmores | fc885a6813ff6a3ba75da64e799612923e182669 | c3430727c9586a169e19179b430f75e4bd870e9b | refs/heads/master | 2021-04-06T00:15:27.314049 | 2018-04-11T14:21:06 | 2018-04-11T14:21:06 | 124,758,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | #!/home/qqlivewell/mysite/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Generated setuptools console-script shim: strip the "-script.py(w)" /
    # ".exe" suffix that setuptools may append to argv[0] (so help text shows
    # the plain command name), then delegate to easy_install's main().
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"qulong627@gmail.com"
] | qulong627@gmail.com | |
c74a079a4da583c78d6b6231f9aed74ecc76caab | ee543767f2af3113645adc2db6ffa50029a1b58e | /tests/dataset/convert_dataset.py | 6c82d5adcab5c9ef2c9c821e103eab1662975cc9 | [
"MIT"
] | permissive | parkertomf/WrightTools | 97bb466d2a83512120b43d5aa87a034a92f94db9 | 86c6f71fc4d157f4d112fb94e5b8055246912dc8 | refs/heads/master | 2020-03-18T22:20:38.587304 | 2018-06-11T23:11:20 | 2018-06-11T23:11:20 | 135,339,820 | 0 | 0 | MIT | 2018-05-29T18:52:58 | 2018-05-29T18:52:57 | null | UTF-8 | Python | false | false | 1,245 | py | """Test dataset unit conversion."""
# --- import --------------------------------------------------------------------------------------
import numpy as np
import WrightTools as wt
from WrightTools import datasets
# --- define --------------------------------------------------------------------------------------
def test_exception():
    """Converting a nm axis to fs must raise UnitsError and leave units intact."""
    data = wt.data.from_PyCMDS(datasets.PyCMDS.w1_000)
    raised = False
    try:
        data['w1'].convert('fs')
    except wt.exceptions.UnitsError:
        raised = True
    assert raised
    assert data['w1'].units == 'nm'
    data.close()
def test_w1_wa():
    """wa axis converts nm -> eV with the expected extrema."""
    data = wt.data.from_PyCMDS(datasets.PyCMDS.w1_wa_000)
    channel = data['wa']
    assert channel.units == 'nm'
    channel.convert('eV')
    assert np.isclose(channel.max(), 1.5802564757220569)
    assert np.isclose(channel.min(), 0.6726385958618104)
    data.close()
def test_wigner():
    """d1 axis of the wigner dataset converts cleanly to ns."""
    data = wt.data.from_COLORS(datasets.COLORS.v2p2_WL_wigner)
    axis = data['d1']
    axis.convert('ns')
    assert axis.units == 'ns'
    data.close()
# --- run -----------------------------------------------------------------------------------------
if __name__ == '__main__':
    # Allow running the checks directly (outside pytest).
    test_exception()
    test_w1_wa()
    test_wigner()
| [
"ksunden@users.noreply.github.com"
] | ksunden@users.noreply.github.com |
346ab1f2ac22d28a59f677348f56c7887974c730 | e22a4330d906f51932bf55eeeba705f447b74699 | /python/HorizonPlot.py | e8240f75787975dcc9c2d56e122a9f5cafffe902 | [
"Apache-2.0"
] | permissive | titodalcanton/flare | 5dc39c18152205b1fe2f1a0ed4f7e0607adf6fd4 | 4ffb02977d19786ab8c1a767cc495a799d9575ae | refs/heads/master | 2020-03-29T06:21:10.932174 | 2018-07-23T15:51:24 | 2018-07-23T15:51:24 | 149,621,008 | 0 | 0 | Apache-2.0 | 2018-09-20T14:19:13 | 2018-09-20T14:19:13 | null | UTF-8 | Python | false | false | 2,020 | py | import flare
import numpy as np
import argparse
# Fractional-error threshold passed to every HorizonPlot call below.
eps=0.3
# CLI: the single positional argument names the run's basename/directory.
parser = argparse.ArgumentParser(description="generate a discover queue script for a flare run");
parser.add_argument('label',help="The basename/directory for the run")
args=parser.parse_args()
flare.flare_dir="../flare"
#file="FisherStudy-most-HH.dat"
#file="FisherStudy.dat"
label=args.label+"/"
# Historical run labels kept for reference:
#label="test11.15"
#label="L3LISA-v1-sens-but-5Gm-test-wide"
#label="L3LISARef"
#label="LISA2017camp_10yr/"
#label="2arm-LISA/"
#label="tRef-redef-LISA2017/"
#label="LISA2017-Nov-flaretest/"
#label="slow-orbit-LISA/"
#label="fast-orbit-LISA/"
#label="big-orbit-LISA/"
#label="slower-orbit-LISA/"
#label="tiny-orbit-LISA/"
# NOTE(review): 'file' shadows a builtin name; harmless here, but worth
# renaming if this script grows.
file=label+"FisherStudy.dat"
#flare.FisherPlot(label,9,[1.1,2.0,4.0,10.0],[10,100,1000],[0.1,0.3],file)
#flare.FisherPlot(label,0,[1.1,2,4,10],[10,100,1000],[0.1,.3],file)
#flare.FisherPlot(label,0,[2],[10],[.3],file)
# Horizon plots for several parameter indices (0, 1, 3, 9) of the Fisher
# study -- presumably columns of FisherStudy.dat; confirm the index meaning
# against flare.HorizonPlot.  The list argument gives the contour levels;
# show_range=True adds a spread band for a reduced set of levels.
flare.HorizonPlot(label,0,[2],10,eps,file,[0.001,0.003,0.01,0.03,0.10,0.30],scaled=True)
flare.HorizonPlot(label,0,[2],10,eps,file,[0.001,0.01,0.10],scaled=True,show_range=True)
flare.HorizonPlot(label,1,[2],10,eps,file,[0.001,0.003,0.01,0.03,0.10,0.30],scaled=True)
flare.HorizonPlot(label,1,[2],10,eps,file,[0.001,0.01,0.10],scaled=True,show_range=True)
#flare.HorizonPlot(label,2,[2],10,[.3],file,[0.10,0.30,1.0,3.0,10.0,30.0,100.0],scaled=True)
flare.HorizonPlot(label,3,[2],10,eps,file,[0.003,0.01,0.03,0.10,0.30],scaled=True)
flare.HorizonPlot(label,3,[2],10,eps,file,[0.01,0.10,0.5],scaled=True,show_range=True)
#flare.HorizonPlot(label,9,[2],10,[.3],file,[8.4e-7,8.4e-6,8.4e-5,3.0e-4,3.0e-3,3.0e-2],scaled=True)
#flare.HorizonPlot(label,9,[2],10,[.3],file,[100,900,3600,28800,360000],scaled=True)
#flare.HorizonPlot(label,9,[2],10,[.3],file,[100,3600,90000],scaled=True,show_range=True)
flare.HorizonPlot(label,9,[2],10,eps,file,[0.0278,0.25,1.0,9.0,100.0],scaled=True)
flare.HorizonPlot(label,9,[2],10,eps,file,[0.0278,1.0,25.0],scaled=True,show_range=True)
#flare.FisherPlot(label,0,[2],10,[.3],file,scaled=False)
| [
"john.g.baker@nasa.gov"
] | john.g.baker@nasa.gov |
977e71a983cd038695cbd3ea08c4705d1f871367 | 9671c2b41ff71727d72fd7f43006a2a968561200 | /local/neuralNetworks/classifiers/lstm.py | f3108e7903c37548e9740b50908d34460233f2ab | [] | no_license | didadida-r/tf-cnn | 4adb6ceefad6d828e8e847bfe3680bc8095e17b1 | 8cc39bdace55630a999607c19091dce2342281a9 | refs/heads/master | 2023-01-10T22:51:51.771840 | 2017-06-03T04:11:55 | 2017-06-03T04:11:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,147 | py | '''@file lstm.py
The LSTM neural network classifier'''
import seq_convertors
import tensorflow as tf
from classifier import Classifier
from layer import FFLayer
from activation import TfActivation
import inspect
class LSTM(Classifier):
    '''This class is a graph for lstm neural nets.'''
    def __init__(self, output_dim, num_layers, num_units, activation,
                 layerwise_init=True):
        '''
        DNN constructor
        Args:
            output_dim: the DNN output dimension
            num_layers: number of hidden layers
            num_units: number of hidden units
            activation: the activation function
            layerwise_init: if True the layers will be added one by one,
                otherwise all layers will be added to the network in the
                beginning
        '''
        #super constructor
        super(LSTM, self).__init__(output_dim)
        #save all the DNN properties
        self.num_layers = num_layers
        print(self.num_layers)
        self.num_units = num_units
        print(self.num_units)
        self.activation = activation
        self.layerwise_init = layerwise_init
        # NOTE(review): the line below immediately overwrites the constructor
        # argument with None, permanently disabling the layer-wise
        # initialisation branch in __call__ (which would otherwise reference
        # an undefined `add_layer_op` -- see below).
        self.layerwise_init = None
    def __call__(self, inputs, seq_length, is_training=False, reuse=False,
                 scope=None):
        '''
        Add the LSTM variables and operations to the graph
        Args:
            inputs: the inputs to the neural network, this is a list containing
                a [batch_size, input_dim] tensor for each time step
            seq_length: The sequence lengths of the input utterances, if None
                the maximal sequence length will be taken
            is_training: whether or not the network is in training mode
            reuse: whether or not the variables in the network should be reused
            scope: the name scope
        Returns:
            A quadruple containing:
                - output logits
                - the output logits sequence lengths as a vector
                - a saver object
                - a dictionary of control operations (always None here, since
                  layerwise_init is forced to None in the constructor):
                    -add: add a layer to the network
                    -init: initialise the final layer
        '''
        with tf.variable_scope(scope or type(self).__name__, reuse=reuse):
            # output projection: [num_units, output_dim] weights plus a
            # zero-initialised bias
            weights = {'out':
                tf.get_variable('weights_out', [self.num_units, self.output_dim], initializer=tf.contrib.layers.xavier_initializer())
            }
            biases = {'out':
                tf.get_variable('biases_out', [self.output_dim], initializer=tf.constant_initializer(0))
            }
            #convert the sequential data to non sequential data
            nonseq_inputs = seq_convertors.seq2nonseq(inputs, seq_length)
            input_dim = nonseq_inputs.shape[1]  # NOTE(review): unused
            # NOTE(review): hard-coded frame layout -- each row is assumed to
            # be an 11-frame splice of 40-dimensional features; confirm this
            # matches the feature pipeline before reusing elsewhere.
            nonseq_inputs = tf.reshape(nonseq_inputs,[-1,11,40])
            n_steps = 11
            # time-major layout [n_steps, batch, 40] as expected by static_rnn
            nonseq_inputs = tf.transpose(nonseq_inputs, [1, 0, 2])
            keep_prob = 1
            # define the lstm cell
            # use the dropout in training mode
            # NOTE(review): keep_prob is fixed to 1 above, so this branch is
            # dead; even if taken, the cell it builds is overwritten by the
            # unconditional assignment that follows.
            if is_training and keep_prob < 1:
                lstm_cell = tf.contrib.rnn.LayerNormBasicLSTMCell(self.num_units, forget_bias=0.0,
                    input_size=None, activation=tf.nn.relu, layer_norm=False, norm_gain=1.0,
                    norm_shift=0.0, dropout_keep_prob=keep_prob, dropout_prob_seed=None)
            lstm_cell = tf.contrib.rnn.LayerNormBasicLSTMCell(self.num_units, forget_bias=0.0,
                input_size=None, activation=tf.nn.relu, layer_norm=False, norm_gain=1.0,
                norm_shift=0.0, dropout_keep_prob=1, dropout_prob_seed=None)
            # stack the lstm to form multi-layers
            cell = tf.contrib.rnn.MultiRNNCell(
                [lstm_cell]*self.num_layers, state_is_tuple=True)
            # print(int(nonseq_inputs.shape[0]))
            # self._initial_state = cell.zero_state(int(nonseq_inputs.shape[0]), tf.float32)
            # apply the dropout for the inputs to the first hidden layer
            if is_training and keep_prob < 1:
                nonseq_inputs = tf.nn.dropout(nonseq_inputs, keep_prob)
            final_nonseq_inputs = tf.unstack(nonseq_inputs, num=n_steps, axis=0)
            # Get lstm cell output initial_state=self._initial_state,
            outputs, states = tf.contrib.rnn.static_rnn(cell, final_nonseq_inputs, dtype=tf.float32)
            # keep only the activation of the last time step
            outputs = outputs[-1]
            # Linear activation, using rnn inner loop last output
            logits = tf.matmul(outputs, weights['out']) + biases['out']
            # # if self.layerwise_init:
            # #     #variable that determines how many layers are initialised
            # #     #in the neural net
            # #     initialisedlayers = tf.get_variable(
            # #         'initialisedlayers', [],
            # #         initializer=tf.constant_initializer(0),
            # #         trainable=False,
            # #         dtype=tf.int32)
            # #     #operation to increment the number of layers
            # #     add_layer_op = initialisedlayers.assign(initialisedlayers+1).op
            # #     #compute the logits by selecting the activations at the layer
            # #     #that has last been added to the network, this is used for layer
            # #     #by layer initialisation
            # #     logits = tf.case(
            # #         [(tf.equal(initialisedlayers, tf.constant(l)),
            # #           Callable(activations[l]))
            # #          for l in range(len(activations))],
            # #         default=Callable(activations[-1]),
            # #         exclusive=True, name='layerSelector')
            # #     logits.set_shape([None, self.num_units])
            # NOTE(review): this branch cannot run (layerwise_init is None)
            # and would fail if re-enabled: `add_layer_op` is only defined in
            # the commented-out code above.
            if self.layerwise_init:
                #operation to initialise the final layer
                init_last_layer_op = tf.initialize_variables(
                    tf.get_collection(
                        tf.GraphKeys.VARIABLES,
                        scope=(tf.get_variable_scope().name + '/layer'
                               + str(self.num_layers))))
                control_ops = {'add':add_layer_op, 'init':init_last_layer_op}
            else:
                control_ops = None
            #convert the logits to sequence logits to match expected output
            seq_logits = seq_convertors.nonseq2seq(logits, seq_length,
                                                   len(inputs))
            #create a saver
            saver = tf.train.Saver()
        return seq_logits, seq_length, saver, control_ops
class Callable(object):
    '''Wraps an arbitrary value so it can stand in for a zero-argument callable.'''
    def __init__(self, value):
        '''
        Store the value to hand back later.
        Args:
            value: the object to wrap
        '''
        self.value = value
    def __call__(self):
        '''
        Returns:
            the wrapped value, unchanged
        '''
        return self.value
"605467297@qq.com"
] | 605467297@qq.com |
2bc90326c40d23b700ea9c755c27c0b0e2ed9867 | 138909a17b9f4b82ec91a209443864fbd18c1248 | /ExtraLongFactorial.py | f91e86d87985bb9d190fbd2252dde385422c8a42 | [] | no_license | surbhilakhani/Hackerrank | 70fc0a7bf85e73dbc6bd1f4695e148f7080a0c59 | f6cea99c5787c10ea5817bb9c4f3be8da1f6a73c | refs/heads/master | 2021-01-19T03:03:05.435417 | 2016-07-01T13:45:19 | 2016-07-01T13:45:19 | 62,326,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | import sys
import math
n = int(raw_input().strip())
print math.factorial(n)
| [
"noreply@github.com"
] | surbhilakhani.noreply@github.com |
3435a3efe4f2d2b77b6cd5f74d53ddf05fa774d4 | 473f6841c545e7edc40370782393eab947b25bc2 | /NeuralNetworks/NNmodel.py | 0c3099e617696d614d4e30fb90db3d71c385841d | [] | no_license | SJYbetter/supervised-machine-learning | 5dcb592458fa8a1009fcec64b0624a60b3ce5dde | b8ed96ae34c5160af078dfe2fc1d771df31f0f9a | refs/heads/master | 2020-04-12T12:36:21.888430 | 2019-04-22T13:14:19 | 2019-04-22T13:14:19 | 162,497,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,401 | py | import numpy as np
import math
import random
import matplotlib.pyplot as plt
import scipy.io
def plot(x, y, tittle):
    """Scatter-plot 2-D samples coloured by their binary label.

    Args:
        x: 2 x n matrix of coordinates (row 0 = x-axis, row 1 = y-axis).
        y: 1 x n matrix of labels; label 1 is drawn in blue, others in red.
        tittle: title string for the figure.
    """
    m, n = np.shape(x)
    pos_x, pos_y = [], []
    neg_x, neg_y = [], []
    for i in range(n):
        if int(y[0, i]) == 1:
            pos_x.append(x[0, i])
            # fixed: previously appended x[0, i], plotting x against itself
            pos_y.append(x[1, i])
        else:
            neg_x.append(x[0, i])
            neg_y.append(x[1, i])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # fixed: 'cycle'/'square' are not valid matplotlib markers (ValueError);
    # use 'o' (circle) and 's' (square).  The second scatter now draws the
    # negative class instead of re-drawing the positive one.
    ax.scatter(pos_x, pos_y, s=20, c='blue', marker='o', alpha=0.5)
    ax.scatter(neg_x, neg_y, s=20, c='red', marker='s', alpha=0.5)
    plt.title(tittle)
def read(filename):
    """Load the train/validation/test splits stored in a MATLAB .mat file.

    Each stored array is transposed and wrapped in ``np.matrix``.

    Returns a 6-tuple:
    (X_test, X_train, X_validation, Y_test, Y_train, Y_validation)
    """
    data = scipy.io.loadmat(filename)
    keys = ('X_test', 'X_train', 'X_validation',
            'Y_test', 'Y_train', 'Y_validation')
    return tuple(np.asmatrix(data[key].T) for key in keys)
# Build a simple nested-list matrix (NumPy would be faster, but this keeps
# the network itself dependency-free).
def makeMatrix(I, J, fill=0.0):
    """Return an I x J list-of-lists with every entry equal to ``fill``."""
    return [[fill] * J for _ in range(I)]
# our sigmoid function, tanh is a little nicer than the standard 1/(1+e^-x)
def sigmoid(x):
    """Activation function: despite the name this is tanh, not the logistic sigmoid."""
    return math.tanh(x)
# Derivative of the activation, expressed in terms of its output y = tanh(x).
def dsigmoid(y):
    """Return d/dx tanh(x) given y = tanh(x), i.e. 1 - y**2."""
    squared = y ** 2
    return 1.0 - squared
def tanh(z):
    """Hyperbolic tangent computed from exponentials; works element-wise on
    numpy arrays as well as on scalars."""
    pos_exp, neg_exp = np.exp(z), np.exp(-z)
    return (pos_exp - neg_exp) / (pos_exp + neg_exp)
def dtanh(z):
    """Derivative of tanh at z: 1 - tanh(z)**2 (uses the module-level tanh)."""
    t = tanh(z)
    return 1 - t * t
def relu(z):
    """Rectified linear unit: z when non-negative, otherwise 0."""
    if z < 0:
        return 0
    return z
def drelu(z):
    """Derivative of relu: 1 for positive z, 0 for negative z.

    The derivative does not exist at z == 0, so that raises ArithmeticError
    (same contract as the original implementation).
    """
    if z > 0:
        return 1
    if z < 0:
        return 0
    raise ArithmeticError("undefined")
class NN:
    """A minimal fully-connected network with one hidden layer, trained by
    plain backpropagation with momentum.  Weights are list-of-lists built
    with makeMatrix; activations use the module-level sigmoid (tanh)."""
    def __init__(self, ni, nh, no):
        """Create a network with ni inputs (+1 bias), nh hidden and no output nodes."""
        # number of input, hidden, and output nodes
        self.ni = ni + 1 # +1 for bias node
        self.nh = nh
        self.no = no
        # activations for nodes
        self.ai = [1.0] * self.ni
        self.ah = [1.0] * self.nh
        self.ao = [1.0] * self.no
        # create weights
        self.wi = makeMatrix(self.ni, self.nh)
        self.wo = makeMatrix(self.nh, self.no)
        # set them to random vaules
        for i in range(self.ni):
            for j in range(self.nh):
                self.wi[i][j] = random.normalvariate(0, 0.01)
        for j in range(self.nh):
            for k in range(self.no):
                self.wo[j][k] = random.normalvariate(0, 0.01)
        # last change in weights for momentum
        self.ci = makeMatrix(self.ni, self.nh)
        self.co = makeMatrix(self.nh, self.no)
    def update(self, inputs):
        """Forward pass: load ``inputs`` into the input activations and
        propagate through hidden and output layers; returns a copy of the
        output activations."""
        if len(inputs) != self.ni - 1:
            raise ValueError('wrong number of inputs')
        # input activations
        for i in range(self.ni - 1):
            # self.ai[i] = sigmoid(inputs[i])
            self.ai[i] = inputs[i]
        # hidden activations
        # (note: the local name 'sum' shadows the builtin; kept unchanged)
        for j in range(self.nh):
            sum = 0.0
            for i in range(self.ni):
                sum = sum + self.ai[i] * self.wi[i][j]
            self.ah[j] = sigmoid(sum)
        # output activations
        for k in range(self.no):
            sum = 0.0
            for j in range(self.nh):
                sum = sum + self.ah[j] * self.wo[j][k]
            self.ao[k] = sigmoid(sum)
        return self.ao[:]
    def backPropagate(self, targets, N, M):
        """One backpropagation step toward ``targets`` after a forward pass.

        N is the learning rate and M the momentum factor; returns the summed
        squared error for this sample.  (The original length check on
        ``targets`` had been disabled by turning it into a string literal:)
        """
        # if len(targets) != self.no:
        #     raise ValueError('wrong number of target values')
        # calculate error terms for output
        output_deltas = [0.0] * self.no
        for k in range(self.no):
            error = targets[k] - self.ao[k]
            output_deltas[k] = dsigmoid(self.ao[k]) * error
        # calculate error terms for hidden
        hidden_deltas = [0.0] * self.nh
        for j in range(self.nh):
            error = 0.0
            for k in range(self.no):
                error = error + output_deltas[k] * self.wo[j][k]
            hidden_deltas[j] = dsigmoid(self.ah[j]) * error
        # update output weights
        for j in range(self.nh):
            for k in range(self.no):
                change = output_deltas[k] * self.ah[j]
                self.wo[j][k] = self.wo[j][k] + N * change + M * self.co[j][k]
                self.co[j][k] = change
                # print N*change, M*self.co[j][k]
        # update input weights
        for i in range(self.ni):
            for j in range(self.nh):
                change = hidden_deltas[j] * self.ai[i]
                self.wi[i][j] = self.wi[i][j] + N * change + M * self.ci[i][j]
                self.ci[i][j] = change
        # calculate error
        error = 0.0
        for k in range(len(targets)):
            error = error + 0.5 * (targets[k] - self.ao[k]) ** 2
        return error
    def test(self, pattern):
        """Print ``input -> network output`` for each item in ``pattern``
        (each item is expected to hold the input vector at index 0)."""
        for p in pattern:
            print(p[0], '->', self.update(p[0]))
    def weights(self):
        """Print the current input->hidden and hidden->output weight matrices."""
        print('Input weights:')
        for i in range(self.ni):
            print(self.wi[i])
        print()
        print('Output weights:')
        for j in range(self.nh):
            print(self.wo[j])
    def train(self, x, y, iterations=1000, N=0.5, M=0.1):
        """Train on x (samples in rows) against y, printing the epoch error
        every 100 iterations.

        NOTE(review): the inner loop hard-codes 120 samples and feeds
        ``y[p, 0]`` (a scalar) where backPropagate indexes ``targets[k]``;
        confirm the expected shapes of x and y before reusing this method.
        """
        # N: learning rate
        # M: momentum factor
        for i in range(iterations):
            error = 0.0
            for p in range(120):
                inputs = x[p, :]
                targets = y[p, 0]
                self.update(inputs)
                error = error + self.backPropagate(targets, N, M)
            if i % 100 == 0:
                print('error %-.5f' % error)
class NNv2:
    """Skeleton for a rewritten network implementation; every method is an
    unimplemented placeholder."""
    def __init__(self):
        pass
    def forward(self, x, y):
        """Placeholder for the forward pass (not implemented)."""
        pass
    def back_propagate(self):
        """Placeholder for the backward pass (not implemented)."""
        pass
"""def demo():
# Teach network XOR function
pat = [
[[0,0], [0]],
[[0,1], [1]],
[[1,0], [1]],
[[1,1], [0]]
]
# create a network with two input, two hidden, and one output nodes
n = NN(2, 2, 1)
# train it with some patterns
n.train(pat)
# test it
n.test(pat)"""
def preprocess(x, y):
    """Stack a feature row-vector and a label row-vector into an n x 2 matrix.

    Args:
        x: 1 x n matrix of inputs.
        y: 1 x n matrix of targets.

    Returns:
        n x 2 matrix whose first column is x.T and second column is y.T.

    Fixes over the previous version: ``np.matrix([120, 2])`` built a 1x2
    matrix containing the values 120 and 2 (not an empty 120x2 matrix), so
    the element assignments raised IndexError.  The row count is now taken
    from x instead of being hard-coded to 120.
    """
    x_t = x.T
    y_t = y.T
    n = x_t.shape[0]
    combined = np.asmatrix(np.empty((n, 2)))
    combined[:, 0] = x_t
    combined[:, 1] = y_t
    return combined
if __name__ == '__main__':
    # Load the six splits from the local MAT file and show the test split;
    # the commented lines sketch how the NN class would be trained.
    X_tst, X_train, X_vad, Y_tst, Y_train, Y_vad = read("./dataset.mat")
    print(X_tst, Y_tst)
    # n = NN(2, 2, 1)
    # n.train(VAD)
    # n.test(TST)
| [
"sjyhyn@gmail.com"
] | sjyhyn@gmail.com |
f98441cac34e408cb07f0ac6db2686cacde92265 | 35d979d4dd1110525fd4c31a78db147d59ec585d | /contact/admin.py | f08502ddc58c7319cf1090cc3a729abfaa7e1e05 | [] | no_license | Code-Institute-Submissions/gymnazium | 0eb32a1e61bde381e7c3716be2a99c9004c67d65 | 4161fafc4ffd6bf37e3a430c169defb19f02b04d | refs/heads/master | 2023-04-13T16:09:01.786918 | 2021-04-28T18:55:59 | 2021-04-28T18:55:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | from django.contrib import admin
from.models import Contact
class ContactAdmin(admin.ModelAdmin):
    """Django admin configuration for Contact records."""
    # Columns shown in the change-list view.
    list_display = ('name', 'email', 'phone', 'message', 'contact_date')
    # Columns that link to the detail page.
    list_display_links = ('name',)
    # Fields searched by the admin search box.
    search_fields = ('name',)
    # Pagination size for the change list.
    list_per_page = 25
# Make Contact editable in the admin with the configuration above.
admin.site.register(Contact, ContactAdmin)
| [
"coreyhoward@live.co.uk"
] | coreyhoward@live.co.uk |
6daee3b2a326fe785f8f51e527b35085f9128fff | 9f2577f0e5e0fc1a5e7f2d997ddac91be359f019 | /python-web/p39.py | 2a1909cb54791f7fcc26f17b28862f7f4c586cf2 | [] | no_license | diqiu11/python | f82cc02a2065b0f933b4d47b352e0f14072350d4 | 7af93ac8eed8667b3be26f4490d74f0aa74924ae | refs/heads/master | 2022-12-11T15:26:09.091309 | 2018-10-17T14:29:01 | 2018-10-17T14:29:01 | 149,900,411 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | #!/usr/bin/env python
# coding=utf-8
# Pickle demo: serialise a dict to bytes and persist it to a file.
import pickle
d = dict(name='bod', age=20)
# Show the in-memory pickled representation.
print(pickle.dumps(d))
# Persist the dict; the context manager guarantees the file is closed.
# (The original ended with a bare ``f.close`` attribute access, which never
# actually closed the file, and called pickle.dumps a second time for no
# effect.)
with open('test.txt', 'wb') as f:
    pickle.dump(d, f)
| [
"di15218143233@outlook.com"
] | di15218143233@outlook.com |
7f43ad6eb669411e466e915a50d1333fea97f289 | dd2f95416ec8107680fe72dc997eaff55fb79698 | /PixelDBTools/test/SiPixelLorentzAngleDBLoader_cfg.py | 61b6392c97fa0aedacc997546de0db95a1cb6505 | [] | no_license | odysei/DPGAnalysis-SiPixelTools | f433e75a642aea9dbc5574e86a8359826e83e556 | ac4fdc9fa3ad8a865acb64dc3bbefe66b1bdd45a | refs/heads/master | 2021-01-18T05:55:15.102609 | 2015-09-15T13:10:41 | 2015-09-15T13:10:41 | 42,519,902 | 0 | 0 | null | 2015-09-15T13:07:20 | 2015-09-15T13:07:19 | null | UTF-8 | Python | false | false | 9,799 | py | #
import FWCore.ParameterSet.Config as cms
# CMSSW job (Python 2 configuration) that writes SiPixel Lorentz-angle
# payloads into a local SQLite file.
process = cms.Process("SiPixelLorentzAngleLoader")
# Geometry, tracker-alignment and conditions setup; commented lines are
# alternatives kept for reference.
process.load("Configuration.Geometry.GeometryRecoDB_cff")
process.load("Configuration.StandardSequences.MagneticField_cff")
#process.load("Geometry.CMSCommonData.cmsIdealGeometryXML_cfi")
process.load("CalibTracker.Configuration.TrackerAlignment.TrackerAlignment_Fake_cff")
#process.load("Geometry.TrackerGeometryBuilder.trackerGeometry_cfi")
#process.load("Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi")
process.load("CondTools.SiPixel.SiPixelGainCalibrationService_cfi")
process.load("CondCore.DBCommon.CondDBCommon_cfi")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
#process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff")
from Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag
#from Configuration.AlCa.GlobalTag import GlobalTag
# Conditions global tag: run-2 MC is active, other scenarios kept commented.
#process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_data', '')
#process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run1_data', '')
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc', '')
#process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_design', '')
# Quiet logging: only errors to stdout.
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.destinations = cms.untracked.vstring("cout")
process.MessageLogger.cout = cms.untracked.PSet(threshold = cms.untracked.string("ERROR"))
# A single empty IOV "event" is enough to trigger the DB writers below.
process.source = cms.Source("EmptyIOVSource",
    firstValue = cms.uint64(1),
    lastValue = cms.uint64(1),
    timetype = cms.string('runnumber'),
    interval = cms.uint64(1)
)
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1)
)
# has to be deleted if it exist
file = "la.db"
sqlfile = "sqlite_file:" + file
print '\n-> Uploading into file %s, i.e. %s\n' % (file, sqlfile)
##### DATABASE CONNECTION AND OUTPUT TAGS ######
# Output service: writes the two Lorentz-angle records into the SQLite
# file above; alternative tag names are kept commented for reference.
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
    BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService'),
    DBParameters = cms.PSet(
        authenticationPath = cms.untracked.string('.'),
        connectionRetrialPeriod = cms.untracked.int32(10),
        idleConnectionCleanupPeriod = cms.untracked.int32(10),
        messageLevel = cms.untracked.int32(1),
        enablePoolAutomaticCleanUp = cms.untracked.bool(False),
        enableConnectionSharing = cms.untracked.bool(True),
        connectionRetrialTimeOut = cms.untracked.int32(60),
        connectionTimeOut = cms.untracked.int32(0),
        enableReadOnlySessionOnUpdateConnection = cms.untracked.bool(False)
    ),
    timetype = cms.untracked.string('runnumber'),
    connect = cms.string(sqlfile),
    toPut = cms.VPSet(
        cms.PSet(
            record = cms.string('SiPixelLorentzAngleRcd'),
            tag = cms.string('SiPixelLorentzAngle_test')
#            tag = cms.string("SiPixelLorentzAngle_fromAlignment_v01_mc")
#            tag = cms.string("SiPixelLorentzAngle_fromAlignment_v01")
#            tag = cms.string("SiPixelLorentzAngle_forWidth_v01_mc")
#            tag = cms.string("SiPixelLorentzAngle_forWidth_v01")
        ),
        cms.PSet(
            record = cms.string('SiPixelLorentzAngleSimRcd'),
            tag = cms.string('SiPixelLorentzAngleSim_test')
        ),
    )
)
###### LORENTZ ANGLE OBJECT ######
# Payload definition for SiPixelLorentzAngleRcd: common tan(theta_L)/T
# values plus per-layer/module (BPix) and per-disk (FPix) overrides.
process.SiPixelLorentzAngle = cms.EDAnalyzer("SiPixelLorentzAngleDBLoader",
    # common input for all rings
    bPixLorentzAnglePerTesla = cms.double(0.10),
    fPixLorentzAnglePerTesla = cms.double(0.06),
#    bPixLorentzAnglePerTesla = cms.double(0.05),
#    fPixLorentzAnglePerTesla = cms.double(0.03),
    # enter -9999 if individual input for rings
#    bPixLorentzAnglePerTesla = cms.double(-9999.),
#    fPixLorentzAnglePerTesla = cms.double(-9999.),
    #in case of PSet
    # Barrel pixel: one entry per (layer, module); modules 1-4 and 5-8 of a
    # layer share a value.
    BPixParameters = cms.untracked.VPSet(
        cms.PSet(
            layer = cms.uint32(1),
            module = cms.uint32(1),
            angle = cms.double(0.0948)
        ),
        cms.PSet(
            layer = cms.uint32(1),
            module = cms.uint32(2),
            angle = cms.double(0.0948)
        ),
        cms.PSet(
            layer = cms.uint32(1),
            module = cms.uint32(3),
            angle = cms.double(0.0948)
        ),
        cms.PSet(
            layer = cms.uint32(1),
            module = cms.uint32(4),
            angle = cms.double(0.0948)
        ),
        cms.PSet(
            layer = cms.uint32(1),
            module = cms.uint32(5),
            angle = cms.double(0.0964)
        ),
        cms.PSet(
            layer = cms.uint32(1),
            module = cms.uint32(6),
            angle = cms.double(0.0964)
        ),
        cms.PSet(
            layer = cms.uint32(1),
            module = cms.uint32(7),
            angle = cms.double(0.0964)
        ),
        cms.PSet(
            layer = cms.uint32(1),
            module = cms.uint32(8),
            angle = cms.double(0.0964)
        ),
        cms.PSet(
            layer = cms.uint32(2),
            module = cms.uint32(1),
            angle = cms.double(0.0916)
        ),
        cms.PSet(
            layer = cms.uint32(2),
            module = cms.uint32(2),
            angle = cms.double(0.0916)
        ),
        cms.PSet(
            layer = cms.uint32(2),
            module = cms.uint32(3),
            angle = cms.double(0.0916)
        ),
        cms.PSet(
            layer = cms.uint32(2),
            module = cms.uint32(4),
            angle = cms.double(0.0916)
        ),
        cms.PSet(
            layer = cms.uint32(2),
            module = cms.uint32(5),
            angle = cms.double(0.0931)
        ),
        cms.PSet(
            layer = cms.uint32(2),
            module = cms.uint32(6),
            angle = cms.double(0.0931)
        ),
        cms.PSet(
            layer = cms.uint32(2),
            module = cms.uint32(7),
            angle = cms.double(0.0931)
        ),
        cms.PSet(
            layer = cms.uint32(2),
            module = cms.uint32(8),
            angle = cms.double(0.0931)
        ),
        cms.PSet(
            layer = cms.uint32(3),
            module = cms.uint32(1),
            angle = cms.double(0.0920)
        ),
        cms.PSet(
            layer = cms.uint32(3),
            module = cms.uint32(2),
            angle = cms.double(0.0920)
        ),
        cms.PSet(
            layer = cms.uint32(3),
            module = cms.uint32(3),
            angle = cms.double(0.0920)
        ),
        cms.PSet(
            layer = cms.uint32(3),
            module = cms.uint32(4),
            angle = cms.double(0.0920)
        ),
        cms.PSet(
            layer = cms.uint32(3),
            module = cms.uint32(5),
            angle = cms.double(0.0935)
        ),
        cms.PSet(
            layer = cms.uint32(3),
            module = cms.uint32(6),
            angle = cms.double(0.0935)
        ),
        cms.PSet(
            layer = cms.uint32(3),
            module = cms.uint32(7),
            angle = cms.double(0.0935)
        ),
        cms.PSet(
            layer = cms.uint32(3),
            module = cms.uint32(8),
            angle = cms.double(0.0935)
        ),
    ),
    # Forward pixel: one entry per (side, disk, HV group); all identical.
    FPixParameters = cms.untracked.VPSet(
        cms.PSet(
            side = cms.uint32(1),
            disk = cms.uint32(1),
            HVgroup = cms.uint32(1),
            angle = cms.double(0.081)
        ),
        cms.PSet(
            side = cms.uint32(1),
            disk = cms.uint32(2),
            HVgroup = cms.uint32(1),
            angle = cms.double(0.081)
        ),
        cms.PSet(
            side = cms.uint32(2),
            disk = cms.uint32(1),
            HVgroup = cms.uint32(1),
            angle = cms.double(0.081)
        ),
        cms.PSet(
            side = cms.uint32(2),
            disk = cms.uint32(2),
            HVgroup = cms.uint32(1),
            angle = cms.double(0.081)
        ),
        cms.PSet(
            side = cms.uint32(1),
            disk = cms.uint32(1),
            HVgroup = cms.uint32(2),
            angle = cms.double(0.081)
        ),
        cms.PSet(
            side = cms.uint32(1),
            disk = cms.uint32(2),
            HVgroup = cms.uint32(2),
            angle = cms.double(0.081)
        ),
        cms.PSet(
            side = cms.uint32(2),
            disk = cms.uint32(1),
            HVgroup = cms.uint32(2),
            angle = cms.double(0.081)
        ),
        cms.PSet(
            side = cms.uint32(2),
            disk = cms.uint32(2),
            HVgroup = cms.uint32(2),
            angle = cms.double(0.081)
        ),
    ),
    #in case lorentz angle values for bpix should be read from file -> not implemented yet
    useFile = cms.bool(False),
    record = cms.untracked.string('SiPixelLorentzAngleRcd'),
    fileName = cms.string('lorentzFit.txt')
)
# Companion payload for the simulation record (SiPixelLorentzAngleSimRcd);
# defined but not scheduled in the path below.
process.SiPixelLorentzAngleSim = cms.EDAnalyzer("SiPixelLorentzAngleDBLoader",
#    magneticField = cms.double(3.8),
    bPixLorentzAnglePerTesla = cms.double(0.10),
    fPixLorentzAnglePerTesla = cms.double(0.06),
    #in case lorentz angle values for bpix should be read from file -> not implemented yet
    useFile = cms.bool(False),
    record = cms.untracked.string('SiPixelLorentzAngleSimRcd'),
    fileName = cms.string('lorentzFit.txt'),
    #in case of PSet
    # Placeholder (all-zero) per-module parameters: the common values above
    # are the ones that matter for this record.
    BPixParameters = cms.untracked.VPSet(
        cms.PSet(
            layer = cms.uint32(0),
            module = cms.uint32(0),
            angle = cms.double(0.0)
        ),
    ),
    FPixParameters = cms.untracked.VPSet(
        cms.PSet(
            side = cms.uint32(0),
            disk = cms.uint32(0),
            HVgroup = cms.uint32(0),
            angle = cms.double(0.0)
        ),
    ),
)
# Scheduler path: run only the reco-side loader (the Sim loader is kept
# available but disabled).
process.p = cms.Path(
#    process.SiPixelLorentzAngleSim
    process.SiPixelLorentzAngle
)
| [
"danek.kotlinski@psi.ch"
] | danek.kotlinski@psi.ch |
b5bf55019dfe3ec3859227a556b6f25df7dab10f | d89280c2cef8c17fc34a3a82a77f5bdb0e4c07a7 | /Veterinaria/wsgi.py | 3c14942e3f1266f018f2ab104d1a3dc38c0d3d3b | [] | no_license | Alejandro-Gutierrez/Vetesoftv2 | 821593233f70bf7b7d816157cd29ab41c645b58d | 84380ea51132f71d685c9b4b1f5f3a92a7a973b5 | refs/heads/master | 2022-06-20T06:36:44.633345 | 2019-09-19T20:57:55 | 2019-09-19T20:57:55 | 223,872,623 | 0 | 0 | null | 2019-11-25T05:59:32 | 2019-11-25T05:59:30 | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for Veterinaria project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Veterinaria.settings')
# The WSGI callable used by application servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"cristopherduarte50@gmail.com"
] | cristopherduarte50@gmail.com |
230253b7557aeac008a120659cb5432ff4aad187 | 042a3e00c2107d26589cb052f78be87c02b5a4c3 | /models/xception.py | 41363d857475121b236c8ea3558a5e3fb1de706e | [] | no_license | KosukeMizufune/segmentation | bdcf5cf4894b4b6c5d4645033f183169c7ff67de | 78d5cd303690829b04b0a54629462ec175ca12ec | refs/heads/master | 2020-05-15T23:27:32.935257 | 2019-05-05T14:35:14 | 2019-05-05T14:35:14 | 182,553,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,189 | py | import chainer
from chainer import links as L
from chainer import functions as F
def fixed_padding(inputs, kernel_size, dilate):
    """Pad ``inputs`` so that a following convolution with the given kernel
    size and dilation keeps "SAME"-style alignment.

    NOTE(review): chainer.functions.pad normally takes a per-axis pad_width
    (and a mode argument); this call passes a flat 4-tuple in the PyTorch
    ``F.pad`` style -- confirm it behaves as intended with the installed
    chainer version.
    """
    # Effective kernel extent once dilation is taken into account.
    kernel_size_effective = kernel_size + (kernel_size - 1) * (dilate - 1)
    pad_total = kernel_size_effective - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    padded_inputs = F.pad(inputs, (pad_beg, pad_end, pad_beg, pad_end))
    return padded_inputs
class SeperableConv(chainer.Chain):
    """Depthwise-separable convolution: grouped depthwise conv, batch norm,
    then a 1x1 pointwise conv.  (Class name kept as in the original,
    including the spelling -- renaming would change serialized link paths.)"""
    def __init__(self, in_size, out_size, kernel_size=3, stride=1, dilate=1):
        super(SeperableConv, self).__init__()
        with self.init_scope():
            # Depthwise conv: one group per input channel; pad=0 here because
            # explicit padding is applied in __call__ via fixed_padding.
            self.conv1 = L.Convolution2D(in_size, in_size, kernel_size, stride, 0, dilate=dilate,
                                         groups=in_size, nobias=True)
            self.bn = L.BatchNormalization(in_size)
            # 1x1 pointwise conv mixes channels up to out_size.
            self.pointwise = L.Convolution2D(in_size, out_size, 1, 1, 0, dilate=1, groups=1, nobias=True)
    def __call__(self, x):
        h = fixed_padding(x, self.conv1.ksize, dilate=self.conv1.dilate[0])
        h = self.conv1(h)
        h = self.bn(h)
        h = self.pointwise(h)
        return h
class EntryFlowBlock(chainer.Chain):
    """Entry-flow residual block: two stride-1 separable convs followed by a
    stride-2 separable conv, with a 1x1 projection shortcut.

    NOTE(review): the main path always downsamples (sep3 has stride 2) while
    the shortcut conv uses the ``stride`` argument (default 1); with the
    default the two branches have different spatial sizes when added --
    confirm the intended stride wiring before relying on this block.
    """
    def __init__(self, in_size, out_size, stride=1):
        super(EntryFlowBlock, self).__init__()
        with self.init_scope():
            self.sep1 = SeperableConv(in_size, out_size, 3, 1)
            self.bn1 = L.BatchNormalization(out_size)
            self.sep2 = SeperableConv(out_size, out_size, 3, 1)
            self.bn2 = L.BatchNormalization(out_size)
            self.sep3 = SeperableConv(out_size, out_size, 3, 2)
            self.bn3 = L.BatchNormalization(out_size)
            # 1x1 projection shortcut to match the channel count.
            self.conv = L.Convolution2D(in_size, out_size, 1, stride, nobias=True)
            self.bn_conv = L.BatchNormalization(out_size)
    def __call__(self, x):
        h = F.relu(self.bn1(self.sep1(x)))
        h = F.relu(self.bn2(self.sep2(h)))
        h = self.bn3(self.sep3(h))
        skip = self.bn_conv(self.conv(x))
        return F.relu(h + skip)
class MiddleFlowBlock(chainer.Chain):
    """Middle-flow residual block: three dilated separable convs with an
    identity shortcut; spatial size and channel count are preserved."""
    def __init__(self, in_size, out_size, dilate=1):
        super(MiddleFlowBlock, self).__init__()
        with self.init_scope():
            self.sep1 = SeperableConv(in_size, out_size, 3, 1, dilate=dilate)
            self.bn1 = L.BatchNormalization(out_size)
            self.sep2 = SeperableConv(out_size, out_size, 3, 1, dilate=dilate)
            self.bn2 = L.BatchNormalization(out_size)
            self.sep3 = SeperableConv(out_size, out_size, 3, 1, dilate=dilate)
            self.bn3 = L.BatchNormalization(out_size)
    def __call__(self, x):
        h = F.relu(self.bn1(self.sep1(x)))
        h = F.relu(self.bn2(self.sep2(h)))
        h = self.bn3(self.sep3(h))
        # Identity shortcut (requires in_size == out_size, as used above).
        return F.relu(h + x)
class ExitFlow(chainer.Chain):
    """Exit flow of the aligned Xception: a residual downsampling stage
    (728 -> 1024 channels, stride 2 on both branches) followed by three
    separable convs growing to 2048 channels.

    ``dilate`` is a 2-tuple: dilate[0] for the residual stage, dilate[1]
    for the final three convs.
    """
    def __init__(self, dilate):
        super(ExitFlow, self).__init__()
        with self.init_scope():
            self.sep1 = SeperableConv(728, 728, 3, 1, dilate[0])
            self.bn1 = L.BatchNormalization(728)
            self.sep2 = SeperableConv(728, 1024, 3, 1, dilate[0])
            self.bn2 = L.BatchNormalization(1024)
            self.sep3 = SeperableConv(1024, 1024, 3, 2, dilate[0])
            self.bn3 = L.BatchNormalization(1024)
            self.sep4 = SeperableConv(1024, 1536, 3, 1, dilate[1])
            self.bn4 = L.BatchNormalization(1536)
            self.sep5 = SeperableConv(1536, 1536, 3, 1, dilate[1])
            self.bn5 = L.BatchNormalization(1536)
            self.sep6 = SeperableConv(1536, 2048, 3, 1, dilate[1])
            self.bn6 = L.BatchNormalization(2048)
            # Stride-2 shortcut matching sep3's downsampling.
            self.conv = L.Convolution2D(728, 1024, 1, 2)
            self.conv_bn = L.BatchNormalization(1024)
    def __call__(self, x):
        h = F.relu(self.bn1(self.sep1(x)))
        h = F.relu(self.bn2(self.sep2(h)))
        h = self.bn3(self.sep3(h))
        h += self.conv_bn(self.conv(x))
        h = F.relu(h)
        h = F.relu(self.bn4(self.sep4(h)))
        h = F.relu(self.bn5(self.sep5(h)))
        h = F.relu(self.bn6(self.sep6(h)))
        return h
class AlignedXception(chainer.Chain):
    """Aligned Xception backbone (DeepLab v3+ style).

    ``output_stride`` selects the overall downsampling: 16 keeps a stride-2
    entry block 3, 8 keeps resolution and dilates the middle/exit blocks
    instead.

    Fix over the previous version: ``__call__`` applied ``self.block4``
    twice, giving 17 middle-flow applications for the 16 defined blocks;
    the duplicated call has been removed.
    """
    def __init__(self, output_stride):
        super(AlignedXception, self).__init__()
        if output_stride == 16:
            entry_block3_stride = 2
            middle_block_dilation = 1
            exit_block_dilations = (1, 2)
        elif output_stride == 8:
            entry_block3_stride = 1
            middle_block_dilation = 2
            exit_block_dilations = (2, 4)
        else:
            raise NotImplementedError
        with self.init_scope():
            # Entry Flow
            self.conv1 = L.Convolution2D(3, 32, 3, stride=2, pad=1, nobias=True)
            self.bn1 = L.BatchNormalization(32)
            self.conv2 = L.Convolution2D(32, 64, 3, stride=1, pad=1, nobias=True)
            self.bn2 = L.BatchNormalization(64)
            self.block1 = EntryFlowBlock(64, 128)
            self.block2 = EntryFlowBlock(128, 256)
            self.block3 = EntryFlowBlock(256, 728, stride=entry_block3_stride)
            # Middle Flow: 16 identical residual blocks.
            self.block4 = MiddleFlowBlock(728, 728, dilate=middle_block_dilation)
            self.block5 = MiddleFlowBlock(728, 728, dilate=middle_block_dilation)
            self.block6 = MiddleFlowBlock(728, 728, dilate=middle_block_dilation)
            self.block7 = MiddleFlowBlock(728, 728, dilate=middle_block_dilation)
            self.block8 = MiddleFlowBlock(728, 728, dilate=middle_block_dilation)
            self.block9 = MiddleFlowBlock(728, 728, dilate=middle_block_dilation)
            self.block10 = MiddleFlowBlock(728, 728, dilate=middle_block_dilation)
            self.block11 = MiddleFlowBlock(728, 728, dilate=middle_block_dilation)
            self.block12 = MiddleFlowBlock(728, 728, dilate=middle_block_dilation)
            self.block13 = MiddleFlowBlock(728, 728, dilate=middle_block_dilation)
            self.block14 = MiddleFlowBlock(728, 728, dilate=middle_block_dilation)
            self.block15 = MiddleFlowBlock(728, 728, dilate=middle_block_dilation)
            self.block16 = MiddleFlowBlock(728, 728, dilate=middle_block_dilation)
            self.block17 = MiddleFlowBlock(728, 728, dilate=middle_block_dilation)
            self.block18 = MiddleFlowBlock(728, 728, dilate=middle_block_dilation)
            self.block19 = MiddleFlowBlock(728, 728, dilate=middle_block_dilation)
            # Exit Flow
            self.exit = ExitFlow(exit_block_dilations)
    def __call__(self, x):
        x = self.bn1(self.conv1(x))
        x = self.bn2(self.conv2(x)) # relu may be unnecessary
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        # Middle flow: each block applied exactly once.
        x = self.block4(x)
        x = self.block5(x)
        x = self.block6(x)
        x = self.block7(x)
        x = self.block8(x)
        x = self.block9(x)
        x = self.block10(x)
        x = self.block11(x)
        x = self.block12(x)
        x = self.block13(x)
        x = self.block14(x)
        x = self.block15(x)
        x = self.block16(x)
        x = self.block17(x)
        x = self.block18(x)
        x = self.block19(x)
        x = self.exit(x)
        return x
| [
"mfs.coldvdgas4645@gmail.com"
] | mfs.coldvdgas4645@gmail.com |
1575b08a2c652e7cdf3d3da4db1c9005fb2a2b5b | 3da6b8a0c049a403374e787149d9523012a1f0fc | /Coder_Old/几个好玩有趣的Python入门实例/简单统计/main.py | 36fcf153c6ae58736a502713a8e34905eff3b104 | [] | no_license | AndersonHJB/PyCharm_Coder | d65250d943e84b523f022f65ef74b13e7c5bc348 | 32f2866f68cc3a391795247d6aba69a7156e6196 | refs/heads/master | 2022-07-25T11:43:58.057376 | 2021-08-03T02:50:01 | 2021-08-03T02:50:01 | 348,922,058 | 3 | 3 | null | 2021-09-05T02:20:10 | 2021-03-18T02:57:16 | Python | UTF-8 | Python | false | false | 1,270 | py | # 输入一组数据,计算均值,方差,中位数,绝对相对误差。
# -*- coding: utf-8 -*-
# Read numbers from stdin, one per line, until a blank line is entered.
def getNum():
    """Collect numbers typed by the user; an empty line ends the input.

    NOTE(review): ``eval`` is used to parse each entry, which executes
    arbitrary expressions typed by the user -- consider ``float()`` instead.
    """
    nums = []
    iNumStr = input('please input a sequence of numbers (enter to exit): ')
    while iNumStr != '':
        nums.append(eval(iNumStr))
        iNumStr = input('please input a sequence of numbers (enter to exit): ')
    return nums
# Arithmetic mean.
def average(numbers):
    """Return the arithmetic mean of a non-empty sequence of numbers."""
    total = sum(numbers)
    return total / len(numbers)
# Population standard deviation.
def dev(numbers, average):
    """Return the population standard deviation of ``numbers`` about ``average``."""
    squared_error = sum((num - average) ** 2 for num in numbers)
    return pow(squared_error / len(numbers), 0.5)
# Median of a sequence; the caller's list is left unmodified.
def median(numbers):
    """Return the median of ``numbers``.

    Fix over the previous version: it called ``sorted(numbers)`` but
    discarded the result, so it silently assumed pre-sorted input.  The
    sorted copy is now actually used.
    """
    numbers = sorted(numbers)
    size = len(numbers)
    if size % 2 == 0:
        return (numbers[size // 2 - 1] + numbers[size // 2]) / 2
    else:
        return numbers[size // 2]
# Largest absolute deviation from the mean, and that deviation relative to it.
def rel_dev(numbers, average):
    """Return (absolute error, relative error) of the value farthest from ``average``."""
    spread = max(abs(max(numbers) - average), abs(min(numbers) - average))
    return spread, spread / average
def main():
    """Prompt for numbers, then print sum, mean, median, std-dev and errors."""
    nums = getNum()
    if len(nums) == 0:
        print('no data')
    else:
        ave = average(nums)
        devs = rel_dev(nums, ave)
        # The format string is user-facing Chinese output (sum, mean, median,
        # std-dev, absolute error, relative error).
        # NOTE(review): '{:4f}' (missing '.') formats with field width 4,
        # not 4 decimals -- probably intended '{:.4f}'.
        print('和:{:.4f},平均数:{:.4f},中位数:{:.4f},方差:{:.4f},绝对误差:{:4f},相对误差:{:.4f}' \
            .format(sum(nums), ave, median(nums), dev(nums, ave), devs[0], devs[1]))
if __name__ == '__main__':
    main()
| [
"1432803776@qq.com"
] | 1432803776@qq.com |
def isPhoneNumber(text):
    """Return True if ``text`` has exactly the shape ###-###-#### (twelve
    characters: decimal digits with dashes at positions 3 and 7)."""
    if len(text) != 12:
        return False
    for index, ch in enumerate(text):
        if index in (3, 7):
            if ch != '-':
                return False
        elif not ch.isdecimal():
            return False
    return True
message = "Call me at 415-555-1011 tomorrow. 415-555-9999 is my office."
# Slide a 12-character window across the message and report every chunk
# that matches the ###-###-#### phone-number shape.
for i in range(len(message)):
    chunk = message[i: i + 12]
    if isPhoneNumber(chunk):
        print("Phone number found: " + chunk)
print("Done")
| [
"gonzalo.sanchter@gmail.com"
] | gonzalo.sanchter@gmail.com |
7045ae2111e975c1900c7f15ec0532dbbf283c3d | 9a076ee891aa04dd1522662838dda63ad554e835 | /manage.py | 6c345e52c5342bfb1e480ee19abe93787dd7e988 | [
"MIT"
] | permissive | Albert-Byrone/Pitches | fc018b3f46ea325456212154f27426c7d18ef435 | d9ae032ff0a00b135d03404477e07a8405247b5e | refs/heads/master | 2022-10-16T14:46:38.758963 | 2019-10-22T11:53:35 | 2019-10-22T11:53:35 | 216,051,524 | 0 | 1 | MIT | 2022-09-16T18:11:23 | 2019-10-18T15:12:54 | Python | UTF-8 | Python | false | false | 619 | py | from flask_script import Manager, Server
from flask_migrate import Migrate, MigrateCommand
from app import create_app,db
from app.models import User
# Flask-Script management entry point: builds the production app and wires
# up the migration and dev-server commands.
app = create_app('production')
manager = Manager(app)
migrate = Migrate(app,db)
manager.add_command('db',MigrateCommand)
manager.add_command('server',Server(use_debugger=True))
@manager.shell
def make_shell_context():
    """Objects pre-imported into the ``manage.py shell`` session."""
    return dict(app = app,db = db,User = User)
@manager.command
def test():
    """Discover and run the unit tests under the ``tests`` directory."""
    import unittest
    tests = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(tests)
if __name__ == "__main__":
    manager.run()
| [
"albertbyrone1677@gmail.com"
] | albertbyrone1677@gmail.com |
179f582b2c0b1b3db4ccb771da4908bdf2846481 | 2d93e25b8b7efc7ea57cb6421894fc81b31e5c8f | /devel/lib/python2.7/dist-packages/bitcraze_lps_estimator/msg/__init__.py | 54dbe5073cc27273c6646ff5533f8a96774c0fd1 | [] | no_license | Supredan/UINS | 5b63a33f0a143d5138cd2e59883e5820b0b23076 | 7a70d31944a89b00bbf302dd1f318ab93f8134dc | refs/heads/master | 2020-07-23T21:39:46.500920 | 2019-09-11T03:23:34 | 2019-09-11T03:23:34 | 207,712,812 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 27 | py | from ._RangeArray import *
| [
"961401327@qq.com"
] | 961401327@qq.com |
9d42895a3f6bc02128cf335966e69e08e46e0ba6 | 1863049b98d36e50fcd95bae5645675d900c5da1 | /10.ES/ES Find Top.py | b7db3546c45fc0902c5c7e2c0ba0499445234381 | [] | no_license | Nh-touch/TensorFlowPractice | 0928bd17a87df6d9bd35dea77a28ba557f764255 | 2fb965a10a6a6019b4799e3f8df30ff1ea8f5d60 | refs/heads/master | 2018-08-25T17:50:14.611371 | 2018-07-08T01:29:30 | 2018-07-08T01:29:30 | 117,306,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,895 | py | import numpy as np
import matplotlib.pyplot as plt
# Use an Evolution Strategy (ES) to locate the maximum of
# f(x) = sin(10x) * x + cos(2x) * x on the interval [0, 5].
DNA_SIZE = 1             # number of variables per individual
POP_SIZE = 100           # population size
NUM_KIDS = 50            # offspring generated per generation
NUM_GENERATIONS = 200    # main-loop iterations
DNA_BOUND = [0, 5]       # search interval for the variable
BIRTH_RATE = 0.6         # crossover rate (stored by ES but not read by it)
VARIATION_RATE = 0.01    # mutation rate (stored by ES but not read by it)
# Objective function to maximise.
def F(x):
    """Evaluate f(x) = sin(10x)*x + cos(2x)*x (element-wise on arrays)."""
    first_term = np.sin(10 * x) * x
    second_term = np.cos(2 * x) * x
    return first_term + second_term
class ES(object):
    """(mu + lambda) Evolution Strategy over the module-level objective F,
    with live matplotlib plotting of the population."""
    # Constructor: store parameters, build a random population, set up the plot.
    def __init__(self, dna_size, pop_size, birth_rate, variation_rate, dna_bound, num_kids):
        self._dna_size = dna_size
        self._pop_size = pop_size
        self._birth_rate = birth_rate
        self._variation_rate = variation_rate
        self._dna_bound = dna_bound
        self._num_kids = num_kids
        # Build the initial population: every individual starts from the same
        # random DNA, plus a per-gene mutation strength.
        # ('viriation_strength' spelling kept -- it is a dict key used below.)
        self._pop = dict(dna = self._dna_bound[1] * np.random.rand(1, self._dna_size).repeat(self._pop_size, axis=0),
                         viriation_strength = np.random.rand(self._pop_size, self._dna_size))
        # Interactive plot of the objective curve.
        plt.ion()
        x = np.linspace(*self._dna_bound, 200)
        plt.plot(x, F(x))
    def __del__(self):
        # NOTE(review): relying on __del__ for plt.ioff()/plt.show() runs at
        # an unpredictable time (interpreter shutdown); consider an explicit
        # close() method.
        plt.ioff()
        plt.show()
    # Internal helpers.
    # Decode DNA into objective values.
    def _translate_dna(self, pop):
        return F(pop)
    # Fitness: the objective value itself, flattened.
    def _get_fitness(self, product):
        return product.flatten()
    # Crossover: build num_kids children from random parent pairs.
    def _generation(self):
        kids = {'dna': np.empty((self._num_kids, self._dna_size))}
        kids['viriation_strength'] = np.empty_like(kids['dna'])
        for kv, ks in zip(kids['dna'], kids['viriation_strength']):
            p1, p2 = np.random.choice(np.arange(self._pop_size), size=2, replace=False)
            # Random crossover mask choosing genes from parent 1 or parent 2.
            # NOTE(review): np.bool is removed in modern NumPy; use bool/np.bool_.
            cp = np.random.randint(0, 2, self._dna_size, dtype = np.bool)
            kv[cp] = self._pop['dna'][p1, cp]
            kv[~cp] = self._pop['dna'][p2, ~cp]
            ks[cp] = self._pop['viriation_strength'][p1, cp]
            ks[~cp] = self._pop['viriation_strength'][p2, ~cp]
        return kids
    # Mutation: perturb each child's strength, then its DNA, clipped to bounds.
    def _variation(self, kids):
        for kv, ks in zip(kids['dna'], kids['viriation_strength']):
            ks[:] = np.maximum(ks + (np.random.rand(*ks.shape)-0.5), 0.)
            kv += ks * np.random.randn(*kv.shape)
            kv[:] = np.clip(kv, *self._dna_bound)
    # Selection: merge parents and kids, keep the pop_size fittest.
    def _select(self, kids):
        for key in ['dna', 'viriation_strength']:
            self._pop[key] = np.vstack((self._pop[key], kids[key]))
        fitness = self._get_fitness(F(self._pop['dna']))
        idx = np.arange(self._pop['dna'].shape[0])
        good_idx = idx[fitness.argsort()][-self._pop_size:]
        for key in ['dna', 'viriation_strength']:
            self._pop[key] = self._pop[key][good_idx]
    # Public interface: one full generation (crossover, mutation, selection).
    def evolve(self):
        kids = self._generation()
        self._variation(kids)
        self._select(kids)
    # Placeholder for state dumping (not implemented).
    def dump(self):
        pass
if __name__ == '__main__':
    # Build the ES with the module-level hyper-parameters, then evolve it,
    # re-plotting the current population each generation.
    es = ES(dna_size = DNA_SIZE
            , pop_size = POP_SIZE
            , birth_rate = BIRTH_RATE
            , variation_rate = VARIATION_RATE
            , dna_bound = DNA_BOUND
            , num_kids = NUM_KIDS)
    for generation in range(NUM_GENERATIONS):
        es.evolve()
        # Remove the previous generation's scatter before drawing the new one.
        if 'sca' in globals():
            sca.remove()
        sca = plt.scatter(es._pop['dna']
                          , F(es._pop['dna'])
                          , s = 200
                          , lw = 0
                          , c = 'red'
                          , alpha = 0.5)
        plt.pause(0.05)
    #es.dump()
| [
"1669159448@qq.com"
] | 1669159448@qq.com |
7b045879714b0543348f9c2c3e0f6e3debfef07e | d9ffdfc842fd3aef47194132828bdca8e7d97a35 | /state_categorizer/experiment_parsel3.py | 546a70f5e3b72adbda3066470d316981711a3239 | [] | no_license | pollo/categorizers | 98d29192129a932efe257b32bbad7d6079fa7b72 | 018cf1e6884ea6c23386c5539c0a9418feadffcf | refs/heads/master | 2016-09-05T19:13:14.486091 | 2014-07-29T13:19:53 | 2014-07-29T13:19:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,692 | py | from experiment_parsel import ExperimentParamsSelectionBase
from sys import argv
import os.path
"""
In this experiment the features extracted will be the 3 components and the module of velocity and accelleration of each point. The svm will be used with gaussian kernel instead of linear.
"""
class Experiment(ExperimentParamsSelectionBase):
    """Parameter-selection experiment using an RBF-kernel SVM over per-point
    velocity/acceleration features (3 components + magnitude each).
    Python 2 code (note the ``print e`` statement below)."""
    @property
    def KERNEL_TYPE(self):
        # Gaussian (RBF) kernel instead of a linear one.
        return "rbf"
    @property
    def FEATURES_PER_SAMPLE(self):
        # 8 features per point: vel, velx, vely, velz, acc, accx, accy, accz.
        return self.WINDOW_SIZE*8
    @property
    def WINDOW_SIZE(self):
        # Number of consecutive points per sample window.
        return 11
    def _extract_features(self, points):
        """Flatten the 8 velocity/acceleration values of each point into one
        feature vector; returns [] (after printing the error) if any point
        lacks one of the expected keys."""
        features = []
        for point in points:
            try:
                features.append(float(point['categorizers']['vel']))
                features.append(float(point['categorizers']['velx']))
                features.append(float(point['categorizers']['vely']))
                features.append(float(point['categorizers']['velz']))
                features.append(float(point['categorizers']['acc']))
                features.append(float(point['categorizers']['accx']))
                features.append(float(point['categorizers']['accy']))
                features.append(float(point['categorizers']['accz']))
            except KeyError as e:
                print e
                return []
        return features
if __name__ == '__main__':
    # User ids whose data the experiment runs over.
    auth_ids = [32, 51]
    # Name the experiment after this script file.
    experiment_name = os.path.splitext(os.path.basename(argv[0]))[0]
    # Optional CLI argument: subsampling factor (defaults to 1.0).
    try:
        subsampling = float(argv[1])
        experiment_name += "_s"+str(subsampling)
    except (ValueError, IndexError) as e:
        subsampling = 1.0
    Experiment().run(auth_ids, experiment_name,
                     subsampling)
| [
"pollo1_91@yahoo.it"
] | pollo1_91@yahoo.it |
285294c3c1fac8f3add68e76b2a6286d591a5183 | 504be4ce361b3c0da04beb1f0ca2441bc375d64e | /bloody/urls.py | 1e0a1f710bbcc9ca0e553855cb7159d997fe4e70 | [] | no_license | NipunGupta27/bloodowners | b7b8b625364e13fd85918449e9f2a818364a2098 | 289667b665b9483d209c45090ba67a8084a04e6b | refs/heads/master | 2020-06-01T01:07:41.733356 | 2019-09-17T19:14:30 | 2019-09-17T19:14:30 | 190,570,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | from django.urls import path
from . import views
# URL routes for the blood-donor app; the ``name`` values are used for
# reverse URL lookups in templates and redirects.
urlpatterns=[
    path('', views.BD.index),
    path('sign', views.BD.sign, name="log"),
    path('signup', views.BD.register, name="signup"),
    path('signin', views.BD.login, name="signin"),
    path('logout', views.BD.logout, name="logout"),
    path('bloodsearch', views.BD.finddonor, name="bloodsearch")
]
| [
"nipungupta.2702@gmail.com"
] | nipungupta.2702@gmail.com |
62f61b10377b859a94e7cd74a9a808e563b91524 | 8103ec56c3a412f186078738966466494e65cb35 | /player.py | b4d1216727144ed14f92092f28b0df7ce9c1c2a3 | [] | no_license | ankushKun/TicTacToe | 8065cc1d64075da35aaef50e5dd3f499b5ee66e1 | 2892ce940f75efeefdf0912f5aafd08cffb7d1b2 | refs/heads/main | 2022-12-30T18:38:51.368878 | 2020-10-19T12:19:24 | 2020-10-19T12:19:24 | 305,075,511 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | #from board import Board
class Player:
    """A tic-tac-toe player identified by a display name and a piece.

    The piece is normalized to uppercase and must be 'X' or 'O'.
    """

    def __init__(self, name: str, piece: str) -> None:
        """Create a player.

        Args:
            name: display name of the player.
            piece: 'X' or 'O', case-insensitive (stored uppercase).

        Raises:
            ValueError: if piece is not 'X' or 'O'.
        """
        piece = piece.upper()
        if piece not in ('X', 'O'):
            # ValueError is the idiomatic exception for a bad argument value;
            # it is a subclass of Exception, so existing broad handlers still
            # catch it (the original raised a bare Exception).
            raise ValueError("piece should be either 'X' or 'O'")
        self.name = name
        self.piece = piece

    def place(self, b, x: int, y: int) -> None:
        """Place this player's piece at (x, y) on board *b*.

        Delegates to b.place(x, y, piece), logs the move, and prints the
        updated board via b.print_board().
        """
        b.place(x, y, self.piece)
        print(f"{self.name} >> placed an {self.piece} on {x},{y}")
        b.print_board()
| [
"ankush4singh@gmail.com"
] | ankush4singh@gmail.com |
fb58e6b016ef706a0ea9fe64f5a1a25eecfcf477 | 5065377d29c8965c3d9b445b26526154d5bfbd72 | /blog/admin.py | 49c4b842918b5aad9764d056950b9bda4d124e90 | [] | no_license | chechitajr20/aplicacionblog | 095944382c46106d0275c55c55df1d4a42521701 | a10b16740877ade416a5ce6cba3b976de1d38998 | refs/heads/master | 2023-01-14T06:14:00.287576 | 2020-11-14T22:23:12 | 2020-11-14T22:23:12 | 294,600,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | from django.contrib import admin
from .models import Publicacion
# Expose the Publicacion model in the Django admin site.
admin.site.register(Publicacion)
# Register your models here.
| [
"kevin17@mesoamericana.edu.gt"
] | kevin17@mesoamericana.edu.gt |
e651b351aeca8a6f33aec76c1801f3599fa076b4 | bc32158f2085ae8ebf1b3a95911fd0087fd090f2 | /importPkg/generate.py | e4282eef355774091820fc02b7c39d09601b8406 | [] | no_license | mwintersperger-tgm/prototype | 77a69558f204b1fbc012f0c877ed95e70419e745 | 9bb1da7119fd17fcf889f42a97e46420ce7a120a | refs/heads/master | 2020-04-07T16:52:08.032189 | 2019-04-07T11:44:45 | 2019-04-07T11:44:45 | 158,546,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,242 | py | import json
import csv
from random import randint
# NOTE(review): lowercase aliases make JSON-style literals (true/false) valid
# Python in this module — presumably so JSON snippets can be pasted directly;
# confirm they are actually needed before removing.
true = True
false = False
def generate(generator):
    """Produce a pseudo-random value described by the *generator* spec string.

    Supported specs (see generate_readme for details):
        "name"       -- one uppercase letter followed by 5-11 lowercase ones.
        "randcharN"  -- N characters, each randomly a digit, an uppercase
                        letter, or a character in the chr(95)-chr(122) range.
        "randintN"   -- a random int in [0, 10**N - 1].
    Anything else yields the fallback string "hi".
    """
    if generator.startswith("name"):
        # First an uppercase initial, then a random count of lowercase letters.
        chars = [chr(randint(65, 90))]
        tail_len = randint(5, 11)
        for _ in range(tail_len):
            chars.append(chr(randint(97, 122)))
        return "".join(chars)
    if generator.startswith("randchar"):
        count = int(generator[8:])
        pieces = []
        for _ in range(count):
            bucket = randint(0, 2)
            if bucket == 2:
                pieces.append(chr(randint(48, 57)))   # digit
            elif bucket == 1:
                pieces.append(chr(randint(65, 90)))   # uppercase letter
            else:
                # NOTE(review): the range starts at 95, so '_' (95) and
                # '`' (96) can appear alongside lowercase letters — the
                # "name" branch starts at 97; confirm 95 is intentional.
                pieces.append(chr(randint(95, 122)))
        return "".join(pieces)
    if generator.startswith("randint"):
        digits = int(generator[7:])
        upper = int("1" + "0" * digits)
        return randint(0, upper - 1)
    return "hi"
def lel(argsin):
    """Generate random tabular rows described by ../resources/param.json and
    optionally write them to CSV and/or JSON.

    The config file supplies defaults ('createcsv', 'createjson', 'delimiter',
    'lines', 'param'); *argsin* may override or extend them:

        return    -- bool: also return the generated rows (list of dicts)
        writecsv  -- truthy: force CSV output on (never off)
        writejson -- truthy: force JSON output on (never off)
        csvname   -- CSV output path (needed when CSV output is enabled)
        jsonname  -- JSON output path (needed when JSON output is enabled)
        lines     -- row count override; values <= 0 fall back to the config
        param     -- list of {'propname', 'generator'} column specs

    Returns the list of generated row dicts when argsin['return'] is truthy,
    otherwise None.
    """
    ret = argsin.get('return', False)

    with open("../resources/param.json") as cfg:
        columns = json.load(cfg)

    # Output switches: config default, forceable on via argsin.
    ccsv = columns['createcsv']
    if argsin.get('writecsv'):
        ccsv = True
    cjson = columns['createjson']
    if argsin.get('writejson'):
        cjson = True

    # The delimiter may be configured as a 1-char string or an int code point.
    delim = columns['delimiter']
    if isinstance(delim, int):
        delim = chr(delim)

    # NOTE(review): as in the original, a missing 'csvname'/'jsonname' leaves
    # the variable unbound, so the later open() fails with NameError when the
    # corresponding output is enabled — confirm whether the config should
    # provide defaults.
    try:
        csvloc = argsin['csvname']
    except KeyError:
        pass
    try:
        jsonloc = argsin['jsonname']
    except KeyError:
        pass

    # BUGFIX: the original left `lines` unbound (NameError) when
    # argsin['lines'] existed but was <= 0; non-positive overrides now fall
    # back to the configured default.
    if 'lines' in argsin and argsin['lines'] > 0:
        lines = argsin['lines']
    else:
        lines = columns['lines']

    param = argsin['param'] if 'param' in argsin else columns['param']

    result = []
    if ccsv:
        # Mode "w" truncates on open, replacing the original open("a") +
        # truncate(0) combination.
        with open(csvloc, "w") as csvfile:
            writer = csv.writer(csvfile, delimiter=delim)
            for i in range(lines):
                row = {spec['propname']: generate(spec['generator'])
                       for spec in param}
                if i == 0:
                    # Header row taken from the first generated row's keys.
                    writer.writerow(row.keys())
                writer.writerow(row.values())
                if cjson or ret:
                    result.append(row)
    else:
        for _ in range(lines):
            row = {spec['propname']: generate(spec['generator'])
                   for spec in param}
            if cjson or ret:
                result.append(row)

    if cjson:
        # "w" already truncates; the original's explicit truncate(0) was
        # redundant.
        with open(jsonloc, "w") as jsonfile:
            json.dump(result, jsonfile)
    if ret:
        return result
| [
"akramreiter@student.tgm.ac.at"
] | akramreiter@student.tgm.ac.at |
89fbee613da8412cb0e79b72e54245fc9d1ad0d5 | 12d654d2f2caa42469dacc7e331b6fa61e41bf61 | /polls/migrations/0001_initial.py | a557ab78b242348220f98d2d5155974a1d879169 | [] | no_license | makon57/redhat-tech | d618a4a9a96e89ce09b4db469adae40c3b79b139 | ed7e915fd5e63ddf9e9c30d9a14c9d0e281a7f1e | refs/heads/main | 2023-09-01T07:51:48.790823 | 2021-10-05T19:57:24 | 2021-10-05T19:57:24 | 413,950,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,081 | py | # Generated by Django 3.2.7 on 2021-09-29 01:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the polls app: creates the
    Question and Choice tables."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question_text', models.CharField(max_length=200)),
                ('pub_date', models.DateTimeField(verbose_name='date published')),
            ],
        ),
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('choice_text', models.CharField(max_length=200)),
                ('votes', models.IntegerField(default=0)),
                # Each choice belongs to one question; deleting the question
                # cascades to its choices.
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.question')),
            ],
        ),
    ]
| [
"m.kong.57@gmail.com"
] | m.kong.57@gmail.com |
737e3ae44d09c3a751214f2cf1bc08fc8669bdc1 | 72478ff5ab229b87c1092a0858605e3b13f349e0 | /leveldb.py | 2e2bd5a30d01c29938bfa491bcb0567fc4e82580 | [] | no_license | jamiels/pyethdata | 514e692610ff8ae65d3be2c6c94a329559e597ed | ac28ad9a1881802423aaa74e5d9ecd7d9cb2b2d6 | refs/heads/master | 2020-04-22T08:44:51.258156 | 2019-02-12T04:15:42 | 2019-02-12T04:15:42 | 170,250,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | # Jamiel Sheikh - jamiel@chainhaus.com
# pip install plyvel
import plyvel
### Connect and pull all keys in LevelDB - Good luck getting this to work on Windows
# NOTE(review): hard-coded geth chain-data path; with create_if_missing=False
# plyvel.DB raises if the store does not exist. Opening a single .ldb file
# (rather than the chaindata directory) looks suspect — confirm the path.
db = plyvel.DB('/geth/chaindata/000016.ldb',create_if_missing=False)
# Iterate every key/value pair in the store and print it.
for k,v in db:
print(k,":",v) | [
"i@jamiel.net"
] | i@jamiel.net |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.