| blob_id (string, 40 chars) | directory_id (string, 40 chars) | path (string, 2–616 chars) | content_id (string, 40 chars) | detected_licenses (list, 0–69 items) | license_type (2 classes) | repo_name (string, 5–118 chars) | snapshot_id (string, 40 chars) | revision_id (string, 40 chars) | branch_name (string, 4–63 chars) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k–686M, nullable) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (213 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2–10.3M) | extension (246 classes) | content (string, 2–10.3M chars) | authors (list, 1 item) | author_id (string, 0–212 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3b8859a2e3f8c45f6ca45fa9304a5c59b2491364
|
579e08a3ec73bc1c15bcd1ec47d5dbdc19078f5d
|
/products/views.py
|
74d89b64a56f7cfe455e8b276b5f3f08231b8fdc
|
[] |
no_license
|
Code-Institute-Submissions/Top_Notch_Coffee
|
180b3c39b5eb4e9cb524638aff4f917962bfe865
|
4a0e4d27228552a9d35278a7c9b5a8a6f17e621d
|
refs/heads/master
| 2023-08-20T17:15:43.907846
| 2021-10-15T20:10:00
| 2021-10-15T20:10:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,566
|
py
|
from django.shortcuts import render, redirect, reverse, get_object_or_404
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.db.models.functions import Lower
from .models import Product, Category
from .forms import ProductForm
# Create your views here.
def all_products(request):
""" A view to show all products, including sorting and search queries """
products = Product.objects.all()
query = None
categories = None
sort = None
direction = None
if request.GET:
if 'sort' in request.GET:
sortkey = request.GET['sort']
sort = sortkey
if sortkey == 'name':
sortkey = 'lower_name'
products = products.annotate(lower_name=Lower('name'))
if sortkey == 'category':
sortkey = 'category__name'
if 'direction' in request.GET:
direction = request.GET['direction']
if direction == 'desc':
sortkey = f'-{sortkey}'
products = products.order_by(sortkey)
if 'category' in request.GET:
categories = request.GET['category'].split(',')
products = products.filter(category__name__in=categories)
categories = Category.objects.filter(name__in=categories)
if 'q' in request.GET:
query = request.GET['q']
if not query:
messages.error(request, "You didn't enter any search criteria!")
return redirect(reverse('products'))
queries = Q(name__icontains=query) | Q(description__icontains=query)
products = products.filter(queries)
current_sorting = f'{sort}_{direction}'
context = {
'products': products,
'search_term': query,
'current_categories': categories,
'current_sorting': current_sorting,
}
return render(request, 'products/products.html', context)
def product_detail(request, product_id):
""" A view to show individual product details """
product = get_object_or_404(Product, pk=product_id)
context = {
'product': product,
}
return render(request, 'products/product_detail.html', context)
@login_required
def add_product(request):
""" Add a product to the store """
if not request.user.is_superuser:
messages.error(request, 'Sorry, only store owners can do that.')
return redirect(reverse('home'))
if request.method == 'POST':
form = ProductForm(request.POST, request.FILES)
if form.is_valid():
product = form.save()
messages.success(request, 'Successfully added product!')
return redirect(reverse('product_detail', args=[product.id]))
else:
messages.error(request, 'Failed to add product. Please ensure the form is valid.')
else:
form = ProductForm()
template = 'products/add_product.html'
context = {
'form': form,
}
return render(request, template, context)
@login_required
def edit_product(request, product_id):
""" Edit a product in the store """
if not request.user.is_superuser:
messages.error(request, 'Sorry, only store owners can do that.')
return redirect(reverse('home'))
product = get_object_or_404(Product, pk=product_id)
if request.method == 'POST':
form = ProductForm(request.POST, request.FILES, instance=product)
if form.is_valid():
form.save()
messages.success(request, 'Successfully updated product!')
return redirect(reverse('product_detail', args=[product.id]))
else:
messages.error(request, 'Failed to update product. Please ensure the form is valid.')
else:
form = ProductForm(instance=product)
messages.info(request, f'You are editing {product.name}')
template = 'products/edit_product.html'
context = {
'form': form,
'product': product,
}
return render(request, template, context)
@login_required
def delete_product(request, product_id):
""" Delete a product from the store """
if not request.user.is_superuser:
messages.error(request, 'Sorry, only store owners can do that.')
return redirect(reverse('home'))
product = get_object_or_404(Product, pk=product_id)
product.delete()
messages.success(request, 'Product deleted!')
return redirect(reverse('products'))
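# A minimal urls.py sketch (it would live in the app's urls.py, not this file)
# showing how these views are typically wired up; the route names 'products',
# 'product_detail', 'add_product', 'edit_product' and 'delete_product' are
# assumed from the reverse() calls above, not taken from the repo itself.
from django.urls import path
from . import views

urlpatterns = [
    path('', views.all_products, name='products'),
    path('<int:product_id>/', views.product_detail, name='product_detail'),
    path('add/', views.add_product, name='add_product'),
    path('edit/<int:product_id>/', views.edit_product, name='edit_product'),
    path('delete/<int:product_id>/', views.delete_product, name='delete_product'),
]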
|
[
"santiagoyanez@mailbox.org"
] |
santiagoyanez@mailbox.org
|
bcdfe16c3d5ef6047b39bfbfc2b3b3cb9d9213ab
|
578fc780692e50e7a94b565986957eafb01bde7f
|
/pool/client2.py
|
070dccf3ebc27a96925e28171efea5315bf0be65
|
[] |
no_license
|
peter-zsn/python-rabbitmq
|
b7a6673a7ad277e8d899a61568b3eb06417c0d47
|
0145d3b76186e02bc65c917459538bdd976ae33f
|
refs/heads/master
| 2023-03-24T20:24:47.516805
| 2021-03-15T09:36:15
| 2021-03-15T09:36:15
| 346,262,575
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 902
|
py
|
# coding: utf-8
"""
@author: 张帅男
@date: 2021/3/12, 11:42 AM
@file: client.py
"""
import datetime
from ramq_pool import RabbitmqPool as Pool
rabbitmq_host = "127.0.0.1"
rabbitmq_port = "5672"
rabbitmq_user = "admin"
rabbitmq_password = "123456"
rabbitmq_virtual_host = "/"
Pool = Pool(3, 20)
cert = Pool.get_certtemplate()
cert['rabbitmq_host'] = rabbitmq_host
cert['rabbitmq_virtual_host'] = rabbitmq_virtual_host
cert['rabbitmq_user'] = rabbitmq_user
cert['rabbitmq_password'] = rabbitmq_password
cert['rabbitmq_port'] = rabbitmq_port
Pool.addcert(cert)
try:
channel, cname = Pool.get_channel()
finally:
Pool.delconnection(cname)
channel, cname = Pool.get_channel()
channel.exchange_declare(exchange='pool-test', durable=True, exchange_type='fanout')
message = 'this is message id : 123'
channel.basic_publish(exchange='pool-test', routing_key='secend', body=message)
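# A minimal consumer-side sketch for the 'pool-test' fanout exchange above,
# written against plain pika with the same broker credentials. This is an
# illustrative assumption, not part of the ramq_pool API.
import pika

consumer_credentials = pika.PlainCredentials(rabbitmq_user, rabbitmq_password)
consumer_params = pika.ConnectionParameters(host=rabbitmq_host,
                                            port=int(rabbitmq_port),
                                            virtual_host=rabbitmq_virtual_host,
                                            credentials=consumer_credentials)
with pika.BlockingConnection(consumer_params) as conn:
    ch = conn.channel()
    ch.exchange_declare(exchange='pool-test', durable=True, exchange_type='fanout')
    # Exclusive, auto-named queue bound to the fanout exchange.
    queue = ch.queue_declare(queue='', exclusive=True).method.queue
    ch.queue_bind(exchange='pool-test', queue=queue)
    for method, properties, body in ch.consume(queue, auto_ack=True,
                                               inactivity_timeout=5):
        if body is None:  # inactivity timeout reached
            break
        print(body)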
|
[
"15294627382@tbkt.cn"
] |
15294627382@tbkt.cn
|
d9ee46cca88bf07c076edb24a1ce750b997a0af3
|
8cf1b6c09f0769d147335f4fbe5ff1d61d9597ff
|
/App_Login/migrations/0001_initial.py
|
4938d2dcea8e46368fbc91a7bb4956a85088cdbe
|
[] |
no_license
|
foysal-mahmud/Blog-Project---Django
|
e1b0f448e49ec4a4a4065230d0f29595b4781e42
|
d668dc26c19e1a91588d53142a1f45fdc36ebbdd
|
refs/heads/main
| 2023-01-23T14:45:07.341693
| 2020-11-28T07:06:10
| 2020-11-28T07:06:10
| 316,401,015
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 793
|
py
|
# Generated by Django 3.1.3 on 2020-11-26 22:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('profile_pic', models.ImageField(upload_to='profile_pics')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='user_profile', to=settings.AUTH_USER_MODEL)),
],
),
]
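# A hypothetical models.py reconstruction of what this initial migration
# creates; the options are read straight off the CreateModel operation, and
# the implicit auto-created 'id' field is left to Django's default.
from django.conf import settings
from django.db import models


class UserProfile(models.Model):
    profile_pic = models.ImageField(upload_to='profile_pics')
    user = models.OneToOneField(settings.AUTH_USER_MODEL,
                                on_delete=models.CASCADE,
                                related_name='user_profile')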
|
[
"foysal.bu.cse28@gmail.com"
] |
foysal.bu.cse28@gmail.com
|
242d359948e9476c542299857d1470278df1e762
|
c5339971cd8b527e101b2cd36e0a4b7fc1682eef
|
/gltflib/models/sparse_indices.py
|
6a065102fc233344eade782647c48f0bb9177cc3
|
[
"MIT"
] |
permissive
|
pacovicoarjona/gltflib
|
3ac6467f32429d62151322b7e274293c2c3821ed
|
09adb42c4b597d0aae5bb54e212b5263c463976b
|
refs/heads/master
| 2022-06-09T17:25:52.323242
| 2020-05-08T07:38:17
| 2020-05-08T07:38:17
| 262,255,560
| 0
| 0
|
MIT
| 2020-05-08T07:25:32
| 2020-05-08T07:25:31
| null |
UTF-8
|
Python
| false
| false
| 856
|
py
|
from dataclasses import dataclass
from dataclasses_json import dataclass_json
from .base_model import BaseModel
@dataclass_json
@dataclass
class SparseIndices(BaseModel):
"""
Indices of those attributes that deviate from their initialization value.
Properties:
bufferView (integer) The index of the bufferView with sparse indices. Referenced bufferView can't have ARRAY_BUFFER
or ELEMENT_ARRAY_BUFFER target. (Required)
byteOffset (integer) The offset relative to the start of the bufferView in bytes. Must be aligned. (Optional)
componentType (integer) The indices data type. (Required)
extensions (object) Dictionary object with extension-specific objects. (Optional)
extras (any) Application-specific data. (Optional)
"""
bufferView: int = None
byteOffset: int = None
componentType: int = None
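# Usage sketch: the dataclass_json decorator adds to_json()/from_json(), so a
# sparse-indices object round-trips through JSON like this (5123 is the glTF
# UNSIGNED_SHORT component type; the values here are illustrative only):
indices = SparseIndices(bufferView=1, byteOffset=0, componentType=5123)
assert SparseIndices.from_json(indices.to_json()).componentType == 5123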
|
[
"serg.kr@gmail.com"
] |
serg.kr@gmail.com
|
93b5cfbb4d3dbb7a3748fe1c0312c402853cb69a
|
671f8f374ab512a5e0b2742101252b8bea266c03
|
/src/videoReader.py
|
6f288ec5219ab643d671271ec64702c81f8c6e7e
|
[] |
no_license
|
MirceaDavidEsc/placozoa_internal_coordination
|
f8c3f2abd06ddeda876bfc3a73d05923334e0e9e
|
65657e04dd810baadbda03911dd304970cfdad7c
|
refs/heads/master
| 2023-07-15T01:23:21.069337
| 2021-08-26T09:19:11
| 2021-08-26T09:19:11
| 261,339,535
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 175
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 27 00:28:45 2016
@author: Mircea
"""
import imageio
filename = '/tmp/file.mp4'
vid = imageio.get_reader(filename, 'ffmpeg')
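# Usage sketch: the imageio ffmpeg reader iterates over frames as numpy
# arrays, and stream metadata such as fps comes from get_meta_data(). The
# '/tmp/file.mp4' path above is a placeholder, so substitute a real video
# before running this.
print(vid.get_meta_data().get('fps'))
for index, frame in enumerate(vid):
    print(index, frame.shape)  # (height, width, 3) per frame
    if index >= 2:
        break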
|
[
"mdavidescu@protonmail.com"
] |
mdavidescu@protonmail.com
|
20f45a59812b15be7ada56605aef1b2ee5b09990
|
7f226ac5e54752573a79f5062f52ef7bf5617fa3
|
/src/app/auth/service.py
|
134a21af809439397e20d18b446b610012a161bf
|
[
"BSD-3-Clause"
] |
permissive
|
baxik88/useful
|
0fef64e54b2f979988ccea110e15fc14b64f5aeb
|
ed081ebed68cf31178484e18ca96961f74344360
|
refs/heads/master
| 2021-06-13T21:29:14.303718
| 2020-05-23T20:06:59
| 2020-05-23T20:06:59
| 254,449,660
| 1
| 0
|
BSD-3-Clause
| 2020-04-09T18:33:31
| 2020-04-09T18:33:30
| null |
UTF-8
|
Python
| false
| false
| 2,881
|
py
|
import jwt
from jwt.exceptions import InvalidTokenError
from datetime import datetime, timedelta
from pathlib import Path
from typing import Optional
from src.core import config
from src.app.base.utils.email import send_email
password_reset_jwt_subject = "preset"
def send_test_email(email_to: str):
project_name = config.PROJECT_NAME
subject = f"{project_name} - Test email"
with open(Path(config.EMAIL_TEMPLATES_DIR) / "test_email.html") as f:
template_str = f.read()
send_email(
email_to=email_to,
subject_template=subject,
html_template=template_str,
environment={"project_name": config.PROJECT_NAME, "email": email_to},
)
def send_reset_password_email(email_to: str, email: str, token: str):
project_name = config.PROJECT_NAME
subject = f"{project_name} - Password recovery for user {email}"
with open(Path(config.EMAIL_TEMPLATES_DIR) / "reset_password.html") as f:
template_str = f.read()
if hasattr(token, "decode"):
use_token = token.decode()
else:
use_token = token
server_host = config.SERVER_HOST
link = f"{server_host}/reset-password?token={use_token}"
send_email(
email_to=email_to,
subject_template=subject,
html_template=template_str,
environment={
"project_name": config.PROJECT_NAME,
"username": email,
"email": email_to,
"valid_hours": config.EMAIL_RESET_TOKEN_EXPIRE_HOURS,
"link": link,
},
)
def send_new_account_email(email_to: str, username: str, password: str):
project_name = config.PROJECT_NAME
subject = f"{project_name} - New account for user {username}"
with open(Path(config.EMAIL_TEMPLATES_DIR) / "new_account.html") as f:
template_str = f.read()
link = config.SERVER_HOST
send_email(
email_to=email_to,
subject_template=subject,
html_template=template_str,
environment={
"project_name": config.PROJECT_NAME,
"username": username,
"password": password,
"email": email_to,
"link": link,
},
)
def generate_password_reset_token(email):
delta = timedelta(hours=config.EMAIL_RESET_TOKEN_EXPIRE_HOURS)
now = datetime.utcnow()
expires = now + delta
exp = expires.timestamp()
encoded_jwt = jwt.encode(
{"exp": exp, "nbf": now, "sub": password_reset_jwt_subject, "email": email},
config.SECRET_KEY,
algorithm="HS256",
)
return encoded_jwt
def verify_password_reset_token(token) -> Optional[str]:
try:
decoded_token = jwt.decode(token, config.SECRET_KEY, algorithms=["HS256"])
assert decoded_token["sub"] == password_reset_jwt_subject
return decoded_token["email"]
except InvalidTokenError:
return None
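# Round-trip sketch for the two JWT helpers above (assumes config.SECRET_KEY
# and config.EMAIL_RESET_TOKEN_EXPIRE_HOURS are set in src.core.config):
token = generate_password_reset_token("user@example.com")
assert verify_password_reset_token(token) == "user@example.com"
assert verify_password_reset_token("garbage") is None  # InvalidTokenError path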
|
[
"socanime@gmail.com"
] |
socanime@gmail.com
|
c0aa63a35bda6223f2be8b78982a9a3e7b9184ed
|
2c7eed223ab8bdeb33698d079e70fc8203d83a12
|
/Django_VeriTure/context_processors.py
|
8ab2c3c373f35abd4ce0637202376ad3f658cae6
|
[] |
no_license
|
OscarZeng/Django_VeriTure
|
a5bb375a2a44a07d31240076b8f2beeb2f9ab554
|
422ccca0f85268cfeb35b26afa1bef2224ca535b
|
refs/heads/master
| 2020-04-10T21:35:10.864329
| 2018-12-28T06:17:38
| 2018-12-28T06:17:38
| 161,300,295
| 0
| 0
| null | 2018-12-28T06:03:37
| 2018-12-11T08:15:41
|
CSS
|
UTF-8
|
Python
| false
| false
| 1,162
|
py
|
from django.contrib.sites.models import Site
from django.conf import settings as django_settings
def pinax_apps_filter(app):
return app.startswith("pinax.") or app in ["account", "mailer"]
def package_names(names):
apps = []
for x in names:
if x.startswith("pinax."):
apps.append(x.replace(".", "-"))
if x == "account":
apps.append("django-user-accounts")
if x == "mailer":
apps.append("django-mailer")
return apps
def settings(request):
ctx = {
"ADMIN_URL": django_settings.ADMIN_URL,
"CONTACT_EMAIL": django_settings.CONTACT_EMAIL,
"pinax_notifications_installed": "pinax.notifications" in django_settings.INSTALLED_APPS,
"pinax_stripe_installed": "pinax.stripe" in django_settings.INSTALLED_APPS,
"pinax_apps": package_names(filter(pinax_apps_filter, django_settings.INSTALLED_APPS))
}
if Site._meta.installed:
site = Site.objects.get_current(request)
ctx.update({
"SITE_NAME": site.name,
"SITE_DOMAIN": site.domain
})
return ctx
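# Registration sketch: the settings() processor above only runs once it is
# listed in TEMPLATES in settings.py; the dotted path assumes this file lives
# at Django_VeriTure/context_processors.py, as the repo name suggests.
TEMPLATES = [{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'APP_DIRS': True,
    'OPTIONS': {
        'context_processors': [
            'django.template.context_processors.request',
            'Django_VeriTure.context_processors.settings',
        ],
    },
}]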
|
[
"noreply@github.com"
] |
OscarZeng.noreply@github.com
|
8b877b8b35c7c47de89a72f19ca6c484341ff04a
|
60d8229c8b1aa20f0de10c3d463a746fc38db127
|
/pollster/pollster/settings.py
|
879112dba8df33d93cb1ca547639c923826d482e
|
[] |
no_license
|
Shaila-B/Django_Pollster_Project
|
90364ba7c0485fde78dc7ad846cb96b24d0c426d
|
99b6e296f4c02ea44c0ad5ca2204ce8528a0ccba
|
refs/heads/master
| 2023-01-19T16:24:30.324848
| 2020-11-25T05:10:25
| 2020-11-25T05:10:25
| 315,821,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,174
|
py
|
"""
Django settings for pollster project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fxe@p)e*&w4=)s9p4+^ez*ybix74#ei0gtg59p9bwgi533i6%m'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'polls.apps.PollsConfig',
'pages.apps.PagesConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pollster.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pollster.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"35136064+BShaila@users.noreply.github.com"
] |
35136064+BShaila@users.noreply.github.com
|
fea28fdec84143876c06e246763c89abdace7ef2
|
102987872cce8d2ed9129a2d6acd9093d2545d9d
|
/four_dot/settings.py
|
3b1c16aa8c7b69ce2de187fadb02c2f68c7afc03
|
[] |
no_license
|
Praveen-S-98/Four_Dot
|
20b60faaac151e7e72678b188a985683e1b682fa
|
8e9304132c9a12a8ad5b5af75dcd80e9f577045b
|
refs/heads/main
| 2023-01-11T19:15:05.811719
| 2020-11-17T09:14:45
| 2020-11-17T09:14:45
| 313,565,352
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,261
|
py
|
"""
Django settings for four_dot project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')
STATIC_DIR = os.path.join(BASE_DIR, 'static')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'l0jy_epw&@i9u2!ot8yat_(t@mib%797e^8_td_tu$t^&@8%93'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main_app'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'four_dot.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATES_DIR, ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'four_dot.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
STATIC_DIR,
]
|
[
"praveenmadrass98@gmail.com"
] |
praveenmadrass98@gmail.com
|
93a6c99bd8d276350af34ccd0d974b01b5788d15
|
9d7455482bc08289493d0a5080908214263e5867
|
/oldbak/pytest/test1.py
|
a64163735c6485b074a3659c76e92861c7512201
|
[] |
no_license
|
ld911/code
|
7ffa8339b4bb421f549984113d8795e2e86c458b
|
0c79013171fef4582c02d92819b2d77b128d978d
|
refs/heads/master
| 2021-09-14T23:56:20.263361
| 2021-08-13T14:47:34
| 2021-08-13T14:47:34
| 242,180,171
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,772
|
py
|
# Generated by Selenium IDE
import time
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
class TestTest():
def setup_method(self, method):
self.driver = webdriver.Chrome()
self.vars = {}
def teardown_method(self, method):
self.driver.quit()
def wait_for_window(self, timeout=2):
time.sleep(round(timeout / 1000))
wh_now = self.driver.window_handles
wh_then = self.vars["window_handles"]
if len(wh_now) > len(wh_then):
return set(wh_now).difference(set(wh_then)).pop()
def test_test(self):
self.driver.get("https://www.hongxiu.com/book/12115534503935401")
self.driver.set_window_size(1366, 768)
self.driver.execute_script("window.scrollTo(0,100)")
self.driver.find_element(By.ID, "j-closeGuide").click()
self.driver.find_element(By.ID, "readBtn").click()
self.driver.find_element(By.ID, "j-closeGuide").click()
element = self.driver.find_element(By.CSS_SELECTOR, "#j_navCatalogBtn i")
actions = ActionChains(self.driver)
actions.move_to_element(element).perform()
self.driver.find_element(By.CSS_SELECTOR, "#j_navCatalogBtn i").click()
element = self.driver.find_element(By.CSS_SELECTOR, "body")
actions = ActionChains(self.driver)
actions.move_to_element(element, 0, 0).perform()
self.vars["window_handles"] = self.driver.window_handles
self.driver.find_element(By.LINK_TEXT, "目录").click()
self.vars["win2628"] = self.wait_for_window(2000)
self.driver.switch_to.window(self.vars["win2628"])
self.driver.find_element(By.ID, "readBtn").click()
|
[
"f@tshb.w10"
] |
f@tshb.w10
|
e18d3a656a6c89f572a6a3571116fc5d1a013e79
|
ba898c119d7c9876719bdde67037b98d31620616
|
/ex10ec3.py
|
5f3e73725b3e529586387f8046934648206dba00
|
[] |
no_license
|
adamomfg/lpthw
|
71db13424c0cb8b29dccb8eba8e12818dee00028
|
a3485752b9d32cb357ea076b45a4a24f51212a3b
|
refs/heads/master
| 2021-01-18T19:18:50.772186
| 2012-05-11T03:05:30
| 2012-05-11T03:05:30
| 3,345,544
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 546
|
py
|
#!/usr/bin/python
tabby_cat = "\tI'm tabbed in."
persian_cat = "I'm split\non a line."
backslash_cat = "I'm \\ a \\ cat."
fat_cat = '''
I'll do a list:
\t* Cat food
\t* Fishies
\t* Catnip\n\t* Grass
'''
print tabby_cat
print persian_cat
print backslash_cat
print fat_cat
stupid = "cat"
horrible = "\t\t\tI'm a triple tabby %s." % stupid
print horrible
amount = 10
lard_ass_cat_wants = 15
fatter_cat = """
Here's what your dumb cat wants:
"""
print fatter_cat
print "%s wants %r fish and %d fishies." % (stupid, amount, lard_ass_cat_wants)
|
[
"adampsyche@gmail.com"
] |
adampsyche@gmail.com
|
8002c1abe3d5a561505d0561888fd5fc223a6221
|
cb46a9c0b79aae33943b92878aa7eaab961c5815
|
/src/kcf/track.py
|
4e26081fbf2a73e57277d6722e8026a5d511d598
|
[] |
no_license
|
heyanbo1996/gmm_sort_track
|
1ea2d9cfe40c1fe823e91dbf66f5a0f1571d0d5a
|
0283fc44d7dea0b8c689da0b794fada7b8b775d4
|
refs/heads/master
| 2023-04-28T02:48:41.050769
| 2021-05-21T15:15:19
| 2021-05-21T15:15:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,382
|
py
|
import numpy as np
from kcf_tracker import KCFTracker
# Convert a bbox from [x1,y1,x2,y2] form to [centre x, centre y, area s, aspect ratio r].T
def convert_bbox_to_z(bbox):
"""
Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form
[x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is
the aspect ratio
"""
w = bbox[2] - bbox[0]
h = bbox[3] - bbox[1]
x = bbox[0] + w / 2.
y = bbox[1] + h / 2.
s = w * h # scale is just area
r = w / float(h)
# Reshape the array [x,y,s,r] into a 4-row, 1-column form, i.e. [x,y,s,r].T
return np.array([x, y, s, r]).reshape((4, 1))
# Convert a bbox from [x,y,s,r] form back to [x1,y1,x2,y2] form
def convert_x_to_bbox(x, score=None):
"""
Takes a bounding box in the centre form [x,y,s,r] and returns it in the form
[x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right
"""
w = np.sqrt(x[2] * x[3])
h = x[2] / w
if score is None:
return np.array([x[0] - w / 2., x[1] - h / 2., x[0] + w / 2., x[1] + h / 2.]).reshape((1, 4))
else:
return np.array([x[0] - w / 2., x[1] - h / 2., x[0] + w / 2., x[1] + h / 2., score]).reshape((1, 5))
class TrackState:
Tentative = 1
Confirmed = 2
Deleted = 3
class Track(object):
"""
This class represents the internal state of individual tracked objects observed as bbox.
"""
count = 0
def __init__(self, frame_count, bbox, mean, covariance, track_id, n_init=30, max_age=30, cn=True, hog=True, fixed_window=True, multi_scale=True, peak_threshold=0.4):
"""
Initialises a tracker using the initial bounding box.
"""
# define constant velocity model
# (the state variable is 8-dimensional, the observation 4-dimensional; build the target with the required dimensions)
self.kcf = KCFTracker(cn, hog, fixed_window, multi_scale, peak_threshold)
self.mean = mean
self.covariance = covariance
self.track_id = track_id
self.state = TrackState.Tentative
self.hits = 1
self.time_since_update = 0
self.path = []
self.path.append([frame_count, bbox[0], bbox[1], bbox[2], bbox[3]])
self.n_init = n_init
self.max_age = max_age
# Used when computing MOT metrics
self.print_path = False
def init_kcf(self, frame, tlwh):
self.kcf.init(tlwh, frame)
def update_kcf(self, frame):
return self.kcf.update(frame)
def retrain_kcf(self, frame, roi):
self.kcf.retrain(frame, roi)
def update(self, frame, frame_count, kf, tlwh, tlbr, bbox):
"""
Updates the state vector with observed bbox.
"""
self.kcf.retrain(frame, tlwh)
self.mean, self.covariance = kf.update(self.mean, self.covariance, bbox)
self.hits += 1
self.time_since_update = 0
self.path.append([frame_count, tlbr[0], tlbr[1], tlbr[2], tlbr[3]])
if self.state == TrackState.Tentative and self.hits >= self.n_init:
self.state = TrackState.Confirmed
def predict(self, kf):
"""
Advances the state vector and returns the predicted bounding box estimate.
"""
self.mean, self.covariance = kf.predict(self.mean, self.covariance)
self.time_since_update += 1
# Return the tracker state as [top-left x, top-left y, bottom-right x, bottom-right y]
def to_tlbr(self):
ret = self.mean[:4].copy()
ret[2] *= ret[3]
ret[:2] -= ret[2:] / 2
ret[2:] = ret[:2] + ret[2:]
return ret
def mark_missed(self):
if self.state == TrackState.Tentative:
self.state = TrackState.Deleted
elif self.time_since_update > self.max_age:
self.state = TrackState.Deleted
def is_tentative(self):
return self.state == TrackState.Tentative
def is_confirmed(self):
return self.state == TrackState.Confirmed
def is_deleted(self):
return self.state == TrackState.Deleted
def downward(self):
if len(self.path) >= self.n_init:
for i in range(len(self.path) - 1, len(self.path) - self.n_init + 1, -1):
if self.path[i][4] - self.path[i - 1][4] < 10:
return False
return True
return False
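# Round-trip sanity check for the two converters above: convert_x_to_bbox()
# inverts convert_bbox_to_z() (the box values are illustrative, not from the repo).
_box = np.array([10., 20., 50., 80.])
assert np.allclose(convert_x_to_bbox(convert_bbox_to_z(_box).flatten()),
                   _box.reshape(1, 4))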
|
[
"xuweilin2014@outlook.com"
] |
xuweilin2014@outlook.com
|
ab123a0906bcbf93c6e6b0a05e50506f88cf171b
|
9b614a562b9472cc4abaad1d75a9afef982fd1ed
|
/blog/admin.py
|
c94cc4ec4906bc749785f83d6e924a450337aeed
|
[] |
no_license
|
Navayuvan-SB/Mini-Blog-App
|
bbca589474c358e91468597946d58757be59f2dc
|
1148160570cd50445e567a8b165f7c071464a18a
|
refs/heads/master
| 2023-04-14T13:33:08.674879
| 2021-04-15T04:27:09
| 2021-04-15T04:27:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 505
|
py
|
from django.contrib import admin
from .models import Author, Blog, Content, Comment
class ContentInline(admin.TabularInline):
model = Content
extra = 2
class CommentInline(admin.TabularInline):
model = Comment
extra = 0
@admin.register(Blog)
class BlogAdmin(admin.ModelAdmin):
list_display = ('title', 'blogger', 'post_date')
inlines = [ContentInline, CommentInline]
@admin.register(Author)
class AuthorAdmin(admin.ModelAdmin):
list_display = ('user', 'date_of_birth')
|
[
"sb.navayuvan@gmail.com"
] |
sb.navayuvan@gmail.com
|
2fe1e5105250cdb1c1f198ff4f61fb1dd28cb376
|
faeba40d233277d67c404d05e5cc82c8930e8447
|
/auction/models.py
|
0a98b865b9369a23b753caad09702e2176627fe1
|
[
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] |
permissive
|
chi-ng/the-weather-app
|
8cf5f144ce55fd3b35c2ed439e5c83a776821b69
|
368a0a3f86ad1bf4fcf05c8b500f82809239c442
|
refs/heads/master
| 2023-01-02T22:23:12.280072
| 2020-10-28T15:51:55
| 2020-10-28T15:51:55
| 305,204,584
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,887
|
py
|
from django.db import models
import datetime
from django.conf import settings
from django.contrib.auth.models import AbstractUser
from django.contrib.auth.models import (
BaseUserManager, AbstractBaseUser
)
from decimal import Decimal
class MyUserManager(BaseUserManager):
def create_user(self, email, date_of_birth, password=None):
"""
Creates and saves a User with the given email, date of
birth and password.
"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(
email=self.normalize_email(email),
date_of_birth=date_of_birth,
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, date_of_birth, password):
user = self.create_user(email,
password=password,
date_of_birth=date_of_birth,
)
user.is_admin = True  # is_staff is a read-only property derived from is_admin
user.save(using=self._db)
return user
class MyUser(AbstractBaseUser):
email = models.EmailField(
verbose_name='email address',
max_length=255,
unique=True,
)
date_of_birth = models.DateField()
is_active = models.BooleanField(default=True)
is_admin = models.BooleanField(default=False)
objects = MyUserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['date_of_birth']
def get_full_name(self):
# The user is identified by their email address
return self.email
def get_short_name(self):
# The user is identified by their email address
return self.email
def __str__(self): # __unicode__ on Python 2
return self.email
def has_perm(self, perm, obj=None):
"Does the user have a specific permission?"
# Simplest possible answer: Yes, always
return True
def has_module_perms(self, app_label):
"Does the user have permissions to view the app `app_label`?"
# Simplest possible answer: Yes, always
return True
@property
def is_staff(self):
"Is the user a member of staff?"
# Simplest possible answer: All admins are staff
return self.is_admin
class item(models.Model):
title = models.CharField(max_length=50, default='')
description = models.TextField(default='')
enddate = models.DateField()
picture = models.ImageField()
available = models.BooleanField()
MyUser = models.ForeignKey(MyUser, on_delete=models.CASCADE)
def __str__(self):
return self.title
class bid(models.Model):
price = models.DecimalField(decimal_places=2, max_digits=6, default=Decimal(0))
item = models.ForeignKey(item, on_delete=models.CASCADE)
MyUser = models.ForeignKey(MyUser, on_delete=models.CASCADE)
def __str__(self):
return f'{self.item} - {self.price}'
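# Usage sketch for the custom manager above (assumes Django is configured and
# AUTH_USER_MODEL points at MyUser; values are illustrative):
new_user = MyUser.objects.create_user(
    email="bidder@example.com", date_of_birth="1990-01-01", password="s3cret")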
|
[
"tri.nguyen@se17.qmul.ac.uk"
] |
tri.nguyen@se17.qmul.ac.uk
|
59b7d6e5674d01e2fac7d6d57309c839f771eb14
|
40609c292f020a47c021181660e003180867e5ef
|
/torreto/asgi.py
|
51cc4876e046a1c48c8396d1a5d20f74b6c7fd7b
|
[] |
no_license
|
ZhibekSolp/toretto
|
4149ce3a4154dfebf5d391da313c3488450875e8
|
393171f7870dc76ccea06c6564879ebc17256d86
|
refs/heads/master
| 2023-07-18T18:52:07.427930
| 2021-09-14T16:03:01
| 2021-09-14T16:03:01
| 406,434,830
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
"""
ASGI config for torreto project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'torreto.settings')
application = get_asgi_application()
|
[
"ZhibekSolp.solpieva.zhibek@gmail.com"
] |
ZhibekSolp.solpieva.zhibek@gmail.com
|
a9952070651bd31495303c9d163906c1619dd208
|
458c7d8a560658fcc61a629bc6397bf515717b61
|
/catkin_ws/build/turtlesim_cleaner/catkin_generated/pkg.installspace.context.pc.py
|
b3de5f6cbdc7d20d81366513873aa5840f200e31
|
[] |
no_license
|
aboughatane/Tb3_with_OM
|
52e6ee855e4b2b773289c4c9ea9684b08206aa24
|
8ee25ff9a6ce2ad770471baf1f51710e6848ebf0
|
refs/heads/main
| 2023-03-15T15:19:31.296028
| 2020-12-20T23:35:55
| 2020-12-20T23:35:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "turtlesim_cleaner"
PROJECT_SPACE_DIR = "/home/diakhaby/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
|
[
"thiernodd@gmail.com"
] |
thiernodd@gmail.com
|
a21ddc661c01e54ef6d999a6ce489ce13c7ddaef
|
8a50d4349533c4bfb2adb9b2760c147b180ee658
|
/software/experimenting/openGLShaders.py
|
38c8436afb3bd91494d188b33e3a5b33f1262bb5
|
[] |
no_license
|
Bellspringsteen/blocks
|
4efe86ae2cf16df45e8af3e7f64697b159d708b7
|
e5b8257d5909134ab164a333938634827d1a47a0
|
refs/heads/master
| 2016-09-06T03:53:44.155679
| 2015-06-25T03:50:56
| 2015-06-25T03:50:56
| 38,026,424
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,707
|
py
|
from OpenGLContext import testingcontext
BaseContext = testingcontext.getInteractive()
from OpenGL.GL import *
from OpenGL.arrays import vbo
from OpenGLContext.arrays import *
from OpenGL.GL import shaders
class TestContext( BaseContext ):
"""Creates a simple vertex shader..."""
def OnInit( self ):
VERTEX_SHADER = shaders.compileShader("""#version 120
void main() {
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
}""", GL_VERTEX_SHADER)
FRAGMENT_SHADER = shaders.compileShader("""#version 120
void main() {
gl_FragColor = vec4( 0, 1, 0, 1 );
}""", GL_FRAGMENT_SHADER)
self.shader = shaders.compileProgram(VERTEX_SHADER,FRAGMENT_SHADER)
self.vbo = vbo.VBO(
array( [
[ 0, 1, 0 ],
[ -1,-1, 0 ],
[ 1,-1, 0 ],
[ 2,-1, 0 ],
[ 4,-1, 0 ],
[ 4, 1, 0 ],
[ 2,-1, 0 ],
[ 4, 1, 0 ],
[ 2, 1, 0 ],
],'f')
)
def Render( self, mode):
"""Render the geometry for the scene."""
shaders.glUseProgram(self.shader)
try:
self.vbo.bind()
try:
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointerf( self.vbo )
glDrawArrays(GL_TRIANGLES, 0, 9)
finally:
self.vbo.unbind()
glDisableClientState(GL_VERTEX_ARRAY);
finally:
shaders.glUseProgram( 0 )
if __name__ == "__main__":
TestContext.ContextMainLoop()
|
[
"Alex.Morgan.Bell@gmail.com"
] |
Alex.Morgan.Bell@gmail.com
|
ea92bfa37b0a7cf7dfb14ebe3db3b8bc9df3ce1d
|
c69422b3b24928cb7f1bec375edf0edbb0eca791
|
/setssh_model/setssh/admin_urls.py
|
6f043ceb62a5755fc7fe14401b0d1ed572fc6eb8
|
[] |
no_license
|
pcr/reviewboard_git_hooks
|
aff0101167fa343790a6235c8b50327db4abdce5
|
2e38cf3303f6699fb2653c492169da213059d719
|
refs/heads/master
| 2016-09-10T23:10:54.240772
| 2014-02-20T02:47:54
| 2014-02-20T02:47:54
| 15,449,656
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 99
|
py
|
from django.conf.urls import patterns, url
patterns('setssh.views',
url(r'^$', 'configure'),
)
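# patterns() was removed in Django 1.10; a modern equivalent of the above,
# assuming a configure() view exists in setssh/views.py:
from django.urls import path
from setssh import views

urlpatterns = [
    path('', views.configure),
]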
|
[
"zhangqingyong@kingsoft.com"
] |
zhangqingyong@kingsoft.com
|
cb38fa4c2c044dfe085f214502a52fdb44f02eca
|
b182a3407b56c14b830b6ff3a543ba29d5996f84
|
/beartype_test/a00_unit/a10_pep/test_pep484.py
|
355c85aa7398f512c083c606de8ce63f813d32bc
|
[
"MIT"
] |
permissive
|
yamgent/beartype
|
9d1899a6e6dacd1dd74652a81a2c1f275b1fd775
|
afaaa0d8c25f8e5c06dd093982787b794ee48f2d
|
refs/heads/main
| 2023-03-19T18:27:44.326772
| 2021-03-08T06:20:57
| 2021-03-08T06:26:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,607
|
py
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype decorator PEP 484-compliant type hint unit tests.**
This submodule unit tests the :func:`beartype.beartype` decorator with respect
to **PEP 484-compliant type hints** (i.e., :mod:`beartype`-agnostic annotations
specifically compliant with `PEP 484`_).
See Also
----------
:mod:`beartype_test.a00_unit.decor.code.test_code_pep`
Submodule generically unit testing PEP-compliant type hints.
.. _PEP 484:
https://www.python.org/dev/peps/pep-0484
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To raise human-readable test errors, avoid importing from
# package-specific submodules at module scope.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
from beartype_test.util.pyterror import raises_uncached
from typing import NoReturn, Union, no_type_check
# ....................{ TESTS ~ decor : no_type_check }....................
def test_pep484_decor_no_type_check() -> None:
'''
Test the :func:`beartype.beartype` decorator against all edge cases of the
`PEP 484`_-compliant :attr:`typing.no_type_check` decorator.
.. _PEP 484:
https://www.python.org/dev/peps/pep-0484
'''
# Defer heavyweight imports.
from beartype import beartype
# Callable decorated by @typing.no_type_check whose otherwise PEP-compliant
# type hints *SHOULD* be subsequently ignored by @beartype.
@no_type_check
def of_beechen_green(and_shadows_numberless: Union[int, str]) -> str:
return and_shadows_numberless
# The same callable additionally decorated by @beartype.
of_beechen_green_beartyped = beartype(of_beechen_green)
# Assert these two callables to be the same, implying @beartype silently
# reduced to a noop by returning this callable undecorated.
assert of_beechen_green is of_beechen_green_beartyped
# ....................{ TESTS ~ hint : noreturn }....................
def test_pep484_hint_noreturn() -> None:
'''
Test the :func:`beartype.beartype` decorator against all edge cases of the
`PEP 484`_-compliant :attr:`typing.NoReturn` type hint, which is
contextually permissible *only* as an unsubscripted return annotation.
.. _PEP 484:
https://www.python.org/dev/peps/pep-0484
'''
# Defer heavyweight imports.
from beartype import beartype
from beartype.roar import (
BeartypeCallHintPepException,
BeartypeDecorHintPep484Exception,
)
# Exception guaranteed to be raised *ONLY* by the mending_wall() function.
class BeforeIBuiltAWallIdAskToKnow(Exception): pass
# Callable unconditionally raising an exception correctly annotating its
# return as "NoReturn".
@beartype
def mending_wall() -> NoReturn:
raise BeforeIBuiltAWallIdAskToKnow(
"Something there is that doesn't love a wall,")
# Assert this callable raises the expected exception when called.
with raises_uncached(BeforeIBuiltAWallIdAskToKnow):
mending_wall()
# Callable explicitly returning a value incorrectly annotating its return
# as "NoReturn".
@beartype
def frozen_ground_swell() -> NoReturn:
return 'That sends the frozen-ground-swell under it,'
# Assert this callable raises the expected exception when called.
with raises_uncached(BeartypeCallHintPepException):
frozen_ground_swell()
# Callable implicitly returning a value incorrectly annotating its return
# as "NoReturn".
@beartype
def we_do_not_need_the_wall() -> NoReturn:
'There where it is we do not need the wall:'
# Assert this callable raises the expected exception when called.
with raises_uncached(BeartypeCallHintPepException):
we_do_not_need_the_wall()
# Assert this decorator raises the expected exception when decorating a
# callable returning a value incorrectly annotating its return as
# "NoReturn".
with raises_uncached(BeartypeDecorHintPep484Exception):
# Callable returning a value incorrectly annotating a parameter as
# "NoReturn".
@beartype
def upper_boulders(spills: NoReturn):
return 'And spills the upper boulders in the sun;'
# Assert this decorator raises the expected exception when decorating a
# callable returning a value annotating a parameter as a supported PEP
# 484-compliant type hint incorrectly subscripted by "NoReturn".
with raises_uncached(BeartypeDecorHintPep484Exception):
@beartype
def makes_gaps(abreast: Union[str, NoReturn]):
return 'And makes gaps even two can pass abreast.'
# ....................{ TESTS ~ hint : sequence }....................
def test_pep484_hint_sequence_standard_cached() -> None:
'''
Test that a `subtle issue <issue #5_>`__ of the :func:`beartype.beartype`
decorator with respect to metadata describing **PEP-compliant standard
sequence hints** (e.g., :attr:`typing.List`) cached via memoization across
calls to that decorator has been resolved and *not* regressed.
Note that the more general-purpose :func:`test_p484` test *should* already
exercise this issue, but that this issue was sufficiently dire to warrant
special-purposed testing exercising this exact issue.
.. _issue #5:
https://github.com/beartype/beartype/issues/5
'''
# Defer heavyweight imports.
from beartype import beartype
# Callable annotated by an arbitrary PEP 484 standard sequence type hint.
@beartype
def fern_hill(prince_of_the_apple_towns: Union[int, str]) -> str:
return prince_of_the_apple_towns
# A different callable annotated by the same hint and another arbitrary
# non-"typing" type hint.
@beartype
def apple_boughs(
famous_among_the_barns: Union[int, str],
first_spinning_place: str
) -> str:
return famous_among_the_barns + first_spinning_place
# Validate that these callables behave as expected.
assert fern_hill(
'Now as I was young and easy under the apple boughs'
'About the lilting house and happy as the grass was green,'
' The night above the dingle starry,'
' Time let me hail and climb'
' Golden in the heydays of his eyes,'
'And honoured among wagons I was prince of the apple towns'
'And once below a time I lordly had the trees and leaves'
' Trail with daisies and barley'
' Down the rivers of the windfall light. '
).startswith('Now as I was young and easy under the apple boughs')
assert apple_boughs((
'And as I was green and carefree, famous among the barns'
'About the happy yard and singing as the farm was home,'
' In the sun that is young once only,'
' Time let me play and be'
' Golden in the mercy of his means,'
'And green and golden I was huntsman and herdsman, the calves'
'Sang to my horn, the foxes on the hills barked clear and cold,'
' And the sabbath rang slowly'
' In the pebbles of the holy streams.'
), 'All the sun long it was running, it was lovely, the hay').startswith(
'And as I was green and carefree, famous among the barns')
# ....................{ TESTS ~ hint : invalid }....................
def test_pep484_hint_invalid_types_nongeneric() -> None:
'''
Test the :func:`beartype.beartype` decorator against **invalid non-generic
classes** (i.e., classes declared by the :mod:`typing` module used to
instantiate PEP-compliant type hints but themselves invalid as
PEP-compliant type hints).
'''
# Defer heavyweight imports.
from beartype import beartype
from beartype.roar import BeartypeDecorHintPepSignException
from beartype_test.a00_unit.data.hint.pep.data_hintpep import (
HINTS_PEP_INVALID_CLASS_NONGENERIC)
# Assert that decorating a callable annotated by a non-generic class raises
# the expected exception.
for type_nongeneric in HINTS_PEP_INVALID_CLASS_NONGENERIC:
with raises_uncached(BeartypeDecorHintPepSignException):
@beartype
def childe_roland(to_the_dark_tower_came: type_nongeneric) -> (
type_nongeneric):
raise to_the_dark_tower_came
|
[
"leycec@gmail.com"
] |
leycec@gmail.com
|
d272b9365344ccaa4c50d031bef1c9c2cf416081
|
db578636f604f3529c111e3bfe6c9bd49c87bf47
|
/tests/logout_test.py
|
a2815d2f62f8cea69302b0c41a0d699fdfa5f281
|
[
"MIT"
] |
permissive
|
pcc426/Betty
|
162153e230072b9d53ca451b8090d06a8818fa6a
|
595bcd2f58d72b67e6d2cc7e7771f8f1ee7f0fd6
|
refs/heads/master
| 2021-01-13T08:23:06.740670
| 2017-06-28T07:56:03
| 2017-06-28T07:56:03
| 69,548,495
| 0
| 0
| null | 2017-06-28T07:56:04
| 2016-09-29T08:42:17
|
Python
|
UTF-8
|
Python
| false
| false
| 609
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by Pcc on 6/28/17
import sys
import time
import unittest
from appium import webdriver
import DriverConfig
class MyTestCase(unittest.TestCase):
"""Here comes comments"""
def setUp(self):
# NOTE: self.driver is never initialised here; DriverConfig presumably
# supplies the Appium webdriver session that test_logout() expects.
pass
def test_logout(self):
# Skip the opening ad
time.sleep(10)
# Second approach: drag the screen with a slow swipe; duration is the gesture time in milliseconds
self.driver.swipe(start_x=0, start_y=1500, end_x=0, end_y=550, duration=1000)
if __name__ == '__main__':
unittest.main()
|
[
"peng.chengcheng@bestv.com.cn"
] |
peng.chengcheng@bestv.com.cn
|
c2dba55554af242de70d02098e3d4199daa790bf
|
30f0200eaee8bee1d3d0faab0e9f5f33ff35f2c0
|
/congress_mentions.py
|
adddf94018a825cd463e971d4e3ac94d0ad9e906
|
[
"MIT"
] |
permissive
|
stephenmcardle/DCInbox
|
43e8382442632f7b810b4821d32b56b056000aa3
|
eedd36d4e1ed49e0f111e6aae81409ed0886a1c9
|
refs/heads/master
| 2021-01-17T14:05:38.096866
| 2019-02-26T02:59:45
| 2019-02-26T02:59:45
| 83,476,180
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 46,393
|
py
|
# -*- coding: utf-8 -*-
import json,csv
rep_name_list = ["Jo Bonner", "Martha Roby", "Robert Aderholt", "Mo Brooks", "Spencer Bachus", "Terri Sewell", "Don Young", "Eni Faleomavaega", "Paul Gosar", "Trent Franks", "Ben Quayle", "Ed Pastor", "David Schweikert", "Jeff Flake", "Raúl Grijalva", "Gabrielle Giffords", "Ron Barber", "Rick Crawford", "Tim Griffin", "Steve Womack", "Mike Ross", "Mike Thompson", "Wally Herger", "Daniel Lungren", "Tom McClintock", "Doris Matsui", "Lynn Woolsey", "George Miller", "Nancy Pelosi", "Barbara Lee", "John Garamendi", "Jerry McNerney", "Jackie Speier", "Pete Stark", "Anna Eshoo", "Mike Honda", "Zoe Lofgren", "Sam Farr", "Dennis Cardoza", "Jeff Denham", "Jim Costa", "Devin Nunes", "Kevin McCarthy", "Lois Capps", "Elton Gallegly", "Buck McKeon", "David Dreier", "Brad Sherman", "Howard Berman", "Adam Schiff", "Henry Waxman", "Xavier Becerra", "Judy Chu", "Karen Bass", "Lucille Roybal-Allard", "Maxine Waters", "Jane Harman", "Janice Hahn", "Laura Richardson", "Grace Napolitano", "Linda Sánchez", "Ed Royce", "Jerry Lewis", "Gary Miller", "Joe Baca", "Ken Calvert", "Mary Bono Mack", "Dana Rohrabacher", "Loretta Sanchez", "John Campbell", "Darrell Issa", "Brian Bilbray", "Bob Filner", "Duncan Hunter", "Susan Davis", "Diana DeGette", "Jared Polis", "Scott Tipton", "Cory Gardner", "Doug Lamborn", "Mike Coffman", "Ed Perlmutter", "John Larson", "Joe Courtney", "Rosa DeLauro", "James Himes", "Christopher Murphy", "John Carney", "Eleanor Holmes Norton", "Jeff Miller", "Steve Southerland", "Corrine Brown", "Ander Crenshaw", "Richard Nugent", "Cliff Stearns", "John Mica", "Daniel Webster", "Gus Bilirakis", "C. W. Bill Young", "Kathy Castor", "Dennis Ross", "Vern Buchanan", "Connie Mack", "Bill Posey", "Thomas Rooney", "Frederica Wilson", "Ileana Ros-Lehtinen", "Theodore Deutch", "Debbie Wasserman Schultz", "Mario Diaz-Balart", "Allen West", "Alcee Hastings", "Sandy Adams", "David Rivera", "Jack Kingston", "Sanford Bishop", "Lynn Westmoreland", "Henry Johnson", "John Lewis", "Tom Price", "Rob Woodall", "Austin Scott", "Tom Graves", "Paul Broun", "Phil Gingrey", "John Barrow", "David Scott", "Madeleine Bordallo", "Colleen Hanabusa", "Mazie Hirono", "Raúl Labrador", "Mike Simpson", "Bobby Rush", "Jesse Jackson", "Daniel Lipinski", "Luis Gutiérrez", "Mike Quigley", "Peter Roskam", "Danny Davis", "Joe Walsh", "Jan Schakowsky", "Bob Dold", "Adam Kinzinger", "Jerry Costello", "Judy Biggert", "Randy Hultgren", "Donald Manzullo", "Bobby Schilling", "Aaron Schock", "John Shimkus", "Peter Visclosky", "Joe Donnelly", "Marlin Stutzman", "Todd Rokita", "Dan Burton", "Mike Pence", "André Carson", "Larry Bucshon", "Todd Young", "Bruce Braley", "David Loebsack", "Leonard Boswell", "Tom Latham", "Steve King", "Tim Huelskamp", "Lynn Jenkins", "Kevin Yoder", "Mike Pompeo", "Ed Whitfield", "Brett Guthrie", "John Yarmuth", "Geoff Davis", "Harold Rogers", "Ben Chandler", "Steve Scalise", "Cedric Richmond", "Jeffrey Landry", "John Fleming", "Rodney Alexander", "Bill Cassidy", "Charles Boustany", "Chellie Pingree", "Michael Michaud", "Andy Harris", "C. A. 
Dutch Ruppersberger", "John Sarbanes", "Donna Edwards", "Steny Hoyer", "Roscoe Bartlett", "Elijah Cummings", "Chris Van Hollen", "John Olver", "Richard Neal", "James McGovern", "Barney Frank", "Niki Tsongas", "John Tierney", "Ed Markey", "Michael Capuano", "Stephen Lynch", "William Keating", "Dan Benishek", "Bill Huizenga", "Justin Amash", "Dave Camp", "Dale Kildee", "Fred Upton", "Tim Walberg", "Gary Peters", "Candice Miller", "Thaddeus McCotter", "Sander Levin", "Hansen Clarke", "John Conyers", "John Dingell", "Timothy Walz", "John Kline", "Erik Paulsen", "Betty McCollum", "Keith Ellison", "Michele Bachmann", "Collin Peterson", "Chip Cravaack", "Alan Nunnelee", "Bennie Thompson", "Gregg Harper", "Steven Palazzo", "Wm. Clay", "W. Todd Akin", "Russ Carnahan", "Vicky Hartzler", "Emanuel Cleaver", "Sam Graves", "Billy Long", "Jo Ann Emerson", "Blaine Luetkemeyer", "Dennis Rehberg", "Jeff Fortenberry", "Lee Terry", "Adrian Smith", "Shelley Berkley", "Mark Amodei", "Joseph Heck", "Frank Guinta", "Charles Bass", "Robert Andrews", "Frank LoBiondo", "Jon Runyan", "Chris Smith", "Scott Garrett", "Frank Pallone", "Leonard Lance", "Bill Pascrell", "Steven Rothman", "Donald Payne", "Rodney Frelinghuysen", "Rush Holt", "Albio Sires", "Martin Heinrich", "Steve Pearce", "Ben Ray Luján", "Timothy Bishop", "Steve Israel", "Pete King", "Carolyn McCarthy", "Gary Ackerman", "Gregory Meeks", "Joseph Crowley", "Jerrold Nadler", "Anthony Weiner", "Robert Turner", "Edolphus Towns", "Yvette Clarke", "Nydia Velázquez", "Michael Grimm", "Carolyn Maloney", "Charles Rangel", "José Serrano", "Eliot Engel", "Nita Lowey", "Nan Hayworth", "Christopher Gibson", "Paul Tonko", "Maurice Hinchey", "William Owens", "Richard Hanna", "Ann Marie Buerkle", "Christopher Lee", "Kathleen Hochul", "Brian Higgins", "Louise Slaughter", "Tom Reed", "G.K. Butterfield", "Renee Ellmers", "Walter Jones", "David Price", "Virginia Foxx", "Howard Coble", "Mike McIntyre", "Larry Kissell", "Sue Myrick", "Patrick McHenry", "Heath Shuler", "Mel Watt", "Brad Miller", "Rick Berg", "Gregorio Sablan", "Steve Chabot", "Jean Schmidt", "Michael Turner", "Jim Jordan", "Robert Latta", "Bill Johnson", "Steve Austria", "John Boehner", "Marcy Kaptur", "Dennis Kucinich", "Marcia Fudge", "Pat Tiberi", "Betty Sutton", "Steven LaTourette", "Steve Stivers", "James Renacci", "Tim Ryan", "Bob Gibbs", "John Sullivan", "Dan Boren", "Frank Lucas", "Tom Cole", "James Lankford", "David Wu", "Suzanne Bonamici", "Greg Walden", "Earl Blumenauer", "Peter DeFazio", "Kurt Schrader", "Robert Brady", "Chaka Fattah", "Mike Kelly", "Jason Altmire", "Glenn Thompson", "Jim Gerlach", "Patrick Meehan", "Michael Fitzpatrick", "Bill Shuster", "Tom Marino", "Lou Barletta", "Mark Critz", "Allyson Schwartz", "Mike Doyle", "Charles Dent", "Joseph Pitts", "Tim Holden", "Tim Murphy", "Todd Platts", "Pedro Pierluisi", "David Cicilline", "Jim Langevin", "Tim Scott", "Joe Wilson", "Jeff Duncan", "Trey Gowdy", "Mick Mulvaney", "James Clyburn", "Kristi Noem", "Phil Roe", "John Duncan", "Charles Fleischmann", "Scott DesJarlais", "Jim Cooper", "Diane Black", "Marsha Blackburn", "Stephen Fincher", "Steve Cohen", "Louie Gohmert", "Ted Poe", "Sam Johnson", "Ralph Hall", "Jeb Hensarling", "Joe Barton", "John Culberson", "Kevin Brady", "Al Green", "Michael McCaul", "K. 
Michael Conaway", "Kay Granger", "Mac Thornberry", "Ron Paul", "Rubén Hinojosa", "Silvestre Reyes", "Bill Flores", "Sheila Jackson Lee", "Randy Neugebauer", "Charlie Gonzalez", "Lamar Smith", "Pete Olson", "Francisco Quico Canseco", "Kenny Marchant", "Lloyd Doggett", "Michael Burgess", "Blake Farenthold", "Henry Cuellar", "Gene Green", "Eddie Bernice Johnson", "John Carter", "Pete Sessions", "Donna Christensen", "Rob Bishop", "Jim Matheson", "Jason Chaffetz", "Peter Welch", "Robert Wittman", "Scott Rigell", "Robert Scott", "J. Randy Forbes", "Robert Hurt", "Bob Goodlatte", "Eric Cantor", "Jim Moran", "H. Morgan Griffith", "Frank Wolf", "Gerald Connolly", "Jay Inslee", "Rick Larsen", "Jaime Herrera Beutler", "Doc Hastings", "Cathy McMorris Rodgers", "Norman Dicks", "Jim McDermott", "David Reichert", "Adam Smith", "David McKinley", "Shelley Moore Capito", "Nick Rahall", "Paul Ryan", "Tammy Baldwin", "Ron Kind", "Gwen Moore", "F. James Sensenbrenner", "Thomas Petri", "Sean Duffy", "Reid Ribble", "Cynthia Lummis"]
representative_list = ["Representative Bonner", "Representative Roby", "Representative Aderholt", "Representative Brooks", "Representative Bachus", "Representative Sewell", "Representative Young", "Representative Faleomavaega", "Representative Gosar", "Representative Franks", "Representative Quayle", "Representative Pastor", "Representative Schweikert", "Representative Flake", "Representative Grijalva", "Representative Giffords", "Representative Barber", "Representative Crawford", "Representative Griffin", "Representative Womack", "Representative Ross", "Representative Thompson", "Representative Herger", ["Representative Lungren","Dan Lungren"], "Representative McClintock", "Representative Matsui", "Representative Woolsey", "Representative Miller", "Representative Pelosi", "Representative Lee", "Representative Garamendi", "Representative McNerney", "Representative Speier", "Representative Stark", "Representative Eshoo", "Representative Honda", "Representative Lofgren", "Representative Farr", "Representative Cardoza", "Representative Denham", "Representative Costa", "Representative Nunes", "Representative McCarthy", "Representative Capps", "Representative Gallegly", "Representative McKeon", "Representative Dreier", "Representative Sherman", "Representative Berman", "Representative Schiff", "Representative Waxman", "Representative Becerra", "Representative Chu", "Representative Bass", "Representative Roybal-Allard", "Representative Waters", "Representative Harman", "Representative Hahn", "Representative Richardson", "Representative Napolitano", "Representative Sanchez", "Representative Royce", "Representative Lewis", "Representative Miller", "Representative Baca", "Representative Calvert", "Representative Bono_mack", "Representative Rohrabacher", "Representative Sanchez", "Representative Campbell", "Representative Issa", "Representative Bilbray", "Representative Filner", "Representative Hunter", "Representative Davis", "Representative DeGette", "Representative Polis", "Representative Tipton", "Representative Gardner", "Representative Lamborn", "Representative Coffman", "Representative Perlmutter", "Representative Larson", "Representative Courtney", "Representative DeLauro", ["Representative Himes","Jim Himes"], ["Representative Murphy","Chris Murphy"], "Representative Carney", "Representative Norton", "Representative Miller", "Representative Southerland", "Representative Brown", "Representative Crenshaw", "Representative Nugent", "Representative Stearns", "Representative Mica", "Representative Webster", "Representative Bilirakis", ["Representative Young","Bill Young"], "Representative Castor", "Representative Ross", "Representative Buchanan", "Representative Mack", "Representative Posey", ["Representative Rooney","Tom Rooney"], "Representative Wilson", "Representative Ros-Lehtinen", ["Representative Deutch","Ted Deutch"], "Representative Wasserman Schultz", "Representative Diaz-Balart", "Representative West", "Representative Hastings", "Representative Adams", "Representative Rivera", "Representative Kingston", "Representative Bishop", "Representative Westmoreland", ["Representative Johnson","Hank Johnson"], "Representative Lewis", "Representative Price", "Representative Woodall", "Representative Scott", "Representative Graves", "Representative Broun", "Representative Gingrey", "Representative Barrow", "Representative Scott", "Representative Bordallo", "Representative Hanabusa", "Representative Hirono", "Representative Labrador", "Representative Simpson", "Representative Rush", 
"Representative Jackson", ["Representative Lipinski","Dan Lipinski"], "Representative Gutierrez", "Representative Quigley", "Representative Roskam", "Representative Davis", "Representative Walsh", "Representative Schakowsky", "Representative Dold", "Representative Kinzinger", "Representative Costello", "Representative Biggert", "Representative Hultgren", "Representative Manzullo", "Representative Schilling", "Representative Schock", "Representative Shimkus", ["Representative Visclosky","Pete Visclosky"], "Representative Donnelly", "Representative Stutzman", "Representative Rokita", "Representative Burton", "Representative Pence", "Representative Carson", "Representative Bucshon", "Representative Young", "Representative Braley", ["Representative Loebsack","Dave Loebsack"], "Representative Boswell", "Representative Latham", "Representative King", "Representative Huelskamp", "Representative Jenkins", "Representative Yoder", "Representative Pompeo", "Representative Whitfield", "Representative Guthrie", "Representative Yarmuth", "Representative Davis", ["Representative Rogers","Hal Rogers"], "Representative Chandler", "Representative Scalise", "Representative Richmond", ["Representative Landry","Jeff Landry"], "Representative Fleming", "Representative Alexander", "Representative Cassidy", "Representative Boustany", "Representative Pingree", ["Representative Michaud","Mike Michaud"], "Representative Harris", ["Representative Ruppersberger","Dutch Ruppersberger"], "Representative Sarbanes", "Representative Edwards", "Representative Hoyer", "Representative Bartlett", "Representative Cummings", "Representative Van Hollen", "Representative Olver", "Representative Neal", "Representative McGovern", "Representative Frank", "Representative Tsongas", "Representative Tierney", "Representative Markey", "Representative Capuano", "Representative Lynch", ["Representative Keating","Bill Keating"], "Representative Benishek", "Representative Huizenga", "Representative Amash", "Representative Camp", "Representative Kildee", "Representative Upton", "Representative Walberg", "Representative Peters", "Representative Miller", "Representative McCotter", "Representative Levin", "Representative Clarke", "Representative Conyers", "Representative Dingell", ["Representative Walz","Tim Walz"], "Representative Kline", "Representative Paulsen", "Representative McCollum", "Representative Ellison", "Representative Bachmann", "Representative Peterson", "Representative Cravaack", "Representative Nunnelee", "Representative Thompson", "Representative Harper", "Representative Palazzo", ["Representative Clay","William Lacy Clay","Lacy Clay","William Clay"], ["Representative Akin","Todd Akin"], "Representative Carnahan", "Representative Hartzler", "Representative Cleaver", "Representative Graves", "Representative Long", "Representative Emerson", "Representative Luetkemeyer", ["Representative Rehberg","Denny Rehberg"], "Representative Fortenberry", "Representative Terry", "Representative Smith", "Representative Berkley", "Representative Amodei", ["Representative Heck","Joe Heck"], "Representative Guinta", ["Representative Bass","Charles F. Bass"], ["Representative Andrews","Robert E. 
Andrews"], "Representative LoBiondo", "Representative Runyan", "Representative Smith", "Representative Garrett", "Representative Pallone", "Representative Lance", "Representative Pascrell", ["Representative Rothman","Steve Rothman"], ["Representative Payne","Don Payne"], "Representative Frelinghuysen", "Representative Holt", "Representative Sires", "Representative Heinrich", "Representative Pearce", "Representative Lujan", ["Representative Bishop","Tim Bishop"], "Representative Israel", "Representative King", "Representative McCarthy", "Representative Ackerman", "Representative Meeks", ["Joe Crowley","Representative Crowley"], ["Representative Nadler","Jerry Nadler"], "Representative Weiner", ["Representative Turner","Bob Turner"], ["Representative Towns","Ed Towns"], "Representative Clarke", "Representative Velazquez", "Representative Grimm", "Representative Maloney", ["Representative Rangel","Charlie Rangel"], "Representative Serrano", "Representative Engel", "Representative Lowey", "Representative Hayworth", ["Representative Gibson","Chris Gibson"], "Representative Tonko", "Representative Hinchey", ["Representative Owens","Bill Owens"], "Representative Hanna", "Representative Buerkle", ["Representative Lee","Chris Lee"], "Representative Hochul", "Representative Higgins", "Representative Slaughter", "Representative Reed", "Representative Butterfield", "Representative Ellmers", "Representative Jones", "Representative Price", "Representative Foxx", "Representative Coble", "Representative McIntyre", "Representative Kissell", "Representative Myrick", "Representative McHenry", "Representative Shuler", "Representative Watt", "Representative Miller", "Representative Berg", "Representative Sablan", "Representative Chabot", "Representative Schmidt", ["Representative Turner","Mike Turner"], "Representative Jordan", ["Representative Latta","Bob Latta"], "Representative Johnson", "Representative Austria", "Representative Boehner", "Representative Kaptur", "Representative Kucinich", "Representative Fudge", "Representative Tiberi", "Representative Sutton", "Representative LaTourette", "Representative Stivers", ["Representative Renacci","Jim Renacci"], "Representative Ryan", "Representative Gibbs", "Representative Sullivan", "Representative Boren", "Representative Lucas", "Representative Cole", "Representative Lankford", "Representative Wu", "Representative Bonamici", "Representative Walden", "Representative Blumenauer", "Representative DeFazio", "Representative Schrader", "Representative Brady", "Representative Fattah", ["Representative Kelly","Michael Kelly"], "Representative Altmire", "Representative Thompson", "Representative Gerlach", ["Representative Meehan","Pat Meehan"], ["Representative Fitzpatrick","Mike Fitzpatrick"], "Representative Shuster", "Representative Marino", "Representative Barletta", "Representative Critz", "Representative Schwartz", "Representative Doyle", ["Representative Dent","Charlie Dent"], ["Representative Pitts","Joe Pitts"], "Representative Holden", "Representative Murphy", "Representative Platts", "Representative Pierluisi", "Representative Cicilline", "Representative Langevin", "Representative Scott", "Representative Wilson", "Representative Duncan", "Representative Gowdy", "Representative Mulvaney", ["Representative Clyburn","Jim Clyburn"], "Representative Noem", "Representative Roe", "Representative Duncan", ["Representative Fleischmann","Chuck Fleishmann"], "Representative DesJarlais", "Representative Cooper", "Representative Black", "Representative Blackburn", 
"Representative Fincher", "Representative Cohen", "Representative Gohmert", "Representative Poe", "Representative Johnson", "Representative Hall", "Representative Hensarling", "Representative Barton", "Representative Culberson", "Representative Brady", "Representative Green", "Representative McCaul", ["Representative Conaway","Michael Conaway","Mike Conaway"], "Representative Granger", "Representative Thornberry", "Representative Paul", "Representative Hinojosa", "Representative Reyes", "Representative Flores", "Representative Jackson Lee", "Representative Neugebauer", "Representative Gonzalez", "Representative Smith", "Representative Olson", "Representative Canseco", "Representative Marchant", "Representative Doggett", "Representative Burgess", "Representative Farenthold", "Representative Cuellar", "Representative Green", "Representative Johnson", "Representative Carter", "Representative Sessions", "Representative Christian-Christensen", "Representative Bishop", "Representative Matheson", "Representative Chaffetz", "Representative Welch", ["Representative Wittman","Rob Wittman"], "Representative Rigell", ["Representative Scott","Bobby Scott"], "Representative Forbes", "Representative Hurt", "Representative Goodlatte", "Representative Cantor", "Representative Moran", ["Representative Griffith","Morgan Griffith"], "Representative Wolf", ["Representative Connolly","Gerry Connolly"], "Representative Inslee", "Representative Larsen", ["Representative Herrera Beutler","Herrera Beutler"], "Representative Hastings", "Representative McMorris Rodgers", ["Representative Dicks","Norm Dicks"], "Representative McDermott", ["Representative Reichert","Dave Reichert"], "Representative Smith", "Representative McKinley", "Representative Capito", "Representative Rahall", "Representative Ryan", "Representative Baldwin", "Representative Kind", "Representative Moore", ["Representative Sensenbrenner","James Sensenbrenner"], ["Representative Petri","Tom Petri"], "Representative Duffy", "Representative Ribble", "Representative Lummis"]
rep_list1 = ["Rep. Bonner", "Rep. Roby", "Rep. Aderholt", "Rep. Brooks", "Rep. Bachus", "Rep. Sewell", "Rep. Young", "Rep. Faleomavaega", "Rep. Gosar", "Rep. Franks", "Rep. Quayle", "Rep. Pastor", "Rep. Schweikert", "Rep. Flake", "Rep. Grijalva", "Rep. Giffords", "Rep. Barber", "Rep. Crawford", "Rep. Griffin", "Rep. Womack", "Rep. Ross", "Rep. Thompson", "Rep. Herger", "Rep. Lungren", "Rep. McClintock", "Rep. Matsui", "Rep. Woolsey", "Rep. Miller", "Rep. Pelosi", "Rep. Lee", "Rep. Garamendi", "Rep. McNerney", "Rep. Speier", "Rep. Stark", "Rep. Eshoo", "Rep. Honda", "Rep. Lofgren", "Rep. Farr", "Rep. Cardoza", "Rep. Denham", "Rep. Costa", "Rep. Nunes", "Rep. McCarthy", "Rep. Capps", "Rep. Gallegly", "Rep. McKeon", "Rep. Dreier", "Rep. Sherman", "Rep. Berman", "Rep. Schiff", "Rep. Waxman", "Rep. Becerra", "Rep. Chu", "Rep. Bass", "Rep. Roybal-Allard", "Rep. Waters", "Rep. Harman", "Rep. Hahn", "Rep. Richardson", "Rep. Napolitano", "Rep. Sanchez", "Rep. Royce", "Rep. Lewis", "Rep. Miller", "Rep. Baca", "Rep. Calvert", "Rep. Bono_mack", "Rep. Rohrabacher", "Rep. Sanchez", "Rep. Campbell", "Rep. Issa", "Rep. Bilbray", "Rep. Filner", "Rep. Hunter", "Rep. Davis", "Rep. DeGette", "Rep. Polis", "Rep. Tipton", "Rep. Gardner", "Rep. Lamborn", "Rep. Coffman", "Rep. Perlmutter", "Rep. Larson", "Rep. Courtney", "Rep. DeLauro", "Rep. Himes", "Rep. Murphy", "Rep. Carney", "Rep. Norton", "Rep. Miller", "Rep. Southerland", "Rep. Brown", "Rep. Crenshaw", "Rep. Nugent", "Rep. Stearns", "Rep. Mica", "Rep. Webster", "Rep. Bilirakis", "Rep. Young", "Rep. Castor", "Rep. Ross", "Rep. Buchanan", "Rep. Mack", "Rep. Posey", "Rep. Rooney", "Rep. Wilson", "Rep. Ros-Lehtinen", "Rep. Deutch", "Rep. Wasserman Schultz", "Rep. Diaz-Balart", "Rep. West", "Rep. Hastings", "Rep. Adams", "Rep. Rivera", "Rep. Kingston", "Rep. Bishop", "Rep. Westmoreland", "Rep. Johnson", "Rep. Lewis", "Rep. Price", "Rep. Woodall", "Rep. Scott", "Rep. Graves", "Rep. Broun", "Rep. Gingrey", "Rep. Barrow", "Rep. Scott", "Rep. Bordallo", "Rep. Hanabusa", "Rep. Hirono", "Rep. Labrador", "Rep. Simpson", "Rep. Rush", "Rep. Jackson", "Rep. Lipinski", "Rep. Gutierrez", "Rep. Quigley", "Rep. Roskam", "Rep. Davis", "Rep. Walsh", "Rep. Schakowsky", "Rep. Dold", "Rep. Kinzinger", "Rep. Costello", "Rep. Biggert", "Rep. Hultgren", "Rep. Manzullo", "Rep. Schilling", "Rep. Schock", "Rep. Shimkus", "Rep. Visclosky", "Rep. Donnelly", "Rep. Stutzman", "Rep. Rokita", "Rep. Burton", "Rep. Pence", "Rep. Carson", "Rep. Bucshon", "Rep. Young", "Rep. Braley", "Rep. Loebsack", "Rep. Boswell", "Rep. Latham", "Rep. King", "Rep. Huelskamp", "Rep. Jenkins", "Rep. Yoder", "Rep. Pompeo", "Rep. Whitfield", "Rep. Guthrie", "Rep. Yarmuth", "Rep. Davis", "Rep. Rogers", "Rep. Chandler", "Rep. Scalise", "Rep. Richmond", "Rep. Landry", "Rep. Fleming", "Rep. Alexander", "Rep. Cassidy", "Rep. Boustany", "Rep. Pingree", "Rep. Michaud", "Rep. Harris", "Rep. Ruppersberger", "Rep. Sarbanes", "Rep. Edwards", "Rep. Hoyer", "Rep. Bartlett", "Rep. Cummings", "Rep. Van Hollen", "Rep. Olver", "Rep. Neal", "Rep. McGovern", "Rep. Frank", "Rep. Tsongas", "Rep. Tierney", "Rep. Markey", "Rep. Capuano", "Rep. Lynch", "Rep. Keating", "Rep. Benishek", "Rep. Huizenga", "Rep. Amash", "Rep. Camp", "Rep. Kildee", "Rep. Upton", "Rep. Walberg", "Rep. Peters", "Rep. Miller", "Rep. McCotter", "Rep. Levin", "Rep. Clarke", "Rep. Conyers", "Rep. Dingell", "Rep. Walz", "Rep. Kline", "Rep. Paulsen", "Rep. McCollum", "Rep. Ellison", "Rep. Bachmann", "Rep. Peterson", "Rep. Cravaack", "Rep. Nunnelee", "Rep. 
Thompson", "Rep. Harper", "Rep. Palazzo", "Rep. Clay", "Rep. Akin", "Rep. Carnahan", "Rep. Hartzler", "Rep. Cleaver", "Rep. Graves", "Rep. Long", "Rep. Emerson", "Rep. Luetkemeyer", "Rep. Rehberg", "Rep. Fortenberry", "Rep. Terry", "Rep. Smith", "Rep. Berkley", "Rep. Amodei", "Rep. Heck", "Rep. Guinta", "Rep. Bass", "Rep. Andrews", "Rep. LoBiondo", "Rep. Runyan", "Rep. Smith", "Rep. Garrett", "Rep. Pallone", "Rep. Lance", "Rep. Pascrell", "Rep. Rothman", "Rep. Payne", "Rep. Frelinghuysen", "Rep. Holt", "Rep. Sires", "Rep. Heinrich", "Rep. Pearce", "Rep. Lujan", "Rep. Bishop", "Rep. Israel", "Rep. King", "Rep. McCarthy", "Rep. Ackerman", "Rep. Meeks", "Rep. Crowley", "Rep. Nadler", "Rep. Weiner", "Rep. Turner", "Rep. Towns", "Rep. Clarke", "Rep. Velazquez", "Rep. Grimm", "Rep. Maloney", "Rep. Rangel", "Rep. Serrano", "Rep. Engel", "Rep. Lowey", "Rep. Hayworth", "Rep. Gibson", "Rep. Tonko", "Rep. Hinchey", "Rep. Owens", "Rep. Hanna", "Rep. Buerkle", "Rep. Lee", "Rep. Hochul", "Rep. Higgins", "Rep. Slaughter", "Rep. Reed", "Rep. Butterfield", "Rep. Ellmers", "Rep. Jones", "Rep. Price", "Rep. Foxx", "Rep. Coble", "Rep. McIntyre", "Rep. Kissell", "Rep. Myrick", "Rep. McHenry", "Rep. Shuler", "Rep. Watt", "Rep. Miller", "Rep. Berg", "Rep. Sablan", "Rep. Chabot", "Rep. Schmidt", "Rep. Turner", "Rep. Jordan", "Rep. Latta", "Rep. Johnson", "Rep. Austria", "Rep. Boehner", "Rep. Kaptur", "Rep. Kucinich", "Rep. Fudge", "Rep. Tiberi", "Rep. Sutton", "Rep. LaTourette", "Rep. Stivers", "Rep. Renacci", "Rep. Ryan", "Rep. Gibbs", "Rep. Sullivan", "Rep. Boren", "Rep. Lucas", "Rep. Cole", "Rep. Lankford", "Rep. Wu", "Rep. Bonamici", "Rep. Walden", "Rep. Blumenauer", "Rep. DeFazio", "Rep. Schrader", "Rep. Brady", "Rep. Fattah", "Rep. Kelly", "Rep. Altmire", "Rep. Thompson", "Rep. Gerlach", "Rep. Meehan", "Rep. Fitzpatrick", "Rep. Shuster", "Rep. Marino", "Rep. Barletta", "Rep. Critz", "Rep. Schwartz", "Rep. Doyle", "Rep. Dent", "Rep. Pitts", "Rep. Holden", "Rep. Murphy", "Rep. Platts", "Rep. Pierluisi", "Rep. Cicilline", "Rep. Langevin", "Rep. Scott", "Rep. Wilson", "Rep. Duncan", "Rep. Gowdy", "Rep. Mulvaney", "Rep. Clyburn", "Rep. Noem", "Rep. Roe", "Rep. Duncan", "Rep. Fleischmann", "Rep. DesJarlais", "Rep. Cooper", "Rep. Black", "Rep. Blackburn", "Rep. Fincher", "Rep. Cohen", "Rep. Gohmert", "Rep. Poe", "Rep. Johnson", "Rep. Hall", "Rep. Hensarling", "Rep. Barton", "Rep. Culberson", "Rep. Brady", "Rep. Green", "Rep. McCaul", "Rep. Conaway", "Rep. Granger", "Rep. Thornberry", "Rep. Paul", "Rep. Hinojosa", "Rep. Reyes", "Rep. Flores", "Rep. Jackson Lee", "Rep. Neugebauer", "Rep. Gonzalez", "Rep. Smith", "Rep. Olson", "Rep. Canseco", "Rep. Marchant", "Rep. Doggett", "Rep. Burgess", "Rep. Farenthold", "Rep. Cuellar", "Rep. Green", "Rep. Johnson", "Rep. Carter", "Rep. Sessions", "Rep. Christian-Christensen", "Rep. Bishop", "Rep. Matheson", "Rep. Chaffetz", "Rep. Welch", "Rep. Wittman", "Rep. Rigell", "Rep. Scott", "Rep. Forbes", "Rep. Hurt", "Rep. Goodlatte", "Rep. Cantor", "Rep. Moran", "Rep. Griffith", "Rep. Wolf", "Rep. Connolly", "Rep. Inslee", "Rep. Larsen", "Rep. HerreraBeutler", "Rep. Hastings", "Rep. McMorris Rodgers", "Rep. Dicks", "Rep. McDermott", "Rep. Reichert", "Rep. Smith", "Rep. McKinley", "Rep. Capito", "Rep. Rahall", "Rep. Ryan", "Rep. Baldwin", "Rep. Kind", "Rep. Moore", "Rep. Sensenbrenner", "Rep. Petri", "Rep. Duffy", "Rep. Ribble", "Rep. Lummis"]
rep_list2 = ["Rep Bonner", "Rep Roby", "Rep Aderholt", "Rep Brooks", "Rep Bachus", "Rep Sewell", "Rep Young", "Rep Faleomavaega", "Rep Gosar", "Rep Franks", "Rep Quayle", "Rep Pastor", "Rep Schweikert", "Rep Flake", "Rep Grijalva", "Rep Giffords", "Rep Barber", "Rep Crawford", "Rep Griffin", "Rep Womack", "Rep Ross", "Rep Thompson", "Rep Herger", "Rep Lungren", "Rep McClintock", "Rep Matsui", "Rep Woolsey", "Rep Miller", "Rep Pelosi", "Rep Lee", "Rep Garamendi", "Rep McNerney", "Rep Speier", "Rep Stark", "Rep Eshoo", "Rep Honda", "Rep Lofgren", "Rep Farr", "Rep Cardoza", "Rep Denham", "Rep Costa", "Rep Nunes", "Rep McCarthy", "Rep Capps", "Rep Gallegly", "Rep McKeon", "Rep Dreier", "Rep Sherman", "Rep Berman", "Rep Schiff", "Rep Waxman", "Rep Becerra", "Rep Chu", "Rep Bass", "Rep Roybal-Allard", "Rep Waters", "Rep Harman", "Rep Hahn", "Rep Richardson", "Rep Napolitano", "Rep Sanchez", "Rep Royce", "Rep Lewis", "Rep Miller", "Rep Baca", "Rep Calvert", "Rep Bono_mack", "Rep Rohrabacher", "Rep Sanchez", "Rep Campbell", "Rep Issa", "Rep Bilbray", "Rep Filner", "Rep Hunter", "Rep Davis", "Rep DeGette", "Rep Polis", "Rep Tipton", "Rep Gardner", "Rep Lamborn", "Rep Coffman", "Rep Perlmutter", "Rep Larson", "Rep Courtney", "Rep DeLauro", "Rep Himes", "Rep Murphy", "Rep Carney", "Rep Norton", "Rep Miller", "Rep Southerland", "Rep Brown", "Rep Crenshaw", "Rep Nugent", "Rep Stearns", "Rep Mica", "Rep Webster", "Rep Bilirakis", "Rep Young", "Rep Castor", "Rep Ross", "Rep Buchanan", "Rep Mack", "Rep Posey", "Rep Rooney", "Rep Wilson", "Rep Ros-Lehtinen", "Rep Deutch", "Rep Wasserman Schultz", "Rep Diaz-Balart", "Rep West", "Rep Hastings", "Rep Adams", "Rep Rivera", "Rep Kingston", "Rep Bishop", "Rep Westmoreland", "Rep Johnson", "Rep Lewis", "Rep Price", "Rep Woodall", "Rep Scott", "Rep Graves", "Rep Broun", "Rep Gingrey", "Rep Barrow", "Rep Scott", "Rep Bordallo", "Rep Hanabusa", "Rep Hirono", "Rep Labrador", "Rep Simpson", "Rep Rush", "Rep Jackson", "Rep Lipinski", "Rep Gutierrez", "Rep Quigley", "Rep Roskam", "Rep Davis", "Rep Walsh", "Rep Schakowsky", "Rep Dold", "Rep Kinzinger", "Rep Costello", "Rep Biggert", "Rep Hultgren", "Rep Manzullo", "Rep Schilling", "Rep Schock", "Rep Shimkus", "Rep Visclosky", "Rep Donnelly", "Rep Stutzman", "Rep Rokita", "Rep Burton", "Rep Pence", "Rep Carson", "Rep Bucshon", "Rep Young", "Rep Braley", "Rep Loebsack", "Rep Boswell", "Rep Latham", "Rep King", "Rep Huelskamp", "Rep Jenkins", "Rep Yoder", "Rep Pompeo", "Rep Whitfield", "Rep Guthrie", "Rep Yarmuth", "Rep Davis", "Rep Rogers", "Rep Chandler", "Rep Scalise", "Rep Richmond", "Rep Landry", "Rep Fleming", "Rep Alexander", "Rep Cassidy", "Rep Boustany", "Rep Pingree", "Rep Michaud", "Rep Harris", "Rep Ruppersberger", "Rep Sarbanes", "Rep Edwards", "Rep Hoyer", "Rep Bartlett", "Rep Cummings", "Rep Van Hollen", "Rep Olver", "Rep Neal", "Rep McGovern", "Rep Frank", "Rep Tsongas", "Rep Tierney", "Rep Markey", "Rep Capuano", "Rep Lynch", "Rep Keating", "Rep Benishek", "Rep Huizenga", "Rep Amash", "Rep Camp", "Rep Kildee", "Rep Upton", "Rep Walberg", "Rep Peters", "Rep Miller", "Rep McCotter", "Rep Levin", "Rep Clarke", "Rep Conyers", "Rep Dingell", "Rep Walz", "Rep Kline", "Rep Paulsen", "Rep McCollum", "Rep Ellison", "Rep Bachmann", "Rep Peterson", "Rep Cravaack", "Rep Nunnelee", "Rep Thompson", "Rep Harper", "Rep Palazzo", "Rep Clay", "Rep Akin", "Rep Carnahan", "Rep Hartzler", "Rep Cleaver", "Rep Graves", "Rep Long", "Rep Emerson", "Rep Luetkemeyer", "Rep Rehberg", "Rep Fortenberry", "Rep Terry", "Rep Smith", "Rep 
Berkley", "Rep Amodei", "Rep Heck", "Rep Guinta", "Rep Bass", "Rep Andrews", "Rep LoBiondo", "Rep Runyan", "Rep Smith", "Rep Garrett", "Rep Pallone", "Rep Lance", "Rep Pascrell", "Rep Rothman", "Rep Payne", "Rep Frelinghuysen", "Rep Holt", "Rep Sires", "Rep Heinrich", "Rep Pearce", "Rep Lujan", "Rep Bishop", "Rep Israel", "Rep King", "Rep McCarthy", "Rep Ackerman", "Rep Meeks", "Rep Crowley", "Rep Nadler", "Rep Weiner", "Rep Turner", "Rep Towns", "Rep Clarke", "Rep Velazquez", "Rep Grimm", "Rep Maloney", "Rep Rangel", "Rep Serrano", "Rep Engel", "Rep Lowey", "Rep Hayworth", "Rep Gibson", "Rep Tonko", "Rep Hinchey", "Rep Owens", "Rep Hanna", "Rep Buerkle", "Rep Lee", "Rep Hochul", "Rep Higgins", "Rep Slaughter", "Rep Reed", "Rep Butterfield", "Rep Ellmers", "Rep Jones", "Rep Price", "Rep Foxx", "Rep Coble", "Rep McIntyre", "Rep Kissell", "Rep Myrick", "Rep McHenry", "Rep Shuler", "Rep Watt", "Rep Miller", "Rep Berg", "Rep Sablan", "Rep Chabot", "Rep Schmidt", "Rep Turner", "Rep Jordan", "Rep Latta", "Rep Johnson", "Rep Austria", "Rep Boehner", "Rep Kaptur", "Rep Kucinich", "Rep Fudge", "Rep Tiberi", "Rep Sutton", "Rep LaTourette", "Rep Stivers", "Rep Renacci", "Rep Ryan", "Rep Gibbs", "Rep Sullivan", "Rep Boren", "Rep Lucas", "Rep Cole", "Rep Lankford", "Rep Wu", "Rep Bonamici", "Rep Walden", "Rep Blumenauer", "Rep DeFazio", "Rep Schrader", "Rep Brady", "Rep Fattah", "Rep Kelly", "Rep Altmire", "Rep Thompson", "Rep Gerlach", "Rep Meehan", "Rep Fitzpatrick", "Rep Shuster", "Rep Marino", "Rep Barletta", "Rep Critz", "Rep Schwartz", "Rep Doyle", "Rep Dent", "Rep Pitts", "Rep Holden", "Rep Murphy", "Rep Platts", "Rep Pierluisi", "Rep Cicilline", "Rep Langevin", "Rep Scott", "Rep Wilson", "Rep Duncan", "Rep Gowdy", "Rep Mulvaney", "Rep Clyburn", "Rep Noem", "Rep Roe", "Rep Duncan", "Rep Fleischmann", "Rep DesJarlais", "Rep Cooper", "Rep Black", "Rep Blackburn", "Rep Fincher", "Rep Cohen", "Rep Gohmert", "Rep Poe", "Rep Johnson", "Rep Hall", "Rep Hensarling", "Rep Barton", "Rep Culberson", "Rep Brady", "Rep Green", "Rep McCaul", "Rep Conaway", "Rep Granger", "Rep Thornberry", "Rep Paul", "Rep Hinojosa", "Rep Reyes", "Rep Flores", "Rep Jackson Lee", "Rep Neugebauer", "Rep Gonzalez", "Rep Smith", "Rep Olson", "Rep Canseco", "Rep Marchant", "Rep Doggett", "Rep Burgess", "Rep Farenthold", "Rep Cuellar", "Rep Green", "Rep Johnson", "Rep Carter", "Rep Sessions", "Rep Christian-Christensen", "Rep Bishop", "Rep Matheson", "Rep Chaffetz", "Rep Welch", "Rep Wittman", "Rep Rigell", "Rep Scott", "Rep Forbes", "Rep Hurt", "Rep Goodlatte", "Rep Cantor", "Rep Moran", "Rep Griffith", "Rep Wolf", "Rep Connolly", "Rep Inslee", "Rep Larsen", "Rep HerreraBeutler", "Rep Hastings", "Rep McMorris Rodgers", "Rep Dicks", "Rep McDermott", "Rep Reichert", "Rep Smith", "Rep McKinley", "Rep Capito", "Rep Rahall", "Rep Ryan", "Rep Baldwin", "Rep Kind", "Rep Moore", "Rep Sensenbrenner", "Rep Petri", "Rep Duffy", "Rep Ribble", "Rep Lummis"]
sen_name_list = ["Jeff Sessions", "Richard Shelby", "Mark Begich", "Lisa Murkowski", "Jon Kyl", "John McCain", "John Boozman", "Mark Pryor", "Barbara Boxer", "Dianne Feinstein", "Michael Bennet", "Mark Udall", "Richard Blumenthal", "Joseph Lieberman", "Thomas Carper", "Chris Coons", "Marco Rubio", "Bill Nelson", "Saxby Chambliss", "Johnny Isakson", "Daniel Akaka", "Daniel Inouye", "Michael Crapo", "James Risch", "Mark Kirk", "Richard Durbin", "Daniel Coats", "Richard Lugar", "Chuck Grassley", "Tom Harkin", "Jerry Moran", "Pat Roberts", "Rand Paul", "Mitch McConnell", "Mary Landrieu", "David Vitter", "Susan Collins", "Olympia Snowe", "Benjamin Cardin", "Barbara Mikulski", "John Kerry", "Scott Brown", "Carl Levin", "Debbie Stabenow", "Al Franken", "Amy Klobuchar", "Thad Cochran", "Roger Wicker", "Roy Blunt", "Claire McCaskill", "Max Baucus", "Jon Tester", "Mike Johanns", "Ben Nelson", "John Ensign", "Harry Reid", "Kelly Ayotte", "Jeanne Shaheen", "Frank Lautenberg", "Robert Menéndez", "Jeff Bingaman", "Tom Udall", "Kirsten Gillibrand", "Charles Schumer", "Richard Burr", "Kay Hagan", "Kent Conrad", "John Hoeven", "Sherrod Brown", "Rob Portman", "Thomas Coburn", "James Inhofe", "Jeff Merkley", "Ron Wyden", "Robert Casey", "Pat Toomey", "Jack Reed", "Sheldon Whitehouse", "Jim DeMint", "Lindsey Graham", "John Thune", "Lamar Alexander", "Bob Corker", "John Cornyn", "Kay Bailey Hutchison", "Mike Lee", "Orrin Hatch", "Patrick Leahy", "Bernard Sanders", "Mark Warner", "Jim Webb", "Maria Cantwell", "Patty Murray", "Joe Manchin", "John D. Rockefeller", "Ron Johnson", "Herb Kohl", "John Barrasso", "Michael Enzi"]
senator_list = ["Senator Sessions", "Senator Shelby", "Senator Begich", "Senator Murkowski", "Senator Kyl", "Senator McCain", "Senator Boozman", "Senator Pryor", "Senator Boxer", "Senator Feinstein", "Senator Bennet", "Senator Udall", "Senator Blumenthal", ["Senator Lieberman","Joe Lieberman"], ["Senator Carper","Tom Carper"], "Senator Coons", "Senator Rubio", "Senator Nelson", "Senator Chambliss", "Senator Isakson", "Senator Akaka", ["Senator Inouye","Dan Inouye"], ["Senator Crapo","Mike Crapo"], "Senator Risch", "Senator Kirk", ["Senator Durbin","Dick Durbin"], ["Senator Coats","Dan Coats"], ["Senator Lugar","Dick Lugar"], "Senator Grassley", "Senator Harkin", "Senator Moran", "Senator Roberts", "Senator Paul", "Senator McConnell", "Senator Landrieu", "Senator Vitter", "Senator Collins", "Senator Snowe", "Senator Cardin", "Senator Mikulski", "Senator Kerry", "Senator Brown", "Senator Levin", "Senator Stabenow", "Senator Franken", "Senator Klobuchar", "Senator Cochran", "Senator Wicker", "Senator Blunt", "Senator McCaskill", "Senator Baucus", "Senator Tester", "Senator Johanns", "Senator Nelson", "Senator Ensign", "Senator Reid", "Senator Ayotte", "Senator Shaheen", "Senator Lautenberg", "Senator Menéndez", "Senator Bingaman", "Senator Udall", "Senator Gillibrand", "Senator Schumer", "Senator Burr", "Senator Hagan", "Senator Conrad", "Senator Hoeven", "Senator Brown", "Senator Portman", ["Senator Coburn","Tom Coburn"], ["Senator Inhofe","Jim Inhofe"], "Senator Merkley", "Senator Wyden", ["Senator Casey","Bob Casey"], "Senator Toomey", "Senator Reed", "Senator Whitehouse", "Senator DeMint", "Senator Graham", "Senator Thune", "Senator Alexander", "Senator Corker", "Senator Cornyn", "Senator Hutchison", "Senator Lee", "Senator Hatch", ["Senator Leahy","Pay Leahy"], ["Senator Sanders","Bernie Sanders"], "Senator Warner", "Senator Webb", "Senator Cantwell", "Senator Murray", "Senator Manchin", ["Senator Rockefeller","Jay Rockefeller"], "Senator Johnson", "Senator Kohl", "Senator Barrasso", ["Senator Enzi","Mike Enzi"]]
sen_list1 = ["Sen. Sessions", "Sen. Shelby", "Sen. Begich", "Sen. Murkowski", "Sen. Kyl", "Sen. McCain", "Sen. Boozman", "Sen. Pryor", "Sen. Boxer", "Sen. Feinstein", "Sen. Bennet", "Sen. Udall", "Sen. Blumenthal", "Sen. Lieberman", "Sen. Carper", "Sen. Coons", "Sen. Rubio", "Sen. Nelson", "Sen. Chambliss", "Sen. Isakson", "Sen. Akaka", "Sen. Inouye", "Sen. Crapo", "Sen. Risch", "Sen. Kirk", "Sen. Durbin", "Sen. Coats", "Sen. Lugar", "Sen. Grassley", "Sen. Harkin", "Sen. Moran", "Sen. Roberts", "Sen. Paul", "Sen. McConnell", "Sen. Landrieu", "Sen. Vitter", "Sen. Collins", "Sen. Snowe", "Sen. Cardin", "Sen. Mikulski", "Sen. Kerry", "Sen. Brown", "Sen. Levin", "Sen. Stabenow", "Sen. Franken", "Sen. Klobuchar", "Sen. Cochran", "Sen. Wicker", "Sen. Blunt", "Sen. McCaskill", "Sen. Baucus", "Sen. Tester", "Sen. Johanns", "Sen. Nelson", "Sen. Ensign", "Sen. Reid", "Sen. Ayotte", "Sen. Shaheen", "Sen. Lautenberg", "Sen. Menéndez", "Sen. Bingaman", "Sen. Udall", "Sen. Gillibrand", "Sen. Schumer", "Sen. Burr", "Sen. Hagan", "Sen. Conrad", "Sen. Hoeven", "Sen. Brown", "Sen. Portman", "Sen. Coburn", "Sen. Inhofe", "Sen. Merkley", "Sen. Wyden", "Sen. Casey", "Sen. Toomey", "Sen. Reed", "Sen. Whitehouse", "Sen. DeMint", "Sen. Graham", "Sen. Thune", "Sen. Alexander", "Sen. Corker", "Sen. Cornyn", "Sen. Hutchison", "Sen. Lee", "Sen. Hatch", "Sen. Leahy", "Sen. Sanders", "Sen. Warner", "Sen. Webb", "Sen. Cantwell", "Sen. Murray", "Sen. Manchin", "Sen. Rockefeller", "Sen. Johnson", "Sen. Kohl", "Sen. Barrasso", "Sen. Enzi"]
sen_list2 = ["Sen Sessions", "Sen Shelby", "Sen Begich", "Sen Murkowski", "Sen Kyl", "Sen McCain", "Sen Boozman", "Sen Pryor", "Sen Boxer", "Sen Feinstein", "Sen Bennet", "Sen Udall", "Sen Blumenthal", "Sen Lieberman", "Sen Carper", "Sen Coons", "Sen Rubio", "Sen Nelson", "Sen Chambliss", "Sen Isakson", "Sen Akaka", "Sen Inouye", "Sen Crapo", "Sen Risch", "Sen Kirk", "Sen Durbin", "Sen Coats", "Sen Lugar", "Sen Grassley", "Sen Harkin", "Sen Moran", "Sen Roberts", "Sen Paul", "Sen McConnell", "Sen Landrieu", "Sen Vitter", "Sen Collins", "Sen Snowe", "Sen Cardin", "Sen Mikulski", "Sen Kerry", "Sen Brown", "Sen Levin", "Sen Stabenow", "Sen Franken", "Sen Klobuchar", "Sen Cochran", "Sen Wicker", "Sen Blunt", "Sen McCaskill", "Sen Baucus", "Sen Tester", "Sen Johanns", "Sen Nelson", "Sen Ensign", "Sen Reid", "Sen Ayotte", "Sen Shaheen", "Sen Lautenberg", "Sen Menéndez", "Sen Bingaman", "Sen Udall", "Sen Gillibrand", "Sen Schumer", "Sen Burr", "Sen Hagan", "Sen Conrad", "Sen Hoeven", "Sen Brown", "Sen Portman", "Sen Coburn", "Sen Inhofe", "Sen Merkley", "Sen Wyden", "Sen Casey", "Sen Toomey", "Sen Reed", "Sen Whitehouse", "Sen DeMint", "Sen Graham", "Sen Thune", "Sen Alexander", "Sen Corker", "Sen Cornyn", "Sen Hutchison", "Sen Lee", "Sen Hatch", "Sen Leahy", "Sen Sanders", "Sen Warner", "Sen Webb", "Sen Cantwell", "Sen Murray", "Sen Manchin", "Sen Rockefeller", "Sen Johnson", "Sen Kohl", "Sen Barrasso", "Sen Enzi"]
def read_json(json_fp):
'''Reads a json and returns the enclosed data.'''
with open(json_fp, 'r') as json_file:
data = json.load(json_file)
return data
def increment_key(dictionary, key1, key2, party, gender, state, startdate, leadership_title):
'''If the dictionary has the key, increment the value, otherwise initialize the key with value 1.
key1 is the sender and key2 is the congressman being mentioned'''
if key1 in dictionary:
if key2 in dictionary[key1]:
dictionary[key1][key2] += 1
else:
dictionary[key1][key2] = 1
else:
dictionary[key1] = {key2:1,'party':party,'gender':gender,'state':state,'startdate':startdate,'leadership_title':leadership_title}
def generate_names(entry):
'''Generates a list of the possible names from a given sender'''
name = entry['firstname'] + ' ' + entry['lastname']
name1 = entry['firstname'] + ' ' + entry['middlename'] + ' ' + entry['lastname']
name2 = entry['firstname'] + ' ' + entry['lastname'] + ' ' + entry['namemod']
name3 = entry['firstname'] + ' ' + entry['middlename'] +' ' + entry['lastname'] + ' ' + entry['namemod']
if not entry['nickname'] == '':
name4 = entry['nickname'] + ' ' + entry['lastname']
name5 = entry['nickname'] + ' ' + entry['middlename'] + ' ' + entry['lastname']
name6 = entry['nickname'] + ' ' + entry['lastname'] + ' ' + entry['namemod']
name7 = entry['nickname'] + ' ' + entry['middlename'] +' ' + entry['lastname'] + ' ' + entry['namemod']
name8 = entry['firstname'] + ' ' + entry['nickname'] + ' ' + entry['lastname']
if not entry['middlename'] == '':
            name9 = entry['firstname'] + ' ' + entry['middlename'][0] + '. ' + entry['lastname']
            name10 = entry['firstname'] + ' ' + entry['middlename'][0] + '. ' + entry['lastname'] + ' ' + entry['namemod']
            return [name, name1, name2, name3, name4, name5, name6, name7, name8, name9, name10]
        else:
            return [name, name1, name2, name3, name4, name5, name6, name7, name8]
elif not entry['middlename'] == '':
        name9 = entry['firstname'] + ' ' + entry['middlename'][0] + '. ' + entry['lastname']
        name10 = entry['firstname'] + ' ' + entry['middlename'][0] + '. ' + entry['lastname'] + ' ' + entry['namemod']
        return [name, name1, name2, name3, name9, name10]
else:
return [name, name1, name2, name3]
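#Example (hypothetical entry): firstname 'John', middlename 'Quincy', lastname 'Adams',
#nickname '' and namemod 'Jr.' would yield variants such as 'John Adams',
#'John Quincy Adams', 'John Adams Jr.' and 'John Q. Adams'.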
def search(data, name_list):
'''Searches through the emails to find the names of Congressmen
and calls increment_key if one is found.'''
congressmen_dict = {}
    bad_names = ['Representative Miller','Rep. Miller','Rep Miller','Senator Nelson','Sen. Nelson','Sen Nelson','Representative Murphy','Rep. Murphy','Rep Murphy','Representative Young','Rep. Young','Rep Young','Representative Lee','Rep. Lee','Rep Lee','Representative Thompson','Rep. Thompson','Rep Thompson']
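    #these surname-only titles are shared by several members, so bare matches on them are skipped as ambiguous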
for entry in data:
name_match = False
match_list = generate_names(entry)
for group in name_list: #this section ensures that the sender is in the 112th congress
for elem in group:
for temp in match_list:
if isinstance(elem,list):
for foo in elem:
if temp.lower() == foo.lower():
sender_name = group[0]
name_match = True
elif temp.lower() == elem.lower():
sender_name = group[0]
name_match = True
if name_match: #if the sender is in the 112th congress, continue
for group in name_list:
mention_name = group[0]
for elem in group:
if isinstance(elem,list):
for temp in elem:
if temp.lower() in entry['Body'].lower() and temp not in bad_names:
increment_key(congressmen_dict, sender_name, mention_name, entry['party'], entry['gender_label'], entry['state'], entry['startdate'],entry['leadership_title'])
elif elem.lower() in entry['Body'].lower() and elem not in bad_names:
increment_key(congressmen_dict, sender_name, mention_name, entry['party'], entry['gender_label'], entry['state'], entry['startdate'], entry['leadership_title'])
break
else:
if sender_name not in congressmen_dict:
congressmen_dict[sender_name] = {mention_name:0,'party':entry['party'],'gender':entry['gender'],'state':entry['state'],'startdate':entry['startdate'],'leadership_title':entry['leadership_title']}
if mention_name not in congressmen_dict[sender_name]:
congressmen_dict[sender_name][mention_name] = 0
return congressmen_dict
def form_lists(dictionary):
'''Forms the lists that will be written as csvs'''
return_list = [['Source','Target','Weight','Party','Gender','State','Start Date','Leadership Title']]
for entry in dictionary:
for value in dictionary[entry]:
if not (value == 'party' or value == 'gender' or value == 'state' or value == 'startdate' or value == 'leadership_title'):
if not dictionary[entry][value] == 0 and not entry == value:
append_list = [entry,value,dictionary[entry][value],dictionary[entry]['party'],dictionary[entry]['gender'],dictionary[entry]['state'],dictionary[entry]['startdate'],dictionary[entry]['leadership_title']]
return_list.append(append_list)
return return_list
def write_csv(data, csv_fp):
'''Writes a list of lists to a csv file with each list as a row.'''
with open(csv_fp, 'w') as out:
csv_out = csv.writer(out, lineterminator = '\n')
for row in data:
csv_out.writerow(row)
json_data = read_json('112th_congress.json')
rep_list_full = list(zip(rep_name_list, representative_list, rep_list1, rep_list2))
sen_list_full = list(zip(sen_name_list, senator_list, sen_list1, sen_list2))
full_name_list = rep_list_full + sen_list_full
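#Each tuple pairs a member's full name with the matching title variants ("Representative X", "Rep. X", "Rep X")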
#This section will create a .csv for Representatives
#mentions_dict = search(json_data,rep_list_full)
#lists = form_lists(mentions_dict)
#write_csv(lists, 'house_mentions_for_gephi.csv')
#This section will create a .csv for Senators
#mentions_dict = search(json_data,sen_list_full)
#lists = form_lists(mentions_dict)
#write_csv(lists, 'senate_mentions_for_gephi.csv')
mentions_dict = search(json_data,full_name_list)
lists = form_lists(mentions_dict)
write_csv(lists, 'congress_mentions_for_gephi_with_extra_info.csv')
|
[
"noreply@github.com"
] |
stephenmcardle.noreply@github.com
|
1a5130bf738612accd39b16edf85782846daa5bd
|
22d30599038a1cf1bd23127b55cd06e502160e0b
|
/WordEmbedding/sim.py
|
0c74c3b24a00ffa4cecf9711125c14ad65f4c5db
|
[] |
no_license
|
jennkimerson/hackharvard19
|
ac6e7a5c4509e569af3ddcca29ff5024029cbce5
|
c24ad7d5f625d4a31234376324d48df5cdd8b553
|
refs/heads/master
| 2020-08-22T09:39:39.962411
| 2019-10-25T14:15:10
| 2019-10-25T14:15:10
| 216,367,835
| 0
| 0
| null | 2019-10-20T13:39:50
| 2019-10-20T13:39:50
| null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
import spacy
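# Note: 'en_core_web_md' ships with word vectors (required for .similarity());
# install it first with: python -m spacy download en_core_web_md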
nlp = spacy.load('en_core_web_md')
doc = nlp(u'football soccer baseball')
football = doc[0]
soccer = doc[1]
baseball = doc[2]
print(football.similarity(soccer))
print(football.similarity(baseball))
print(baseball.similarity(soccer))
# simulates 2 users
doc1 = nlp(u'football')
football = doc1[0]
doc2 = nlp(u'soccer')
soccer = doc2[0]
print(football.similarity(soccer))
|
[
"noreply@github.com"
] |
jennkimerson.noreply@github.com
|
33ac17f018649be1b8cfd5518ce2d4cf0984535e
|
c1cc0c7f046db576f7dcbfa109e42d37fdca71d1
|
/python/algorithms/strings/unique_characters.py
|
d0718d67fecfdee543d4910b4efe0d870fbb3298
|
[] |
no_license
|
drownedout/data_structures_algorithms
|
44929f3814ae725c88d85ac18512179caab4d080
|
a443a1ba02b9c7e1c73e70287843f9e9650a346e
|
refs/heads/master
| 2020-12-26T13:11:09.157102
| 2020-04-07T22:37:05
| 2020-04-07T22:37:05
| 237,519,294
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
"""
=================
Unique Characters
=================
Determine if a string is made up of all unique characters.
"""
# Single line solution
def unique_char_one(string: str) -> bool:
return len(set(string)) == len(string)
# More verbose solution
def unique_char_two(string: str) -> bool:
characters = set()
for letter in string:
# If the letter already exists - it is not unique
if letter in characters:
return False
else:
characters.add(letter)
return True
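# Quick sanity check (illustrative addition; both implementations should agree)
if __name__ == "__main__":
    for s in ("abcdef", "aabcde", ""):
        assert unique_char_one(s) == unique_char_two(s)
    print(unique_char_one("abcdef"), unique_char_one("aabcde"))  # True False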
|
[
"danielcdonnelly@gmail.com"
] |
danielcdonnelly@gmail.com
|
c7b8fe7932b9f8c233dd2ef3c136f5cebd90cd82
|
14133349e752d1479468a9ae4792c1e85f68d504
|
/templates.py
|
22747918f385ddf57e80b5b879010cc19d0fd9e8
|
[] |
no_license
|
seaneightysix/wsgi-calculator
|
0980f6c0441883cc1f3aba17121251c319a50030
|
ac78bc9490a577da4dd83227efcac86d0fbdd773
|
refs/heads/master
| 2020-06-21T12:41:05.790660
| 2019-07-20T17:06:47
| 2019-07-20T17:06:47
| 197,453,533
| 0
| 0
| null | 2019-07-17T19:54:27
| 2019-07-17T19:54:26
| null |
UTF-8
|
Python
| false
| false
| 862
|
py
|
class Template():
    @staticmethod
    def home():
return '''
<head>
<title>Internet Programming in Python: wsgi-calculator Assignment</title>
</head>
<body>
<h1>Internet Programming in Python: wsgi-calculator Assignment</h1>
<h2>Please follow the format below to operate the calculator:</h2>
        <p>For multiplication: http://localhost:8080/multiply/3/5</p>
<p>For addition: http://localhost:8080/add/23/42</p>
<p>For subtraction: http://localhost:8080/subtract/23/42</p>
<p>For division: http://localhost:8080/divide/22/11</p>
</body>
'''
    @staticmethod
    def answer(operation, ans):
page = '''
<h1>The answer for the {} operation is: {}.</h1>
'''
return page.format(operation, ans)
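# Example usage (illustrative): Template.answer('add', 65) returns
# "<h1>The answer for the add operation is: 65.</h1>"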
|
[
"sean.eightysix@GMAIL.com"
] |
sean.eightysix@GMAIL.com
|
a9af0a62b1fb70533cb84d1aacff5daafd3a40d0
|
f3c8cc1e83782c8764c904bdfac24f2c6ba0591e
|
/wangtao/leetcode/0234.py
|
e7c5bd6cb37271586ca8215d4fa1ff25928438e0
|
[] |
no_license
|
wangtao090620/LeetCode
|
18054aa2fa52cf36f0486ac20d50d80a328400f4
|
dd6b9a07479d7a8c04672024595cf04a3ed0d067
|
refs/heads/master
| 2020-09-09T10:57:51.559497
| 2020-07-30T03:17:38
| 2020-07-30T03:17:38
| 221,428,640
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 885
|
py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# @Author : wangtao
# @Contact : wangtao090620@gmail.com
# @Time : 2019-12-28 10:12
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def isPalindrome(self, head: ListNode) -> bool:
fast = slow = head
        # when the fast pointer reaches the tail, the slow pointer is at the midpoint of the list
while fast and fast.next:
fast = fast.next.next
slow = slow.next
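        # reverse the second half of the list in place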
node = None
while slow:
nxt = slow.next
slow.next = node
node = slow
slow = nxt
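        # walk the reversed half and the front half together, comparing values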
while node:
if head.val != node.val:
return False
head = head.next
node = node.next
return True
if __name__ == '__main__':
pass
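    # Minimal smoke test (added for illustration): 1 -> 2 -> 2 -> 1 is a palindrome
    head = ListNode(1)
    head.next = ListNode(2)
    head.next.next = ListNode(2)
    head.next.next.next = ListNode(1)
    print(Solution().isPalindrome(head))  # True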
|
[
"wangtao090620@gmail.com"
] |
wangtao090620@gmail.com
|
9da447e80fc6687b168a437cd26366af2a97a0d2
|
b24a9b98e36cb77d0f622a3d5ea27447c008ea0f
|
/bbp/bbp/settings.py
|
acc5e7648649782b9add64cf4e7a3154050ca1f4
|
[
"Apache-2.0"
] |
permissive
|
ekivemark/bbp_oa
|
593eb7d2a12baf904a1f5443694c2aefc75ca73f
|
7f36f1600b9b4edfd36f829f73cf24f277a60d89
|
refs/heads/master
| 2020-06-25T22:35:20.051549
| 2014-11-22T05:35:33
| 2014-11-22T05:35:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,915
|
py
|
"""
Django settings for bbp_oa project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
APPS_DIR = os.path.join(BASE_DIR, 'bbp/apps')
sys.path.insert(0, APPS_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# Use this to review Settings at run time
DEBUG_SETTINGS = True
APPLICATION_TITLE="BlueButtonPlus"
if DEBUG_SETTINGS:
print "Application: %s" % APPLICATION_TITLE
print ""
print "BASE_DIR:%s " % BASE_DIR
print "APPS_DIR:%s " % APPS_DIR
ALLOWED_HOSTS = []
ADMINS = (
('Mark Scrimshire', 'mark@ekivemark.com'),
)
MANAGERS = ADMINS
# Application definition
INSTALLED_APPS = (
# add admin_bootstrapped items before django.contrib.admin
'django_admin_bootstrapped.bootstrap3',
'django_admin_bootstrapped',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap3',
'bootstrap_themes',
# https://django-oauth2-provider.readthedocs.org/en/latest/getting_started.html
#'provider',
#'provider.oauth2',
# http://django-oauth-toolkit.readthedocs.org/en/latest/tutorial/tutorial_01.html
'oauth2_provider',
'corsheaders',
'rest_framework',
)
AUTHENTICATION_BACKENDS = (
'oauth2_provider.backends.OAuth2Backend',
# Uncomment following if you want to access the admin
'django.contrib.auth.backends.ModelBackend',
#'...',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
'oauth2_provider.middleware.OAuth2TokenMiddleware',
)
# http://django-oauth-toolkit.readthedocs.org/en/latest/tutorial/tutorial_01.html
# Allow CORS requests from all domains (just for the scope of this tutorial):
CORS_ORIGIN_ALLOW_ALL = True
ROOT_URLCONF = 'bbp.urls'
WSGI_APPLICATION = 'bbp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DBPATH = os.path.join(BASE_DIR, 'db/db.db')
if DEBUG_SETTINGS:
print "DBPATH:",DBPATH
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': DBPATH, # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'
# TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
# STATIC_ROOT = ''
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
if DEBUG_SETTINGS:
print "STATIC_ROOT:%s" % STATIC_ROOT
ADMIN_MEDIA_PREFIX = '/static/admin'
MAIN_STATIC_ROOT = os.path.join(BASE_DIR, 'mainstatic')
if DEBUG_SETTINGS:
print "MAIN_STATIC_ROOT:%s" % MAIN_STATIC_ROOT
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
MAIN_STATIC_ROOT,
# '/Users/mark/PycharmProjects/virtualenv/rb/rainbowbutton/static',
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(BASE_DIR, 'bbp/templates'),
)
TEMPLATE_VISIBLE_SETTINGS = {
# Put Strings here that you want to be visible in the templates
# then add settings_context_processor
'DEBUG',
'TEMPLATE_DEBUG',
'APPLICATION_TITLE',
}
TEMPLATE_MODULES = {
# Put the names of custom modules in this section
# This will be used by home.index to display a list of modules
# that can be called
'privacy',
'about',
'contact',
'terms',
'faq',
'admin',
'accounts/profile',
'accounts/logout',
'accounts/login',
}
TEMPLATE_CONTEXT_PROCESSORS = (
# Use a context processor to enable frequently used settings variables
# to be used in templates
'django.contrib.auth.context_processors.auth',
'bbp.settings_context_processor.settings',
)
# Default settings for bootstrap 3
BOOTSTRAP3 = {
# The URL to the jQuery JavaScript file
'jquery_url': '//code.jquery.com/jquery.min.js',
# The Bootstrap base URL
'base_url': '//netdna.bootstrapcdn.com/bootstrap/3.2.0/',
# The complete URL to the Bootstrap CSS file (None means derive it from base_url)
'css_url': None,
# The complete URL to the Bootstrap CSS file (None means no theme)
'theme_url': None,
# The complete URL to the Bootstrap JavaScript file (None means derive it from base_url)
'javascript_url': None,
# Put JavaScript in the HEAD section of the HTML document (only relevant if you use bootstrap3.html)
'javascript_in_head': False,
# Include jQuery with Bootstrap JavaScript (affects django-bootstrap3 template tags)
'include_jquery': False,
# Label class to use in horizontal forms
'horizontal_label_class': 'col-md-2',
# Field class to use in horizontal forms
'horizontal_field_class': 'col-md-4',
# Set HTML required attribute on required fields
'set_required': True,
# Set placeholder attributes to label if no placeholder is provided
'set_placeholder': True,
# Class to indicate required (better to set this in your Django form)
'required_css_class': '',
# Class to indicate error (better to set this in your Django form)
'error_css_class': 'has-error',
# Class to indicate success, meaning the field has valid input (better to set this in your Django form)
'success_css_class': 'has-success',
# Renderers (only set these if you have studied the source and understand the inner workings)
'formset_renderers':{
'default': 'bootstrap3.renderers.FormsetRenderer',
},
'form_renderers': {
'default': 'bootstrap3.renderers.FormRenderer',
},
'field_renderers': {
'default': 'bootstrap3.renderers.FieldRenderer',
'inline': 'bootstrap3.renderers.InlineFieldRenderer',
},
}
# http://django-oauth-toolkit.readthedocs.org/en/latest/rest-framework/getting_started.html
OAUTH2_PROVIDER = {
# this is the list of available scopes
'SCOPES': {'read': 'Read scope', 'write': 'Write scope', 'groups': 'Access to your groups'}
}
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'oauth2_provider.ext.rest_framework.OAuth2Authentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
}
# @login_required defaults to using settings.LOGIN_URL
# if login_url= is not defined
#LOGIN_URL='/member/login'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Make this unique, and don't share it with anybody.
# Set a placeholder value here; it will be overwritten by the value in local_settings.py
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fake_value'
# Get Local Settings that you want to keep private.
# Make sure Local_settings.py is excluded from Git
try:
from local_settings import *
except Exception as e:
pass
if DEBUG_SETTINGS:
print "SECRET_KEY:%s" % SECRET_KEY
print "================================================================"
# SECURITY WARNING: keep the secret key used in production secret!
|
[
"mscrimshire@gmail.com"
] |
mscrimshire@gmail.com
|
8cf52c690f77f0f49373c855c61be0a11742b5ad
|
50b2898f6631883f95efc109c99cda9e0e880d04
|
/WordFrequency/corpus2txt.py
|
9b06889e4835b377a8e34b3ac6b25fe754e8eab9
|
[] |
no_license
|
choldawa/Word-Frequency
|
93ea1697f66309e95839040d752d598ec1403869
|
b6dc0ac2007c1a39e5c43d428fa0c2fff253e9df
|
refs/heads/master
| 2022-09-01T00:53:21.840451
| 2022-08-23T17:12:09
| 2022-08-23T17:12:09
| 250,048,002
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,522
|
py
|
import os, sys, re
from datetime import datetime
from collections import defaultdict
N = 2 # the N of the n-gram
DIR = "corpus/data-news"
start_date = datetime.strptime('2015-11-26', "%Y-%m-%d")
def is_valid_word(w):
# our criteria to filter out non-words -- here we use a heuristic of must have a vowel and no numbers
return 1 <= len(w) <= 12 and re.search(r"[aeiouy]", w) and not re.search(r"[0-9]", w)
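# e.g. "hello" passes; "brr" (no vowel) and "b2b" (contains a digit) are rejected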
print("day word freq z")
for pth in os.listdir(DIR):
freq = defaultdict(int) # just for this file (which should be a single date), the frequencies of each word
m = re.match('([0-9]{4}-[0-9]{2}-[0-9]{2}).txt', pth) #match the date format (to ensure we only process corpus files)
if m:
dt = m.groups(None)[0]
day = (datetime.strptime(dt, "%Y-%m-%d") - start_date).days
with open(DIR+"/"+pth, 'r') as f:
for l in f: # split each line
date, time, site, url, txt = re.split(r'\t', l)
assert date == dt, "Date in filename does not match column! Something went wrong"
txt = re.sub(r'\\n', r'\n', txt) # fix the newline encoding!
txt = txt.lower() # collapse case
#print txt
words = re.findall(r'([a-z]+)', txt)
				words = list(filter(is_valid_word, words))  # list() so len() and slicing work on Python 3
#print words
# loop through words to count up frequencies on this day
				for i in range(len(words)-N+1):  # xrange was Python 2 only
chunk = words[i:(i+N)]
#print chunk
freq[' '.join(chunk)] += 1
# normalize within days
z = sum(freq.values())
for k in sorted(freq.keys()):
print(day, "\"%s\""%k, freq[k], z)
|
[
"cameron.holdaway@gmail.com"
] |
cameron.holdaway@gmail.com
|
20458b659df53879f8ccfb9be42057918fd28340
|
16877121615a8a8f882a1f1bd43bad58a26b88f0
|
/api/test_try.py
|
77d2616ee6a3072b143dc6c998cfa3ca4dbeeabf
|
[] |
no_license
|
LinaProg/wishlist
|
55282fc20176ef0a4b6e6da6fdd49a753e53fc24
|
175eb382743589009f465c7cef7ec2100657c120
|
refs/heads/master
| 2022-05-30T16:51:08.612131
| 2020-04-17T21:52:37
| 2020-04-17T21:52:37
| 201,097,447
| 0
| 0
| null | 2022-05-25T03:17:58
| 2019-08-07T17:35:45
|
Python
|
UTF-8
|
Python
| false
| false
| 489
|
py
|
# def func(x):
# return x+1
# def test_answer():
# assert func(3) ==4
import pytest
# def f():
# raise SystemExit(1)
# def test_mytest():
# with pytest.raises(SyntaxError):
# f()
# def test_kek():
# assert 'foo neggs bar' == 'foo nspam bar'
# class Person():
# def greet():
# return
# @pytest.fixture
# def person():
# return Person()
# def test_greet(person):
# greeting = Person.greet()
# assert greeting == 'Hi there'
|
[
"linasmith69@yahoo.com"
] |
linasmith69@yahoo.com
|
1c1876137f1c9717417fa9aeb3f2be22eb3301e0
|
47c964b21f4157d978b066f81124772abd15af7a
|
/venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/database.py
|
030df06db6a0def7039cd2fdd1c77039ceef308f
|
[] |
no_license
|
hvsio/ScrapperController
|
ae483e207c5ac7a89690e235a80d94c86c5195e9
|
b05ab61e2e93cd396e6318a3eaf730101c2ae69f
|
refs/heads/master
| 2022-03-28T15:00:59.109049
| 2019-12-09T23:51:53
| 2019-12-09T23:51:53
| 234,582,779
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 51,041
|
py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2017 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""PEP 376 implementation."""
from __future__ import unicode_literals
import base64
import codecs
import contextlib
import hashlib
import logging
import os
import posixpath
import sys
import zipimport
from . import DistlibException, resources
from .compat import StringIO
from .version import get_scheme, UnsupportedVersionError
from .metadata import (Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME,
LEGACY_METADATA_FILENAME)
from .util import (parse_requirement, cached_property, parse_name_and_version,
read_exports, write_exports, CSVReader, CSVWriter)
__all__ = ['Distribution', 'BaseInstalledDistribution',
'InstalledDistribution', 'EggInfoDistribution',
'DistributionPath']
logger = logging.getLogger(__name__)
EXPORTS_FILENAME = 'pydist-exports.json'
COMMANDS_FILENAME = 'pydist-commands.json'
DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED',
'RESOURCES', EXPORTS_FILENAME, 'SHARED')
DISTINFO_EXT = '.dist-info'
class _Cache(object):
"""
A simple cache mapping names and .dist-info paths to distributions
"""
def __init__(self):
"""
Initialise an instance. There is normally one for each DistributionPath.
"""
self.name = {}
self.path = {}
self.generated = False
def clear(self):
"""
Clear the cache, setting it to its initial state.
"""
self.name.clear()
self.path.clear()
self.generated = False
def add(self, dist):
"""
Add a distribution to the cache.
:param dist: The distribution to add.
"""
if dist.path not in self.path:
self.path[dist.path] = dist
self.name.setdefault(dist.key, []).append(dist)
class DistributionPath(object):
"""
Represents a set of distributions installed on a path (typically sys.path).
"""
def __init__(self, path=None, include_egg=False):
"""
Create an instance from a path, optionally including legacy (distutils/
setuptools/distribute) distributions.
:param path: The path to use, as a list of directories. If not specified,
sys.path is used.
:param include_egg: If True, this instance will look for and return legacy
distributions as well as those based on PEP 376.
"""
if path is None:
path = sys.path
self.path = path
self._include_dist = True
self._include_egg = include_egg
self._cache = _Cache()
self._cache_egg = _Cache()
self._cache_enabled = True
self._scheme = get_scheme('default')
def _get_cache_enabled(self):
return self._cache_enabled
def _set_cache_enabled(self, value):
self._cache_enabled = value
cache_enabled = property(_get_cache_enabled, _set_cache_enabled)
def clear_cache(self):
"""
Clears the internal cache.
"""
self._cache.clear()
self._cache_egg.clear()
def _yield_distributions(self):
"""
Yield .dist-info and/or .egg(-info) distributions.
"""
# We need to check if we've seen some resources already, because on
# some Linux systems (e.g. some Debian/Ubuntu variants) there are
# symlinks which alias other files in the environment.
seen = set()
for path in self.path:
finder = resources.finder_for_path(path)
if finder is None:
continue
r = finder.find('')
if not r or not r.is_container:
continue
rset = sorted(r.resources)
for entry in rset:
r = finder.find(entry)
if not r or r.path in seen:
continue
if self._include_dist and entry.endswith(DISTINFO_EXT):
possible_filenames = [METADATA_FILENAME,
WHEEL_METADATA_FILENAME,
LEGACY_METADATA_FILENAME]
for metadata_filename in possible_filenames:
metadata_path = posixpath.join(entry, metadata_filename)
pydist = finder.find(metadata_path)
if pydist:
break
else:
continue
with contextlib.closing(pydist.as_stream()) as stream:
metadata = Metadata(fileobj=stream, scheme='legacy')
logger.debug('Found %s', r.path)
seen.add(r.path)
yield new_dist_class(r.path, metadata=metadata,
env=self)
elif self._include_egg and entry.endswith(('.egg-info',
'.egg')):
logger.debug('Found %s', r.path)
seen.add(r.path)
yield old_dist_class(r.path, self)
def _generate_cache(self):
"""
Scan the path for distributions and populate the cache with
those that are found.
"""
gen_dist = not self._cache.generated
gen_egg = self._include_egg and not self._cache_egg.generated
if gen_dist or gen_egg:
for dist in self._yield_distributions():
if isinstance(dist, InstalledDistribution):
self._cache.add(dist)
else:
self._cache_egg.add(dist)
if gen_dist:
self._cache.generated = True
if gen_egg:
self._cache_egg.generated = True
@classmethod
def distinfo_dirname(cls, name, version):
"""
The *name* and *version* parameters are converted into their
filename-escaped form, i.e. any ``'-'`` characters are replaced
with ``'_'`` other than the one in ``'dist-info'`` and the one
separating the name from the version number.
:parameter name: is converted to a standard distribution name by replacing
any runs of non-alphanumeric characters with a single
``'-'``.
:type name: string
:parameter version: is converted to a standard version string. Spaces
become dots, and all other non-alphanumeric characters
(except dots) become dashes, with runs of multiple
dashes condensed to a single dash.
:type version: string
:returns: directory name
:rtype: string"""
name = name.replace('-', '_')
return '-'.join([name, version]) + DISTINFO_EXT
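    # Hedged example of the escaping described above (a sketch, not part of
    # the original module):
    #   DistributionPath.distinfo_dirname('my-dist', '1.0')
    #   -> 'my_dist-1.0.dist-info'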
def get_distributions(self):
"""
Provides an iterator that looks for distributions and returns
:class:`InstalledDistribution` or
:class:`EggInfoDistribution` instances for each one of them.
:rtype: iterator of :class:`InstalledDistribution` and
:class:`EggInfoDistribution` instances
"""
if not self._cache_enabled:
for dist in self._yield_distributions():
yield dist
else:
self._generate_cache()
for dist in self._cache.path.values():
yield dist
if self._include_egg:
for dist in self._cache_egg.path.values():
yield dist
def get_distribution(self, name):
"""
Looks for a named distribution on the path.
This function only returns the first result found, as no more than one
value is expected. If nothing is found, ``None`` is returned.
:rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution`
or ``None``
"""
result = None
name = name.lower()
if not self._cache_enabled:
for dist in self._yield_distributions():
if dist.key == name:
result = dist
break
else:
self._generate_cache()
if name in self._cache.name:
result = self._cache.name[name][0]
elif self._include_egg and name in self._cache_egg.name:
result = self._cache_egg.name[name][0]
return result
def provides_distribution(self, name, version=None):
"""
Iterates over all distributions to find which distributions provide *name*.
If a *version* is provided, it will be used to filter the results.
Note that this is a generator: every matching distribution is yielded,
not just the first one found.
:parameter version: a version specifier that indicates the version
required, conforming to the format in ``PEP-345``
:type name: string
:type version: string
"""
matcher = None
if version is not None:
try:
matcher = self._scheme.matcher('%s (%s)' % (name, version))
except ValueError:
raise DistlibException('invalid name or version: %r, %r' %
(name, version))
for dist in self.get_distributions():
# We hit a problem on Travis where enum34 was installed and doesn't
# have a provides attribute ...
if not hasattr(dist, 'provides'):
logger.debug('No "provides": %s', dist)
else:
provided = dist.provides
for p in provided:
p_name, p_ver = parse_name_and_version(p)
if matcher is None:
if p_name == name:
yield dist
break
else:
if p_name == name and matcher.match(p_ver):
yield dist
break
def get_file_path(self, name, relative_path):
"""
Return the path to a resource file.
"""
dist = self.get_distribution(name)
if dist is None:
raise LookupError('no distribution named %r found' % name)
return dist.get_resource_path(relative_path)
def get_exported_entries(self, category, name=None):
"""
Return all of the exported entries in a particular category.
:param category: The category to search for entries.
:param name: If specified, only entries with that name are returned.
"""
for dist in self.get_distributions():
r = dist.exports
if category in r:
d = r[category]
if name is not None:
if name in d:
yield d[name]
else:
for v in d.values():
yield v
class Distribution(object):
"""
A base class for distributions, whether installed or from indexes.
Either way, it must have some metadata, so that's all that's needed
for construction.
"""
build_time_dependency = False
"""
Set to True if it's known to be only a build-time dependency (i.e.
not needed after installation).
"""
requested = False
"""A boolean that indicates whether the ``REQUESTED`` metadata file is
present (in other words, whether the package was installed by user
request or it was installed as a dependency)."""
def __init__(self, metadata):
"""
Initialise an instance.
:param metadata: The instance of :class:`Metadata` describing this
distribution.
"""
self.metadata = metadata
self.name = metadata.name
self.key = self.name.lower() # for case-insensitive comparisons
self.version = metadata.version
self.locator = None
self.digest = None
self.extras = None # additional features requested
self.context = None # environment marker overrides
self.download_urls = set()
self.digests = {}
@property
def source_url(self):
"""
The source archive download URL for this distribution.
"""
return self.metadata.source_url
download_url = source_url # Backward compatibility
@property
def name_and_version(self):
"""
A utility property which displays the name and version in parentheses.
"""
return '%s (%s)' % (self.name, self.version)
@property
def provides(self):
"""
A set of distribution names and versions provided by this distribution.
:return: A set of "name (version)" strings.
"""
plist = self.metadata.provides
s = '%s (%s)' % (self.name, self.version)
if s not in plist:
plist.append(s)
return plist
def _get_requirements(self, req_attr):
md = self.metadata
logger.debug('Getting requirements from metadata %r', md.todict())
reqts = getattr(md, req_attr)
return set(md.get_requirements(reqts, extras=self.extras,
env=self.context))
@property
def run_requires(self):
return self._get_requirements('run_requires')
@property
def meta_requires(self):
return self._get_requirements('meta_requires')
@property
def build_requires(self):
return self._get_requirements('build_requires')
@property
def test_requires(self):
return self._get_requirements('test_requires')
@property
def dev_requires(self):
return self._get_requirements('dev_requires')
def matches_requirement(self, req):
"""
Say if this instance matches (fulfills) a requirement.
:param req: The requirement to match.
:rtype req: str
:return: True if it matches, else False.
"""
# Requirement may contain extras - parse to lose those
# from what's passed to the matcher
r = parse_requirement(req)
scheme = get_scheme(self.metadata.scheme)
try:
matcher = scheme.matcher(r.requirement)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only',
req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
result = False
for p in self.provides:
p_name, p_ver = parse_name_and_version(p)
if p_name != name:
continue
try:
result = matcher.match(p_ver)
break
except UnsupportedVersionError:
pass
return result
def __repr__(self):
"""
Return a textual representation of this instance.
"""
if self.source_url:
suffix = ' [%s]' % self.source_url
else:
suffix = ''
return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix)
def __eq__(self, other):
"""
See if this distribution is the same as another.
:param other: The distribution to compare with. To be equal to one
another, distributions must have the same type, name,
version and source_url.
:return: True if it is the same, else False.
"""
if type(other) is not type(self):
result = False
else:
result = (self.name == other.name and
self.version == other.version and
self.source_url == other.source_url)
return result
def __hash__(self):
"""
Compute hash in a way which matches the equality test.
"""
return hash(self.name) + hash(self.version) + hash(self.source_url)
class BaseInstalledDistribution(Distribution):
"""
This is the base class for installed distributions (whether PEP 376 or
legacy).
"""
hasher = None
def __init__(self, metadata, path, env=None):
"""
Initialise an instance.
:param metadata: An instance of :class:`Metadata` which describes the
distribution. This will normally have been initialised
from a metadata file in the ``path``.
:param path: The path of the ``.dist-info`` or ``.egg-info``
directory for the distribution.
:param env: This is normally the :class:`DistributionPath`
instance where this distribution was found.
"""
super(BaseInstalledDistribution, self).__init__(metadata)
self.path = path
self.dist_path = env
def get_hash(self, data, hasher=None):
"""
Get the hash of some data, using a particular hash algorithm, if
specified.
:param data: The data to be hashed.
:type data: bytes
:param hasher: The name of a hash implementation, supported by hashlib,
or ``None``. Examples of valid values are ``'sha1'``,
``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and
``'sha512'``. If no hasher is specified, the ``hasher``
attribute of the :class:`InstalledDistribution` instance
is used. If the hasher is determined to be ``None``, MD5
is used as the hashing algorithm.
:returns: The hash of the data. If a hasher was explicitly specified,
the returned hash will be prefixed with the specified hasher
followed by '='.
:rtype: str
"""
if hasher is None:
hasher = self.hasher
if hasher is None:
hasher = hashlib.md5
prefix = ''
else:
hasher = getattr(hashlib, hasher)
prefix = '%s=' % self.hasher
digest = hasher(data).digest()
digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
return '%s%s' % (prefix, digest)
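    # Hedged usage sketch: on an InstalledDistribution (whose hasher is
    # 'sha256'), hashing a file's bytes yields a RECORD-style prefixed entry:
    #   dist.get_hash(b'data')  ->  'sha256=<urlsafe-b64 digest, no padding>'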
class InstalledDistribution(BaseInstalledDistribution):
"""
Created with the *path* of the ``.dist-info`` directory provided to the
constructor. It reads the metadata contained in ``pydist.json`` when it is
instantiated, or uses a passed-in Metadata instance (useful for when
dry-run mode is being used).
"""
hasher = 'sha256'
def __init__(self, path, metadata=None, env=None):
self.modules = []
self.finder = finder = resources.finder_for_path(path)
if finder is None:
raise ValueError('finder unavailable for %s' % path)
if env and env._cache_enabled and path in env._cache.path:
metadata = env._cache.path[path].metadata
elif metadata is None:
r = finder.find(METADATA_FILENAME)
# Temporary - for Wheel 0.23 support
if r is None:
r = finder.find(WHEEL_METADATA_FILENAME)
# Temporary - for legacy support
if r is None:
r = finder.find('METADATA')
if r is None:
raise ValueError('no %s found in %s' % (METADATA_FILENAME,
path))
with contextlib.closing(r.as_stream()) as stream:
metadata = Metadata(fileobj=stream, scheme='legacy')
super(InstalledDistribution, self).__init__(metadata, path, env)
if env and env._cache_enabled:
env._cache.add(self)
r = finder.find('REQUESTED')
self.requested = r is not None
p = os.path.join(path, 'top_level.txt')
if os.path.exists(p):
with open(p, 'rb') as f:
data = f.read()
self.modules = data.splitlines()
def __repr__(self):
return '<InstalledDistribution %r %s at %r>' % (
self.name, self.version, self.path)
def __str__(self):
return "%s %s" % (self.name, self.version)
def _get_records(self):
"""
Get the list of installed files for the distribution
:return: A list of tuples of path, hash and size. Note that hash and
size might be ``None`` for some entries. The path is exactly
as stored in the file (which is as in PEP 376).
"""
results = []
r = self.get_distinfo_resource('RECORD')
with contextlib.closing(r.as_stream()) as stream:
with CSVReader(stream=stream) as record_reader:
# Base location is parent dir of .dist-info dir
#base_location = os.path.dirname(self.path)
#base_location = os.path.abspath(base_location)
for row in record_reader:
missing = [None for i in range(len(row), 3)]
path, checksum, size = row + missing
#if not os.path.isabs(path):
# path = path.replace('/', os.sep)
# path = os.path.join(base_location, path)
results.append((path, checksum, size))
return results
@cached_property
def exports(self):
"""
Return the information exported by this distribution.
:return: A dictionary of exports, mapping an export category to a dict
of :class:`ExportEntry` instances describing the individual
export entries, and keyed by name.
"""
result = {}
r = self.get_distinfo_resource(EXPORTS_FILENAME)
if r:
result = self.read_exports()
return result
def read_exports(self):
"""
Read exports data from a file in config.ini format.
:return: A dictionary of exports, mapping an export category to a list
of :class:`ExportEntry` instances describing the individual
export entries.
"""
result = {}
r = self.get_distinfo_resource(EXPORTS_FILENAME)
if r:
with contextlib.closing(r.as_stream()) as stream:
result = read_exports(stream)
return result
def write_exports(self, exports):
"""
Write a dictionary of exports to a file in config.ini format.
:param exports: A dictionary of exports, mapping an export category to
a list of :class:`ExportEntry` instances describing the
individual export entries.
"""
rf = self.get_distinfo_file(EXPORTS_FILENAME)
with open(rf, 'w') as f:
write_exports(exports, f)
def get_resource_path(self, relative_path):
"""
NOTE: This API may change in the future.
Return the absolute path to a resource file with the given relative
path.
:param relative_path: The path, relative to .dist-info, of the resource
of interest.
:return: The absolute path where the resource is to be found.
"""
r = self.get_distinfo_resource('RESOURCES')
with contextlib.closing(r.as_stream()) as stream:
with CSVReader(stream=stream) as resources_reader:
for relative, destination in resources_reader:
if relative == relative_path:
return destination
raise KeyError('no resource file with relative path %r '
'is installed' % relative_path)
def list_installed_files(self):
"""
Iterates over the ``RECORD`` entries and returns a tuple
``(path, hash, size)`` for each line.
:returns: iterator of (path, hash, size)
"""
for result in self._get_records():
yield result
def write_installed_files(self, paths, prefix, dry_run=False):
"""
Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any
existing ``RECORD`` file is silently overwritten.
prefix is used to determine when to write absolute paths.
"""
prefix = os.path.join(prefix, '')
base = os.path.dirname(self.path)
base_under_prefix = base.startswith(prefix)
base = os.path.join(base, '')
record_path = self.get_distinfo_file('RECORD')
logger.info('creating %s', record_path)
if dry_run:
return None
with CSVWriter(record_path) as writer:
for path in paths:
if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')):
# do not put size and hash, as in PEP-376
hash_value = size = ''
else:
size = '%d' % os.path.getsize(path)
with open(path, 'rb') as fp:
hash_value = self.get_hash(fp.read())
if path.startswith(base) or (base_under_prefix and
path.startswith(prefix)):
path = os.path.relpath(path, base)
writer.writerow((path, hash_value, size))
# add the RECORD file itself
if record_path.startswith(base):
record_path = os.path.relpath(record_path, base)
writer.writerow((record_path, '', ''))
return record_path
def check_installed_files(self):
"""
Checks that the hashes and sizes of the files in ``RECORD`` are
matched by the files themselves. Returns a (possibly empty) list of
mismatches. Each entry in the mismatch list will be a tuple consisting
of the path, 'exists', 'size' or 'hash' according to what didn't match
(existence is checked first, then size, then hash), the expected
value and the actual value.
"""
mismatches = []
base = os.path.dirname(self.path)
record_path = self.get_distinfo_file('RECORD')
for path, hash_value, size in self.list_installed_files():
if not os.path.isabs(path):
path = os.path.join(base, path)
if path == record_path:
continue
if not os.path.exists(path):
mismatches.append((path, 'exists', True, False))
elif os.path.isfile(path):
actual_size = str(os.path.getsize(path))
if size and actual_size != size:
mismatches.append((path, 'size', size, actual_size))
elif hash_value:
if '=' in hash_value:
hasher = hash_value.split('=', 1)[0]
else:
hasher = None
with open(path, 'rb') as f:
actual_hash = self.get_hash(f.read(), hasher)
if actual_hash != hash_value:
mismatches.append((path, 'hash', hash_value, actual_hash))
return mismatches
@cached_property
def shared_locations(self):
"""
A dictionary of shared locations whose keys are in the set 'prefix',
'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'.
The corresponding value is the absolute path of that category for
this distribution, and takes into account any paths selected by the
user at installation time (e.g. via command-line arguments). In the
case of the 'namespace' key, this would be a list of absolute paths
for the roots of namespace packages in this distribution.
The first time this property is accessed, the relevant information is
read from the SHARED file in the .dist-info directory.
"""
result = {}
shared_path = os.path.join(self.path, 'SHARED')
if os.path.isfile(shared_path):
with codecs.open(shared_path, 'r', encoding='utf-8') as f:
lines = f.read().splitlines()
for line in lines:
key, value = line.split('=', 1)
if key == 'namespace':
result.setdefault(key, []).append(value)
else:
result[key] = value
return result
def write_shared_locations(self, paths, dry_run=False):
"""
Write shared location information to the SHARED file in .dist-info.
:param paths: A dictionary as described in the documentation for
:meth:`shared_locations`.
:param dry_run: If True, the action is logged but no file is actually
written.
:return: The path of the file written to.
"""
shared_path = os.path.join(self.path, 'SHARED')
logger.info('creating %s', shared_path)
if dry_run:
return None
lines = []
for key in ('prefix', 'lib', 'headers', 'scripts', 'data'):
path = paths[key]
if os.path.isdir(paths[key]):
lines.append('%s=%s' % (key, path))
for ns in paths.get('namespace', ()):
lines.append('namespace=%s' % ns)
with codecs.open(shared_path, 'w', encoding='utf-8') as f:
f.write('\n'.join(lines))
return shared_path
def get_distinfo_resource(self, path):
if path not in DIST_FILES:
raise DistlibException('invalid path for a dist-info file: '
'%r at %r' % (path, self.path))
finder = resources.finder_for_path(self.path)
if finder is None:
raise DistlibException('Unable to get a finder for %s' % self.path)
return finder.find(path)
def get_distinfo_file(self, path):
"""
Returns a path located under the ``.dist-info`` directory. Returns a
string representing the path.
:parameter path: a ``'/'``-separated path relative to the
``.dist-info`` directory or an absolute path;
If *path* is an absolute path and doesn't start
with the ``.dist-info`` directory path,
a :class:`DistlibException` is raised
:type path: str
:rtype: str
"""
# Check if it is an absolute path # XXX use relpath, add tests
if path.find(os.sep) >= 0:
# it's an absolute path?
distinfo_dirname, path = path.split(os.sep)[-2:]
if distinfo_dirname != self.path.split(os.sep)[-1]:
raise DistlibException(
'dist-info file %r does not belong to the %r %s '
'distribution' % (path, self.name, self.version))
# The file must be relative
if path not in DIST_FILES:
raise DistlibException('invalid path for a dist-info file: '
'%r at %r' % (path, self.path))
return os.path.join(self.path, path)
def list_distinfo_files(self):
"""
Iterates over the ``RECORD`` entries and returns paths for each line if
the path is pointing to a file located in the ``.dist-info`` directory
or one of its subdirectories.
:returns: iterator of paths
"""
base = os.path.dirname(self.path)
for path, checksum, size in self._get_records():
# XXX add separator or use real relpath algo
if not os.path.isabs(path):
path = os.path.join(base, path)
if path.startswith(self.path):
yield path
def __eq__(self, other):
return (isinstance(other, InstalledDistribution) and
self.path == other.path)
# See http://docs.python.org/reference/datamodel#object.__hash__
__hash__ = object.__hash__
class EggInfoDistribution(BaseInstalledDistribution):
"""Created with the *path* of the ``.egg-info`` directory or file provided
to the constructor. It reads the metadata contained in the file itself, or
if the given path happens to be a directory, the metadata is read from the
file ``PKG-INFO`` under that directory."""
requested = True # as we have no way of knowing, assume it was
shared_locations = {}
def __init__(self, path, env=None):
def set_name_and_version(s, n, v):
s.name = n
s.key = n.lower() # for case-insensitive comparisons
s.version = v
self.path = path
self.dist_path = env
if env and env._cache_enabled and path in env._cache_egg.path:
metadata = env._cache_egg.path[path].metadata
set_name_and_version(self, metadata.name, metadata.version)
else:
metadata = self._get_metadata(path)
# Need to be set before caching
set_name_and_version(self, metadata.name, metadata.version)
if env and env._cache_enabled:
env._cache_egg.add(self)
super(EggInfoDistribution, self).__init__(metadata, path, env)
def _get_metadata(self, path):
requires = None
def parse_requires_data(data):
"""Create a list of dependencies from a requires.txt file.
*data*: the contents of a setuptools-produced requires.txt file.
"""
reqs = []
lines = data.splitlines()
for line in lines:
line = line.strip()
if line.startswith('['):
logger.warning('Unexpected line: quitting requirement scan: %r',
line)
break
r = parse_requirement(line)
if not r:
logger.warning('Not recognised as a requirement: %r', line)
continue
if r.extras:
logger.warning('extra requirements in requires.txt are '
'not supported')
if not r.constraints:
reqs.append(r.name)
else:
cons = ', '.join('%s%s' % c for c in r.constraints)
reqs.append('%s (%s)' % (r.name, cons))
return reqs
def parse_requires_path(req_path):
"""Create a list of dependencies from a requires.txt file.
*req_path*: the path to a setuptools-produced requires.txt file.
"""
reqs = []
try:
with codecs.open(req_path, 'r', 'utf-8') as fp:
reqs = parse_requires_data(fp.read())
except IOError:
pass
return reqs
tl_path = tl_data = None
if path.endswith('.egg'):
if os.path.isdir(path):
p = os.path.join(path, 'EGG-INFO')
meta_path = os.path.join(p, 'PKG-INFO')
metadata = Metadata(path=meta_path, scheme='legacy')
req_path = os.path.join(p, 'requires.txt')
tl_path = os.path.join(p, 'top_level.txt')
requires = parse_requires_path(req_path)
else:
# FIXME handle the case where zipfile is not available
zipf = zipimport.zipimporter(path)
fileobj = StringIO(
zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8'))
metadata = Metadata(fileobj=fileobj, scheme='legacy')
try:
data = zipf.get_data('EGG-INFO/requires.txt')
tl_data = zipf.get_data('EGG-INFO/top_level.txt').decode('utf-8')
requires = parse_requires_data(data.decode('utf-8'))
except IOError:
requires = None
elif path.endswith('.egg-info'):
if os.path.isdir(path):
req_path = os.path.join(path, 'requires.txt')
requires = parse_requires_path(req_path)
path = os.path.join(path, 'PKG-INFO')
tl_path = os.path.join(path, 'top_level.txt')
metadata = Metadata(path=path, scheme='legacy')
else:
raise DistlibException('path must end with .egg-info or .egg, '
'got %r' % path)
if requires:
metadata.add_requirements(requires)
# look for top-level modules in top_level.txt, if present
if tl_data is None:
if tl_path is not None and os.path.exists(tl_path):
with open(tl_path, 'rb') as f:
tl_data = f.read().decode('utf-8')
if not tl_data:
tl_data = []
else:
tl_data = tl_data.splitlines()
self.modules = tl_data
return metadata
def __repr__(self):
return '<EggInfoDistribution %r %s at %r>' % (
self.name, self.version, self.path)
def __str__(self):
return "%s %s" % (self.name, self.version)
def check_installed_files(self):
"""
Checks that the hashes and sizes of the files in ``RECORD`` are
matched by the files themselves. Returns a (possibly empty) list of
mismatches. Each entry in the mismatch list will be a tuple consisting
of the path, 'exists', 'size' or 'hash' according to what didn't match
(existence is checked first, then size, then hash), the expected
value and the actual value.
"""
mismatches = []
record_path = os.path.join(self.path, 'installed-files.txt')
if os.path.exists(record_path):
for path, _, _ in self.list_installed_files():
if path == record_path:
continue
if not os.path.exists(path):
mismatches.append((path, 'exists', True, False))
return mismatches
def list_installed_files(self):
"""
Iterates over the ``installed-files.txt`` entries and returns a tuple
``(path, hash, size)`` for each line.
:returns: a list of (path, hash, size)
"""
def _md5(path):
f = open(path, 'rb')
try:
content = f.read()
finally:
f.close()
return hashlib.md5(content).hexdigest()
def _size(path):
return os.stat(path).st_size
record_path = os.path.join(self.path, 'installed-files.txt')
result = []
if os.path.exists(record_path):
with codecs.open(record_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
p = os.path.normpath(os.path.join(self.path, line))
# "./" is present as a marker between installed files
# and installation metadata files
if not os.path.exists(p):
logger.warning('Non-existent file: %s', p)
if p.endswith(('.pyc', '.pyo')):
continue
#otherwise fall through and fail
if not os.path.isdir(p):
result.append((p, _md5(p), _size(p)))
result.append((record_path, None, None))
return result
def list_distinfo_files(self, absolute=False):
"""
Iterates over the ``installed-files.txt`` entries and returns paths for
each line if the path is pointing to a file located in the
``.egg-info`` directory or one of its subdirectories.
:parameter absolute: If *absolute* is ``True``, each returned path is
transformed into a local absolute path. Otherwise the
raw value from ``installed-files.txt`` is returned.
:type absolute: boolean
:returns: iterator of paths
"""
record_path = os.path.join(self.path, 'installed-files.txt')
if os.path.exists(record_path):
skip = True
with codecs.open(record_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if line == './':
skip = False
continue
if not skip:
p = os.path.normpath(os.path.join(self.path, line))
if p.startswith(self.path):
if absolute:
yield p
else:
yield line
def __eq__(self, other):
return (isinstance(other, EggInfoDistribution) and
self.path == other.path)
# See http://docs.python.org/reference/datamodel#object.__hash__
__hash__ = object.__hash__
new_dist_class = InstalledDistribution
old_dist_class = EggInfoDistribution
class DependencyGraph(object):
"""
Represents a dependency graph between distributions.
The dependency relationships are stored in an ``adjacency_list`` that maps
distributions to a list of ``(other, label)`` tuples where ``other``
is a distribution and the edge is labeled with ``label`` (i.e. the version
specifier, if such was provided). Also, for more efficient traversal, for
every distribution ``x``, a list of predecessors is kept in
``reverse_list[x]``. An edge from distribution ``a`` to
distribution ``b`` means that ``a`` depends on ``b``. If any missing
dependencies are found, they are stored in ``missing``, which is a
dictionary that maps distributions to a list of requirements that were not
provided by any other distributions.
"""
def __init__(self):
self.adjacency_list = {}
self.reverse_list = {}
self.missing = {}
def add_distribution(self, distribution):
"""Add the *distribution* to the graph.
:type distribution: :class:`distutils2.services.InstalledDistribution`
or :class:`distutils2.services.EggInfoDistribution`
"""
self.adjacency_list[distribution] = []
self.reverse_list[distribution] = []
#self.missing[distribution] = []
def add_edge(self, x, y, label=None):
"""Add an edge from distribution *x* to distribution *y* with the given
*label*.
:type x: :class:`distutils2.services.InstalledDistribution` or
:class:`distutils2.services.EggInfoDistribution`
:type y: :class:`distutils2.services.InstalledDistribution` or
:class:`distutils2.services.EggInfoDistribution`
:type label: ``str`` or ``None``
"""
self.adjacency_list[x].append((y, label))
# multiple edges are allowed, so be careful
if x not in self.reverse_list[y]:
self.reverse_list[y].append(x)
def add_missing(self, distribution, requirement):
"""
Add a missing *requirement* for the given *distribution*.
:type distribution: :class:`distutils2.services.InstalledDistribution`
or :class:`distutils2.services.EggInfoDistribution`
:type requirement: ``str``
"""
logger.debug('%s missing %r', distribution, requirement)
self.missing.setdefault(distribution, []).append(requirement)
def _repr_dist(self, dist):
return '%s %s' % (dist.name, dist.version)
def repr_node(self, dist, level=1):
"""Prints only a subgraph"""
output = [self._repr_dist(dist)]
for other, label in self.adjacency_list[dist]:
dist = self._repr_dist(other)
if label is not None:
dist = '%s [%s]' % (dist, label)
output.append(' ' * level + str(dist))
suboutput = self.repr_node(other, level + 1)
subs = suboutput.split('\n')
output.extend(subs[1:])
return '\n'.join(output)
def to_dot(self, f, skip_disconnected=True):
"""Writes a DOT output for the graph to the provided file *f*.
If *skip_disconnected* is set to ``True``, then all distributions
that are not dependent on any other distribution are skipped.
:type f: has to support ``file``-like operations
:type skip_disconnected: ``bool``
"""
disconnected = []
f.write("digraph dependencies {\n")
for dist, adjs in self.adjacency_list.items():
if len(adjs) == 0 and not skip_disconnected:
disconnected.append(dist)
for other, label in adjs:
if label is not None:
f.write('"%s" -> "%s" [label="%s"]\n' %
(dist.name, other.name, label))
else:
f.write('"%s" -> "%s"\n' % (dist.name, other.name))
if not skip_disconnected and len(disconnected) > 0:
f.write('subgraph disconnected {\n')
f.write('label = "Disconnected"\n')
f.write('bgcolor = red\n')
for dist in disconnected:
f.write('"%s"' % dist.name)
f.write('\n')
f.write('}\n')
f.write('}\n')
def topological_sort(self):
"""
Perform a topological sort of the graph.
:return: A tuple, the first element of which is a topologically sorted
list of distributions, and the second element of which is a
list of distributions that cannot be sorted because they have
circular dependencies and so form a cycle.
"""
result = []
# Make a shallow copy of the adjacency list
alist = {}
for k, v in self.adjacency_list.items():
alist[k] = v[:]
while True:
# See what we can remove in this run
to_remove = []
for k, v in list(alist.items()):
if not v:
to_remove.append(k)
del alist[k]
if not to_remove:
# What's left in alist (if anything) is a cycle.
break
# Remove from the adjacency list of others
for k, v in alist.items():
alist[k] = [(d, r) for d, r in v if d not in to_remove]
logger.debug('Moving to result: %s',
['%s (%s)' % (d.name, d.version) for d in to_remove])
result.extend(to_remove)
return result, list(alist.keys())
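    # Hedged worked example: with a single edge a -> b (a depends on b), the
    # sort returns ([b, a], []); a two-node cycle a -> b -> a instead comes
    # back as ([], [a, b]) since neither node ever has an empty adjacency list.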
def __repr__(self):
"""Representation of the graph"""
output = []
for dist, adjs in self.adjacency_list.items():
output.append(self.repr_node(dist))
return '\n'.join(output)
def make_graph(dists, scheme='default'):
"""Makes a dependency graph from the given distributions.
:parameter dists: a list of distributions
:type dists: list of :class:`distutils2.services.InstalledDistribution` and
:class:`distutils2.services.EggInfoDistribution` instances
:rtype: a :class:`DependencyGraph` instance
"""
scheme = get_scheme(scheme)
graph = DependencyGraph()
provided = {} # maps names to lists of (version, dist) tuples
# first, build the graph and find out what's provided
for dist in dists:
graph.add_distribution(dist)
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Add to provided: %s, %s, %s', name, version, dist)
provided.setdefault(name, []).append((version, dist))
# now make the edges
for dist in dists:
requires = (dist.run_requires | dist.meta_requires |
dist.build_requires | dist.dev_requires)
for req in requires:
try:
matcher = scheme.matcher(req)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only',
req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
matched = False
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
graph.add_edge(dist, provider, req)
matched = True
break
if not matched:
graph.add_missing(dist, req)
return graph
def get_dependent_dists(dists, dist):
"""Recursively generate a list of distributions from *dists* that are
dependent on *dist*.
:param dists: a list of distributions
:param dist: a distribution, member of *dists* for which we are interested
"""
if dist not in dists:
raise DistlibException('given distribution %r is not a member '
'of the list' % dist.name)
graph = make_graph(dists)
dep = [dist] # dependent distributions
todo = graph.reverse_list[dist] # list of nodes we should inspect
while todo:
d = todo.pop()
dep.append(d)
for succ in graph.reverse_list[d]:
if succ not in dep:
todo.append(succ)
dep.pop(0) # remove dist from dep, was there to prevent infinite loops
return dep
def get_required_dists(dists, dist):
"""Recursively generate a list of distributions from *dists* that are
required by *dist*.
:param dists: a list of distributions
:param dist: a distribution, member of *dists* for which we are interested
"""
if dist not in dists:
raise DistlibException('given distribution %r is not a member '
'of the list' % dist.name)
graph = make_graph(dists)
req = [] # required distributions
todo = graph.adjacency_list[dist] # list of nodes we should inspect
while todo:
d = todo.pop()[0]
req.append(d)
for pred in graph.adjacency_list[d]:
if pred not in req:
todo.append(pred)
return req
def make_dist(name, version, **kwargs):
"""
A convenience method for making a dist given just a name and version.
"""
summary = kwargs.pop('summary', 'Placeholder for summary')
md = Metadata(**kwargs)
md.name = name
md.version = version
md.summary = summary or 'Placeholder for summary'
return Distribution(md)
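# Hedged demo (not part of the original module): enumerate what is installed
# on sys.path and topologically sort the resulting dependency graph.
if __name__ == '__main__':
    dp = DistributionPath(include_egg=True)
    dists = list(dp.get_distributions())
    for d in dists:
        print(d.name_and_version)
    graph = make_graph(dists)
    ordered, cyclic = graph.topological_sort()
    print('%d sorted, %d in cycles' % (len(ordered), len(cyclic)))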
|
[
"patrickvibild@gmail.com"
] |
patrickvibild@gmail.com
|
2b6663470e3ee010d70b708247f8c850bd6682ac
|
16bc55e28eabc5f26b281d25e459458e34bfca44
|
/urls.py
|
f028ad41d1b049563169c191d8cffd08b5bdcb0c
|
[] |
no_license
|
980124/-
|
302c744241c237de967ffacdfa5d04d24772d50c
|
5ec6045c831dccee74a2c41a62c7bb3dd32062be
|
refs/heads/master
| 2020-06-30T08:27:30.153357
| 2019-09-06T06:45:50
| 2019-09-06T06:45:50
| 200,778,194
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 862
|
py
|
"""wordcount URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from . import function
urlpatterns = [
path('admin/', admin.site.urls),
path('', function.home),
path('count/', function.count),
]
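# Hedged sketch (an assumption, not part of this project): the `function`
# module imported above presumably defines `home` and `count` views roughly
# like this:
#   from django.shortcuts import render
#   def home(request):
#       return render(request, 'home.html')
#   def count(request):
#       words = request.GET.get('fulltext', '').split()
#       return render(request, 'count.html', {'count': len(words)})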
|
[
"noreply@github.com"
] |
980124.noreply@github.com
|
d8c4cdf133c9119b5c8825b538354dd78e455528
|
d02f9b972dc29160955456d15082e24bf7ec231b
|
/lumin/nn/models/layers/self_attention.py
|
2c2170fd68544955a08fcaed2711904ba8cb4318
|
[
"Apache-2.0"
] |
permissive
|
GilesStrong/lumin
|
fc644747f77ccfc064cab813b8bf1f5bea965062
|
bcaf35731ef48bc6184d090a5180fbc33a3dfe6e
|
refs/heads/master
| 2023-05-26T15:17:47.564892
| 2023-04-28T00:13:10
| 2023-04-28T00:13:10
| 163,840,693
| 48
| 16
|
Apache-2.0
| 2023-04-14T01:14:57
| 2019-01-02T12:52:32
|
Python
|
UTF-8
|
Python
| false
| false
| 4,573
|
py
|
from typing import Callable, Optional, Any
import math
from fastcore.all import store_attr
import torch
from torch import nn, Tensor
from .activations import lookup_act
from .batchnorms import LCBatchNorm1d
from ..initialisations import lookup_normal_init
__all__ = ['SelfAttention', 'OffsetSelfAttention']
class SelfAttention(nn.Module):
r'''
Class for applying self attention (Vaswani et al. 2017 (https://arxiv.org/abs/1706.03762)) to features per vertex.
Arguments:
n_fpv: number of features per vertex to expect
n_a: width of self attention representation
do: dropout rate to be applied to hidden layers in the NNs
bn: whether batch normalisation should be applied to hidden layers in the NNs
act: activation function to apply to hidden layers in the NNs
lookup_init: function taking choice of activation function, number of inputs, and number of outputs and returning a function to initialise layer weights.
lookup_act: function taking choice of activation function and returning an activation function layer
bn_class: class to use for BatchNorm, default is :class:`~lumin.nn.models.layers.batchnorms.LCBatchNorm1d`
'''
def __init__(self, n_fpv:int, n_a:int, do:float=0, bn:bool=False, act:str='relu',
lookup_init:Callable[[str,Optional[int],Optional[int]],Callable[[Tensor],None]]=lookup_normal_init,
lookup_act:Callable[[str],Any]=lookup_act, bn_class:Callable[[int],nn.Module]=nn.BatchNorm1d):
super().__init__()
store_attr()
self.q = self._get_layer(self.n_fpv, self.n_a)
self.k = self._get_layer(self.n_fpv, self.n_a)
self.v = self._get_layer(self.n_fpv, self.n_fpv)
self.out = self._get_out()
def _get_out(self) -> nn.Sequential:
layers = [self._get_layer(self.n_fpv, self.n_fpv)]
if self.act != 'linear': layers.append(self.lookup_act(self.act))
if self.bn: layers.append(LCBatchNorm1d(self.bn_class(self.n_fpv)))
if self.do:
if self.act == 'selu': layers.append(nn.AlphaDropout(self.do))
else: layers.append(nn.Dropout(self.do))
return nn.Sequential(*layers)
def _get_layer(self, fan_in:int, fan_out:int) -> nn.Module:
l = nn.Linear(fan_in, fan_out)
self.lookup_init('linear', fan_in, fan_out)(l.weight)
nn.init.zeros_(l.bias)
return l
def forward(self, x:Tensor) -> Tensor: # B N C
r'''
Augments features per vertex
Arguments:
x: incoming data (batch x vertices x features)
Returns:
augmented features (batch x vertices x new features)
'''
a = (self.q(x)@self.k(x).transpose(-1,-2))/math.sqrt(self.n_a) # B N N
a = torch.softmax(a, dim=-1) # Softmax norm columns
sa = a@self.v(x) # B N C
return x+self.out(sa) # B N C
def get_out_size(self) -> int: return self.n_fpv
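# Hedged usage sketch: input and output shapes match because the attention
# output is added residually, e.g.
#   sa = SelfAttention(n_fpv=3, n_a=8)
#   x = torch.randn(2, 5, 3)   # batch x vertices x features
#   sa(x).shape                # -> torch.Size([2, 5, 3])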
class OffsetSelfAttention(SelfAttention):
r'''
Class for applying offset-self attention (Guo et al. 2020 (https://arxiv.org/abs/2012.09688)) to features per vertex.
Arguments:
n_fpv: number of features per vertex to expect
n_a: width of self attention representation (paper recommends n_fpv//4)
do: dropout rate to be applied to hidden layers in the NNs
bn: whether batch normalisation should be applied to hidden layers in the NNs
act: activation function to apply to hidden layers in the NNs
lookup_init: function taking choice of activation function, number of inputs, and number of outputs and returning a function to initialise layer weights.
lookup_act: function taking choice of activation function and returning an activation function layer
bn_class: class to use for BatchNorm, default is :class:`~lumin.nn.models.layers.batchnorms.LCBatchNorm1d`
'''
def forward(self, x:Tensor) -> Tensor: # B N C
r'''
Augments features per vertex
Arguments:
x: incoming data (batch x vertices x features)
Returns:
augmented features (batch x vertices x new features)
'''
a = self.q(x)@self.k(x).transpose(-1,-2) # B N N
a = torch.softmax(a, dim=-2) # Softmax norm rows
a = a/(a.sum(-1, keepdim=True)+1e-17) # L1 norm columns
sa = a@self.v(x) # B N C
return x+self.out(x-sa) # B N C
|
[
"giles.strong@outlook.com"
] |
giles.strong@outlook.com
|
3a9f276b01003b3646492b2f4dda8c7d70f6f666
|
6e63894d2289feece585eb4d6b0085ecf9609c86
|
/week7/draft3_arcs.py
|
14b91740782135005e8bfb8311e6a8a0bf82cf85
|
[] |
no_license
|
Homerade/DA-520
|
af592149cbb654b9564d931d5e7a6f644e3f4b04
|
0a4422dcdbbb41ec85d8f0e7ac8585adf1e96ae4
|
refs/heads/master
| 2021-01-13T08:59:02.059857
| 2016-12-10T04:52:16
| 2016-12-10T04:52:16
| 68,859,670
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,238
|
py
|
from PIL import Image
from PIL import ImageDraw
im = Image.new("RGB", (1000,1000), (255,255,255))
draw = ImageDraw.Draw(im, 'RGBA')
data = [
{'year' : 1820, 'figure' : 8385},
{'year' : 1821, 'figure' : 9127},
{'year' : 1822, 'figure' : 6911},
{'year' : 1823, 'figure' : 6354},
{'year' : 1824, 'figure' : 7912},
{'year' : 1825, 'figure' : 10199},
{'year' : 1826, 'figure' : 10837},
{'year' : 1827, 'figure' : 18875},
]
x = (data[0]['year'])-1820
y = 490
degree = (data[0]['figure'])/55
# x = x+10
x1 = (data[1]['year'])-1820
degree1 = (data[1]['figure'])/55
draw.arc((x+10,x+10,y+10,y+10),0,degree,fill=0)
draw.arc((x1+20,x1+20,y+20,y+20),0,degree1,fill=0)
# draw.arc((10,10,510,510),0,350,fill=50)
# draw.arc((20,20,520,520),0,100,fill=100)
# draw.arc((30,30,530,530),0,250,fill=150)
# draw.arc((40,40,540,540),0,177,fill=200)
# draw.arc((50,50,550,550),0,44,fill=250)
# distance from the last year = difference between figure from previous year
# arc = ratio of figure on 0-360 scale
# color = progressing RGB
#make it go in a circle instead of a diagonal line
im.show()
im.save("mydatavis.png")
# color = figure [times] .01
# parabola curves up at a distance of the difference from the year before
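# Hedged sketch: the two hard-coded arcs above generalise to all eight years
# with a loop (same /55 angle scaling, concentric ~10px offsets):
#   for i, d in enumerate(data):
#       off = 10 * (i + 1)
#       draw.arc((off, off, y + off, y + off), 0, d['figure'] / 55, fill=0)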
|
[
"mizzsvizz@gmail.com"
] |
mizzsvizz@gmail.com
|
f2a26eb8fc4d0b681cb827a0d2f3b3b42e19e747
|
2cdca093365c498cd11f7f03385e9c9310a80113
|
/reltest/util.py
|
6d4a3c1695b5d2055f4d54d0ba58a2e26391f2cd
|
[
"MIT"
] |
permissive
|
jenninglim/model-comparison-test
|
3565a98253e05950d1dc55c39e0b8480c12b6166
|
0024d1ff76ef71a25610b368cc364a59bc672961
|
refs/heads/master
| 2020-07-20T21:59:02.231673
| 2020-01-08T18:13:28
| 2020-01-08T18:13:28
| 206,715,600
| 5
| 2
|
MIT
| 2020-01-08T18:13:29
| 2019-09-06T04:54:06
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,376
|
py
|
import numpy as np
import time
class NumpySeedContext(object):
"""
A context manager to reset the random seed by numpy.random.seed(..).
Set the seed back at the end of the block.
From Wittawat Jitkrittum.
See https://github.com/wittawatj/kernel-gof/blob/master/kgof/util.py.
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
rstate = np.random.get_state()
self.cur_state = rstate
np.random.seed(self.seed)
return self
def __exit__(self, *args):
np.random.set_state(self.cur_state)
# end NumpySeedContext
class ContextTimer(object):
"""
A class used to time an execution of a code snippet.
Use it with with .... as ...
For example,
with ContextTimer() as t:
# do something
time_spent = t.secs
From https://www.huyng.com/posts/python-performance-analysis
"""
def __init__(self, verbose=False):
self.verbose = verbose
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, *args):
self.end = time.time()
self.secs = self.end - self.start
if self.verbose:
print('elapsed time: %f ms' % (self.secs*1000))
# end class ContextTimer
def dist_matrix(X, Y):
"""
Construct a pairwise Euclidean distance matrix of size X.shape[0] x Y.shape[0]
From Wittawat Jitkrittum.
See https://github.com/wittawatj/kernel-gof/blob/master/kgof/util.py.
"""
sx = np.sum(X**2, 1)
sy = np.sum(Y**2, 1)
D2 = sx[:, np.newaxis] - 2.0*X.dot(Y.T) + sy[np.newaxis, :]
# to prevent numerical errors from taking sqrt of negative numbers
D2[D2 < 0] = 0
D = np.sqrt(D2)
return D
def dist2_matrix(X, Y):
"""
Construct a pairwise Euclidean distance **squared** matrix of size
X.shape[0] x Y.shape[0]
From Wittawat Jitkrittum.
See https://github.com/wittawatj/kernel-gof/blob/master/kgof/util.py.
"""
sx = np.sum(X**2, 1)
sy = np.sum(Y**2, 1)
D2 = sx[:, np.newaxis] - 2.0*np.dot(X, Y.T) + sy[np.newaxis, :]
return D2
def meddistance(X, subsample=None, mean_on_fail=True):
"""
Compute the median of pairwise distances (not distance squared) of points
in the matrix. Useful as a heuristic for setting Gaussian kernel's width.
Parameters
----------
X : n x d numpy array
mean_on_fail: True/False. If True, use the mean when the median distance is 0.
This can happen especially when the data are discrete, e.g., 0/1, and
there are slightly more 0s than 1s. In this case, the median can be 0.
Return
------
median distance
From Wittawat Jitkrittum.
See https://github.com/wittawatj/kernel-gof/blob/master/kgof/util.py.
"""
if subsample is None:
D = dist_matrix(X, X)
Itri = np.tril_indices(D.shape[0], -1)
Tri = D[Itri]
med = np.median(Tri)
if med <= 0:
# use the mean
return np.mean(Tri)
return med
else:
assert subsample > 0
rand_state = np.random.get_state()
np.random.seed(9827)
n = X.shape[0]
ind = np.random.choice(n, min(subsample, n), replace=False)
np.random.set_state(rand_state)
# recurse just once, on the subsample
return meddistance(X[ind, :], None, mean_on_fail)
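# Hedged usage sketch: the median heuristic above is commonly used to set a
# Gaussian kernel bandwidth, e.g.
#   X = np.random.randn(100, 3)
#   sigma = meddistance(X, subsample=1000)
#   gamma = 1.0 / (2 * sigma**2)   # one common parameterisation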
|
[
"noreply@github.com"
] |
jenninglim.noreply@github.com
|
4e0f1314cd0c86b210a2a07cea518ae73d822d5e
|
b9878c92b857f73ff0452fc51c822cfc9fa4dc1c
|
/watson_machine_learning_client/libs/repo/swagger_client/models/array_model_version_metrics_experiments.py
|
faa813230c70d6c8be7f742146d6b8e505b1379b
|
[] |
no_license
|
DavidCastilloAlvarado/WMLC_mod
|
35f5d84990c59b623bfdd27369fe7461c500e0a5
|
f2673b9c77bd93c0e017831ee4994f6d9789d9a1
|
refs/heads/master
| 2022-12-08T02:54:31.000267
| 2020-09-02T15:49:21
| 2020-09-02T15:49:21
| 292,322,284
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,890
|
py
|
# coding: utf-8
"""
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class ArrayModelVersionMetricsExperiments(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
ArrayModelVersionMetricsExperiments - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
}
self.attribute_map = {
}
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
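# Hedged usage sketch: this generated model declares no attributes, so
#   m = ArrayModelVersionMetricsExperiments()
#   m.to_dict()   # -> {}
#   repr(m)       # -> pformat of that empty dict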
|
[
"dcastilloa@uni.pe"
] |
dcastilloa@uni.pe
|
9ef6ceb92f36601794092fc771c1aaeafed08846
|
4b2f3531760607c2ea25434bd0d599d78c9b7b67
|
/apps/utils/__init__.py
|
422c7ccd3143519d38949ae893539659c698ffd3
|
[] |
no_license
|
yangtuothink/mxonline
|
05c17f3be1c599ba53a0eecc84bd4fa5a256b428
|
8bd6ab9e16e01e4901c00f47b4ffa598b7859107
|
refs/heads/master
| 2020-04-29T01:17:48.437042
| 2019-05-06T11:58:07
| 2019-05-06T11:58:07
| 175,725,282
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 76
|
py
|
# _*_ coding:utf-8 _*_
__author__ = "yangtuo"
__date__ = "2019/3/16 10:28"
|
[
"745206110@qq.com"
] |
745206110@qq.com
|
3f50a0c05995c671a5be627df0ded646f9272dd6
|
9ecee8a76e8516ea5f16764c3a9b8f24b56eabaa
|
/formula/cel_to_far1.py
|
89c41615354101971de6ad63bdb37a16c55d6829
|
[] |
no_license
|
Abhinavsuresh21/python-
|
35a93d255d309a6edcb76547bf54f6b40891fd1e
|
dca3c768474e20e681cacce3ce96eaf111812ff0
|
refs/heads/main
| 2023-09-02T18:59:50.962755
| 2021-10-18T06:20:27
| 2021-10-18T06:20:27
| 394,149,745
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 184
|
py
|
celsius=int(input("Enter celsius:"))
fahrenheit=(celsius*9/5)+32
print("\t\t output \n\n")
print("The entered celsius value:",celsius)
print("The calculated fahrenheit :",fahrenheit)
|
[
"noreply@github.com"
] |
Abhinavsuresh21.noreply@github.com
|
d0079b7be4a3e7f64982fa38add317d358fa9bf7
|
6ca19bc795797dcab87830d691c6b0aaacd3bfdb
|
/examples/resume_albert_eigenmetric.py
|
16fa470f01484356ef262f6008c1566688168e5a
|
[
"BSD-2-Clause"
] |
permissive
|
davidjurgens/prosocial-conversation-forecasting
|
78174e9d684a2a752f8ed84588cf54d63d91a512
|
473496d59e37a78d6246bf0f8eba3571114e29d2
|
refs/heads/main
| 2023-06-02T03:02:44.855084
| 2021-06-15T15:25:07
| 2021-06-15T15:25:07
| 338,390,002
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 791
|
py
|
from models.albert.EigenmetricRegressionSolver import EigenmetricRegressionSolver
from models.albert.AlbertForEigenmetricRegression import AlbertForEigenmetricRegression
from pathlib import Path
ROOT = Path('/shared/0/projects/prosocial/data/finalized/')
pretrained_system_name_or_path = \
ROOT / 'model_checkpoints/albert/run2/best_checkpoint_ep214498.pth'
input_dir = ROOT / 'data_cache/albert'
output_dir = ROOT / 'model_checkpoints/albert/run2_resume'
solver = EigenmetricRegressionSolver.from_pretrained(
model_constructor=AlbertForEigenmetricRegression,
pretrained_system_name_or_path=pretrained_system_name_or_path,
resume_training=True,
input_dir=input_dir,
output_dir=output_dir,
n_epoch=2,
learning_rates=2e-6)
solver.fit(num_eval_per_epoch=10)
|
[
"jiajunb@umich.edu"
] |
jiajunb@umich.edu
|
396c00c6189cb775934dbfb6254143045646d951
|
dc23ba044be5311e0515b602fd0f6c3cd4042464
|
/test/test_ui.py
|
4ea2e5f422fbda35d517ba535e574b951131352c
|
[] |
no_license
|
gfixler/tomayto
|
63143ce15f6c06536e99d12cd320464eee5461d3
|
a84679cc67f099538eb1f1b28f76b8c264c6a596
|
refs/heads/master
| 2023-08-31T15:45:44.198644
| 2023-08-16T08:57:25
| 2023-08-16T08:57:25
| 191,275,453
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
import unittest
import ui
class Test_SelectionList (unittest.TestCase):
def test_canInstantiateWithNothing (self):
sl = ui.SelectionList(createUI=False)
self.assertEqual(sl.values, [])
def test_canInstantiateWithList (self):
testList = ["one", "two", "three", "four", "five"]
sl = ui.SelectionList(testList, createUI=False)
self.assertEqual(sl.values, testList)
|
[
"gary@garyfixler.com"
] |
gary@garyfixler.com
|
4eebf9122238374366dac7ce8293e764812a7754
|
b5078eb072a327c4be87630e1e98c2018b988b1c
|
/fortniteTracker.py
|
62fd66560f0efd8751e47b9e5bea47a126fe5dec
|
[] |
no_license
|
johngraham660/fortniteTracker
|
5b554f05d10655c306ff9111c887d02c905ce5fb
|
d68ebbd96524a176163f3cba40abf0e2ede4dd36
|
refs/heads/master
| 2022-09-06T08:35:46.719231
| 2019-12-08T22:35:58
| 2019-12-08T22:35:58
| 226,734,720
| 0
| 0
| null | 2019-12-08T22:35:59
| 2019-12-08T21:31:10
|
Python
|
UTF-8
|
Python
| false
| false
| 3,858
|
py
|
#!/usr/bin/env python3
import os
import sys
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def __get_br_player_stats():
url = "https://api.fortnitetracker.com/v1/profile/"
endpoint = url + profile + '/' + epicname
headers = {'content-type': 'application/json', 'TRN-Api-Key': apikey}
# ==============================================
# Submit the request to the fortnite tracker API
# ==============================================
s = requests.Session()
r = s.get(endpoint, headers=headers, verify=False)
brps = r.json()
return brps
def __get_br_match_stats():
pass
if __name__ == "__main__":
profile = 'pc'
if os.path.isfile('credentials.txt'):
apikey, epicname = open('credentials.txt').read().strip().split(',')
else:
print("Cannot find credentials file")
sys.exit(1)
player_stats = __get_br_player_stats()
match_stats = __get_br_match_stats()
# ======================
# Get the lifetime stats
# ======================
lifetime_stats = player_stats['lifeTimeStats']
# =============================
# Create a new blank dictionary
# =============================
dicts_by_key = {}
# ========================================================
# Lifetime stats are represented in the variable lifetime_stats
# This comes in as a list of dictionaries that we need to
# iterate over.
# I am adding each dictionary to dicts_by_key which should
# result in 12 dictionary objects.
# ========================================================
for dicts in lifetime_stats:
dicts_by_key[dicts['key']] = dicts
# ============================================================
# Now set up some lifetime_total variables from the dictionaries
# ============================================================
lt_total_wins = dicts_by_key['Wins']
lt_total_kills = dicts_by_key['Kills']
lt_total_score = dicts_by_key['Score']
lt_top_25s = dicts_by_key['Top 25s']
lt_top_10s = dicts_by_key['Top 10']
lt_top_5s = dicts_by_key['Top 5s']
lt_total_matches_played = dicts_by_key['Matches Played']
# ====================================================
# I now setup list objects with the dictionary values
# I use lists so that I can reference the index value.
# ====================================================
lt_total_wins_val_list = list(lt_total_wins.values())
lt_total_kills_val_list = list(lt_total_kills.values())
lt_total_score_val_list = list(lt_total_score.values())
lt_top_25s_val_list = list(lt_top_25s.values())
lt_top_10s_val_list = list(lt_top_10s.values())
lt_top_5s_val_list = list(lt_top_5s.values())
lt_total_matches_played_val_list = list(lt_total_matches_played.values())
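    # Hedged data-shape note (an assumption about the tracker API payload):
    # each lifeTimeStats entry looks like {'key': 'Wins', 'value': '42'}, so
    #   dicts_by_key['Wins']    == {'key': 'Wins', 'value': '42'}
    #   lt_total_wins_val_list  == ['Wins', '42']   # index 1 holds the figure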
# ===========================================
# This is the final output of our application
# ===========================================
print("=======================")
print("Fortnite Tracker (v0.1)")
print("=======================")
print(("Player: {}").format(epicname))
print("\t====================")
print("\tLifetime Statistics:")
print("\t====================")
print(f"\tTotal Wins : {lt_total_wins_val_list[1]}")
print(f"\tKills : {lt_total_kills_val_list[1]}")
print(f"\tMatches Played: {lt_total_matches_played_val_list[1]}")
print(f"\tOverall Score : {lt_total_score_val_list[1]}")
print()
print(f"\t=========")
print(f"\tPlacings:")
print(f"\t=========")
print(f"\tNo. Top 25s : {lt_top_25s_val_list[1]}")
print(f"\tNo. Top 10s : {lt_top_10s_val_list[1]}")
print(f"\tNo. Top 5s : {lt_top_5s_val_list[1]}")
|
[
"johngraham660@gmail.com"
] |
johngraham660@gmail.com
|
f2237a63fc73251d11b08c32db14f2b4331d7346
|
2fdccb28d6a8a0e5e5a0023bb99e6fdb855fc7b3
|
/vagrant/database_setup.py
|
d6fbf3ac6567a21677c9e6156544231bd57d5539
|
[] |
no_license
|
jd12/fullstack-nanodegree-vm
|
93e524b2b5b910b1ea5aa3ecd41d20ba436d424b
|
54b18946f550c5f6cf0bb7a6d0059b57635c4531
|
refs/heads/master
| 2020-07-15T06:58:43.073560
| 2017-10-08T23:09:03
| 2017-10-08T23:09:03
| 45,796,631
| 0
| 0
| null | 2015-11-08T20:04:27
| 2015-11-08T20:04:26
| null |
UTF-8
|
Python
| false
| false
| 836
|
py
|
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
Base = declarative_base()
class Restaurant(Base):
    __tablename__ = 'restaurant'
    name = Column(String(80), nullable=False)
    id = Column(Integer, primary_key=True)
class MenuItem(Base):
    __tablename__ = 'menu_item'
    name = Column(String(80), nullable=False)
    id = Column(Integer, primary_key=True)
    course = Column(String(250))
    description = Column(String(250))
    price = Column(String(8))
    # every menu item belongs to exactly one restaurant
    restaurant_id = Column(Integer, ForeignKey('restaurant.id'))
    restaurant = relationship(Restaurant)
##### insert at end of file #####
engine = create_engine('sqlite:///restaurantmenu.db')
Base.metadata.create_all(engine)
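##### usage sketch (illustrative, not part of the original file) #####
# once the tables exist, rows can be added through a session bound to
# the same engine; 'Pizza Palace' is just a placeholder name
from sqlalchemy.orm import sessionmaker
DBSession = sessionmaker(bind=engine)
session = DBSession()
pizza_place = Restaurant(name='Pizza Palace')
session.add(pizza_place)
session.add(MenuItem(name='Margherita', price='$8.99', restaurant=pizza_place))
session.commit()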
|
[
"jd@cs.umass.edu"
] |
jd@cs.umass.edu
|
0295b3a9d43de5b3cccad05b2c96a3748de6199b
|
cdc20b6c6672f9d1bceacf203eebfcc1e17a3cd4
|
/env/bin/pip
|
7f374a428f9faa3d2306f4d6454d34381d90952b
|
[
"MIT"
] |
permissive
|
akatcha/storyteller
|
54a3d321292101c8fbd83e149fe42739ed41547d
|
b485dca52ebc3d22decc705705bf778d490dc082
|
refs/heads/master
| 2021-07-15T12:32:45.070469
| 2017-10-21T15:01:52
| 2017-10-21T15:01:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
#!/mnt/c/Users/Andrew/Documents/hacking/storyteller/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"akatcha@umich.edu"
] |
akatcha@umich.edu
|
|
f743314f0c24de90ca2297a53bff462db910360d
|
a48cff82e98279e19c0f759ff15974e6ace4feab
|
/ordering/util/upc2excel.py
|
a3cef929dd26a95b0f1009986174ff4aa6c5321c
|
[
"MIT"
] |
permissive
|
LamCiuLoeng/jcp
|
59cd503f3553a7fd2cc966ac9d39bac0036c2688
|
a9efaa5fe26714465a690e1bc0d820713bf11080
|
refs/heads/master
| 2021-01-22T11:20:41.772418
| 2014-05-30T09:18:04
| 2014-05-30T09:18:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,689
|
py
|
# -*- coding: utf-8 -*-
import shutil, os, zipfile, traceback, random, logging
from datetime import datetime
from ordering.util.excel_helper import *
from common import serveFile, Date2Text
from ordering.model import *
from tg import request, config, flash, redirect
import time, zlib
from ordering.util.bin2hex import *
epcobj=upcToepc()
def returnEPC(beginSerail, upc, qty):
global epcobj
return epcobj.run(beginSerail, upc, qty)
def upcExportBatch(id_list, qty_list, begin_list, job):
try:
fileDir=config.rfid_temp_template
if not os.path.exists(fileDir):
os.makedirs(fileDir)
dlzipFile=os.path.join(fileDir, "%s.zip"%job.no)
###########
#templatePath = os.path.join(os.path.abspath(os.path.curdir),"TEMPLATE/RFID_TEMPLATE.xls")
templatePath=os.path.join(config.rfid_template, "RFID_TEMPLATE.xls")
rm=random.randint(1, 1000)
copyTemplatePath=os.path.join(fileDir, "RFID_TEMPLATE_%d.xls"%rm)
    # copy the template to the dest folder to avoid conflicts.
shutil.copyfile(templatePath, copyTemplatePath)
fileList=[]
for index, id in enumerate(id_list):
row=DBSession.query(Item).get(id)
#update the last quantity of item.
row.last_epc=int(begin_list[index])-1+int(qty_list[index])
result=[]
serial_no=int(begin_list[index])
epc_list=returnEPC(int(begin_list[index]), row.upc, int(qty_list[index]))
for epc in epc_list:
result.append((row.category.name, \
row.part, \
row.style_no, \
row.size, \
row.format, \
row.garment_description, \
row.style_size, \
row.epc_style, \
row.upc, \
row.rn_no, \
"", \
row.epc_logo, \
row.gtin, \
row.vendor, \
qty_list[index], \
serial_no,
epc))
serial_no+=1
fileList.append(_upc2Excel(row.upc, result, copyTemplatePath, fileDir, row.part, row.style_no))
for d in job.details :
if d.item_id==int(id) :
d.epc_code_begin=epc_list[0]
d.epc_code_end=epc_list[-1]
break
dlzip=zipfile.ZipFile(dlzipFile, "w", zlib.DEFLATED)
for fl in fileList:
logging.info(os.path.abspath(fl))
dlzip.write(os.path.abspath(str(fl)), os.path.basename(str(fl)))
dlzip.close()
try:
for fl in fileList:
os.remove(fl)
os.remove(copyTemplatePath)
except:
pass
return dlzip
except:
traceback.print_exc()
return None
# for jcp, 20120202
def genProducionFile(upc, beginNo, qty):
try:
fileDir = config.rfid_temp_template
if not os.path.exists(fileDir):
os.makedirs(fileDir)
dlzipFile = os.path.join(fileDir, "UPC-%s_%s.zip" % (upc, datetime.now().strftime("%Y%m%d%H%M%S")))
templatePath = os.path.join(config.rfid_template, "RFID_TEMPLATE.xls")
rm = random.randint(1, 1000)
copyTemplatePath = os.path.join(fileDir, "RFID_TEMPLATE_%d.xls"%rm)
        # copy the template to the dest folder to avoid conflicts.
shutil.copyfile(templatePath, copyTemplatePath)
fileList = []
result = []
for index, epc in enumerate(returnEPC(beginNo, upc, qty)):
result.append((upc, 1, beginNo+index, epc))
fileList.append(_upc2Excel(upc, result, copyTemplatePath, fileDir))
dlzip = zipfile.ZipFile(dlzipFile, "w", zlib.DEFLATED)
for fl in fileList:
dlzip.write(os.path.abspath(str(fl)), os.path.basename(str(fl)))
dlzip.close()
try:
for fl in fileList:
os.remove(fl)
os.remove(copyTemplatePath)
except:
pass
return dlzipFile
except:
traceback.print_exc()
return None
def _upc2Excel(upc, data, copyTemplatePath, fileDir, part=None, style_no=None):
    # part and style_no are passed by the batch export above; they are
    # optional so the four-argument call in genProducionFile still works
    xlsFileName = "%s_%s.xls" % (upc, datetime.now().strftime("%Y%m%d%H%M%S"))
filename = os.path.join(fileDir, xlsFileName)
#print 'filename',filename
# print 'input data', time.ctime()
rfid = RFIDExcel(templatePath=copyTemplatePath, destinationPath=filename)
try:
rfid.inputData(data = data)
rfid.outputData()
return filename
#serveFile(filename, "application/x-download", "attachment")
# print 'input data', time.ctime()
except:
traceback.print_exc()
if rfid:
rfid.clearData()
redirect("index")
def reportExport(jdid_list):
try:
fileDir=config.report_download_path
if not os.path.exists(fileDir):
os.makedirs(fileDir)
for i in os.listdir(fileDir):
oldfile=os.path.join(fileDir, i)
if(time.time()-os.path.getmtime(oldfile)>60*60):
try:
os.remove(oldfile)
except:
pass
dateStr=datetime.now().strftime("%Y%m%d%H%M%S")
dlzipFile=os.path.join(fileDir, "report_%s.zip"%dateStr)
#print dlzipFile
templatePath=os.path.join(config.rfid_template, "REPORT_TEMPLATE.xls")
rm=random.randint(1, 1000)
copyTemplatePath=os.path.join(fileDir, "REPORT_TEMPLATE_%d.xls"%rm)
        # copy the template to the dest folder to avoid conflicts.
shutil.copyfile(templatePath, copyTemplatePath)
result=[]
if jdid_list:
for index, jdid in enumerate(jdid_list):
jd = DBSession.query(JobDetail).get(jdid)
serial_begin = jd.serial_begin if jd.serial_begin else 0
serial_end = jd.serial_end if jd.serial_end else 0
if serial_begin==0 and serial_end==0:
qty = 0
else:
qty = serial_end - serial_begin + 1
result.append((
jd.header.no,
jd.location.name if jd.location else '',
gtinToUpc(jd.gtin),
jd.gtin,
qty,
serial_begin,
serial_end,
jd.epc_code_begin,
jd.epc_code_end))
report_xls=_report2Excel(result, copyTemplatePath, os.path.join(fileDir, "report_%s.xls"%dateStr))
dlzip=zipfile.ZipFile(dlzipFile, "w", zlib.DEFLATED)
dlzip.write(os.path.abspath(str(report_xls)), os.path.basename(str(report_xls)))
dlzip.close()
try:
os.remove(copyTemplatePath)
os.remove(os.path.join(fileDir, "report_%s.xls"%dateStr))
except:
pass
return dlzip, dlzipFile
except:
traceback.print_exc()
return None
def _report2Excel(data, copyTemplatePath, filename):
rfid=HANESExcel(templatePath = copyTemplatePath, destinationPath = filename)
try:
rfid.inputData(data = data)
rfid.outputData()
return filename
except:
traceback.print_exc()
if rfid:
rfid.clearData()
redirect("index")
|
[
"lamciuloeng@gmail.com"
] |
lamciuloeng@gmail.com
|
fb2863bacc09c5bed7d80aee30a690e7b65abba1
|
a600c0c0ca4be9103a4977725ef77459b7ef483f
|
/test_api/extensions.py
|
6656117c2dfce24db6e48b3143c5c7d148dc28bc
|
[
"Apache-2.0"
] |
permissive
|
hpf0532/flask-celery-demo
|
a739e5bcb1bf46dfcee487b3c4c08afa3a15e843
|
19c736b0c75227bd94eda3c74de6c2e8c118545a
|
refs/heads/master
| 2022-01-22T01:33:03.427332
| 2020-03-03T14:18:27
| 2020-03-03T14:18:27
| 244,649,950
| 10
| 0
|
Apache-2.0
| 2022-01-06T22:42:38
| 2020-03-03T13:57:37
|
Python
|
UTF-8
|
Python
| false
| false
| 132
|
py
|
from flask_sqlalchemy import SQLAlchemy
from flask_celeryext import FlaskCeleryExt
db = SQLAlchemy()
celery_ext = FlaskCeleryExt()
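# Usage sketch (illustrative, not part of the original module): both
# objects follow the standard Flask init_app pattern inside an
# application factory; the database URI below is a placeholder.
def create_app_sketch():
    from flask import Flask
    app = Flask(__name__)
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///demo.db'
    db.init_app(app)
    celery_ext.init_app(app)
    return app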
|
[
"hpf1992@126.com"
] |
hpf1992@126.com
|
11c93caecd66f575eb4adb81ecc712d0f618b123
|
0d29ffb6792b0a964388ea811591fb8b3606f2e7
|
/problems/hitcon2014-rsaha/server.py
|
67d187c11f3069454a0a09fe89f9010ed22612af
|
[] |
no_license
|
dkohlbre/crypto-vm
|
a9dc5c5609267325df000a13fb0bbea23620f690
|
cbb11bbb2c6ae446883e718e11c8a2166fc6a79e
|
refs/heads/master
| 2016-09-06T09:00:09.436010
| 2015-06-12T18:48:41
| 2015-06-12T18:48:41
| 35,638,059
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,525
|
py
|
#!/usr/bin/env python
import random
import select
import signal
import sympy
import sys
class Unbuffered(object):
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
def random_prime(bits):
return sympy.nextprime(2 ** bits + random.randint(0, 2 ** bits))
def encrypt(bits, m):
p = random_prime(bits)
q = random_prime(bits)
n = p * q
assert m < n
print n
print m ** 3 % n
print (m + 1) ** 3 % n
def main():
signal.alarm(180)
sys.stdout = Unbuffered(sys.stdout)
for i in range(1, 10):
bits = 50 * i
m = random.randint(0, 4 ** bits)
encrypt(bits, m)
rfd, _, _ = select.select([sys.stdin], [], [], 10)
if rfd:
try:
x = int(raw_input())
except ValueError:
print "\033[31;1mEnter a number, ok?\033[0m"
exit()
if x == m:
print "\033[32;1mGreat:)\033[0m"
continue
else:
print "\033[31;1mso sad :(\033[0m"
exit()
else:
print "\033[31;1mToo slooooooooooow :(\033[0m"
exit()
bits = 512.512
m = int("HITCON{RSA is a really secure algorithm, right?}".encode('hex'), 16)
encrypt(bits, m)
print "\033[32;1mGood Luck!\033[0m"
if __name__ == '__main__':
main()
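# ------------------------------------------------------------------
# Solver sketch (illustrative, not used by the service): the service
# reveals c1 = m^3 mod n and c2 = (m+1)^3 mod n for the same m, so m
# is recoverable without factoring n:
#   c2 - c1 + 2  = 3m^2 + 3m + 3
#   c2 + 2c1 - 1 = 3m^3 + 3m^2 + 3m = m * (3m^2 + 3m + 3)
# hence m = (c2 + 2*c1 - 1) * inverse(c2 - c1 + 2) mod n.
# ------------------------------------------------------------------
def egcd(a, b):
    # iterative extended Euclid: returns (g, x, y) with a*x + b*y == g
    x0, x1, y0, y1 = 1, 0, 0, 1
    while b:
        q, a, b = a // b, b, a % b
        x0, x1 = x1, x0 - q * x1
        y0, y1 = y1, y0 - q * y1
    return a, x0, y0
def recover_m(n, c1, c2):
    num = (c2 + 2 * c1 - 1) % n
    den = (c2 - c1 + 2) % n
    g, inv, _ = egcd(den, n)
    assert g == 1  # den must be invertible mod n
    return (num * inv) % n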
|
[
"david.kohlbrenner@gmail.com"
] |
david.kohlbrenner@gmail.com
|
6a6aa1efeccf513f3b03b7e74b63d6f3d9d2de0f
|
92943fadaa0468848cb4180d40e6820e90a21697
|
/simple/process_data.py
|
1bbc5a895e63191d47a9e3ed4c2a7dc870a9e541
|
[] |
no_license
|
arhee/midi_song
|
269997c8954e7a5db3822e975fef81be99e48847
|
b0aa8ce04bb82f47a4a7d5d05d4eee76f8222b17
|
refs/heads/master
| 2021-01-01T17:27:28.860683
| 2017-01-17T08:03:09
| 2017-01-17T08:03:09
| 78,306,393
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,988
|
py
|
""" A simplified version of the problem that only looks at note
sequence
generates a matrix of note sequences.
"""
import mido
import glob
import numpy as np
from math import ceil
import pickle
def get_notelist(mido_obj, res=16, note_range=(36,84)):
"""
Args:
res (int) - resolution. 8 is eighth notes, 4 quarter, etc...
note_range (tuple) - middle C = 60
Returns:
counter (int) - total amount of time elapsed for song
notelist (list) - a list of notes in (pitch, start, duration) format
"""
# this gives # of ticks in a column of the piano sheet
tick_step = round(float(mido_obj.ticks_per_beat/res))
active_notes = [0] * abs(np.diff(note_range)[0])
counter = 0
notelist = []
# this needs to be fixed in the future for multiinstrument songs
rawmsgs = [msg for track in mido_obj.tracks for msg in track]
# have to reset counter when a new track is detected
for msg in rawmsgs:
if msg.type not in set(['note_on','note_off']):
continue
#counter += ceil(msg.time/tick_step)
counter += msg.time
if msg.type == 'note_on' and msg.velocity > 0:
try:
active_notes[msg.note - note_range[0]] = counter
except IndexError:
pass
elif msg.type == 'note_off' or (msg.type == 'note_on' and msg.velocity == 0):
# fill everything up to this with 1s
try:
start = active_notes[msg.note - note_range[0]] #round this section
notelist.append((msg.note, int(ceil(start/tick_step)), int(ceil((counter-start)/tick_step))))
active_notes[msg.note - note_range[0]] = 0
except IndexError:
pass
notelist = sorted(notelist, key=lambda x: x[1])
return (counter, notelist)
mididir = '../data/midi/tr_guitar_licks/'
midifnames = glob.glob(mididir + '*')
midifiles = []
for x in midifnames:
try:
midifiles.append(mido.MidiFile(x))
except IOError:
pass
# get the order of notes
data = []
for obj in midifiles:
counter, notelist = get_notelist(obj)
data.append([x[0] for x in notelist])
# shrink the dataset by reducing by min note
min_note = min([min(y) for y in data])
max_note = max([max(y) for y in data])
normdata = [np.array(x) - min_note for x in data]
delta = max_note - min_note
# create matrix
mat_data = []
for x in normdata:
# rows, cols
mat = np.zeros((delta+1, len(x)))
mat[x,range(len(x))] = 1
mat_data.append(mat)
# segmentize
# generate pickled data
X = []
y = []
maxlen = 10
step = 1
for obj in mat_data:
steps = obj.shape[1]
# rows cols
for ix in range(0, steps-maxlen-1, step):
end = ix + maxlen
X.append(obj[:, ix:end:1])
y.append(obj[:, end+1])
X = np.stack(X)
y = np.stack(y)
data = {'X':X, 'y':y, 'note_range':delta}
with open('mmat.p','wb') as f:
pickle.dump(data, f)
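# Loading sketch (illustrative, for a downstream training script):
#   with open('mmat.p', 'rb') as f:
#       data = pickle.load(f)
#   X, y, note_range = data['X'], data['y'], data['note_range']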
|
[
"alex.rhee@gmail.com"
] |
alex.rhee@gmail.com
|
c69efa01965331bd2028a83d33866ec5b96324b3
|
66828168e36f905697fcc144f7610c88450927d9
|
/ee/clickhouse/queries/event_query.py
|
cd38645db748faad161d6656ea0676fa8ee385f4
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
nehachauhan14/posthog
|
975657e855e951aae82acfe6606f678304146dd0
|
27144478f534290ee01b69d6ac5c6a53b8e05ec4
|
refs/heads/master
| 2023-06-05T17:30:39.999325
| 2021-06-19T00:40:49
| 2021-06-19T00:40:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,406
|
py
|
from typing import Any, Dict, Tuple
from ee.clickhouse.models.cohort import format_person_query, get_precalculated_query, is_precalculated_query
from ee.clickhouse.models.property import filter_element, prop_filter_json_extract
from ee.clickhouse.queries.trends.util import populate_entity_params
from ee.clickhouse.queries.util import date_from_clause, get_time_diff, get_trunc_func_ch, parse_timestamps
from posthog.models import Cohort, Entity, Filter, Property, Team
class ClickhouseEventQuery:
DISTINCT_ID_TABLE_ALIAS = "pdi"
PERSON_TABLE_ALIAS = "person"
EVENT_TABLE_ALIAS = "e"
_PERSON_PROPERTIES_ALIAS = "person_props"
_filter: Filter
_entity: Entity
_team_id: int
_should_join_distinct_ids = False
_should_join_persons = False
_should_round_interval = False
_date_filter = None
def __init__(
self,
filter: Filter,
entity: Entity,
team_id: int,
round_interval=False,
should_join_distinct_ids=False,
should_join_persons=False,
date_filter=None,
**kwargs,
) -> None:
self._filter = filter
self._entity = entity
self._team_id = team_id
self.params = {
"team_id": self._team_id,
}
self._date_filter = date_filter
self._should_join_distinct_ids = should_join_distinct_ids
self._should_join_persons = should_join_persons
if not self._should_join_distinct_ids:
self._determine_should_join_distinct_ids()
if not self._should_join_persons:
self._determine_should_join_persons()
self._should_round_interval = round_interval
def get_query(self) -> Tuple[str, Dict[str, Any]]:
_fields = (
f"{self.EVENT_TABLE_ALIAS}.timestamp as timestamp, {self.EVENT_TABLE_ALIAS}.properties as properties"
+ (f", {self.DISTINCT_ID_TABLE_ALIAS}.person_id as person_id" if self._should_join_distinct_ids else "")
+ (f", {self.PERSON_TABLE_ALIAS}.person_props as person_props" if self._should_join_persons else "")
)
date_query, date_params = self._get_date_filter()
self.params.update(date_params)
prop_query, prop_params = self._get_props()
self.params.update(prop_params)
entity_query, entity_params = self._get_entity_query()
self.params.update(entity_params)
query = f"""
SELECT {_fields} FROM events {self.EVENT_TABLE_ALIAS}
            {self._get_distinct_id_query()}
{self._get_person_query()}
WHERE team_id = %(team_id)s
{entity_query}
{date_query}
{prop_query}
"""
return query, self.params
def _determine_should_join_distinct_ids(self) -> None:
if self._entity.math == "dau":
self._should_join_distinct_ids = True
return
    def _get_distinct_id_query(self) -> str:
if self._should_join_distinct_ids:
return f"""
INNER JOIN (
SELECT person_id,
distinct_id
FROM (
SELECT *
FROM person_distinct_id
JOIN (
SELECT distinct_id,
max(_offset) as _offset
FROM person_distinct_id
WHERE team_id = %(team_id)s
GROUP BY distinct_id
) as person_max
ON person_distinct_id.distinct_id = person_max.distinct_id
AND person_distinct_id._offset = person_max._offset
WHERE team_id = %(team_id)s
)
WHERE team_id = %(team_id)s
) AS {self.DISTINCT_ID_TABLE_ALIAS}
ON events.distinct_id = {self.DISTINCT_ID_TABLE_ALIAS}.distinct_id
"""
else:
return ""
def _determine_should_join_persons(self) -> None:
for prop in self._filter.properties:
if prop.type == "person":
self._should_join_distinct_ids = True
self._should_join_persons = True
return
if prop.type == "cohort" and self._does_cohort_need_persons(prop):
self._should_join_distinct_ids = True
self._should_join_persons = True
return
for prop in self._entity.properties:
if prop.type == "person":
self._should_join_distinct_ids = True
self._should_join_persons = True
return
if self._filter.breakdown_type == "person":
self._should_join_distinct_ids = True
self._should_join_persons = True
if self._filter.filter_test_accounts:
test_account_filters = Team.objects.only("test_account_filters").get(id=self._team_id).test_account_filters
test_filter_props = [Property(**prop) for prop in test_account_filters]
for prop in test_filter_props:
if prop.type == "person":
self._should_join_distinct_ids = True
self._should_join_persons = True
return
def _does_cohort_need_persons(self, prop: Property) -> bool:
cohort = Cohort.objects.get(pk=prop.value, team_id=self._team_id)
for group in cohort.groups:
if group.get("properties"):
return True
return False
def _get_person_query(self) -> str:
if self._should_join_persons:
return f"""
INNER JOIN (
SELECT id, properties as person_props
FROM (
SELECT id,
argMax(properties, person._timestamp) as properties,
max(is_deleted) as is_deleted
FROM person
WHERE team_id = %(team_id)s
GROUP BY id
HAVING is_deleted = 0
)
) {self.PERSON_TABLE_ALIAS}
ON {self.PERSON_TABLE_ALIAS}.id = {self.DISTINCT_ID_TABLE_ALIAS}.person_id
"""
else:
return ""
def _get_entity_query(self) -> Tuple[str, Dict]:
entity_params, entity_format_params = populate_entity_params(self._entity)
return entity_format_params["entity_query"], entity_params
def _get_date_filter(self) -> Tuple[str, Dict]:
if self._date_filter:
return self._date_filter, {}
parsed_date_from, parsed_date_to, date_params = parse_timestamps(filter=self._filter, team_id=self._team_id)
query = f"""
{parsed_date_from}
{parsed_date_to}
"""
return query, date_params
def _get_props(self, allow_denormalized_props: bool = False) -> Tuple[str, Dict]:
filters = [*self._filter.properties, *self._entity.properties]
filter_test_accounts = self._filter.filter_test_accounts
team_id = self._team_id
table_name = f"{self.EVENT_TABLE_ALIAS}."
prepend = "global"
final = []
params: Dict[str, Any] = {}
if filter_test_accounts:
test_account_filters = Team.objects.only("test_account_filters").get(id=team_id).test_account_filters
filters.extend([Property(**prop) for prop in test_account_filters])
for idx, prop in enumerate(filters):
if prop.type == "cohort":
person_id_query, cohort_filter_params = self._get_cohort_subquery(prop)
params = {**params, **cohort_filter_params}
final.append(f"AND {person_id_query}")
elif prop.type == "person":
filter_query, filter_params = prop_filter_json_extract(
prop,
idx,
"{}person".format(prepend),
allow_denormalized_props=allow_denormalized_props,
prop_var=self._PERSON_PROPERTIES_ALIAS,
)
final.append(filter_query)
params.update(filter_params)
elif prop.type == "element":
query, filter_params = filter_element({prop.key: prop.value}, prepend="{}_".format(idx))
final.append("AND {}".format(query[0]))
params.update(filter_params)
else:
filter_query, filter_params = prop_filter_json_extract(
prop, idx, prepend, prop_var="properties", allow_denormalized_props=allow_denormalized_props,
)
final.append(filter_query)
params.update(filter_params)
return " ".join(final), params
def _get_cohort_subquery(self, prop) -> Tuple[str, Dict[str, Any]]:
cohort = Cohort.objects.get(pk=prop.value, team_id=self._team_id)
is_precalculated = is_precalculated_query(cohort)
person_id_query, cohort_filter_params = (
get_precalculated_query(cohort, custom_match_field=f"{self.DISTINCT_ID_TABLE_ALIAS}.person_id")
if is_precalculated
else format_person_query(cohort, custom_match_field=f"{self.DISTINCT_ID_TABLE_ALIAS}.person_id")
)
return person_id_query, cohort_filter_params
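# Usage sketch (illustrative, assuming a Filter and an Entity already
# parsed from the request):
#   event_query = ClickhouseEventQuery(filter=my_filter, entity=my_entity, team_id=1)
#   sql, params = event_query.get_query()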
|
[
"noreply@github.com"
] |
nehachauhan14.noreply@github.com
|
14dcde1b59329171940009e02890125834141f0b
|
d7d0ecda6573a96e01d24531dfd257924d035d9c
|
/sept3/settings.py
|
6460fe08615df1b41591fba959a3d1ac3b0c4889
|
[] |
no_license
|
Nikadeatul/textutils
|
3a7390c3e6776d2ea23bb01ed0043329888aa2ef
|
b92971632b974284a206fd7d8a3554c0786a68bf
|
refs/heads/master
| 2023-08-31T18:24:08.248802
| 2021-10-16T22:17:02
| 2021-10-16T22:17:02
| 417,961,411
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,070
|
py
|
"""
Django settings for sept3 project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'cefqd=8kij833btnb0#l#2gdk8b@mgqaeg19st1%2^zoeh_wst'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sept3.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'sept3.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"atul.nikade14@gmail.com"
] |
atul.nikade14@gmail.com
|
0a800a7ec152611cb1769c87d63ff497a36c01d6
|
5126d4883d3aa1d748ed0459e97864a7176939f4
|
/core/model.py
|
2ad9fb5768fc9710a09637f939d52f8c65b3b815
|
[] |
no_license
|
samxuxiang/Attention-Based-Hierarchical-Model-
|
3e1446c6b6044a64caac4e73a77863fa6e430fe8
|
09ca85a15997c60127484f4070b549d098464e5c
|
refs/heads/master
| 2020-06-10T19:04:09.919594
| 2018-10-21T21:06:29
| 2018-10-21T21:06:29
| 75,902,990
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,891
|
py
|
# =========================================================================================
# Xiang Xu (2016-12-1) code for 10-807 final project
# reuse some of the code from (https://github.com/yunjey/show-attend-and-tell-tensorflow)
# (https://github.com/jazzsaxmafia/video_to_sequence)
# other group members: Markus Woodsons, Yubo Zhang
# =========================================================================================
from __future__ import division
import tensorflow as tf
class CaptionGenerator(object):
def __init__(self, word_to_idx, dim_layer1=[7,512],dim_layer2=[14,512],
dim_embed=512, dim_hidden=1024, n_time_step=16,
                 alpha_c=[0.0,0.0], alpha_e=[0.0,0.0]):  # alpha_e is indexed per layer in build_model
"""
        basic sum attention, simple attention extractor, yes entropy,
no kl, no selector, yes 1.0, yes combined
"""
self.word_to_idx = word_to_idx
self.idx_to_word = {i: w for w, i in word_to_idx.iteritems()}
self.alpha_c = alpha_c
self.alpha_e = alpha_e
self.V = len(word_to_idx)
self.S1 = dim_layer1[0]
self.D1 = dim_layer1[1]
self.S2 = dim_layer2[0]
self.D2 = dim_layer2[1]
self.M = dim_embed
self.H = dim_hidden
self.T = n_time_step
self._start = word_to_idx['<START>']
self._null = word_to_idx['<NULL>']
self.weight_initializer = tf.random_normal_initializer(mean=0.0, stddev=0.01)
self.const_initializer = tf.constant_initializer(0.0)
self.constant_one = tf.constant_initializer(1.0)
self.emb_initializer = tf.random_uniform_initializer(minval=-1.0, maxval=1.0)
self.small_initializer = tf.constant_initializer(0.01)
# Place holder for features and captions
self._features_layer1 = tf.placeholder(tf.float32, [None,self.S1,self.S1,self.D1])
self._features_layer2 = tf.placeholder(tf.float32, [None,self.S2,self.S2,self.D2])
self.dropout_keep_prob = tf.placeholder(tf.float32)
self.features_layer1 = tf.reshape(self._features_layer1, [-1, self.S1**2, self.D1])
self.features_layer2 = tf.reshape(self._features_layer2, [-1, self.S2**2, self.D2])
self.captions = tf.placeholder(tf.int32, [None, self.T + 1])
self.e =1e-12
def _get_initial_lstm(self,name,dim_f,dim_h,features):
with tf.variable_scope(name):
features_mean = tf.reduce_mean(features, 1) # (N,dim_f)
w_h = tf.get_variable('w_h', [dim_f, dim_h], initializer=self.weight_initializer)
b_h = tf.get_variable('b_h', [dim_h], initializer=self.const_initializer)
h = tf.nn.tanh(tf.matmul(features_mean, w_h) + b_h) # (N,dim_h)
w_c = tf.get_variable('w_c', [dim_f, dim_h], initializer=self.weight_initializer)
b_c = tf.get_variable('b_c', [dim_h], initializer=self.const_initializer)
c = tf.nn.tanh(tf.matmul(features_mean, w_c) + b_c) # (N,dim_h)
return c, h
def _word_embedding(self, inputs, reuse=False):
with tf.variable_scope('word_embedding', reuse=reuse):
w = tf.get_variable('w', [self.V, self.M], initializer=self.emb_initializer)
x = tf.nn.embedding_lookup(w, inputs, name='word_vector') # (N, T, M) or (N, M)
return x
def _attention_layer_(self,name,shape_u,dim_u,dim_h,features,h,reuse=False):
with tf.variable_scope(name, reuse=reuse):
w1 = tf.get_variable('w1', [dim_h, dim_u], initializer=self.weight_initializer)
w2 = tf.get_variable('w2', [dim_u, 1], initializer=self.weight_initializer)
b1 = tf.get_variable('b1', [dim_u], initializer=self.const_initializer)
b2 = tf.get_variable('b2', [dim_u], initializer=self.const_initializer)
            # compute the posterior higher-level attention
sparse = tf.nn.sigmoid(tf.matmul(h, w1)+b1)
result0 = features * tf.expand_dims(sparse,1)
out1 = tf.nn.tanh(tf.reshape(result0,[-1,dim_u]) + b2)
out2 = tf.nn.sigmoid(tf.reshape(tf.matmul(out1,w2),[-1,shape_u**2]))
alpha = tf.nn.softmax(out2)
context = tf.reduce_sum(features * tf.expand_dims(alpha, 2), 1)
return context, alpha
def _attention_layer(self,name,alpha_l,shape_l,shape_u,dim_u,dim_h,features,h,reuse=False):
with tf.variable_scope(name, reuse=reuse):
w1 = tf.get_variable('w1', [dim_h, dim_u], initializer=self.weight_initializer)
w2 = tf.get_variable('w2', [dim_u, 1], initializer=self.weight_initializer)
b1 = tf.get_variable('b1', [dim_u], initializer=self.const_initializer)
b2 = tf.get_variable('b2', [dim_u], initializer=self.const_initializer)
# resize lower attention to higher attention (prior higher attention)
N = tf.shape(alpha_l)[0] # batch size
alpha_ = tf.reshape(tf.transpose(alpha_l,perm=[1,0]),[shape_l,shape_l,N])
alpha_resized_ = tf.image.resize_images(alpha_,[shape_u,shape_u])
alpha_resized = tf.transpose(tf.reshape(alpha_resized_,[-1,N]),[1,0])
alpha_truncate = tf.nn.relu(alpha_resized-(1.0/shape_u**2))
            # compute the posterior higher-level attention
sparse = tf.nn.sigmoid(tf.matmul(h, w1)+b1)
result0 = features * tf.expand_dims(sparse,1)
out1 = tf.nn.tanh(tf.reshape(result0,[-1,dim_u]) + b2)
out2 = tf.nn.sigmoid(tf.reshape(tf.matmul(out1,w2),[-1,shape_u**2]))
alpha = tf.nn.softmax(out2+alpha_truncate)
context = tf.reduce_sum(features * tf.expand_dims(alpha, 2), 1)
return context, alpha
def _combine_contexts(self,name,dim_c1,dim_c2,context1,context2,output_dim,reuse=False):
with tf.variable_scope(name,reuse=reuse):
w1 = tf.get_variable('w1', [dim_c1, output_dim], initializer=self.weight_initializer)
w2 = tf.get_variable('w2', [dim_c2, output_dim], initializer=self.weight_initializer)
b1 = tf.get_variable('b1', [output_dim], initializer=self.const_initializer)
b2 = tf.get_variable('b2', [output_dim], initializer=self.const_initializer)
c1 = tf.matmul(context1,w1)+b1
c2 = tf.matmul(context2,w2)+b2
combined_context = tf.nn.tanh(c1+c2)
return combined_context
def _decode_lstm(self,name,dim_h,dim_f,x,h,combined_context,reuse=False):
with tf.variable_scope(name, reuse=reuse):
w_h = tf.get_variable('w_h', [dim_h, self.M], initializer=self.weight_initializer)
b_h = tf.get_variable('b_h', [self.M], initializer=self.const_initializer)
w_out = tf.get_variable('w_out', [self.M, self.V], initializer=self.weight_initializer)
b_out = tf.get_variable('b_out', [self.V], initializer=self.const_initializer)
h = tf.nn.dropout(h, self.dropout_keep_prob)
h_logits = tf.matmul(h, w_h) + b_h
w_ctx2out = tf.get_variable('w_ctx2out', [dim_f, self.M], initializer=self.weight_initializer)
h_logits += tf.matmul(combined_context, w_ctx2out)
h_logits += x
h_logits = tf.nn.tanh(h_logits)
h_logits = tf.nn.dropout(h_logits, self.dropout_keep_prob)
out_logits = tf.matmul(h_logits, w_out) + b_out
return out_logits
def build_model(self):
features1 = self.features_layer1
features2 = self.features_layer2
captions = self.captions
batch_size = tf.shape(features1)[0]
# process time input
captions_in = captions[:, :self.T] # including <START> excluding <END>
captions_out = captions[:, 1:] # excluding <START> including <END>
mask = tf.to_float(tf.not_equal(captions_out, self._null)) # mask out shorter sentence
# initialize c & h for LSTM (use first layer)
c, h = self._get_initial_lstm('initial_lstm',self.D1,self.H,features1)
# get embedded words
x = self._word_embedding(inputs=captions_in)
# store loss and attention prob
loss = 0.0
alpha_list1,alpha_list2 = [],[]
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=self.H,state_is_tuple=True)
entropy_list1,entropy_list2 = [],[]
# rnn
for t in range(self.T):
# extract image attention region & prob (multi-layer)
context1, alpha1 = self._attention_layer_('att_layer1',self.S1,
self.D1,self.H,features1,h,reuse=(t!=0))
context2, alpha2 = self._attention_layer('att_layer2',alpha1,self.S1,self.S2,
self.D2,self.H,features2,h,reuse=(t!=0))
alpha_list1.append(alpha1) # T numbers of shape (N,L)
alpha_list2.append(alpha2)
# configure lstm
with tf.variable_scope('lstm', reuse=(t!=0)):
combined_context = self._combine_contexts('combined',self.D1,self.D2,context1,context2,512,reuse=(t!=0))
_, (c, h) = lstm_cell(inputs=tf.concat(1, [x[:,t,:], combined_context]), state=[c, h])
# decode output to sentence and compute loss
logits = self._decode_lstm('logits',self.H,512,x[:,t,:],h,combined_context,reuse=(t!=0))
loss += tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(logits, captions_out[:, t]) * mask[:, t])
# entropy regularization
entropy1 = tf.reduce_sum(-alpha1*tf.log(alpha1+self.e))
entropy2 = tf.reduce_sum(-alpha2*tf.log(alpha2+self.e))
entropy_list1.append(entropy1)
entropy_list2.append(entropy2)
# spreading regularization (focus on different region throughout time)
alphas1 = tf.transpose(tf.pack(alpha_list1), (1, 0, 2)) # (N, T, L)
alphas_all1 = tf.reduce_sum(alphas1, 1) # (N, L)
alphas2 = tf.transpose(tf.pack(alpha_list2), (1, 0, 2)) # (N, T, L)
alphas_all2 = tf.reduce_sum(alphas2, 1) # (N, L)
entropys1 = tf.reduce_sum(tf.pack(entropy_list1))
entropys2 = tf.reduce_sum(tf.pack(entropy_list2))
loss += self.alpha_c[0]*tf.reduce_sum((1.0 - alphas_all1)**2)+self.alpha_c[1]*tf.reduce_sum((1.0 - alphas_all2)**2)
loss += self.alpha_e[0]*(entropys1/tf.to_float(self.T))+self.alpha_e[1]*(entropys2/tf.to_float(self.T))
return (loss/tf.to_float(batch_size)),(entropys1/tf.to_float(self.T)),(entropys2/tf.to_float(self.T))
def build_sampler(self, max_len=20):
features1 = self.features_layer1
features2 = self.features_layer2
# init inputs for lstm
c, h = self._get_initial_lstm('initial_lstm',self.D1,self.H,features1)
sampled_word_list = []
alpha_list1,alpha_list2 = [],[]
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=self.H,state_is_tuple=True)
for t in range(max_len):
if t == 0:
x = self._word_embedding(inputs=tf.fill([tf.shape(features1)[0]], self._start))
else:
x = self._word_embedding(inputs=sampled_word, reuse=True)
# extract image attention region & prob (multi-layer)
#alpha_init = tf.fill([tf.shape(features1)[0],self.S1**2],1.0)
context1, alpha1 = self._attention_layer_('att_layer1',self.S1,
self.D1,self.H,features1,h,reuse=(t!=0))
context2, alpha2 = self._attention_layer('att_layer2',alpha1,self.S1,self.S2,
self.D2,self.H,features2,h,reuse=(t!=0))
alpha_list1.append(alpha1) # T numbers of shape (N,L)
alpha_list2.append(alpha2)
# configure lstm
with tf.variable_scope('lstm', reuse=(t!=0)):
combined_context = self._combine_contexts('combined',self.D1,self.D2,context1,context2,512,reuse=(t!=0))
_, (c, h) = lstm_cell(inputs=tf.concat(1, [x,combined_context]), state=[c, h])
logits = self._decode_lstm('logits',self.H,512,x,h,combined_context,reuse=(t!=0))
sampled_word = tf.argmax(logits, 1)
sampled_word_list.append(sampled_word)
alphas1 = tf.transpose(tf.pack(alpha_list1), (1, 0, 2)) # (N, T, L)
alphas2 = tf.transpose(tf.pack(alpha_list2), (1, 0, 2)) # (N, T, L)
sampled_captions = tf.transpose(tf.pack(sampled_word_list), (1, 0)) # (N, max_len)
return alphas1,alphas2,sampled_captions
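# Usage sketch (illustrative, for the TF-0.x-era API used above):
# word_to_idx must map '<START>' and '<NULL>' to indices.
#   model = CaptionGenerator(word_to_idx, dim_layer1=[7, 512], dim_layer2=[14, 512])
#   loss, ent1, ent2 = model.build_model()
#   alphas1, alphas2, captions = model.build_sampler(max_len=20)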
|
[
"noreply@github.com"
] |
samxuxiang.noreply@github.com
|
104ffcfd06540accfbcfa954aeb37d85855b1fbd
|
478b00d917af3105a75f8378d72227deb81a6067
|
/src/cn/xuqiang/fromInfoGAN/__init__.py
|
30fae0e1d9e84dd8d80b791fa08f62fbee34b04f
|
[] |
no_license
|
happyxuwork/python-learning
|
a0710054f87d9e5373838c28dc878bae007bd512
|
e9395d961f505ce0d5e1804fd56174e8a6ab3b6d
|
refs/heads/master
| 2021-09-06T11:53:51.451932
| 2018-02-06T08:20:37
| 2018-02-06T08:20:37
| 108,230,516
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 49
|
py
|
# -*- coding: UTF-8 -*-
'''
@author: xuqiang
'''
|
[
"1104016959@qq.com"
] |
1104016959@qq.com
|
c99d8e836e35915178184c08f009c5dde02d8c0a
|
cf28b08c7c47fe3fa8890fcd2a0429d83d9dbb3e
|
/samples/msi_extensions/setup.py
|
93d2dc5d7339f8a7ce7cf37820c308fc1f0c3f74
|
[
"Python-2.0"
] |
permissive
|
marcelotduarte/cx_Freeze
|
bc948693f5b650bf2459c0093c3a98ca419a1df9
|
38438b2418af9cda5f982c74ca55dc235d08aaa1
|
refs/heads/main
| 2023-08-18T11:37:29.282372
| 2023-08-16T01:32:06
| 2023-08-16T01:32:06
| 79,693,503
| 628
| 121
| null | 2023-09-14T19:15:58
| 2017-01-22T04:25:52
|
Python
|
UTF-8
|
Python
| false
| false
| 2,107
|
py
|
"""A very simple setup script to test adding extension handling to an MSI file.
This script defines three ways for the hello.py executable to handle text
files, each of which is registered with the operating system.
hello.py is a very simple 'Hello, world' type script which also displays the
environment in which the script runs
Run the build process by running the command 'python setup.py bdist_msi'"""
from __future__ import annotations
from cx_Freeze import Executable, setup
executables = [Executable("hello.py")]
bdist_msi_options = {
"extensions": [
# open / print / view text files
{
"extension": "txt",
"verb": "open",
"executable": "hello.exe",
"context": "Edit with hello.py",
},
{
"extension": "txt",
"verb": "print",
"executable": "hello.exe",
"context": "Print with hello.py",
"argument": '--print "%1"',
},
{
"extension": "txt",
"verb": "view",
"executable": "hello.exe",
"context": "View with hello.py",
"argument": '--read-only "%1"',
},
# open / print / view log files
{
"extension": "log",
"verb": "open",
"executable": "hello.exe",
"context": "Edit with hello.py",
},
{
"extension": "log",
"verb": "print",
"executable": "hello.exe",
"context": "Print with hello.py",
"argument": '--print "%1"',
},
{
"extension": "log",
"verb": "view",
"executable": "hello.exe",
"context": "View with hello.py",
"argument": '--read-only "%1"',
},
],
}
setup(
name="Hello Program",
version="0.1",
author="cx_Freeze",
description="Sample cx_Freeze script to test MSI extension registration",
executables=executables,
options={
"build_exe": {"excludes": ["tkinter"]},
"bdist_msi": bdist_msi_options,
},
)
|
[
"noreply@github.com"
] |
marcelotduarte.noreply@github.com
|
7d18a8d08397a2a3ff16bbb099e2b3122e4d2071
|
c954c2b7827ad00ed6fa88d69ec9f720e8a70199
|
/indicators/demarker.py
|
42ff7b315a0ccd4397ebc6c80bd92a5002a96e00
|
[] |
no_license
|
ShivAyyalasomayajula/quant
|
a4ae0f179703739c7fba85e7dba04c3a4ce05169
|
4cbaec658c4f01794a8625388cf4419312a350f7
|
refs/heads/main
| 2023-08-23T04:54:04.448989
| 2021-11-03T16:20:35
| 2021-11-03T16:20:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,435
|
py
|
import numpy as np
import vectorbt as vbt
from numba import njit
from vectorbt import _typing as tp
from vectorbt.generic import nb as generic_nb
# @njit
# def rolling_demarker_1d_nb(high: tp.Array1d, low: tp.Array1d, period: int) -> tp.Tuple[tp.Array1d, tp.Array1d, tp.Array1d]:
# demin = np.empty_like(low, dtype=np.float_)
# demax = np.empty_like(high, dtype=np.float_)
# for i in range(high.shape[0]):
# if low[i] < low[i - 1]:
# demin[i] = low[i - 1] - low[i]
# else:
# demin[i] = 0
#
# if high[i] > high[i - 1]:
# demax[i] = high[i] - high[i - 1]
# else:
# demax[i] = 0
#
# demax_avg = generic_nb.rolling_mean_1d_nb(demax, period, minp=period) # sma
# demin_avg = generic_nb.rolling_mean_1d_nb(demin, period, minp=period) # sma
#
# return demax_avg / (demax_avg + demin_avg), demin_avg, demax_avg
@njit
def rolling_demarker_2d_nb(high: tp.Array2d, low: tp.Array2d, period: int) -> tp.Tuple[tp.Array2d, tp.Array2d, tp.Array2d]:
    demin = np.empty_like(low, dtype=np.float_)
    demax = np.empty_like(high, dtype=np.float_)
    for col in range(high.shape[1]):
        # the first bar has no predecessor, so its deltas are zero
        # (starting at i = 0 would wrap around to the last row)
        demin[0, col] = 0.
        demax[0, col] = 0.
        for i in range(1, high.shape[0]):
            if low[i, col] < low[i - 1, col]:
                demin[i, col] = low[i - 1, col] - low[i, col]
            else:
                demin[i, col] = 0.
            if high[i, col] > high[i - 1, col]:
                demax[i, col] = high[i, col] - high[i - 1, col]
            else:
                demax[i, col] = 0.
demax_avg = generic_nb.rolling_mean_nb(demax, period, minp=period) # sma
demin_avg = generic_nb.rolling_mean_nb(demin, period, minp=period) # sma
return demax_avg / (demax_avg + demin_avg), demin_avg, demax_avg
# @njit
# def rolling_demarker_nb(high: tp.Array2d, low: tp.Array2d, period: int) -> tp.Tuple[tp.Array2d, tp.Array2d, tp.Array2d]:
# """2-dim version of `rolling_demarker_1d_nb`."""
# out = np.empty_like(high, dtype=np.float_)
# for col in range(high.shape[1]):
# print( high[:, col])
# out[:, col] = rolling_demarker_1d_nb(high[:, col], low[:, col], period)
# # return out
# return out, out, out # TODO where is what?
DeMarkerOscillator = vbt.IndicatorFactory(
input_names=['high', 'low'],
param_names=['period'],
output_names=['dem', 'demin_avg', 'demax_avg'],
).from_apply_func(rolling_demarker_2d_nb, period=13)
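# Usage sketch (illustrative, not part of the original module): the
# factory attaches a run() classmethod to the indicator class.
if __name__ == '__main__':
    import pandas as pd
    rng = np.random.default_rng(0)  # assumes NumPy >= 1.17
    close = pd.Series(rng.normal(0., 1., 100).cumsum() + 100.)
    demarker = DeMarkerOscillator.run(close + 0.5, close - 0.5, period=13)
    print(demarker.dem.tail())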
|
[
"robin.braemer@web.de"
] |
robin.braemer@web.de
|
02f0f826e20fe85b4578afa4a362aaf4b68434ad
|
d18cc96fbcf87e11155fbe8fa199989207dcdbd7
|
/migrations/versions/7dbb13e5d7c2_.py
|
0080e7ae14e0402e672f67fbda809470051473ea
|
[] |
no_license
|
Ivan-Sidorov/DeliveryService
|
c22e20fdc6183e2728fa177346f45f9eafb5b35b
|
e5edfff40b84c5a7008cb694bc694e8652832192
|
refs/heads/main
| 2023-03-27T17:13:29.242279
| 2021-03-29T19:29:12
| 2021-03-29T19:29:12
| 351,583,168
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 668
|
py
|
"""empty message
Revision ID: 7dbb13e5d7c2
Revises: bb1166c0df6a
Create Date: 2021-03-28 13:51:05.640557
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7dbb13e5d7c2'
down_revision = 'bb1166c0df6a'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('orders', sa.Column('bunch_complete', sa.Integer(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('orders', 'bunch_complete')
# ### end Alembic commands ###
|
[
"ivsidorov@miem.hse.ru"
] |
ivsidorov@miem.hse.ru
|
82f6291414d25aa2247cba2267bbc2e7241e8ec3
|
ba978ed8509119c7f9e5d300db3b9b171838f891
|
/Bioinformatics Stronghold/RNAtoProtein.py
|
3e7ad84a27758e7282ee07fa2d8ee5618995a6da
|
[] |
no_license
|
nadintamer/ROSALIND-Solutions
|
cddfbc9ebfb38b187ff7899c72361c692a6481a8
|
a1b6c613e0c23346ee5236528f2aa39b1eb365c7
|
refs/heads/master
| 2021-09-07T02:50:42.289324
| 2018-02-16T06:16:06
| 2018-02-16T06:16:06
| 121,720,242
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,056
|
py
|
RNATable = [["UUU","F"],["UUC","F"],["UUA","L"],["UUG","L"],["UCU","S"],["UCC","S"],["UCA","S"],["UCG","S"],["UAU","Y"],["UAC","Y"],["UAA","Stop"],["UAG","Stop"],["UGU","C"],["UGC","C"],["UGA","Stop"],["UGG","W"],["CUU","L"],["CUC","L"],["CUA","L"],["CUG","L"],["CCU","P"],["CCC","P"],["CCA","P"],["CCG","P"],["CAU","H"],["CAC","H"],["CAA","Q"],["CAG","Q"],["CGU","R"],["CGC","R"],["CGA","R"],["CGG","R"],["AUU","I"],["AUC","I"],["AUA","I"],["AUG","M"],["ACU","T"],["ACC","T"],["ACA","T"],["ACG","T"],["AAU","N"],["AAC","N"],["AAA","K"],["AAG","K"],["AGU","S"],["AGC","S"],["AGA","R"],["AGG","R"],["GUU","V"],["GUC","V"],["GUA","V"],["GUG","V"],["GCU","A"],["GCC","A"],["GCA","A"],["GCG","A"],["GAU","D"],["GAC","D"],["GAA","E"],["GAG","E"],["GGU","G"],["GGC","G"],["GGA","G"],["GGG","G"]]
def RNAtoProtein(rna):
    protein = ""
    x = 0
    while x + 3 <= len(rna):
        codon = rna[x:x+3]
        for a in RNATable:
            if codon == a[0]:
                if a[1] == "Stop":
                    return protein  # translation ends at a stop codon
                protein += a[1]
                break
        # advance unconditionally so an unmatched codon cannot loop forever
        x += 3
    return protein
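# Illustrative alternative (a sketch, not part of the original
# solution): building a dict once makes each codon lookup O(1)
# instead of scanning the 64-entry table.
codon_table = dict(RNATable)
def rna_to_protein_fast(rna):
    protein = ""
    for x in range(0, len(rna) - 2, 3):
        amino = codon_table.get(rna[x:x + 3])
        if amino is None or amino == "Stop":
            break
        protein += amino
    return protein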
|
[
"noreply@github.com"
] |
nadintamer.noreply@github.com
|
73119077bbf5b83ed52963ba055abbd4c00aa70c
|
3624dce3d8c498ee95c1734c33e161f1199ce788
|
/setup.py
|
57194c6f379bf8faf37dceb9bc1a141671f67e9c
|
[] |
no_license
|
jimyong88/idworker
|
dbb5bfdc75700c599949b5643fa42122375a11a9
|
cd6ab2ac7ae4a325f9bf695461105246e8f480ac
|
refs/heads/master
| 2021-04-16T23:23:44.393531
| 2020-03-23T10:14:45
| 2020-03-23T10:14:45
| 249,391,755
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,418
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# __author__ = 'Jim Yong'
# __date__ = '2020/03/23'
# __contact__ = 'jimyong88@gmail.com'
# __copyright__ = 'Copyright (C) 2020, JimYong.com'
import sys
if sys.version_info < (2, 5):
sys.exit('Python 2.5 or greater is required.')
from setuptools import setup, find_packages
VERSION = "1.0.0"
LICENSE = "MIT"
setup(
name='idworker',
version=VERSION,
description=('idworker'),
long_description="Snowflake idworker",
author='JimYong',
author_email='jimyong88@gmail.com',
maintainer='JimYong',
maintainer_email='jimyong88@gmail.com',
license=LICENSE,
packages=find_packages(),
platforms=["all"],
url='https://github.com/jimyong88/idworker',
install_requires=[],
classifiers=[
'Development Status :: 4 - Beta',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: Implementation',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries'
],
)
|
[
"jimyong88@gmail.com"
] |
jimyong88@gmail.com
|
eb4af5608df2b19a482bf8d9f385e8e9283f9044
|
df1141f0ec887cc6011cd8f0b864c6ad177ea46c
|
/MBIT/main/migrations/0001_initial.py
|
287c5ba6cc71d538a931d6b5e3751f872acbcef2
|
[] |
no_license
|
Hyewon0223/DjangoStudy
|
038004b93ebc87c2efc55f3052df47257b84ff7a
|
a5aa14c8922ff822e3636e3afda90cc5106a8aeb
|
refs/heads/main
| 2023-07-16T02:16:09.842177
| 2021-08-26T15:34:27
| 2021-08-26T15:34:27
| 391,594,466
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,462
|
py
|
# Generated by Django 3.2.6 on 2021-08-16 14:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Developer',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('count', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('number', models.IntegerField(unique=True)),
('content', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Choice',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.CharField(max_length=100)),
('developer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='main.developer')),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.question')),
],
),
]
|
[
"ch2w2006@kookmin.ac.kr"
] |
ch2w2006@kookmin.ac.kr
|
3bccd42bea4632f044374355cd4daf40939d9ac7
|
c95a432510a6bfd56caf6097085bf1af95e9f850
|
/boardgames/tictactoe/models.py
|
bd51665a1b1aebb2d541055967af76166446c1d9
|
[] |
no_license
|
Epsa1993/boardgames
|
ddae144eccd4b592f8e77bdf9c99da43415856b6
|
8acbb8596b01eff708c5d46867f67876434e05ed
|
refs/heads/master
| 2021-01-15T19:04:18.167165
| 2017-08-09T12:23:42
| 2017-08-09T12:23:42
| 99,804,309
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 621
|
py
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Game(models.Model):
first_player = models.ForeignKey(User, related_name = "games_first_player")
second_player = models.ForeignKey(User, related_name = "games_second_player")
next_to_move = models.ForeignKey(User, related_name = "games_to_move")
start_time = models.DateTimeField(auto_now_add = True)
last_active = models.DateTimeField(auto_now = True)
class Move(models.Model):
x = models.IntegerField()
y = models.IntegerField()
comment = models.CharField(max_length=300)
game = models.ForeignKey(Game)
|
[
"epsa.kausik@thoughts2binary.com"
] |
epsa.kausik@thoughts2binary.com
|
18b798923083b1b718cbaeeda3686f0d2cfb8328
|
d2c4934325f5ddd567963e7bd2bdc0673f92bc40
|
/tests/artificial/transf_Difference/trend_MovingAverage/cycle_12/ar_12/test_artificial_1024_Difference_MovingAverage_12_12_20.py
|
3af611f7bc5cdf7bbd199fef12858416f640dc8c
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jmabry/pyaf
|
797acdd585842474ff4ae1d9db5606877252d9b8
|
afbc15a851a2445a7824bf255af612dc429265af
|
refs/heads/master
| 2020-03-20T02:14:12.597970
| 2018-12-17T22:08:11
| 2018-12-17T22:08:11
| 137,104,552
| 0
| 0
|
BSD-3-Clause
| 2018-12-17T22:08:12
| 2018-06-12T17:15:43
|
Python
|
UTF-8
|
Python
| false
| false
| 277
|
py
|
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 12, transform = "Difference", sigma = 0.0, exog_count = 20, ar_order = 12);
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
e471e36174f124757ad31571788e0c21cf65dda1
|
0326948d86a409347b62c93cae8fe6fcd04da7fd
|
/hello_world.py
|
6a0176e4d1f68dc447382907b3015b550bf0a8e5
|
[] |
no_license
|
danylo-basarab/lab_1_AIMP
|
4df92d3b114eef9cad9d59456ab43da43351f3c4
|
b0b796aa582dde4f24ada64ea9e887278ebaeef5
|
refs/heads/master
| 2023-03-06T07:53:05.575641
| 2021-02-19T20:10:27
| 2021-02-19T20:10:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 67
|
py
|
name = input('Enter your name : ')
print('Hello world from ', name)
|
[
"danya.basarab@gmail.com"
] |
danya.basarab@gmail.com
|
6407cd5a5fc81d533d847a1fde80f8d2e84e263b
|
a74fd817087c65b59ff6638cecb0313567db92c8
|
/setup.py
|
32c121691300224a1ba4122b3c1a781b4ad77178
|
[
"MIT"
] |
permissive
|
daveyclk/hologram-python
|
17f63ea44def79ab561a751c4d76af07f019ff87
|
7a75b6a30466556c96da900889f64460176a9071
|
refs/heads/master
| 2021-05-06T20:01:37.485758
| 2017-11-20T22:27:08
| 2017-11-20T22:27:08
| 112,255,426
| 0
| 1
| null | 2017-11-27T22:20:49
| 2017-11-27T22:20:49
| null |
UTF-8
|
Python
| false
| false
| 1,394
|
py
|
#!/usr/bin/env python
# Copyright 2016 Hologram (Konekt, Inc.)
#
# Author: Hologram <support@hologram.io>
#
longdesc = '''
This is a library for connecting to the Hologram Cloud
'''
import sys
try:
from setuptools import setup, find_packages
kw = {
}
except ImportError:
from distutils.core import setup
kw = {}
if sys.platform == 'darwin':
import setup_helper
setup_helper.install_custom_make_tarball()
setup(
name = 'hologram-python',
version = open('version.txt').read().split()[0],
description = 'Library for accessing Hologram Cloud at https://hologram.io',
long_description = longdesc,
author = 'Hologram',
author_email = 'support@hologram.io',
url = 'https://github.com/hologram-io/hologram-python/',
packages = find_packages(),
include_package_data = True,
install_requires = open('requirements.txt').read().split(),
scripts = ['scripts/hologram'],
license = 'MIT',
platforms = 'Posix; MacOS X; Windows',
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Internet',
'Topic :: Security :: Cryptography',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
],
**kw
)
|
[
"tanzhao@umich.edu"
] |
tanzhao@umich.edu
|
cd645ef44cc5b4f56a1430cb8b3c0482ece779e7
|
bac508336950084cc1c47c85e37bc8bc8bb8bda4
|
/names/bst.py
|
fe3669bffe384449b143e82f3f86907fa1392601
|
[] |
no_license
|
deegrams221/Sprint-Challenge--Data-Structures-Python
|
bc61b2b6c0cf2b82d455a9fd086f329772ad24a2
|
73082267bbd3e99eb617725167b1c5cf1ce7adde
|
refs/heads/master
| 2020-12-26T09:26:38.276392
| 2020-02-24T21:00:00
| 2020-02-24T21:00:00
| 237,464,812
| 0
| 0
| null | 2020-02-24T21:00:02
| 2020-01-31T16:02:24
| null |
UTF-8
|
Python
| false
| false
| 1,801
|
py
|
# from README: "Hint: You might try importing a data structure you built during the week"
# this is from the data structures assignment, insert and contains will be useful for the names challenge
class BinarySearchTree:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
# Insert the given value into the tree
def insert(self, value):
        # compare root node, if greater or equal go right
if value >= self.value:
# if no child
if self.right == None:
# insert
self.right = BinarySearchTree(value)
# else try again, continue right
else:
return self.right.insert(value)
# if lesser go left
elif value < self.value:
# if no child
if self.left == None:
# insert
self.left = BinarySearchTree(value)
# else try again, continue left
else:
return self.left.insert(value)
# Return True if the tree contains the value
# False if it does not
    def contains(self, target):
        # look at root, if root is target return true
        if target == self.value:
            return True
        # if value is less than node
        elif target < self.value:
            # go left and return the result if a left child exists
            if self.left != None:
                return self.left.contains(target)
            # no left child means the value is absent
            return False
        # if value is greater or equal to node
        else:
            # go right and return the result if a right child exists
            if self.right != None:
                return self.right.contains(target)
            # no right child means the value is absent
            return False
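# usage sketch (illustrative): duplicates go right because of the >= in insert()
if __name__ == '__main__':
    bst = BinarySearchTree(5)
    for v in [3, 8, 1, 4, 9]:
        bst.insert(v)
    print(bst.contains(4))   # True
    print(bst.contains(7))   # False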
|
[
"deegrams221@gmail.com"
] |
deegrams221@gmail.com
|
55b08fc6b6a02a3ec359ec5f61ae3856f2eda02e
|
bd561f3a1df58bb6964a48d0bbf8f9ad98591bff
|
/zhaoyuanfang/classwork/DSVC/__init__.py
|
d832ceb00c0683e083fe6a692973e9663e46ea74
|
[] |
no_license
|
yuanandfang/HandwrittenNumberRecognition
|
393dd71fe8d86772f4fc9a2e346604a9d6abeca1
|
b22ae987a0056277e268ac1c75e457239aaf7d0e
|
refs/heads/master
| 2020-06-09T21:54:35.075298
| 2019-07-01T01:34:48
| 2019-07-01T01:34:48
| 193,513,589
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,199
|
py
|
# -*- coding: utf-8 -*-
import numpy as np
class LogisticRegression(object):
def __init__(self, learning_rate=0.1, max_iter=100, seed=None):
self.seed = seed
self.lr = learning_rate
self.max_iter = max_iter
def fit(self, x, y):
np.random.seed(self.seed)
self.w = np.random.normal(loc=0.0, scale=1.0, size=x.shape[1])
self.b = np.random.normal(loc=0.0, scale=1.0)
self.x = x
self.y = y
for i in range(self.max_iter):
self._update_step()
# print('loss: \t{}'.format(self.loss()))
# print('score: \t{}'.format(self.score()))
# print('w: \t{}'.format(self.w))
# print('b: \t{}'.format(self.b))
def _sigmoid(self, z):
return 1.0 / (1.0 + np.exp(-z))
def _f(self, x, w, b):
z = x.dot(w) + b
return self._sigmoid(z)
def predict_proba(self, x=None):
if x is None:
x = self.x
y_pred = self._f(x, self.w, self.b)
return y_pred
def predict(self, x=None):
if x is None:
x = self.x
y_pred_proba = self._f(x, self.w, self.b)
y_pred = np.array([0 if y_pred_proba[i] < 0.5 else 1 for i in range(len(y_pred_proba))])
return y_pred
def score(self, y_true=None, y_pred=None):
if y_true is None or y_pred is None:
y_true = self.y
y_pred = self.predict()
acc = np.mean([1 if y_true[i] == y_pred[i] else 0 for i in range(len(y_true))])
return acc
def loss(self, y_true=None, y_pred_proba=None):
if y_true is None or y_pred_proba is None:
y_true = self.y
y_pred_proba = self.predict_proba()
return np.mean(-1.0 * (y_true * np.log(y_pred_proba) + (1.0 - y_true) * np.log(1.0 - y_pred_proba)))
    def _calc_gradient(self):
        # the cross-entropy gradient uses the predicted probabilities,
        # not the thresholded 0/1 labels
        y_pred = self.predict_proba()
        d_w = (y_pred - self.y).dot(self.x) / len(self.y)
        d_b = np.mean(y_pred - self.y)
        return d_w, d_b
def _update_step(self):
d_w, d_b = self._calc_gradient()
self.w = self.w - self.lr * d_w
self.b = self.b - self.lr * d_b
return self.w, self.b
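# Usage sketch (illustrative, not part of the original module): fit the
# classifier on a linearly separable toy problem.
if __name__ == '__main__':
    np.random.seed(0)
    x = np.random.randn(200, 2)
    y = (x[:, 0] + x[:, 1] > 0).astype(int)
    clf = LogisticRegression(learning_rate=0.5, max_iter=500, seed=1)
    clf.fit(x, y)
    print('train accuracy:', clf.score())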
|
[
"969910828@qq.com"
] |
969910828@qq.com
|
aec9f81b651f9a846ebea9f50caecb3593c3d85f
|
b30013883f4086d0c45def2e25b78ddb1d982656
|
/to_do_backend/to_do/views.py
|
7b2b22212ad5c21454b2782600a5806569832649
|
[] |
no_license
|
KliskaB/to-do-back
|
1f3d79e3de75d1bc15ec43b8253e50d79a5ef6e9
|
19db44e27cdc59ddee8db9eb7922a112b7d3053f
|
refs/heads/master
| 2022-12-31T00:12:56.401506
| 2020-10-12T15:16:02
| 2020-10-12T15:16:02
| 300,601,870
| 0
| 0
| null | 2020-10-12T15:16:04
| 2020-10-02T12:04:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,808
|
py
|
from django.shortcuts import render
from rest_framework import viewsets
from rest_framework import permissions
from rest_framework.mixins import ListModelMixin, CreateModelMixin, RetrieveModelMixin, UpdateModelMixin, DestroyModelMixin
from to_do_backend.to_do.serializers import UserSerializer, UserDetailSerializer
from django.contrib.auth.models import User
from rest_framework import permissions
from rest_framework.views import APIView
from rest_framework.generics import CreateAPIView
from to_do_backend.to_do.models import ToDo
from to_do_backend.to_do.serializers import ToDoSerializer
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.renderers import JSONRenderer
# Create your views here.
class CreateUserViewSet(viewsets.GenericViewSet, CreateModelMixin):
queryset = User.objects.all()
serializer_class = UserSerializer
permission_classes = [permissions.AllowAny]
class UserDetailViewSet(APIView):
queryset = User.objects.all()
serializer_class = UserDetailSerializer
permission_classes = [permissions.IsAuthenticated]
def get(self, request):
user = request.user
serializer = UserDetailSerializer(user)
return Response(serializer.data)
# class CreateUserViewSet(viewsets.ModelViewSet, CreateModelMixin):
# queryset = User.objects.all()
# serializer_class = UserSerializer
# http_method_names = ['post']
# permission_classes = [permissions.AllowAny]
class ToDoViewSet(viewsets.ModelViewSet, RetrieveModelMixin):
queryset = ToDo.objects.all()
serializer_class = ToDoSerializer
permission_classes = [permissions.IsAuthenticated]
def get_queryset(self):
user = self.request.user
return super().get_queryset().filter(user=user)
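# Typical wiring for these viewsets (a sketch; the project's urls.py is not shown
# here, so the router setup below is an assumption):
# from rest_framework.routers import DefaultRouter
# router = DefaultRouter()
# router.register(r'users', CreateUserViewSet)
# router.register(r'todos', ToDoViewSet)
# urlpatterns = router.urls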
|
[
"bojana.kliska@gmail.com"
] |
bojana.kliska@gmail.com
|
d222f9eae1ebfcc2f978d12980efc6076d8df10b
|
c9bc27f70a4bca5ce6acf346bfc25b5407502d00
|
/ATIVIDADE G - FÁBIO 2b - CONDICIONAIS/fabio2b_q02.py
|
cb1d0d5204481b047a05930e54ddb636a49ee89c
|
[] |
no_license
|
lucascoelho33/ifpi-ads-algoritmos2020
|
2197bbc84ce9c027b3f1da006728448a846d7ffb
|
1ce20a489adbfe7321817acd98d35f2efc0360ca
|
refs/heads/master
| 2021-03-01T23:03:10.293013
| 2020-10-17T14:35:19
| 2020-10-17T14:35:19
| 245,650,273
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
#2. Read a letter, check whether it is "F" or "M", and print "Feminino" (female), "Masculino" (male), or "Sexo Inválido" (invalid sex) accordingly.
def main():
letra = input()
if letra == 'F' or letra == 'f':
print('Feminino')
elif letra == 'M' or letra == 'm':
print('Masculino')
else:
print('Sexo Inválido')
main()
|
[
"llucascoelho33@gmail.com"
] |
llucascoelho33@gmail.com
|
ad60b7d3b64972e2aaff9c5988e2f6e6f3805096
|
eeedc65ef99590d8316963717d1012cc6c90c9c5
|
/test/functional/wallet-dump.py
|
1f2b622a436b301701c2cc0b04a41ca20dca24e6
|
[
"MIT"
] |
permissive
|
BayerTM/DraftCoinZ
|
e277353042c908373738bce65716c38ab0cbc0ff
|
217db2822a320d278d93dda4d3cd5dc4d01764f2
|
refs/heads/main
| 2023-06-01T00:54:12.511923
| 2021-06-09T21:35:24
| 2021-06-09T21:35:24
| 362,256,925
| 0
| 0
|
MIT
| 2021-04-27T22:23:49
| 2021-04-27T21:33:59
| null |
UTF-8
|
Python
| false
| false
| 5,401
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the dumpwallet RPC."""
import os
import sys
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (assert_equal, assert_raises_rpc_error)
def read_dump(file_name, addrs, hd_master_addr_old):
"""
Read the given dump, count the addrs that match, count change and reserve.
Also check that the old hd_master is inactive
"""
with open(file_name, encoding='utf8') as inputfile:
found_addr = 0
found_addr_chg = 0
found_addr_rsv = 0
hd_master_addr_ret = None
for line in inputfile:
# only read non comment lines
if line[0] != "#" and len(line) > 10:
# split out some data
key_label, comment = line.split("#")
# key = key_label.split(" ")[0]
keytype = key_label.split(" ")[2]
if len(comment) > 1:
addr_keypath = comment.split(" addr=")[1]
addr = addr_keypath.split(" ")[0]
keypath = None
if keytype == "inactivehdmaster=1":
# ensure the old master is still available
assert(hd_master_addr_old == addr)
elif keytype == "hdmaster=1":
# ensure we have generated a new hd master key
assert(hd_master_addr_old != addr)
hd_master_addr_ret = addr
else:
keypath = addr_keypath.rstrip().split("hdkeypath=")[1]
# count key types
for addrObj in addrs:
if addrObj['address'] == addr and addrObj['hdkeypath'] == keypath and keytype == "label=":
found_addr += 1
break
elif keytype == "change=1":
found_addr_chg += 1
break
elif keytype == "reserve=1":
found_addr_rsv += 1
break
return found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_ret
class WalletDumpTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-keypool=90", "-usehd=1"]]
def setup_network(self):
# TODO remove this when usehd=1 becomes the default
# use our own cache and -usehd=1 as extra arg as the default cache is run with -usehd=0
self.options.tmpdir = os.path.join(self.options.tmpdir, 'hd')
self.options.cachedir = os.path.join(self.options.cachedir, 'hd')
self._initialize_chain(extra_args=self.extra_args[0], stderr=sys.stdout)
self.set_cache_mocktime()
# Use 1 minute timeout because the initial getnewaddress RPC can take
# longer than the default 30 seconds due to an expensive
# CWallet::TopUpKeyPool call, and the encryptwallet RPC made later in
# the test often takes even longer.
self.add_nodes(self.num_nodes, self.extra_args, timewait=60, stderr=sys.stdout)
self.start_nodes()
def run_test(self):
tmpdir = self.options.tmpdir
# generate 20 addresses to compare against the dump
test_addr_count = 20
addrs = []
for i in range(0,test_addr_count):
addr = self.nodes[0].getnewaddress()
vaddr= self.nodes[0].validateaddress(addr) #required to get hd keypath
addrs.append(vaddr)
# Should be a no-op:
self.nodes[0].keypoolrefill()
# dump unencrypted wallet
self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.unencrypted.dump")
found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \
read_dump(tmpdir + "/node0/wallet.unencrypted.dump", addrs, None)
assert_equal(found_addr, test_addr_count) # all keys must be in the dump
assert_equal(found_addr_chg, 50) # 50 blocks were mined
assert_equal(found_addr_rsv, 180) # keypool size (external+internal)
#encrypt wallet, restart, unlock and dump
self.nodes[0].node_encrypt_wallet('test')
self.start_node(0)
self.nodes[0].walletpassphrase('test', 30)
# Should be a no-op:
self.nodes[0].keypoolrefill()
self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.encrypted.dump")
found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_enc = \
read_dump(tmpdir + "/node0/wallet.encrypted.dump", addrs, hd_master_addr_unenc)
assert_equal(found_addr, test_addr_count)
# TODO clarify if we want the behavior that is tested below in DFTz (only when HD seed was generated and not user-provided)
# assert_equal(found_addr_chg, 180 + 50) # old reserve keys are marked as change now
assert_equal(found_addr_rsv, 180) # keypool size
# Overwriting should fail
assert_raises_rpc_error(-8, "already exists", self.nodes[0].dumpwallet, tmpdir + "/node0/wallet.unencrypted.dump")
if __name__ == '__main__':
WalletDumpTest().main()
|
[
"james@xmc.com"
] |
james@xmc.com
|
4e40229cac88d3bdc1699557b9dbed9b9c045112
|
345a78f09f15a6679a9071756c8eaf593c5574d8
|
/GUI/2.py
|
8c97774f6e66b7afe489ed5a33535a133fe8b940
|
[] |
no_license
|
BingteamP/Bing_project
|
790e7fff0a7290516f3441ceaa917665c4cdde75
|
727f6b13b62d02123ccc47b431acec262be6617c
|
refs/heads/master
| 2021-01-17T22:23:55.947252
| 2014-04-24T11:37:22
| 2014-04-24T11:37:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,845
|
py
|
import sys, random
from PyQt4 import QtCore, QtGui
class TabContainer(QtGui.QWidget):
def __init__(self):
super(TabContainer, self).__init__()
self.next_item_is_table = False
self.initUI()
def initUI(self):
self.setGeometry( 150, 150, 650, 350)
self.tabwidget = QtGui.QTabWidget(self)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.tabwidget)
self.setLayout(vbox)
self.pages = []
self.add_page()
self.show()
def create_page(self, *contents):
page = QtGui.QWidget()
vbox = QtGui.QVBoxLayout()
for c in contents:
vbox.addWidget(c)
page.setLayout(vbox)
return page
def create_table(self):
rows, columns = random.randint(2,5), random.randint(1,5)
table = QtGui.QTableWidget( rows, columns )
for r in xrange(rows):
for c in xrange(columns):
table.setItem( r, c, QtGui.QTableWidgetItem( str( random.randint(0,10) ) ) )
return table
def create_list(self):
list = QtGui.QListWidget()
columns = random.randint(2,5)
for c in xrange(columns):
QtGui.QListWidgetItem( str( random.randint(0,10) ), list )
return list
def create_new_page_button(self):
btn = QtGui.QPushButton('Create a new page!')
btn.clicked.connect(self.add_page)
return btn
def add_page(self):
if self.next_item_is_table:
self.pages.append( self.create_page( self.create_table(), self.create_new_page_button() ) )
self.next_item_is_table = False
else:
self.pages.append( self.create_page( self.create_list(), self.create_new_page_button() ) )
self.next_item_is_table = True
self.tabwidget.addTab( self.pages[-1] , 'Page %s' % len(self.pages) )
self.tabwidget.setCurrentIndex( len(self.pages)-1 )
app = QtGui.QApplication(sys.argv)
ex = TabContainer()
sys.exit(app.exec_())
|
[
"titantbx1989215@gmail.com"
] |
titantbx1989215@gmail.com
|
a813ff6fd719a3e29e71a87be6ef707c0e85d852
|
785f5a4bfd97ac77559110fb831f18a3822b4e17
|
/01-python_crash_course/01-ejercicios_teoria/chapter_08_functions/printing_functions.py
|
4e617cff0835de7ddd59a36082e1a19a02856dc1
|
[] |
no_license
|
lionelherrerobattista/practica_python
|
c552ae06336eb805172decd0d527b26a17c09cb9
|
44f09bae58748c2c7c2449adc5117591abd2828d
|
refs/heads/master
| 2020-03-28T02:28:09.910563
| 2020-03-14T21:31:31
| 2020-03-14T21:31:31
| 147,572,377
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 650
|
py
|
def print_models(unprinted_designs, completed_models):
"""
Simulate printing each design, until none are left.
Move each design to completed_models after printing.
"""
while unprinted_designs:
current_design = unprinted_designs.pop()
# Simulate creating a 3D print from the design.
print("Printing model: " + current_design)
completed_models.append(current_design)
def show_completed_models(completed_models):
"""Show all models that were printed."""
print("\nThe following models have been printed:")
for completed_model in completed_models:
print(completed_model)
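# Usage sketch (names are illustrative):
# unprinted = ['phone case', 'robot pendant', 'dodecahedron']
# completed = []
# print_models(unprinted, completed)
# show_completed_models(completed)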
|
[
"-"
] |
-
|
19efb2bfb2c6e26d4dd0e9ce150675128eb97e0e
|
44ab74f49657a4244a5f63f6f7bc2a94d522d604
|
/homework05/tests/tests_api/test_wall.py
|
df64f047c780028362f8c5ad437ff8edcedd9fba
|
[] |
no_license
|
a-makhneva/cs102
|
f40a8ce61567502b5d9e23e685d6ad2e423a3da1
|
fc5068903e0ad8f7da4421839d5ffa1d4cdbb34a
|
refs/heads/master
| 2023-06-10T12:52:26.150511
| 2021-06-20T16:45:39
| 2021-06-20T16:45:39
| 294,995,395
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,309
|
py
|
# type: ignore
import time
import unittest
from unittest.mock import patch
from urllib.parse import unquote
import pandas as pd
import responses
from vkapi.wall import get_wall_execute
class GetWallTestCase(unittest.TestCase):
@responses.activate
def test_total_count(self):
expected_items = [
{
"id": 1,
"from_id": 1234,
"owner_id": 1234,
"date": 1234567890,
"text": "some message",
}
]
responses.add(
responses.POST,
"https://api.vk.com/method/execute",
json={
"response": {
"count": 1,
"items": expected_items,
}
},
status=200,
)
wall = get_wall_execute(domain="cs102py", count=1)
self.assertIsInstance(
wall,
pd.DataFrame,
msg="Функция должна возвращать DataFrame, используйте json_normalize",
)
self.assertEqual(
expected_items,
wall.to_dict("records"),
msg="Вы должны сделать один запрос, чтобы узнать общее число записей",
)
resp_body = unquote(responses.calls[0].request.body)
self.assertTrue(
'"count":"1"' in resp_body or '"count":+"1"' in resp_body,
msg="Вы должны сделать один запрос, чтобы узнать общее число записей",
)
@responses.activate
def test_too_many_requests(self):
responses.add(
responses.POST,
"https://api.vk.com/method/execute",
json={
"response": {
"count": 6000,
"items": [],
}
},
status=200,
)
start = time.time()
with patch("vkapi.wall.get_posts_2500") as get_posts_2500:
get_posts_2500.return_value = []
_ = get_wall_execute(domain="cs102py", count=6000)
end = time.time()
self.assertGreaterEqual(end - start, 2.0, msg="Too many requests per second")
|
[
"a.d.makhneva@gmail.com"
] |
a.d.makhneva@gmail.com
|
32ad20a99bffd0ca006bdc4359c4d4069b6d9279
|
7b62646f1ef86004c0dacc7ba251b63740386d7e
|
/django/django_fullstack/login_register/user/migrations/0003_user_birthday.py
|
96d47801bcadc2ebfefea61eb39dfc4072bf2833
|
[] |
no_license
|
Ayman-Yahia/CodingDojo-Python
|
b8eaa72985b0f094100e571485e2af1dae2478b5
|
4b63df0cc3f421fc308f04db8c89626078655d26
|
refs/heads/main
| 2023-05-07T15:48:23.210832
| 2021-06-01T13:26:31
| 2021-06-01T13:26:31
| 365,169,583
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
# Generated by Django 2.2.4 on 2021-05-26 10:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0002_auto_20210526_1120'),
]
operations = [
migrations.AddField(
model_name='user',
name='birthday',
field=models.DateField(null=True),
),
]
|
[
"ayman.ya1997@gmail.com"
] |
ayman.ya1997@gmail.com
|
4cc34fd865acc73b1c03106b77244e6b8d5fd27c
|
e86fd9f61a41731deb9d56e1107db04de41b6789
|
/beebeeto/poc_2014_0137.py
|
d3a03244caae5ff9208f7ba88909bc6aa7ab6963
|
[] |
no_license
|
c0py7hat/POC-EXP
|
f58e0f1df41e1905e5fdc72b019f8125aac48799
|
7ddf2790012efb7fb5bd258ddcd1e1c25f0cf201
|
refs/heads/master
| 2020-04-30T07:05:31.390537
| 2019-03-20T08:38:50
| 2019-03-20T08:38:50
| 176,674,030
| 3
| 2
| null | 2019-03-20T08:09:56
| 2019-03-20T07:00:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,615
|
py
|
#!/usr/bin/env python
# coding=utf-8
"""
Site: http://www.beebeeto.com/
Framework: https://github.com/n0tr00t/Beebeeto-framework
"""
from baseframe import BaseFrame
class MyPoc(BaseFrame):
poc_info = {
# PoC metadata
'poc': {
'id': 'poc-2014-0137',
'name': 'Esotalk topic xss vulnerability POC',
'author': 'evi1m0',
'create_date': '2014-11-05',
},
# protocol metadata
'protocol': {
'name': 'http',
'port': [80],
'layer4_protocol': ['tcp'],
},
# vulnerability metadata
'vul': {
'app_name': 'esotalk',
'vul_version': ['1.0'],
'type': 'Cross Site Request Forgery',
'tag': ['esotalk漏洞', 'xss', 'topic xss vul', 'php'],
'desc': 'esotalk topic xss vul.',
'references': ['http://www.hackersoul.com/post/ff0000-hsdb-0006.html',
],
},
}
@classmethod
def verify(cls, args):
verify_url = args['options']['target']
temp = '''
[url=[img]onmouseover=alert(document.cookie);//://example.com/image.jpg#"aaaaaa[/img]]evi1m0[/url]
'''
print('[*] Copy code: ' + temp)
print('[*] Specific use: ' + str(MyPoc.poc_info['vul']['references']))
args['success'] = True
args['poc_ret']['vul_url'] = 'Generation ok'
return args
exploit = verify
if __name__ == '__main__':
from pprint import pprint
mp = MyPoc()
pprint(mp.run())
|
[
"noreply@github.com"
] |
c0py7hat.noreply@github.com
|
e4b7622846f0bfea863fc33b8dbb4fb97831033b
|
12f441018818dc2dcb1a8a89bccd946d87e0ac9e
|
/pywinrt/bleak_winrt/windows/foundation/__init__.pyi
|
8882078b9847e0b5eb06fb2117544d7c230fcd34
|
[
"MIT"
] |
permissive
|
dlech/bleak-winrt
|
cc7dd76fca9453b7415d65a428e22b2cbfe36209
|
a6c1f3fd073a7b5678304ea6bc08b9b067544320
|
refs/heads/main
| 2022-09-12T00:15:01.497572
| 2022-09-09T22:57:53
| 2022-09-09T22:57:53
| 391,440,675
| 10
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,668
|
pyi
|
# WARNING: Please don't edit this file. It was generated by Python/WinRT v1.0.0-beta.7
import enum
import datetime
import sys
import types
import typing
import bleak_winrt._winrt as _winrt
import bleak_winrt.windows.foundation.collections
class AsyncStatus(enum.IntEnum):
CANCELED = 2
COMPLETED = 1
ERROR = 3
STARTED = 0
class PropertyType(enum.IntEnum):
EMPTY = 0
UINT8 = 1
INT16 = 2
UINT16 = 3
INT32 = 4
UINT32 = 5
INT64 = 6
UINT64 = 7
SINGLE = 8
DOUBLE = 9
CHAR16 = 10
BOOLEAN = 11
STRING = 12
INSPECTABLE = 13
DATE_TIME = 14
TIME_SPAN = 15
GUID = 16
POINT = 17
SIZE = 18
RECT = 19
OTHER_TYPE = 20
UINT8_ARRAY = 1025
INT16_ARRAY = 1026
UINT16_ARRAY = 1027
INT32_ARRAY = 1028
UINT32_ARRAY = 1029
INT64_ARRAY = 1030
UINT64_ARRAY = 1031
SINGLE_ARRAY = 1032
DOUBLE_ARRAY = 1033
CHAR16_ARRAY = 1034
BOOLEAN_ARRAY = 1035
STRING_ARRAY = 1036
INSPECTABLE_ARRAY = 1037
DATE_TIME_ARRAY = 1038
TIME_SPAN_ARRAY = 1039
GUID_ARRAY = 1040
POINT_ARRAY = 1041
SIZE_ARRAY = 1042
RECT_ARRAY = 1043
OTHER_TYPE_ARRAY = 1044
Self = typing.TypeVar('Self')
T = typing.TypeVar('T')
TProgress = typing.TypeVar('TProgress')
TResult = typing.TypeVar('TResult')
TSender = typing.TypeVar('TSender')
class EventRegistrationToken:
value: _winrt.Int64
def __new__(cls: typing.Type[EventRegistrationToken], value: _winrt.Int64) -> EventRegistrationToken: ...
class HResult:
value: _winrt.Int32
def __new__(cls: typing.Type[HResult], value: _winrt.Int32) -> HResult: ...
class Point:
x: _winrt.Single
y: _winrt.Single
def __new__(cls: typing.Type[Point], x: _winrt.Single, y: _winrt.Single) -> Point: ...
class Rect:
x: _winrt.Single
y: _winrt.Single
width: _winrt.Single
height: _winrt.Single
def __new__(cls: typing.Type[Rect], x: _winrt.Single, y: _winrt.Single, width: _winrt.Single, height: _winrt.Single) -> Rect: ...
class Size:
width: _winrt.Single
height: _winrt.Single
def __new__(cls: typing.Type[Size], width: _winrt.Single, height: _winrt.Single) -> Size: ...
class Deferral(_winrt.Object):
def __enter__(self: Self) -> Self: ...
def __exit__(self, *args) -> None: ...
@staticmethod
def _from(obj: _winrt.Object) -> Deferral: ...
def __new__(cls: typing.Type[Deferral], handler: typing.Optional[DeferralCompletedHandler]) -> Deferral:...
def close(self) -> None: ...
def complete(self) -> None: ...
class GuidHelper(_winrt.Object):
empty: _winrt.Guid
@staticmethod
def _from(obj: _winrt.Object) -> GuidHelper: ...
@staticmethod
def create_new_guid() -> _winrt.Guid: ...
@staticmethod
def equals(target: _winrt.Guid, value: _winrt.Guid) -> _winrt.Boolean: ...
class MemoryBuffer(_winrt.Object):
def __enter__(self: Self) -> Self: ...
def __exit__(self, *args) -> None: ...
@staticmethod
def _from(obj: _winrt.Object) -> MemoryBuffer: ...
def __new__(cls: typing.Type[MemoryBuffer], capacity: _winrt.UInt32) -> MemoryBuffer:...
def close(self) -> None: ...
def create_reference(self) -> typing.Optional[IMemoryBufferReference]: ...
class PropertyValue(_winrt.Object):
@staticmethod
def _from(obj: _winrt.Object) -> PropertyValue: ...
@staticmethod
def create_boolean(value: _winrt.Boolean) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_boolean_array(value: typing.Sequence[_winrt.Boolean]) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_char16(value: _winrt.Char16) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_char16_array(value: typing.Sequence[_winrt.Char16]) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_date_time(value: datetime.datetime) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_date_time_array(value: typing.Sequence[datetime.datetime]) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_double(value: _winrt.Double) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_double_array(value: typing.Sequence[_winrt.Double]) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_empty() -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_guid(value: _winrt.Guid) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_guid_array(value: typing.Sequence[_winrt.Guid]) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_inspectable(value: typing.Optional[_winrt.Object]) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_inspectable_array(value: typing.Sequence[_winrt.Object]) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_int16(value: _winrt.Int16) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_int16_array(value: typing.Sequence[_winrt.Int16]) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_int32(value: _winrt.Int32) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_int32_array(value: typing.Sequence[_winrt.Int32]) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_int64(value: _winrt.Int64) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_int64_array(value: typing.Sequence[_winrt.Int64]) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_point(value: Point) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_point_array(value: typing.Sequence[Point]) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_rect(value: Rect) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_rect_array(value: typing.Sequence[Rect]) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_single(value: _winrt.Single) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_single_array(value: typing.Sequence[_winrt.Single]) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_size(value: Size) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_size_array(value: typing.Sequence[Size]) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_string(value: str) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_string_array(value: typing.Sequence[str]) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_time_span(value: datetime.timedelta) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_time_span_array(value: typing.Sequence[datetime.timedelta]) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_uint16(value: _winrt.UInt16) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_uint16_array(value: typing.Sequence[_winrt.UInt16]) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_uint32(value: _winrt.UInt32) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_uint32_array(value: typing.Sequence[_winrt.UInt32]) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_uint64(value: _winrt.UInt64) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_uint64_array(value: typing.Sequence[_winrt.UInt64]) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_uint8(value: _winrt.UInt8) -> typing.Optional[_winrt.Object]: ...
@staticmethod
def create_uint8_array(value: typing.Sequence[_winrt.UInt8]) -> typing.Optional[_winrt.Object]: ...
class Uri(_winrt.Object):
absolute_uri: str
display_uri: str
domain: str
extension: str
fragment: str
host: str
password: str
path: str
port: _winrt.Int32
query: str
query_parsed: typing.Optional[WwwFormUrlDecoder]
raw_uri: str
scheme_name: str
suspicious: _winrt.Boolean
user_name: str
absolute_canonical_uri: str
display_iri: str
def __str__(self) -> str: ...
@staticmethod
def _from(obj: _winrt.Object) -> Uri: ...
@typing.overload
def __new__(cls: typing.Type[Uri], uri: str) -> Uri:...
@typing.overload
def __new__(cls: typing.Type[Uri], base_uri: str, relative_uri: str) -> Uri:...
def combine_uri(self, relative_uri: str) -> typing.Optional[Uri]: ...
def equals(self, p_uri: typing.Optional[Uri]) -> _winrt.Boolean: ...
@staticmethod
def escape_component(to_escape: str) -> str: ...
def to_string(self) -> str: ...
@staticmethod
def unescape_component(to_unescape: str) -> str: ...
class WwwFormUrlDecoder(_winrt.Object, typing.Sequence[IWwwFormUrlDecoderEntry]):
size: _winrt.UInt32
def __len__(self) -> int: ...
@typing.overload
def __getitem__(self, index: int) -> IWwwFormUrlDecoderEntry: ...
@typing.overload
def __getitem__(self, index: slice) -> typing.Sequence[IWwwFormUrlDecoderEntry]: ...
@staticmethod
def _from(obj: _winrt.Object) -> WwwFormUrlDecoder: ...
def __new__(cls: typing.Type[WwwFormUrlDecoder], query: str) -> WwwFormUrlDecoder:...
def first(self) -> typing.Optional[bleak_winrt.windows.foundation.collections.IIterator[IWwwFormUrlDecoderEntry]]: ...
def get_at(self, index: _winrt.UInt32) -> typing.Optional[IWwwFormUrlDecoderEntry]: ...
def get_first_value_by_name(self, name: str) -> str: ...
def get_many(self, start_index: _winrt.UInt32, items_size: _winrt.UInt32) -> typing.Tuple[_winrt.UInt32, typing.List[IWwwFormUrlDecoderEntry]]: ...
def index_of(self, value: typing.Optional[IWwwFormUrlDecoderEntry]) -> typing.Tuple[_winrt.Boolean, _winrt.UInt32]: ...
class WwwFormUrlDecoderEntry(_winrt.Object):
name: str
value: str
@staticmethod
def _from(obj: _winrt.Object) -> WwwFormUrlDecoderEntry: ...
class IAsyncAction(_winrt.Object):
completed: typing.Optional[AsyncActionCompletedHandler]
error_code: HResult
id: _winrt.UInt32
status: AsyncStatus
def __await__(self) -> typing.Generator[typing.Any, None, None]: ...
@staticmethod
def _from(obj: _winrt.Object) -> IAsyncAction: ...
def cancel(self) -> None: ...
def close(self) -> None: ...
def get_results(self) -> None: ...
class IAsyncActionWithProgress(_winrt.Object, typing.Generic[TProgress]):
progress: typing.Optional[AsyncActionProgressHandler[TProgress]]
completed: typing.Optional[AsyncActionWithProgressCompletedHandler[TProgress]]
error_code: HResult
id: _winrt.UInt32
status: AsyncStatus
if sys.version_info >= (3, 9):
def __class_getitem__(cls, key: typing.Any) -> types.GenericAlias: ...
def __await__(self) -> typing.Generator[typing.Any, None, None]: ...
def cancel(self) -> None: ...
def close(self) -> None: ...
def get_results(self) -> None: ...
class IAsyncInfo(_winrt.Object):
error_code: HResult
id: _winrt.UInt32
status: AsyncStatus
@staticmethod
def _from(obj: _winrt.Object) -> IAsyncInfo: ...
def cancel(self) -> None: ...
def close(self) -> None: ...
class IAsyncOperationWithProgress(_winrt.Object, typing.Generic[TResult, TProgress]):
progress: typing.Optional[AsyncOperationProgressHandler[TResult, TProgress]]
completed: typing.Optional[AsyncOperationWithProgressCompletedHandler[TResult, TProgress]]
error_code: HResult
id: _winrt.UInt32
status: AsyncStatus
if sys.version_info >= (3, 9):
def __class_getitem__(cls, key: typing.Any) -> types.GenericAlias: ...
def __await__(self) -> typing.Generator[typing.Any, None, TResult]: ...
def cancel(self) -> None: ...
def close(self) -> None: ...
def get_results(self) -> typing.Optional[TResult]: ...
class IAsyncOperation(_winrt.Object, typing.Generic[TResult]):
completed: typing.Optional[AsyncOperationCompletedHandler[TResult]]
error_code: HResult
id: _winrt.UInt32
status: AsyncStatus
if sys.version_info >= (3, 9):
def __class_getitem__(cls, key: typing.Any) -> types.GenericAlias: ...
def __await__(self) -> typing.Generator[typing.Any, None, TResult]: ...
def cancel(self) -> None: ...
def close(self) -> None: ...
def get_results(self) -> typing.Optional[TResult]: ...
class IClosable(_winrt.Object):
def __enter__(self: Self) -> Self: ...
def __exit__(self, *args) -> None: ...
@staticmethod
def _from(obj: _winrt.Object) -> IClosable: ...
def close(self) -> None: ...
class IGetActivationFactory(_winrt.Object):
@staticmethod
def _from(obj: _winrt.Object) -> IGetActivationFactory: ...
def get_activation_factory(self, activatable_class_id: str) -> typing.Optional[_winrt.Object]: ...
class IMemoryBuffer(_winrt.Object):
def __enter__(self: Self) -> Self: ...
def __exit__(self, *args) -> None: ...
@staticmethod
def _from(obj: _winrt.Object) -> IMemoryBuffer: ...
def close(self) -> None: ...
def create_reference(self) -> typing.Optional[IMemoryBufferReference]: ...
class IMemoryBufferReference(_winrt.Object):
capacity: _winrt.UInt32
def __enter__(self: Self) -> Self: ...
def __exit__(self, *args) -> None: ...
def __bytes__(self) -> bytes: ...
@staticmethod
def _from(obj: _winrt.Object) -> IMemoryBufferReference: ...
def close(self) -> None: ...
def add_closed(self, handler: TypedEventHandler[IMemoryBufferReference, _winrt.Object]) -> EventRegistrationToken: ...
def remove_closed(self, cookie: EventRegistrationToken) -> None: ...
class IPropertyValue(_winrt.Object):
is_numeric_scalar: _winrt.Boolean
type: PropertyType
@staticmethod
def _from(obj: _winrt.Object) -> IPropertyValue: ...
def get_boolean(self) -> _winrt.Boolean: ...
def get_boolean_array(self) -> typing.List[_winrt.Boolean]: ...
def get_char16(self) -> _winrt.Char16: ...
def get_char16_array(self) -> typing.List[_winrt.Char16]: ...
def get_date_time(self) -> datetime.datetime: ...
def get_date_time_array(self) -> typing.List[datetime.datetime]: ...
def get_double(self) -> _winrt.Double: ...
def get_double_array(self) -> typing.List[_winrt.Double]: ...
def get_guid(self) -> _winrt.Guid: ...
def get_guid_array(self) -> typing.List[_winrt.Guid]: ...
def get_inspectable_array(self) -> typing.List[_winrt.Object]: ...
def get_int16(self) -> _winrt.Int16: ...
def get_int16_array(self) -> typing.List[_winrt.Int16]: ...
def get_int32(self) -> _winrt.Int32: ...
def get_int32_array(self) -> typing.List[_winrt.Int32]: ...
def get_int64(self) -> _winrt.Int64: ...
def get_int64_array(self) -> typing.List[_winrt.Int64]: ...
def get_point(self) -> Point: ...
def get_point_array(self) -> typing.List[Point]: ...
def get_rect(self) -> Rect: ...
def get_rect_array(self) -> typing.List[Rect]: ...
def get_single(self) -> _winrt.Single: ...
def get_single_array(self) -> typing.List[_winrt.Single]: ...
def get_size(self) -> Size: ...
def get_size_array(self) -> typing.List[Size]: ...
def get_string(self) -> str: ...
def get_string_array(self) -> typing.List[str]: ...
def get_time_span(self) -> datetime.timedelta: ...
def get_time_span_array(self) -> typing.List[datetime.timedelta]: ...
def get_uint16(self) -> _winrt.UInt16: ...
def get_uint16_array(self) -> typing.List[_winrt.UInt16]: ...
def get_uint32(self) -> _winrt.UInt32: ...
def get_uint32_array(self) -> typing.List[_winrt.UInt32]: ...
def get_uint64(self) -> _winrt.UInt64: ...
def get_uint64_array(self) -> typing.List[_winrt.UInt64]: ...
def get_uint8(self) -> _winrt.UInt8: ...
def get_uint8_array(self) -> typing.List[_winrt.UInt8]: ...
class IReferenceArray(_winrt.Object, typing.Generic[T]):
value: typing.Optional[T]
is_numeric_scalar: _winrt.Boolean
type: PropertyType
if sys.version_info >= (3, 9):
def __class_getitem__(cls, key: typing.Any) -> types.GenericAlias: ...
def get_boolean(self) -> _winrt.Boolean: ...
def get_boolean_array(self) -> typing.List[_winrt.Boolean]: ...
def get_char16(self) -> _winrt.Char16: ...
def get_char16_array(self) -> typing.List[_winrt.Char16]: ...
def get_date_time(self) -> datetime.datetime: ...
def get_date_time_array(self) -> typing.List[datetime.datetime]: ...
def get_double(self) -> _winrt.Double: ...
def get_double_array(self) -> typing.List[_winrt.Double]: ...
def get_guid(self) -> _winrt.Guid: ...
def get_guid_array(self) -> typing.List[_winrt.Guid]: ...
def get_inspectable_array(self) -> typing.List[_winrt.Object]: ...
def get_int16(self) -> _winrt.Int16: ...
def get_int16_array(self) -> typing.List[_winrt.Int16]: ...
def get_int32(self) -> _winrt.Int32: ...
def get_int32_array(self) -> typing.List[_winrt.Int32]: ...
def get_int64(self) -> _winrt.Int64: ...
def get_int64_array(self) -> typing.List[_winrt.Int64]: ...
def get_point(self) -> Point: ...
def get_point_array(self) -> typing.List[Point]: ...
def get_rect(self) -> Rect: ...
def get_rect_array(self) -> typing.List[Rect]: ...
def get_single(self) -> _winrt.Single: ...
def get_single_array(self) -> typing.List[_winrt.Single]: ...
def get_size(self) -> Size: ...
def get_size_array(self) -> typing.List[Size]: ...
def get_string(self) -> str: ...
def get_string_array(self) -> typing.List[str]: ...
def get_time_span(self) -> datetime.timedelta: ...
def get_time_span_array(self) -> typing.List[datetime.timedelta]: ...
def get_uint16(self) -> _winrt.UInt16: ...
def get_uint16_array(self) -> typing.List[_winrt.UInt16]: ...
def get_uint32(self) -> _winrt.UInt32: ...
def get_uint32_array(self) -> typing.List[_winrt.UInt32]: ...
def get_uint64(self) -> _winrt.UInt64: ...
def get_uint64_array(self) -> typing.List[_winrt.UInt64]: ...
def get_uint8(self) -> _winrt.UInt8: ...
def get_uint8_array(self) -> typing.List[_winrt.UInt8]: ...
class IReference(_winrt.Object, typing.Generic[T]):
value: typing.Optional[T]
is_numeric_scalar: _winrt.Boolean
type: PropertyType
if sys.version_info >= (3, 9):
def __class_getitem__(cls, key: typing.Any) -> types.GenericAlias: ...
def get_boolean(self) -> _winrt.Boolean: ...
def get_boolean_array(self) -> typing.List[_winrt.Boolean]: ...
def get_char16(self) -> _winrt.Char16: ...
def get_char16_array(self) -> typing.List[_winrt.Char16]: ...
def get_date_time(self) -> datetime.datetime: ...
def get_date_time_array(self) -> typing.List[datetime.datetime]: ...
def get_double(self) -> _winrt.Double: ...
def get_double_array(self) -> typing.List[_winrt.Double]: ...
def get_guid(self) -> _winrt.Guid: ...
def get_guid_array(self) -> typing.List[_winrt.Guid]: ...
def get_inspectable_array(self) -> typing.List[_winrt.Object]: ...
def get_int16(self) -> _winrt.Int16: ...
def get_int16_array(self) -> typing.List[_winrt.Int16]: ...
def get_int32(self) -> _winrt.Int32: ...
def get_int32_array(self) -> typing.List[_winrt.Int32]: ...
def get_int64(self) -> _winrt.Int64: ...
def get_int64_array(self) -> typing.List[_winrt.Int64]: ...
def get_point(self) -> Point: ...
def get_point_array(self) -> typing.List[Point]: ...
def get_rect(self) -> Rect: ...
def get_rect_array(self) -> typing.List[Rect]: ...
def get_single(self) -> _winrt.Single: ...
def get_single_array(self) -> typing.List[_winrt.Single]: ...
def get_size(self) -> Size: ...
def get_size_array(self) -> typing.List[Size]: ...
def get_string(self) -> str: ...
def get_string_array(self) -> typing.List[str]: ...
def get_time_span(self) -> datetime.timedelta: ...
def get_time_span_array(self) -> typing.List[datetime.timedelta]: ...
def get_uint16(self) -> _winrt.UInt16: ...
def get_uint16_array(self) -> typing.List[_winrt.UInt16]: ...
def get_uint32(self) -> _winrt.UInt32: ...
def get_uint32_array(self) -> typing.List[_winrt.UInt32]: ...
def get_uint64(self) -> _winrt.UInt64: ...
def get_uint64_array(self) -> typing.List[_winrt.UInt64]: ...
def get_uint8(self) -> _winrt.UInt8: ...
def get_uint8_array(self) -> typing.List[_winrt.UInt8]: ...
class IStringable(_winrt.Object):
def __str__(self) -> str: ...
@staticmethod
def _from(obj: _winrt.Object) -> IStringable: ...
def to_string(self) -> str: ...
class IWwwFormUrlDecoderEntry(_winrt.Object):
name: str
value: str
@staticmethod
def _from(obj: _winrt.Object) -> IWwwFormUrlDecoderEntry: ...
AsyncActionCompletedHandler = typing.Callable[[typing.Optional[IAsyncAction], AsyncStatus], None]
AsyncActionProgressHandler = typing.Callable[[IAsyncActionWithProgress[TProgress], typing.Optional[TProgress]], None]
AsyncActionWithProgressCompletedHandler = typing.Callable[[IAsyncActionWithProgress[TProgress], AsyncStatus], None]
AsyncOperationCompletedHandler = typing.Callable[[IAsyncOperation[TResult], AsyncStatus], None]
AsyncOperationProgressHandler = typing.Callable[[IAsyncOperationWithProgress[TResult, TProgress], typing.Optional[TProgress]], None]
AsyncOperationWithProgressCompletedHandler = typing.Callable[[IAsyncOperationWithProgress[TResult, TProgress], AsyncStatus], None]
DeferralCompletedHandler = typing.Callable[[], None]
EventHandler = typing.Callable[[typing.Optional[_winrt.Object], typing.Optional[T]], None]
TypedEventHandler = typing.Callable[[typing.Optional[TSender], typing.Optional[TResult]], None]
|
[
"david@lechnology.com"
] |
david@lechnology.com
|
21f9208488ecb8a3338940b7032bf4115ecc4f96
|
773a497d2b697ce9526f85eaa08730b8610ead7c
|
/store/views.py
|
ff23bb730e6afe352a66a94dd277d37d0c194bd7
|
[] |
no_license
|
karimkohel/TheLiquidBay
|
4493050be4729c9b671f1f102b42a2bb296b1298
|
2ccf917c55b35486762c57e9fad94eea04560ff5
|
refs/heads/master
| 2020-06-19T13:08:28.938367
| 2020-05-03T14:34:06
| 2020-05-03T14:34:06
| 196,719,035
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
from django.shortcuts import render
from .models import Liquid
# Create your views here.
def store_front(request):
context = {
'title':'Store',
'store': True,
'liquids': Liquid.objects.all()
}
return render(request,'store/front.html',context)
|
[
"kareemkohel@gmail.com"
] |
kareemkohel@gmail.com
|
ad97f8f44b0d1b59ed70aa09d13cfd66c83619eb
|
d5f2e696ae689a3d08a7344b805ae9ef7d3c9a24
|
/app/api/auth.py
|
be27e2b1c731a1ce22e018065acb765c255b7fda
|
[] |
no_license
|
prasannaboga/flaskapp05
|
02bd5eb7feac8a0a05bc7c1d2e0fec5548ad55fb
|
ea70fc384e45560b3383c1e3d05d5adfda9aa5ac
|
refs/heads/master
| 2020-03-23T05:20:13.746196
| 2018-08-02T05:54:52
| 2018-08-02T05:54:52
| 141,137,770
| 0
| 0
| null | 2018-07-24T06:44:31
| 2018-07-16T12:48:13
|
Python
|
UTF-8
|
Python
| false
| false
| 677
|
py
|
from flask import g, jsonify, make_response
from flask_httpauth import HTTPBasicAuth
from mongoengine import DoesNotExist
from app.models.user import User
auth = HTTPBasicAuth()
@auth.verify_password
def verify_password(email_or_token, password):
user = User.verify_auth_token(email_or_token)
if not user:
try:
user = User.objects.get(email=email_or_token)
if not user.verify_password(password):
return False
except DoesNotExist:
return False
g.user = user
return True
@auth.error_handler
def unauthorized():
return make_response(jsonify({'errors': 'Unauthorized access'}), 401)
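# Usage sketch (illustrative; the actual routes live in other modules of the app):
# @app.route('/api/me')
# @auth.login_required
# def me():
#     return jsonify({'email': g.user.email})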
|
[
"prasannaboga@gmail.com"
] |
prasannaboga@gmail.com
|
50bb7529f787f66817aa8d61e51cc3bc2dfd0161
|
920b1fa1d9ccfaf9902df878a42a1482f41fb846
|
/src/sudoku/game/exceptions/InvalidCmdParameterException.py
|
53f9e1299eecef67eb162062c10d039462aa77ed
|
[] |
no_license
|
pysudoku/sudoku
|
97fe616e18e538093217a42d04ee8650a94c78e5
|
27cc2f7cb52ea787191095c2e581729c22bba62a
|
refs/heads/master
| 2021-01-19T07:56:51.777861
| 2013-07-26T18:11:28
| 2013-07-26T18:11:28
| 11,187,872
| 1
| 0
| null | 2013-07-25T16:24:28
| 2013-07-04T22:43:06
|
Python
|
UTF-8
|
Python
| false
| false
| 246
|
py
|
'''
Created on Jul 16, 2013
@author: Jimena Terceros
'''
class InvalidCmdParametersException(Exception):
'''
classdocs
'''
def __init__(self, message):
'''
Constructor
'''
self.message = message
|
[
"jimena.terceros@jalasoft.com"
] |
jimena.terceros@jalasoft.com
|
dda5f468da7ee6e3b8d491722caa2bc4ee97e3c1
|
9d1a8a273ae12a581c849536914ca85de299c0d9
|
/project source/main/models.py
|
0d1954c5bd7c7b11475ffa7d8f98cdd3d8693865
|
[] |
no_license
|
kartiktyagi4282/Plant-Disease-Prediction
|
bc015a833dd47af71b0d47e9b17aa6cb2f06a11b
|
c30ec971f41ccebbc590c45490e1ce740c143d65
|
refs/heads/master
| 2022-12-09T21:28:03.204182
| 2020-10-03T12:52:21
| 2020-10-03T12:52:21
| 189,465,061
| 2
| 3
| null | 2022-12-08T05:32:50
| 2019-05-30T18:42:57
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,470
|
py
|
from django.db import models
import os
import re
from datetime import datetime
def upload_image_directory_path(instance, filename):
filename, file_extension = os.path.splitext(filename)
return 'plant/'+re.sub('[-:. ]','',str(datetime.today()))+file_extension
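# e.g. uploading "leaf.jpg" at 2020-10-03 12:52:21.123456 (illustrative timestamp)
# yields 'plant/20201003125221123456.jpg'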
class Plant(models.Model):
name = models.CharField(max_length=100, unique=True)
description = models.TextField(null = True, blank = True)
createdAt = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.name
class UploadFile(models.Model):
name = models.CharField(max_length=30, null = True, blank = True)
image = models.ImageField(upload_to = upload_image_directory_path)
edited = models.ImageField(upload_to = upload_image_directory_path, null = True, blank = True)
segmented = models.ImageField(upload_to = upload_image_directory_path, null = True, blank = True)
def __str__(self):
return self.name
class Disease(models.Model):
name = models.CharField(max_length=100)
plant = models.ForeignKey('Plant', on_delete=models.CASCADE , related_name="diseases")
symptoms = models.TextField(null = True, blank = True)
cause = models.TextField(null = True, blank = True)
comments = models.TextField(null = True, blank = True)
management = models.TextField(null = True, blank = True)
image = models.CharField(max_length = 100, null = True, blank = True)
def __str__(self):
return self.name
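# Usage note: related_name="diseases" makes reverse lookups such as
# some_plant.diseases.all() available on Plant instances.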
|
[
"tyagikartik4282@gmail.com"
] |
tyagikartik4282@gmail.com
|
d4006dfa73357c64b9566faf1dbbe03faf4a46d4
|
896d2fc1e9880f5d90fc65b28bcf537097a577d2
|
/database/agents.py
|
01865c785b8d6c09da207e12da26fdbc294c5bfc
|
[] |
no_license
|
rogerio-stubs/manager_data
|
66265c77b48c89e074642258adb755e2fd0a7150
|
7f46edd696ac270587f00c74a1a5acc4877e3271
|
refs/heads/master
| 2023-02-08T11:24:57.211042
| 2021-01-02T03:27:22
| 2021-01-02T03:27:22
| 291,856,920
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 315
|
py
|
from pymongo import MongoClient
from database.connection import agents
def login(user, password):
"""Return the matching agent's _id, or "Acesso negado" when no credentials match."""
doc = agents.find_one({"user": user, "password": password}, {"atributo": 0})
if doc is not None:
return doc.get("_id")
return "Acesso negado"
|
[
"rostubs17@gmail.com"
] |
rostubs17@gmail.com
|
3f5ca45997d31a884f9b70cd4d755a2e1db9a3af
|
05d12a410a1cd0ee477e5f302adc916973f551e2
|
/config/set_apn.py
|
56cd97666508cc70bd5978d44e4d9d8bfde10d9f
|
[] |
no_license
|
KWH-DAS-TEAM/Datalogger
|
30b78aaaa487fd34fceeeb032803a51e216c66d4
|
68f97ec5e561fb1b18bf3e84c4b9267d0efcfbec
|
refs/heads/master
| 2022-11-10T04:25:33.117387
| 2020-07-02T02:08:18
| 2020-07-02T02:08:18
| 275,955,874
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,376
|
py
|
#!/usr/bin/env python3
import sys
sys.path.append('/kwh/lib')
import KWH_MySQL
try:
APN = sys.argv[1]
except:
print("Usage: setapn <APN> <optional Username> <optional Password>")
exit(1)
try:
USER = sys.argv[2]
except:
USER = " "
try:
PASS = sys.argv[3]
except:
PASS = " "
DB = KWH_MySQL.KWH_MySQL()
sql = "INSERT INTO config VALUES (\"APN\",\""+APN+"\",now(),\"\",1);"
# Returns 1 on failure
result = DB.INSERT(sql)[0]
# INSERT will fail for duplicate entry if the config key already exists
# due to our primary key (key, active). This lets us force only one key
# having active = 1
# If insert fails, the following logic keeps a historical record of the
# previous config, and then lets you update with the new value
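# For illustration only (the real DDL is not in this file): a table along these lines
#   CREATE TABLE config (`key` VARCHAR(32), value VARCHAR(255),
#                        time_changed DATETIME, note VARCHAR(255), active INT,
#                        PRIMARY KEY (`key`, active));
# would reject a second row with (key='APN', active=1), triggering the update path below.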
if result == 1:
select_sql = "SELECT max(active) FROM config WHERE `key` = 'APN';"
new_active = DB.SELECT(select_sql)[0][0] + 1
update_sql = "UPDATE config SET time_changed = now(), active = "
update_sql += str(new_active)+" WHERE `key` = 'APN' AND active = 1;"
result = DB.INSERT(update_sql)
if result == 1:
print("unknown error")
else:
DB.INSERT(sql)
sakis = open("/etc/sakis3g.conf", "w+")
sakis.write("OTHER=CUSTOM_TTY\nCUSTOM_TTY=\"/dev/ttyAMA0\"\nBAUD=115200\nAPN=\"" +
APN+"\"\nAPN_USER=\""+USER+"\"\nAPN_PASS=\""+PASS+"\"")
print("config change complete")
|
[
"noreply@github.com"
] |
KWH-DAS-TEAM.noreply@github.com
|
2bf00bad4f2dd587acbfef22d01b90855f891b2c
|
efe8b6737cf09b11e48e42478fbbe682a0e3060e
|
/module/data/src/cms/__init__.py
|
2725921cb0015ba6727d814070758e53b17129de
|
[] |
no_license
|
tiit-learn/comment_publishing
|
1fa27c05cb900b67b5758c8f15e2422ada8de53d
|
105c672c500464ac0b8186544841271ff654ba35
|
refs/heads/main
| 2023-06-02T06:45:14.646958
| 2021-06-21T13:15:57
| 2021-06-21T13:15:57
| 378,930,550
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 30
|
py
|
from . import bitrix, wp, dle
|
[
"dev@project"
] |
dev@project
|
cfd15b4614141f1614ac10e530b5396a04be5278
|
6898f222a9b3b8a1f1e60d3e465d6130c4112d35
|
/TestPlotterError.py
|
eae204d035ab3e9502bbb8b7fd92f1974345f599
|
[] |
no_license
|
laurajauch/phase3
|
98ac86701360ef537d0516d1d984e4bbaeb29fd3
|
2b0f6e35f53c109707a9def425c64789de9d52db
|
refs/heads/master
| 2016-09-11T13:12:32.265589
| 2015-04-25T21:53:52
| 2015-04-25T21:53:52
| 33,157,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,654
|
py
|
from PyCamellia import *
from Plotter import *
import unittest
spaceDim = 2
useConformingTraces = True
mu = 1.0
dims = [1.0,1.0]
numElements = [2,2]
x0 = [0.,0.]
polyOrder = 3
delta_k = 1
topBoundary = SpatialFilter.matchingY(1.0)
notTopBoundary = SpatialFilter.negatedFilter(topBoundary)
x = Function.xn(1)
rampWidth = 1./64
H_left = Function.heaviside(rampWidth)
H_right = Function.heaviside(1.0-rampWidth);
ramp = (1-H_right) * H_left + (1./rampWidth) * (1-H_left) * x + (1./rampWidth) * H_right * (1-x)
zero = Function.constant(0)
topVelocity = Function.vectorize(ramp,zero)
refinementNumber = 0
refCellVertexPoints = [[-1.,-1.],[1.,-1.],[1.,1.],[-1.,1.]]
class TestPlotterError(unittest.TestCase):
""" Test Plot"""
def test_plot_energyError(self):
print "Plot_energyError"
form = StokesVGPFormulation(spaceDim,useConformingTraces,mu)
meshTopo = MeshFactory.rectilinearMeshTopology(dims,numElements,x0)
form.initializeSolution(meshTopo,polyOrder,delta_k)
form.addZeroMeanPressureCondition()
form.addWallCondition(notTopBoundary)
form.addInflowCondition(topBoundary,topVelocity)
form.solve()
plot(form, "Error")
form = None
""" Test Plot with p auto refine"""
def test_plotPAutoRefine_energyError(self):
print "pAutoRefine_energyError"
form = StokesVGPFormulation(spaceDim,useConformingTraces,mu)
meshTopo = MeshFactory.rectilinearMeshTopology(dims,numElements,x0)
form.initializeSolution(meshTopo,polyOrder,delta_k)
form.addZeroMeanPressureCondition()
form.addWallCondition(notTopBoundary)
form.addInflowCondition(topBoundary,topVelocity)
form.solve()
form.pRefine()
plot(form, "Error")
form = None
""" Test Plot with h auto refine"""
def test_plothAutoRefine_energyError(self):
print "hAutoRefine_energyError"
form = StokesVGPFormulation(spaceDim,useConformingTraces,mu)
meshTopo = MeshFactory.rectilinearMeshTopology(dims,numElements,x0)
form.initializeSolution(meshTopo,polyOrder,delta_k)
form.addZeroMeanPressureCondition()
form.addWallCondition(notTopBoundary)
form.addInflowCondition(topBoundary,topVelocity)
form.solve()
form.hRefine()
plot(form, "Error")
form = None
""" Test Plot with p manual refine"""
def test_plotpManualRefine_energyError(self):
print "pManualRefine_energyError"
form = StokesVGPFormulation(spaceDim,useConformingTraces,mu)
meshTopo = MeshFactory.rectilinearMeshTopology(dims,numElements,x0)
form.initializeSolution(meshTopo,polyOrder,delta_k)
form.addZeroMeanPressureCondition()
form.addWallCondition(notTopBoundary)
form.addInflowCondition(topBoundary,topVelocity)
form.solve()
mesh = form.solution().mesh()
mesh.pRefine([3,1])
plot(form, "Error")
form = None
""" Test Plot with h manual refine"""
def test_plothManualRefine_energyError(self):
print "hManualRefine_energyError"
form = StokesVGPFormulation(spaceDim,useConformingTraces,mu)
meshTopo = MeshFactory.rectilinearMeshTopology(dims,numElements,x0)
form.initializeSolution(meshTopo,polyOrder,delta_k)
form.addZeroMeanPressureCondition()
form.addWallCondition(notTopBoundary)
form.addInflowCondition(topBoundary,topVelocity)
form.solve()
mesh = form.solution().mesh()
mesh.hRefine([0,1])
plot(form, "Error")
form = None
|
[
"jeffrey.burge@cslab03.wheaton.edu"
] |
jeffrey.burge@cslab03.wheaton.edu
|
8eae5fb2dcd692f49a429547f82b7f02961ab9bd
|
d93d6218b94673b4669914c5fe1a7d18cf91a32b
|
/Bootlegging Emulator/District.py
|
eb7aeff8bdcac0bc74dfe8d5912524c080af4d38
|
[] |
no_license
|
aaronpgurrola/The-Crimson-Plateau
|
537e5d0c0c806f9abf4f1a47169a5b28e9a44af5
|
b8447ffed81d30f578b017384f3896a2e466e06e
|
refs/heads/main
| 2023-07-05T19:45:14.484328
| 2021-05-23T06:07:25
| 2021-05-23T06:07:25
| 369,736,719
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,744
|
py
|
import Front as f
import DrugDemand as dd
import GlobalUtils as gu
import DrugRun as dr
import math
# Constants for different drug price constants
GALLON_PRICE = 5.0
MJ_GRAM_PRICE = 10.0
COKE_GRAM_PRICE = 20.0
DRUG_PRICE_DICT = {gu.Drugs.COKE: COKE_GRAM_PRICE, gu.Drugs.MJ: MJ_GRAM_PRICE, gu.Drugs.SHINE: GALLON_PRICE}
# District is currently the top-tier component
# TODO: Build Map class to hold multiple districts
# TODO: Build methods to interact with other districts
class District():
def __init__(self):
# Each district will have independent drug demand
self.drugDemand = dd.DrugDemand()
# TODO: When Map class is built, move to Map class
self.drugRun = dr.DrugRun()
# List of front objects
self.fronts = []
# Determines whether or not every front is controlled by user
# TODO: Create methods to determine if District is monopolized
self.monopolized = False
# Builds a front using the Front class
# Appends it to a list of fronts in the District
def createFront(self, name: str, base: float):
front = f.Front(name, base)
self.fronts.append(front)
# Utility to print fronts and all their individual variables
def printFronts(self):
frontString = ''
for front in self.fronts:
frontString+= front.__str__() + '\n'
print(frontString)
# Sets all fronts as not hit during drug run
# TODO: When Map class is built, move to Map class
def resetFronts(self):
for front in self.fronts:
front.resetHit()
# Starts drug run
# TODO: When Map class is built, move to Map class
def startRun(self, type: int, quantity: int):
self.resetFronts()
self.drugRun.run(type, quantity)
# Ends drug run
# TODO: When Map class is built, move to Map class
def endRun(self):
self.resetFronts()
self.drugRun.end()
# Helper function for hitFront: determines the $ multiplier applied to
# front.base according to the current demand for the given drug type
def demandFunction(self, type: int):
multiplier = (1+(1/math.exp(5))) - math.exp(-5*self.drugDemand.getDrugDemand(type))
return multiplier*DRUG_PRICE_DICT[type]
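# e.g. at demand 0 the multiplier is (1 + e^-5) - 1 = e^-5 ~ 0.0067, and at
# demand 1 it is (1 + e^-5) - e^-5 = 1.0, so payouts scale smoothly from ~0%
# to 100% of the per-unit price in DRUG_PRICE_DICT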
def ownFrontByIndex(self, index: int):
self.fronts[index].own()
# Hits a front and returns dollar amount
# Used for testing
# TODO: Check if front is owned
def hitFrontByIndex(self, index: int):
if self.drugRun.isRunning():
self.fronts[index].hitFront()
quantity = self.drugRun.getQuantity()
type = self.drugRun.getRunType()
base = self.fronts[index].getBase()
self.drugRun.addToHaul(quantity*base*self.demandFunction(type))
|
[
"43392595+aaronpgurrola@users.noreply.github.com"
] |
43392595+aaronpgurrola@users.noreply.github.com
|
9533c38408040b732a09bf57ee7f56e3d9a96ab5
|
0d2af397b900fddad3d532a9f772f70473886cf5
|
/modules/migrations/0001_initial.py
|
e42a5438116521c61bc87b998d83ccc633cf49ae
|
[] |
no_license
|
RobertUJ/Omaha
|
cc779b06e42c08ebadae0b8df4e006ad67d504d1
|
650d5e1e5550bf772f1817e16505c574f361bae0
|
refs/heads/master
| 2016-08-12T13:51:32.262876
| 2016-02-12T00:51:52
| 2016-02-12T00:51:52
| 49,794,851
| 0
| 0
| null | 2016-01-22T00:04:29
| 2016-01-16T23:12:39
|
Python
|
UTF-8
|
Python
| false
| false
| 755
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Module',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(default=b'', max_length=50)),
('description', models.TextField(default=b'', max_length=200, null=True)),
('justification', models.TextField(default=b'', max_length=200, null=True)),
('analysis', models.TextField(default=b'', max_length=200)),
],
),
]
|
[
"mr24valves@icloud.com"
] |
mr24valves@icloud.com
|
522394227026dd1c5f0d9cd691de802a14a98194
|
78ee56b9e3a5ddfaeead5e281d015763d5acc79e
|
/Funciones/testFuncion.py
|
5098206cdeee89e312fc80804040b9c4883c9c7f
|
[] |
no_license
|
Mariocgo/CursoPython
|
6da6e2934d3eeb27e35866c67f67ae7de12e7a0c
|
4ee5b94aa7c3730f3c2b2101019fba1b9cb89926
|
refs/heads/main
| 2023-03-01T22:22:37.820848
| 2021-02-05T23:32:49
| 2021-02-05T23:32:49
| 333,627,046
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 78
|
py
|
def saludo(Nombre):
return Nombre
print(f"Mucho gusto {saludo('Mario')}")
|
[
"e17081448@itmerida.edu.mx"
] |
e17081448@itmerida.edu.mx
|
795fd8eb429f1f73738027709d5acde3dc4663ea
|
affba0c51c517ea52a85b81a6fec89c367dbbfa1
|
/script.py
|
f664a5810dac95937a828a12b4443d7e1a58f4de
|
[] |
no_license
|
pyKuga/lab6turma8cgrupo4
|
f0b175b59c77c67ade398039042fff1b49988a30
|
481ae6d009d6484f7d3a987654ce0fd74bdabc97
|
refs/heads/master
| 2022-11-30T12:21:49.529698
| 2020-07-28T22:57:28
| 2020-07-28T22:57:28
| 282,349,184
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,417
|
py
|
from math import sqrt as rq #imports the square root
import pandas as pd #imports the pandas library to analyze our data
up = pd.read_csv("z1.csv") #imports the measurements taken with the phone facing up
down = pd.read_csv("z2.csv") #imports the measurements taken with the phone facing down
lat1 = pd.read_csv("x1.csv")
lat2 = pd.read_csv("x2.csv")
verlat1 = pd.read_csv("y1.csv")
verlat2 = pd.read_csv("y2.csv")
#bx
print("medias componentes bx")
print(up["Bx"].mean())
print(down["Bx"].mean())
print(lat1["Bx"].mean())
print(lat2["Bx"].mean())
print(verlat1["Bx"].mean())
print(verlat2["Bx"].mean())
#by
print("medias componentes by")
print(up["By"].mean())
print(down["By"].mean())
print(lat1["By"].mean())
print(lat2["By"].mean())
print(verlat1["By"].mean())
print(verlat2["By"].mean())
#bt
print("bt component means")
print(up["BT"].mean())
print(down["BT"].mean())
print(lat1["BT"].mean())
print(lat2["BT"].mean())
print(verlat1["BT"].mean())
print(verlat2["BT"].mean())
#following the report's paper, we have the following:
vertbx = (abs(up["Bx"].mean())+abs(down["Bx"].mean()))/2
lateralbx = (abs(lat1["Bx"].mean()) + abs(lat2["Bx"].mean()))/2
verlatbx= (abs(verlat1["Bx"].mean())+abs(verlat2["Bx"].mean()))/2
mediabx = (vertbx+verlatbx+lateralbx)/3
vertby = (abs(up["By"].mean())+abs(down["By"].mean()))/2
lateralby = (abs(lat1["By"].mean())+abs(lat2["By"].mean()))/2
verlatby = (abs(verlat1["By"].mean())+abs(verlat2["By"].mean()))/2
mediaby = (vertby+verlatby+lateralby)/3
vertbt = (abs(up["BT"].mean())+abs(down["BT"].mean()))/2
lateralbt = (abs(lat1["BT"].mean())+abs(lat2["BT"].mean()))/2
verlatbt = (abs(verlat1["BT"].mean())+abs(verlat2["BT"].mean()))/2
mediabt = (vertbt+verlatbt+lateralbt)/3
print("******************************")
print(format(vertbx, ".2f"))
print(format(vertby, ".2f"))
print(format(vertbt, ".2f"))
print("******************************")
print(format(lateralbx, ".2f"))
print(format(lateralby, ".2f"))
print(format(lateralbt, ".2f"))
print("******************************")
print(format(verlatbx, ".2f"))
print(format(verlatby, ".2f"))
print(format(verlatbt, ".2f"))
print("******************************")
#and if we want the mean horizontal component:
horzmd = rq((mediabx**2) + (mediaby**2))
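# sanity check with illustrative values: Bx = 20 uT and By = 10 uT give
# sqrt(400 + 100) ~ 22.36 uT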
print("a componente horizontal do campo é:", format(horzmd, ".2f"))
print("a componente total do campo é", format(mediabt, ".2f"))
|
[
"noreply@github.com"
] |
pyKuga.noreply@github.com
|
75313085a3420a9b3fa00338b64ddbc0affc1fa8
|
b1b4dc2c530b56a9abbc6b144c3ca3f5a6d11e70
|
/django_/example/chat/apps/chat/serializer.py
|
d288a1d858368b951b9ad30c2792a52237f8b9ea
|
[] |
no_license
|
BorisovDima/_ex
|
d38cff25575bfc6d1906940d40091fb6946cd829
|
6c14d661b8e051f8ec385cb09997da1fa781431a
|
refs/heads/master
| 2020-05-07T18:13:38.304305
| 2019-08-23T10:59:09
| 2019-08-23T10:59:09
| 180,757,813
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,842
|
py
|
from rest_framework.serializers import ModelSerializer, HyperlinkedModelSerializer, HyperlinkedRelatedField, \
HyperlinkedIdentityField, CharField, IntegerField, DateTimeField, ImageField, SerializerMethodField, BooleanField
from .models import Room, Message
from chat.apps.myauth.serializer import UserSerializer
from .signals import create
from django.conf import settings
from rest_framework.reverse import reverse
callback = settings.REST_CALLBACK
class MessageSerializer(HyperlinkedModelSerializer):
author = UserSerializer(read_only=True)
room = HyperlinkedRelatedField(view_name='room-detail', read_only=True)
view_it = SerializerMethodField()
def get_view_it(self, obj):
return reverse('message-viewed', kwargs={'pk': obj.id})
def create(self, validated_data):
self.instance = super().create(validated_data)
create.send(sender=type(self), instance=self.instance, json=self.data, callback=callback['msg_create'])
return self.instance
class Meta:
model = Message
fields = ('text', 'room', 'author', 'id', 'date_created', 'url', 'view_it')
class RoomSerializer(HyperlinkedModelSerializer):
# image = ImageField(source='get_image')
name = CharField(read_only=True)
last_msg = CharField(read_only=True)
count_msgs = IntegerField(source='get_count_msgs', read_only=True)
count_users = IntegerField(source='get_count_users', read_only=True)
unviewed_messages = SerializerMethodField()
messages = SerializerMethodField()
users = HyperlinkedRelatedField(many=True, view_name='user-detail', read_only=True)
author = UserSerializer(read_only=True)
def create(self, validated_data):
author, user = validated_data['author'], validated_data['user']
# self.instance = Room.objects.get_or_create_dialog(author, user)
# create.send(sender=type(self), instance=self.instance, json=self.data, callback=callback['room_create'])
return Room.objects.get_or_create_dialog(author, user)
def to_representation(self, instance):
ret = super().to_representation(instance)
request = self.context['request']
ret['name'] = instance.get_room_name(request)
ret['last_msg'] = MessageSerializer(instance=instance.get_last_msg(), context={'request': request}).data
return ret
def get_unviewed_messages(self, obj):
return obj.get_unviewed_messages(self.context['request']).count()
def get_messages(self, obj):
return reverse('room-messages', kwargs={'pk': obj.id}, request=self.context['request'])
class Meta:
model = Room
fields = ('author', 'unviewed_messages', 'id', 'url',
'count_msgs', 'count_users', 'name', 'last_msg', 'messages', 'users')
|
[
"you@example.com"
] |
you@example.com
|
7836b6868fd207fe33f88cb1bc6942fd74050a95
|
cb6166192835e0580895f743975d2ae866effa00
|
/DjangoTest/djangosite/app/forms.py
|
29d0064b590dbd51a2c0106ada3ab301b04edee1
|
[] |
no_license
|
luanshiyinyang/PythonWeb
|
d25ccf35a3eec6990295b276d721126bcdffc648
|
ba10163e8d71d23d123dee4946fb672fc6367337
|
refs/heads/master
| 2020-04-23T09:39:15.631088
| 2019-03-09T13:01:59
| 2019-03-09T13:01:59
| 171,075,985
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 161
|
py
|
from django.forms import ModelForm
from app.models import Moment
class MomentForm(ModelForm):
class Meta:
model = Moment
fields = '__all__'
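# Hypothetical usage sketch (an assumption, not part of this repo): the standard
# ModelForm round trip; `request` comes from whatever Django view binds the form.
#   form = MomentForm(request.POST)
#   if form.is_valid():
#       form.save()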
|
[
"1695735420@qq.com"
] |
1695735420@qq.com
|
9867df62624bdb431c215e3b4ccdebb73711b7b9
|
3e2946515a739cdb5ce452d0ce10b27b23b4b9d3
|
/7kyu. binary to decimal.py
|
feb7f65cc5540afd427391a261473699edd07b91
|
[] |
no_license
|
fairlyoddparents/Python-Tutorials-My-Answers-and-First-Game
|
628b747d67544eb1679ca4fb44b00d2d949d70c6
|
16820b6efb4a01ef00f73186d9ba30e8d292703b
|
refs/heads/master
| 2022-12-14T13:54:16.948153
| 2020-09-19T06:51:01
| 2020-09-19T06:51:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 15 12:56:07 2018
@author: fairl
"""
def binary_array_to_number(arr):
    decimal = 0
    for num in arr:
        decimal = decimal*2 + int(num)  # shift left one bit, then add the next bit
    return decimal
print(binary_array_to_number([1,1,1,1,0,1,1]))  # -> 123
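# Worked example of the accumulation (Horner's rule in base 2):
#   [1, 0, 1]: 0*2+1 = 1, then 1*2+0 = 2, then 2*2+1 = 5
assert binary_array_to_number([1, 0, 1]) == 5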
|
[
"noreply@github.com"
] |
fairlyoddparents.noreply@github.com
|
c57ac2112c972756e2ff64e05963c875a6747133
|
5d95fb815afb12aaf096f9752d76f68b2555f235
|
/idls/ex1_idl.py
|
10372485767ce384cb643b9450f6a0ebe9418573
|
[] |
no_license
|
ramonjsa/sd-lista1
|
873b2e2171ead356c44e62d62d515eb2e4d0c834
|
d3b3b3d533274bf1e33db6bc8a7a8f6c2280f538
|
refs/heads/master
| 2020-08-03T00:34:36.444980
| 2019-09-30T02:36:32
| 2019-09-30T02:36:32
| 211,565,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,023
|
py
|
# Python stubs generated by omniidl from ex1.idl
# DO NOT EDIT THIS FILE!
import omniORB, _omnipy
from omniORB import CORBA, PortableServer
_0_CORBA = CORBA
_omnipy.checkVersion(4,2, __file__, 1)
try:
property
except NameError:
def property(*args):
return None
#
# Start of module "Lista01"
#
__name__ = "Lista01"
_0_Lista01 = omniORB.openModule("Lista01", r"ex1.idl")
_0_Lista01__POA = omniORB.openModule("Lista01__POA", r"ex1.idl")
# interface Funcionario
_0_Lista01._d_Funcionario = (omniORB.tcInternal.tv_objref, "IDL:Lista01/Funcionario:1.0", "Funcionario")
omniORB.typeMapping["IDL:Lista01/Funcionario:1.0"] = _0_Lista01._d_Funcionario
_0_Lista01.Funcionario = omniORB.newEmptyClass()
class Funcionario :
_NP_RepositoryId = _0_Lista01._d_Funcionario[1]
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_nil = CORBA.Object._nil
_0_Lista01.Funcionario = Funcionario
_0_Lista01._tc_Funcionario = omniORB.tcInternal.createTypeCode(_0_Lista01._d_Funcionario)
omniORB.registerType(Funcionario._NP_RepositoryId, _0_Lista01._d_Funcionario, _0_Lista01._tc_Funcionario)
# Funcionario operations and attributes
Funcionario._d_reajuste = (((omniORB.tcInternal.tv_string,0), (omniORB.tcInternal.tv_string,0), omniORB.tcInternal.tv_float), (omniORB.tcInternal.tv_float, ), None)
Funcionario._d_salario_liquido = (((omniORB.tcInternal.tv_string,0), omniORB.tcInternal.tv_char, omniORB.tcInternal.tv_float, omniORB.tcInternal.tv_long), (omniORB.tcInternal.tv_float, ), None)
Funcionario._d_pode_aposentar = ((omniORB.tcInternal.tv_long, omniORB.tcInternal.tv_long), (omniORB.tcInternal.tv_boolean, ), None)
# Funcionario object reference
class _objref_Funcionario (CORBA.Object):
_NP_RepositoryId = Funcionario._NP_RepositoryId
def __init__(self, obj):
CORBA.Object.__init__(self, obj)
def reajuste(self, *args):
return self._obj.invoke("reajuste", _0_Lista01.Funcionario._d_reajuste, args)
def salario_liquido(self, *args):
return self._obj.invoke("salario_liquido", _0_Lista01.Funcionario._d_salario_liquido, args)
def pode_aposentar(self, *args):
return self._obj.invoke("pode_aposentar", _0_Lista01.Funcionario._d_pode_aposentar, args)
omniORB.registerObjref(Funcionario._NP_RepositoryId, _objref_Funcionario)
_0_Lista01._objref_Funcionario = _objref_Funcionario
del Funcionario, _objref_Funcionario
# Funcionario skeleton
__name__ = "Lista01__POA"
class Funcionario (PortableServer.Servant):
_NP_RepositoryId = _0_Lista01.Funcionario._NP_RepositoryId
_omni_op_d = {"reajuste": _0_Lista01.Funcionario._d_reajuste, "salario_liquido": _0_Lista01.Funcionario._d_salario_liquido, "pode_aposentar": _0_Lista01.Funcionario._d_pode_aposentar}
Funcionario._omni_skeleton = Funcionario
_0_Lista01__POA.Funcionario = Funcionario
omniORB.registerSkeleton(Funcionario._NP_RepositoryId, Funcionario)
del Funcionario
__name__ = "Lista01"
# interface Pessoa
_0_Lista01._d_Pessoa = (omniORB.tcInternal.tv_objref, "IDL:Lista01/Pessoa:1.0", "Pessoa")
omniORB.typeMapping["IDL:Lista01/Pessoa:1.0"] = _0_Lista01._d_Pessoa
_0_Lista01.Pessoa = omniORB.newEmptyClass()
class Pessoa :
_NP_RepositoryId = _0_Lista01._d_Pessoa[1]
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_nil = CORBA.Object._nil
_0_Lista01.Pessoa = Pessoa
_0_Lista01._tc_Pessoa = omniORB.tcInternal.createTypeCode(_0_Lista01._d_Pessoa)
omniORB.registerType(Pessoa._NP_RepositoryId, _0_Lista01._d_Pessoa, _0_Lista01._tc_Pessoa)
# Pessoa operations and attributes
Pessoa._d_atingiu_maior_idade = (((omniORB.tcInternal.tv_string,0), omniORB.tcInternal.tv_long), (omniORB.tcInternal.tv_boolean, ), None)
Pessoa._d_pesso_ideal = (((omniORB.tcInternal.tv_string,0), omniORB.tcInternal.tv_float), (omniORB.tcInternal.tv_float, ), None)
# Pessoa object reference
class _objref_Pessoa (CORBA.Object):
_NP_RepositoryId = Pessoa._NP_RepositoryId
def __init__(self, obj):
CORBA.Object.__init__(self, obj)
def atingiu_maior_idade(self, *args):
return self._obj.invoke("atingiu_maior_idade", _0_Lista01.Pessoa._d_atingiu_maior_idade, args)
def pesso_ideal(self, *args):
return self._obj.invoke("pesso_ideal", _0_Lista01.Pessoa._d_pesso_ideal, args)
omniORB.registerObjref(Pessoa._NP_RepositoryId, _objref_Pessoa)
_0_Lista01._objref_Pessoa = _objref_Pessoa
del Pessoa, _objref_Pessoa
# Pessoa skeleton
__name__ = "Lista01__POA"
class Pessoa (PortableServer.Servant):
_NP_RepositoryId = _0_Lista01.Pessoa._NP_RepositoryId
_omni_op_d = {"atingiu_maior_idade": _0_Lista01.Pessoa._d_atingiu_maior_idade, "pesso_ideal": _0_Lista01.Pessoa._d_pesso_ideal}
Pessoa._omni_skeleton = Pessoa
_0_Lista01__POA.Pessoa = Pessoa
omniORB.registerSkeleton(Pessoa._NP_RepositoryId, Pessoa)
del Pessoa
__name__ = "Lista01"
# interface Aluno
_0_Lista01._d_Aluno = (omniORB.tcInternal.tv_objref, "IDL:Lista01/Aluno:1.0", "Aluno")
omniORB.typeMapping["IDL:Lista01/Aluno:1.0"] = _0_Lista01._d_Aluno
_0_Lista01.Aluno = omniORB.newEmptyClass()
class Aluno :
_NP_RepositoryId = _0_Lista01._d_Aluno[1]
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_nil = CORBA.Object._nil
_0_Lista01.Aluno = Aluno
_0_Lista01._tc_Aluno = omniORB.tcInternal.createTypeCode(_0_Lista01._d_Aluno)
omniORB.registerType(Aluno._NP_RepositoryId, _0_Lista01._d_Aluno, _0_Lista01._tc_Aluno)
# Aluno operations and attributes
Aluno._d_media_n1_n2 = ((omniORB.tcInternal.tv_float, omniORB.tcInternal.tv_float), (omniORB.tcInternal.tv_boolean, ), None)
Aluno._d_media_n_n3 = ((omniORB.tcInternal.tv_float, omniORB.tcInternal.tv_float), (omniORB.tcInternal.tv_boolean, ), None)
# Aluno object reference
class _objref_Aluno (CORBA.Object):
_NP_RepositoryId = Aluno._NP_RepositoryId
def __init__(self, obj):
CORBA.Object.__init__(self, obj)
def media_n1_n2(self, *args):
return self._obj.invoke("media_n1_n2", _0_Lista01.Aluno._d_media_n1_n2, args)
def media_n_n3(self, *args):
return self._obj.invoke("media_n_n3", _0_Lista01.Aluno._d_media_n_n3, args)
omniORB.registerObjref(Aluno._NP_RepositoryId, _objref_Aluno)
_0_Lista01._objref_Aluno = _objref_Aluno
del Aluno, _objref_Aluno
# Aluno skeleton
__name__ = "Lista01__POA"
class Aluno (PortableServer.Servant):
_NP_RepositoryId = _0_Lista01.Aluno._NP_RepositoryId
_omni_op_d = {"media_n1_n2": _0_Lista01.Aluno._d_media_n1_n2, "media_n_n3": _0_Lista01.Aluno._d_media_n_n3}
Aluno._omni_skeleton = Aluno
_0_Lista01__POA.Aluno = Aluno
omniORB.registerSkeleton(Aluno._NP_RepositoryId, Aluno)
del Aluno
__name__ = "Lista01"
# interface Nadador
_0_Lista01._d_Nadador = (omniORB.tcInternal.tv_objref, "IDL:Lista01/Nadador:1.0", "Nadador")
omniORB.typeMapping["IDL:Lista01/Nadador:1.0"] = _0_Lista01._d_Nadador
_0_Lista01.Nadador = omniORB.newEmptyClass()
class Nadador :
_NP_RepositoryId = _0_Lista01._d_Nadador[1]
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_nil = CORBA.Object._nil
_0_Lista01.Nadador = Nadador
_0_Lista01._tc_Nadador = omniORB.tcInternal.createTypeCode(_0_Lista01._d_Nadador)
omniORB.registerType(Nadador._NP_RepositoryId, _0_Lista01._d_Nadador, _0_Lista01._tc_Nadador)
# Nadador operations and attributes
Nadador._d_categoria = ((omniORB.tcInternal.tv_long, ), ((omniORB.tcInternal.tv_string,0), ), None)
# Nadador object reference
class _objref_Nadador (CORBA.Object):
_NP_RepositoryId = Nadador._NP_RepositoryId
def __init__(self, obj):
CORBA.Object.__init__(self, obj)
def categoria(self, *args):
return self._obj.invoke("categoria", _0_Lista01.Nadador._d_categoria, args)
omniORB.registerObjref(Nadador._NP_RepositoryId, _objref_Nadador)
_0_Lista01._objref_Nadador = _objref_Nadador
del Nadador, _objref_Nadador
# Nadador skeleton
__name__ = "Lista01__POA"
class Nadador (PortableServer.Servant):
_NP_RepositoryId = _0_Lista01.Nadador._NP_RepositoryId
_omni_op_d = {"categoria": _0_Lista01.Nadador._d_categoria}
Nadador._omni_skeleton = Nadador
_0_Lista01__POA.Nadador = Nadador
omniORB.registerSkeleton(Nadador._NP_RepositoryId, Nadador)
del Nadador
__name__ = "Lista01"
# interface Cliente
_0_Lista01._d_Cliente = (omniORB.tcInternal.tv_objref, "IDL:Lista01/Cliente:1.0", "Cliente")
omniORB.typeMapping["IDL:Lista01/Cliente:1.0"] = _0_Lista01._d_Cliente
_0_Lista01.Cliente = omniORB.newEmptyClass()
class Cliente :
_NP_RepositoryId = _0_Lista01._d_Cliente[1]
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_nil = CORBA.Object._nil
_0_Lista01.Cliente = Cliente
_0_Lista01._tc_Cliente = omniORB.tcInternal.createTypeCode(_0_Lista01._d_Cliente)
omniORB.registerType(Cliente._NP_RepositoryId, _0_Lista01._d_Cliente, _0_Lista01._tc_Cliente)
# Cliente operations and attributes
Cliente._d_credito = ((omniORB.tcInternal.tv_float, ), (omniORB.tcInternal.tv_float, ), None)
# Cliente object reference
class _objref_Cliente (CORBA.Object):
_NP_RepositoryId = Cliente._NP_RepositoryId
def __init__(self, obj):
CORBA.Object.__init__(self, obj)
def credito(self, *args):
return self._obj.invoke("credito", _0_Lista01.Cliente._d_credito, args)
omniORB.registerObjref(Cliente._NP_RepositoryId, _objref_Cliente)
_0_Lista01._objref_Cliente = _objref_Cliente
del Cliente, _objref_Cliente
# Cliente skeleton
__name__ = "Lista01__POA"
class Cliente (PortableServer.Servant):
_NP_RepositoryId = _0_Lista01.Cliente._NP_RepositoryId
_omni_op_d = {"credito": _0_Lista01.Cliente._d_credito}
Cliente._omni_skeleton = Cliente
_0_Lista01__POA.Cliente = Cliente
omniORB.registerSkeleton(Cliente._NP_RepositoryId, Cliente)
del Cliente
__name__ = "Lista01"
#
# End of module "Lista01"
#
__name__ = "ex1_idl"
_exported_modules = ( "Lista01", )
# The end.
|
[
"ramon.jsa@gmail.com"
] |
ramon.jsa@gmail.com
|
2f31bf8d1a5732d5f37e014050e8f02ea9baa77a
|
bfc00e523249cebab3303cb08e5b8bbe7a780f35
|
/icub-nightly/tools/icub-csv-angle-compare.py
|
e894853078f956e0e1ca03b048d39fb199efc668
|
[] |
no_license
|
jon-weisz/fsmt.experiments
|
14d02b5931907ce074a52858eef27afaab3f96cd
|
08bf741b625853717b8e72d14f8f550b19b8d045
|
refs/heads/master
| 2021-01-02T08:19:29.936254
| 2015-09-10T18:22:25
| 2015-09-10T18:22:25
| 41,696,098
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 964
|
py
|
__author__ = 'flier'
import os
import sys
import csv
import time
logpath = os.environ['FSMLOG']
logfile = "/right_arm.dump"
raw_file = logpath + logfile
csv_repr = "/right_arm.csv"
csv_file = logpath + csv_repr
# Create an actual CSV File
with open(raw_file) as infile, open(csv_file, 'w') as outfile:
outfile.write(infile.read().replace(" ", ", "))
# Accessible representation
joint_2 = []
with open(csv_file) as csvfile:
reader = csv.reader(csvfile)
for row in reader:
joint_2.append(float(row[1]))
last = len(joint_2)
first_value = joint_2[0]
last_value = joint_2[last - 1]
time.sleep(2)
if first_value < 25.0 or first_value > 35.0:
    print("First joint value is off by more than 5 degrees: %s" % str(first_value))
    sys.exit(1)
if last_value < 25.0 or last_value > 35.0:
    print("Last joint value is off by more than 5 degrees: %s" % str(last_value))
    sys.exit(1)
print("First and last joint values are correct")
while True:
time.sleep(2)
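# Note (added for clarity): the read().replace(" ", ", ") rewrite above turns
# space-separated dump lines such as "0.0 30.1 12.5" into the CSV row
# "0.0, 30.1, 12.5", and the endless sleep loop keeps this checker process
# alive for the surrounding experiment framework.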
|
[
"flier@techfak.uni-bielefeld.de"
] |
flier@techfak.uni-bielefeld.de
|
f67257c7ffde4b5aa13e4e6e961f2a3a9daf65c8
|
18c2ede12808a0c17a1eef3540cf54ad8f8c1420
|
/com/hhcf/fun/base-0.py
|
240d47ad5a7fae24fe50575aade3588da6f6410b
|
[] |
no_license
|
shulanztf/python-cluster
|
d9e670f151887aeb4357a820473d03939e749ce1
|
f680d6e6bf55f7aac1cf0532dcc11e5fce593d5e
|
refs/heads/master
| 2021-06-25T10:47:28.810944
| 2020-09-27T10:10:20
| 2020-09-27T10:10:20
| 99,528,331
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 469
|
py
|
#! /usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@version: 1.0
@author: zhaotf
@file: base-0.py
@time: 2017/8/9 0009 15:29
"""
# # python references / objects
# L1 = [1, 2, 3]
# L2 = L1
# print(L1)
# print(L2)
# print(L1 == L2)
# L2[0] = 5
# print(L1)
# print(L2)
# print(L1[0])
# print(L1 == L2)
# L1 = [88,99]
# print(L1 == L2)
# inspect the current namespace
print(dir())
def func():
pass
class Main():
def __init__(self):
pass
if __name__ == '__main__':
pass
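# Runnable version of the aliasing lesson sketched in the comments above:
L1 = [1, 2, 3]
L2 = L1          # L2 and L1 name the same list object
L2[0] = 5
print(L1)        # [5, 2, 3] -- the mutation is visible through both names
L1 = [88, 99]    # rebinding L1 points it at a brand-new object
print(L1 == L2)  # False: L2 still holds [5, 2, 3]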
|
[
"1426763507@qq.com"
] |
1426763507@qq.com
|
3fd03a10da1ee86b313c8f9fa695fa3f5b8704e9
|
022f659eb5703191c43ad07040a1b388686f1f3a
|
/venv/bin/wheel
|
bd6088288d6478420600dd88d28a538e816b5d34
|
[] |
no_license
|
MusawerAli/scrappy
|
b296286fc1b60501fc9e218fcac6e51ac5227c7d
|
16b0e293bf8313c32af8ce42bda8f19e58367ff5
|
refs/heads/master
| 2022-11-16T15:19:16.854679
| 2020-06-28T11:41:23
| 2020-06-28T11:41:23
| 274,632,244
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
#!/home/musawer/PycharmProjects/tutorial/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
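# Illustration (added): the re.sub above strips a Windows launcher suffix from
# argv[0] so the CLI reports a clean program name, e.g.
#   re.sub(r'(-script\.pyw|\.exe)?$', '', "wheel-script.pyw")  # -> "wheel"
#   re.sub(r'(-script\.pyw|\.exe)?$', '', "wheel.exe")         # -> "wheel"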
|
[
"pakjalihouse@gmail.com"
] |
pakjalihouse@gmail.com
|
52b26c5e1c3a48eb6128bda512ede5108d3c6150
|
612da563d2c01bb9824a45737b8109a30df1d631
|
/yielding_a_not_enumerated_property/evaluator.py
|
386240f07956225fc7bfb1ea192bf22b1bdab27d
|
[] |
no_license
|
romeorizzi/turingarena-numerability-theory
|
e97680e47787782a114a72ef23ae10e1b643efef
|
69764663d8b193873268e92190dbc49c84e79fa6
|
refs/heads/master
| 2020-05-01T06:08:37.141662
| 2019-04-16T09:09:00
| 2019-04-16T09:09:00
| 177,322,213
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,801
|
py
|
import random
import turingarena as ta
NUM_MACHINES = 100
for tc in range(7):
memoM = []
for i in range(0,NUM_MACHINES):
memoM.append([random.randint(0, 1) for _ in range(0,NUM_MACHINES)])
for n in range(0,i+1):
memoM[i][n] = 1 - memoM[n][n]
if tc % 3:
random.shuffle(memoM)
if tc % 2:
perm = list(range(0,NUM_MACHINES))
random.shuffle(perm)
for i in range(0,NUM_MACHINES):
bits_to_be_permuted = memoM[i][:]
for n in range(0,NUM_MACHINES):
memoM[i][n] = bits_to_be_permuted[perm[n]]
memoP = []
for n in range(0,NUM_MACHINES):
        with ta.run_algorithm(ta.submission.source) as process:
            def M(i, n):
                return memoM[i][n]
            try:
                memoP.append(process.functions.P(n, callbacks = [M]))
            except ta.AlgorithmError as e:
                ta.goals["correct"] = False
                print(e)
                memoP.append(None)  # keep memoP aligned with n so the reporting below cannot crash
        print(f"Your P({n}) = {memoP[n]}")
first_n_diff = None
for i in range(0,NUM_MACHINES):
P_and_Pi_differ = False
for j in range(0,i+1):
if memoM[i][j] != memoP[j]:
P_and_Pi_differ = True
first_n_diff = j
break
if P_and_Pi_differ:
print(f"The first diff with machine P_{i} is on the natural {first_n_diff}. Here, P({first_n_diff}) = {memoP[first_n_diff]} whereas P({i}) = {memoM[i][first_n_diff]}")
else:
ta.goals.setdefault("correct", False)
print(f"NO: your property P coincides with our property P_{i} for all n <= {i}. Indeed, P[0..{i}] = {memoP[:i+1]} and P_{i}[0..{i}] = {memoM[i][:i+1]}")
ta.goals.setdefault("correct", True)
print(ta.goals)
|
[
"romeo.rizzi@univr.it"
] |
romeo.rizzi@univr.it
|
090cc6c260c1c4101656b919e078907d482c83f1
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_74/1231.py
|
6e8b5095b7fc81541810f871636b8b16ccb1216d
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 825
|
py
|
fin = open('input.txt', 'r')
fout = open('output.txt', 'w')
n = int(fin.readline())
for i in range(n):
st = fin.readline()
t = st.split()
t_t = 0
robots = {'O':[1,0,0], 'B':[1,0,0]}
prev = t[1]
for j in range(int(t[0])):
robot = t[2*j+1]
loc = int(t[2*j+2])
robots[robot][1] = 0
if robot != prev: robots[robot][2]=0
temp_t = abs(loc - robots[robot][0])
if (robot != prev):
if ((temp_t - robots[prev][2])>0):
temp_t -= robots[prev][2]
else:
temp_t = 0
temp_t += 1
t_t += temp_t
robots[robot][0] = loc
robots[robot][1] = temp_t
robots[robot][2] += temp_t
prev = robot
fout.write('Case #'+str(i+1)+': '+str(t_t)+'\n')
fin.close()
fout.close()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
a642387dc2f788a485abf40e92d06f5d8ccdd576
|
25040cc2a98c5e4b70e50acea619e480cd10037a
|
/Util.py
|
05e0a9b6e67fcef5c426b85fe386746e54793736
|
[] |
no_license
|
emanuelbaquero/modelo_properatti
|
a85adcc472ec0b2070a9ddf7095315217443d4b7
|
8a568e53a32654570f0d773214a92b0eb9b6320b
|
refs/heads/master
| 2022-06-16T22:37:54.272166
| 2020-05-06T16:31:16
| 2020-05-06T16:31:16
| 257,413,620
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 40,883
|
py
|
import numpy as np
import pandas as pd
import unidecode as uni
from sklearn.model_selection import cross_val_score
from sklearn import metrics
from sklearn import linear_model
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
import statsmodels.api as sm
pd.set_option('chained_assignment',None)
def eliminar_features(p_modeloMatriz):
modeloMatriz = p_modeloMatriz
xs = modeloMatriz.iloc[:,1:]
y = modeloMatriz.iloc[:,0]
df = pd.DataFrame(modeloMatriz.precio_m2)
for i in xs.columns:
        # keep about 30 variables to explain the model
if abs(y.corr(xs[i]))>0.0719:
df = pd.concat([df, pd.DataFrame(xs[i])],axis=1)
return df
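# Standalone sketch of the correlation-threshold rule above (toy data; the
# 0.0719 cut-off mirrors the function, column names are illustrative only):
#   df = pd.DataFrame({'y': [1, 2, 3, 4], 'a': [1, 2, 3, 4], 'b': [1, 2, 2, 1]})
#   kept = [c for c in ('a', 'b') if abs(df['y'].corr(df[c])) > 0.0719]
#   # kept == ['a']: 'b' has zero correlation with y and is dropped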
def eliminar_features_lasso(p_modeloMatriz):
modeloMatriz_2 = p_modeloMatriz
cantidad_indices = 0
for i in range (1, 10):
modelo_lasso = modelo_lasso_cross_validation(modeloMatriz_2)
df1 =pd.DataFrame({'coef':modelo_lasso.coef_})
df2 = pd.DataFrame(modeloMatriz_2.columns)
df2 = df2[1:]
df2 = df2.reset_index(drop=True)
df0 =pd.concat([df1,df2],axis=1)
indices = []
for i in df0.index:
if abs(df0.coef[i])!=0.000000:
indices.append(df0.index[i])
        print('number of nonzero coefficients: ', len(indices))
        print('size of df0: ', df0.shape[0])
if len(indices) == df0.shape[0]:
print('break')
break
columnas = pd.DataFrame(df0.iloc[indices])[0]
modeloMatriz_2 = modeloMatriz_2[columnas]
modeloMatriz_2 = pd.concat([p_modeloMatriz.precio_m2, modeloMatriz_2],axis=1)
return modeloMatriz_2
def summary(p_modeloMatriz):
modeloMatriz = p_modeloMatriz
xs = modeloMatriz.iloc[:,1:]
y = modeloMatriz.iloc[:,0]
x_train, x_test, y_train, y_test = train_test_split(xs, y, test_size=0.6)
model = sm.OLS(y_train, x_train).fit()
predictions = model.predict(x_test)
print ("EMC:", metrics.mean_squared_error(y_test, predictions))
print(model.summary())
return model
def modelo_lasso_cross_validation(p_modeloMatriz):
modeloMatriz = p_modeloMatriz
xs = modeloMatriz.iloc[:,1:]
y = modeloMatriz.iloc[:,0]
    xs = xs.values
    y = y.values
lassocv = linear_model.LassoCV(alphas=np.linspace(0.01,100, 1000), cv=5, normalize=True)
x_train, x_test, y_train, y_test = train_test_split(xs, y, test_size=0.4)
lassocv.fit(x_train, y_train)
alpha_lasso = lassocv.alpha_
lasso = linear_model.Lasso(alpha=alpha_lasso, normalize=True)
x_train, x_test, y_train, y_test = train_test_split(xs, y, test_size=0.4)
lasso_model =lasso.fit(x_train, y_train)
scores = cross_val_score(lasso_model, x_train, y_train, cv=5)
y_predict = lasso_model.predict(x_test)
plt.scatter(y_test, y_predict, color='blue')
#plt.scatter(x_test[:,0], y_predict, color='red')
print('LASSO REGRESSION')
print('CROSS VALIDATION:', scores[0], scores[1], scores[2], scores[3],scores[4])
print ('MAE LASSO:', metrics.mean_absolute_error(y_test, y_predict))
print ('MSE LASSO:', metrics.mean_squared_error(y_test, y_predict))
print ('RMSE LASSO:', np.sqrt(metrics.mean_squared_error(y_test, y_predict)))
print ("LASSO -> R2 TRAIN: ", lasso_model.score(x_train, y_train))
print ("LASSO -> R2 TEST: ", lasso_model.score(x_test, y_test))
return lasso_model
def modelo_ridge_cross_validation(p_modeloMatriz):
modeloMatriz = p_modeloMatriz
xs = modeloMatriz.iloc[:,1:]
y = modeloMatriz.iloc[:,0]
    xs = xs.values
    y = y.values
rlmcv = linear_model.RidgeCV(alphas=np.linspace(0.01,100, 1000), cv=5, normalize=True)
x_train, x_test, y_train, y_test = train_test_split(xs, y, test_size=0.4)
rlmcv.fit(x_train, y_train)
predictions = rlmcv.predict(x_test)
alpha_ridge = rlmcv.alpha_
rlm = linear_model.Ridge(alpha=alpha_ridge, normalize=True)
x_train, x_test, y_train, y_test = train_test_split(xs, y, test_size=0.4)
ridge_model = rlm.fit(x_train, y_train)
scores = cross_val_score(ridge_model, x_train, y_train, cv=5)
y_predict = ridge_model.predict(x_test)
plt.scatter(y_test, y_predict, color='blue')
#plt.scatter(x_test[:,0], y_predict, color='red')
    print('RIDGE REGULARIZATION')
print('CROSS VALIDATION:', scores[0], scores[1], scores[2], scores[3],scores[4])
print ('MAE RIDGE:', metrics.mean_absolute_error(y_test, y_predict))
print ('MSE RIDGE:', metrics.mean_squared_error(y_test, y_predict))
print ('RMSE RIDGE:', np.sqrt(metrics.mean_squared_error(y_test, y_predict)))
print ("RIDGE -> R2 TRAIN: ", ridge_model.score(x_train, y_train))
print ("RIDGE -> R2 TEST: ", ridge_model.score(x_test, y_test))
return ridge_model
def modelo_regresion_lineal_normalizar_antes(p_modeloMatriz):
modeloMatriz = p_modeloMatriz
xs = modeloMatriz.iloc[:,1:]
y = modeloMatriz.iloc[:,0]
    # standardize
    stdscaler = StandardScaler()
    # scale the training variables
    for i in xs.columns:
        xs[i] = stdscaler.fit_transform(xs[[i]])
    y = stdscaler.fit_transform(pd.DataFrame(y))
    x_train, x_test, y_train, y_test = train_test_split(xs, y, test_size=0.4)
    # scale the testing variables (left commented out: the split above already receives scaled data)
    #for i in x_test.columns:
    # x_test[i] = stdscaler.fit_transform(x_test[[i]])
    #y_test = stdscaler.fit_transform(pd.DataFrame(y_test))
#FIT
modelo = linear_model.LinearRegression(fit_intercept=False,normalize=False)
modelo.fit(x_train,y_train)
#CROSS VALIDATION
scores = cross_val_score(modelo, x_train, y_train, cv=5)
    # predict "y" for the test "x"
    y_predict = modelo.predict(x_test)
    # x axis -> total surface
    x1 = x_test.superficie_total
    # y axis -> test price per m2
    x2 = y_test
    # y axis -> predicted price per m2
    x3 = y_predict
    # plot
    plt.scatter(x1,x2,label='model test', color='blue')
    #plt.scatter(x1,x3,label='model prediction', color='red')
    #plt.scatter(x2,x3,label='model prediction 2', color='yellow')
    plt.title('model plot')
    plt.show()
print('CROSS VALIDATION:', scores[0], scores[1], scores[2], scores[3],scores[4])
print ('MAE:', metrics.mean_absolute_error(y_test, y_predict))
print ('MSE:', metrics.mean_squared_error(y_test, y_predict))
print ('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_predict)))
    print('R2 TRAIN: ', modelo.score(x_train,y_train))
    print('R2 TEST: ', modelo.score(x_test,y_test))
return modelo
def modelo_regresion_lineal(p_modeloMatriz):
modeloMatriz = p_modeloMatriz
xs = modeloMatriz.iloc[:,1:]
y = modeloMatriz.iloc[:,0]
    # convert the independent variables to array form
    xs = xs.values
    # convert the dependent variable to array form
    y = y.values
    # split into training and testing sets
x_train, x_test, y_train, y_test = train_test_split(xs, y, test_size=0.6)
#FIT
modelo = linear_model.LinearRegression(fit_intercept=False,normalize=True, n_jobs=1)
modelo.fit(x_train,y_train)
#CROSS VALIDATION
scores = cross_val_score(modelo, x_train, y_train, cv=5)
    # predict "y" for the test "x"
    y_predict = modelo.predict(x_test)
    # slopes
    pendientes = modelo.coef_
    # intercept
    ordenada = modelo.intercept_
    # x axis -> total surface
    x1 = x_test[:,0]
    # y axis -> test price per m2
    x2 = y_test
    # y axis -> predicted price per m2
    x3 = y_predict
    # plot
    plt.scatter(x2,x3,label='model test', color='blue')
    #plt.scatter(x1,x3,label='model prediction', color='red')
    #plt.scatter(x2,x3,label='model prediction 2', color='yellow')
    plt.title('model plot')
plt.show()
print('CROSS VALIDATION:', scores[0], scores[1], scores[2], scores[3],scores[4])
print ('MAE:', metrics.mean_absolute_error(y_test, y_predict))
print ('MSE:', metrics.mean_squared_error(y_test, y_predict))
print ('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_predict)))
    print('R2 TRAIN: ', modelo.score(x_train,y_train))
    print('R2 TEST: ', modelo.score(x_test,y_test))
return modelo
def limpiarDatos(p_data, alpha=1):
data=p_data
    # null duplicated rows in the description field
    data = data.drop_duplicates(subset=['description'], keep='first')
    # drop the stores
    data = data[~(data.property_type.str.contains('store'))]
    # null covered surfaces larger than the total surface
    data.surface_covered_in_m2[data.surface_covered_in_m2>data.surface_total_in_m2] = np.nan
    # null apartment rows with covered surface under 16
    data.surface_covered_in_m2[(data.surface_covered_in_m2<16)&(data.property_type.str.contains('apartment'))] = np.nan
    # null apartment rows with total surface under 16
    data.surface_total_in_m2[(data.surface_total_in_m2<16)&(data.property_type.str.contains('apartment'))] = np.nan
    # null house rows with covered surface under 50
    #data = data[(~((data.surface_covered_in_m2<50)&(data.property_type.str.contains('house'))))]
    data.surface_covered_in_m2[(data.surface_covered_in_m2<50)&(data.property_type.str.contains('house'))] = np.nan
    # null house rows with total surface under 50
    data.surface_total_in_m2[(data.surface_total_in_m2<50)&(data.property_type.str.contains('house'))] = np.nan
    ## null PH rows with covered surface under 50
    data.surface_covered_in_m2[(data.surface_covered_in_m2<50)&(data.property_type.str.contains('PH'))] = np.nan
    ## null PH rows with total surface under 50
    data.surface_total_in_m2[(data.surface_total_in_m2<50)&(data.property_type.str.contains('PH'))] = np.nan
    # null apartment rows with total surface over 1000
    data.surface_total_in_m2[(data.surface_total_in_m2>1000)&(data.property_type.str.contains('apartment'))] = np.nan
    # null apartment rows with covered surface over 1000
    data.surface_covered_in_m2[(data.surface_covered_in_m2>1000)&(data.property_type.str.contains('apartment'))] = np.nan
    # null house rows with covered surface over 30000
    data.surface_covered_in_m2[(data.surface_covered_in_m2>30000)&(data.property_type.str.contains('house'))] = np.nan
    # null house rows with total surface over 30000
    data.surface_total_in_m2[(data.surface_total_in_m2>30000)&(data.property_type.str.contains('house'))] = np.nan
    # null PH rows with covered surface over 1200
    data.surface_covered_in_m2[(data.surface_covered_in_m2>1200)&(data.property_type.str.contains('PH'))] = np.nan
    # null PH rows with total surface over 1200
    data.surface_total_in_m2[(data.surface_total_in_m2>1200)&(data.property_type.str.contains('PH'))] = np.nan
    # null rows with total surface under 16
    data.surface_total_in_m2[data.surface_total_in_m2<16] = np.nan
    # drop rows with total surface of 10000 or more
    data = data[(data.surface_total_in_m2<10000)|(data.surface_total_in_m2.isnull())]
    # null covered surfaces under 16
    data.surface_covered_in_m2[data.surface_covered_in_m2<16] = np.nan
    # drop rows with covered surface of 10000 or more
    data = data[(data.surface_covered_in_m2<10000)|(data.surface_covered_in_m2.isnull())]
    # null rows with prices under $500 or over $2,000,000
    data.price_aprox_usd[data.price_aprox_usd<500] = np.nan
    data.price_aprox_usd[data.price_aprox_usd>2000000] = np.nan
    # drop rows with price per m2 outside [500, 6000]
    data = data[~((data.price_usd_per_m2>6000)|(data.price_usd_per_m2<500))]
def generar_m2(p_data):
data = p_data
data['nuevos_precios_m2'] = data.price_aprox_usd/data.surface_total_in_m2
data.price_aprox_usd[data.nuevos_precios_m2>6000] = np.nan
data.price_aprox_usd[data.nuevos_precios_m2<500] = np.nan
return data.price_aprox_usd
data.price_aprox_usd = generar_m2(data)
    # null the outliers using a z-score threshold = alpha
data = OutliersSupTotal(data, alpha)
data = OutliersSupCubierta(data, alpha)
data = OutliersPrecioUSD(data, alpha)
data = OutliersPrecioM2(data, alpha)
data = quitarMayusculasAcentos(data)
data['ambientes'] = generoAmbientes(data)
#data = utl.TransformacionData(data)
data['superficieJardines'] = generarSupJardines(data)
data['superficieTerraza'] = generarSupTerrazas(data)
data['superficieJarTer'] = generarSupJarTer(data)
    # impute using means estimated per state, neighborhood, property type, etc.
data.imputar_ambientes = imputarAmbientes(data)
data.surface_covered_in_m2 = ImputarSupCubierta(data)
data.imputar_ambientes[data.imputar_ambientes==0]=np.nan
data.surface_total_in_m2 = ImputarSupTotal(data)
data.surface_total_in_m2 = ImputarTotalMenorCubierta(data)
data.price_aprox_usd = imputarPrecio(data)
#data.price_usd_per_m2 = imputarPrecioM2(data)
data = OutliersSupTotal(data, alpha+2)
data = OutliersSupCubierta(data, alpha+2)
data = OutliersPrecioUSD(data, alpha+2)
data = OutliersPrecioM2(data, alpha+3)
return data
def nuevosDatos (p_modeloMatriz, superficie_total, jardin, terraza, ambientes, tipo, barrio):
modeloMatriz = p_modeloMatriz
    ## total surface
df0 = pd.DataFrame({'superficie_total':pd.Series(superficie_total)})
    ## neighborhoods
barrios = pd.Series(modeloMatriz.iloc[:,7:].columns)
barrios = (barrios.str.replace('_',' '))
df1 = barrios.apply(lambda x: 1 if x==barrio else 0)
df2 = pd.DataFrame(columns=barrios)
df2 = df2.append({ 'flores' : 0 } , ignore_index=True)
df2 = df2.fillna(0).astype(int)
df2.iloc[:,barrios[barrios.str.contains(barrio+'$',regex=True)].index] = '1'
    ## rooms and property types
if jardin=='1':
var_jardin = 'jardin'
else:
var_jardin = ''
if terraza=='1':
var_terraza = 'terraza'
else:
var_terraza = ''
if (jardin == '1') & (terraza == '1'):
var_jardinTerraza = 'jardinTerraza'
else:
var_jardinTerraza = ''
df4 = pd.DataFrame({'jardin':pd.Series(0),'jardinTerraza':pd.Series(0),'CASA':pd.Series(0),'PH':pd.Series(0),'DTO':pd.Series(0)})
indices = df4.columns
indices = pd.Series(indices).astype(str)
indices_bool = (indices.apply(lambda x: x=='CASA')) | (indices.apply(lambda x: x==var_jardin)) | (indices.apply(lambda x: x==var_terraza)) | (indices.apply(lambda x: x==var_jardinTerraza))
serie_df4 = indices_bool.apply(lambda x : 1 if x else 0)
df4_proc = pd.DataFrame({
'jardin':pd.Series(serie_df4[0]),
'jardinTerraza':pd.Series(serie_df4[1]),
'CASA':pd.Series(serie_df4[2]),
'PH':pd.Series(serie_df4[3]),
'DTO':pd.Series(serie_df4[4])
})
predecir_data = pd.concat([df0,df4_proc],axis=1)
predecir_data = pd.concat([predecir_data, df2],axis=1)
#predecir_data.superficie_total_2 = predecir_data.superficie_total**2
return predecir_data
def generarDummies(p_matriz):
matriz = p_matriz
    # cast to float so the comparisons below work
matriz.ambientes = matriz.ambientes.astype(float)
    # build room-count dummies
matriz['1_AMBIENTE'] = (matriz.ambientes>=1)&(matriz.ambientes<2)
matriz['2_AMBIENTE'] = (matriz.ambientes>=2)&(matriz.ambientes<3)
matriz['3_AMBIENTE'] = (matriz.ambientes>=3)&(matriz.ambientes<4)
matriz['4_AMBIENTE'] = (matriz.ambientes>=4)&(matriz.ambientes<5)
matriz['5_AMBIENTE'] = (matriz.ambientes>=5)&(matriz.ambientes<6)
matriz['6_AMBIENTE'] = (matriz.ambientes>=6)&(matriz.ambientes<7)
matriz['7_AMBIENTE'] = (matriz.ambientes>=7)&(matriz.ambientes<8)
matriz[['1_AMBIENTE','2_AMBIENTE','3_AMBIENTE','4_AMBIENTE', '5_AMBIENTE','6_AMBIENTE','7_AMBIENTE']] = matriz[['1_AMBIENTE','2_AMBIENTE','3_AMBIENTE','4_AMBIENTE', '5_AMBIENTE','6_AMBIENTE','7_AMBIENTE']].applymap(lambda x : 1 if (x) else 0)
    # build property-type dummies
matriz['CASA'] = matriz.propiedad.str.contains('house')
matriz['PH'] = matriz.propiedad.str.contains('PH')
matriz['DTO'] = matriz.propiedad.str.contains('apartment')
matriz[['CASA','PH','DTO']] = matriz[['CASA','PH','DTO']].applymap(lambda x : 1 if x else 0)
    # drop rows with nulls in the model variables
matriz=matriz[matriz.precio_m2.notnull()]
matriz=matriz[matriz.superficie_total.notnull()]
matriz=matriz[matriz.ambientes.notnull()]
    # build neighborhood dummies
    # drop nulls from the neighborhood column
    matriz = matriz[matriz.barrio.notnull()]
    # build the list of neighborhoods
    barrios = matriz[matriz.localidad.str.contains('capital')].barrio.unique()
    # generate the dummies
for barrio in barrios:
indices_barrios = (matriz.index[matriz.barrio.str.contains(barrio)])
barrio = barrio.lower().replace(' ','_')
df = matriz
df.barrio = df.barrio.apply(lambda x : x.lower().replace(' ','_'))
df[barrio] = df.barrio.str.contains(barrio)
numero_barrios = len(matriz.barrio[matriz.localidad.str.contains('capital')].unique())
indices_dummys_barrios = matriz.shape[1]-numero_barrios
    # build the dataframe with the neighborhood dummies
dummys_barrios = matriz.iloc[:,indices_dummys_barrios:]
dummys_barrios = dummys_barrios.applymap(lambda x : 1 if (x) else 0)
    # write the neighborhood dummies back into the dataframe
matriz.iloc[:,indices_dummys_barrios:] = dummys_barrios
matriz = matriz.loc[matriz.localidad.str.contains('capital')]
#SKLEARN
    nuevos_feactures = matriz[['superficieJardines','superficieTerrazas','superficieJardinesTerrazas']].applymap(lambda x: 1 if x>0 else 0) # independent variables
#nuevos_feactures_2 = pd.DataFrame(matriz.superficieJardines + matriz.superficieTerrazas + matriz.superficieJardinesTerrazas)
x_feactures=matriz.iloc[:,16:]
df1 = pd.concat([matriz['superficie_total'],nuevos_feactures],axis=1)
xs = pd.concat([df1,x_feactures],axis=1)
    # dependent variable
y = matriz.precio_m2
matriz = pd.concat([y,xs],axis=1)
#matriz['superficie_total_2'] = matriz.superficie_total**2
#matriz['superficie_total_3'] = (matriz.superficie_total**2)**2
return matriz
def GenerarMatriz(p_data):
data = p_data
    # build the matrix
matriz = pd.DataFrame({ 'id':data['Unnamed: 0'],
'tipo':data['operation'],
'propiedad':data.property_type,
'id_localizacion':data.geonames_id,
'pais':data.country_name.astype(str).apply(uni.unidecode).str.lower(),
'localidad':data.state_name.astype(str).apply(uni.unidecode).str.lower(),
'barrio':data.place_name.astype(str).apply(uni.unidecode).str.lower(),
'moneda':data.currency.str.lower(),
'ambientes':data.imputar_ambientes,
'superficie_total':data.surface_total_in_m2,
'superficie_cubierta_m2':data.surface_covered_in_m2,
'precio_aprox_usd':data.price_aprox_usd,
'precio_m2':data.price_usd_per_m2,
'superficieJardines':data.superficieJardines.apply(lambda x: 1 if x else 0),
'superficieTerrazas':data.superficieTerraza.apply(lambda x: 1 if x else 0),
'superficieJardinesTerrazas':data.superficieJarTer.apply(lambda x: 1 if x else 0)
})
return matriz
def imputarAmbientes(p_data):
data = p_data
    ## imputing room counts
data.ambientesImputados = ImputarAmbientesProceso(data,10)
data.ambientesImputados.update(data.ambientes)
return data.ambientesImputados
def ImputarSupCubierta(p_data):
data = p_data
    # impute missing values by relating room counts and covered surfaces
data['superficie_cubierta_imputada'] = np.nan
imputar_serie = ImputarSupCubiertaProceso(data,5)
data.superficie_cubierta_imputada.update(imputar_serie)
data.superficie_cubierta_imputada.update(data.surface_total_in_m2)
data.superficie_cubierta_imputada.update(data.surface_covered_in_m2)
return data.superficie_cubierta_imputada
def ImputarSupTotal(p_data):
data = p_data
data['superficie_total_imputada_Cubierta'] = ImputarSupTotalCubierta(data,10)
data['superficie_total_imputada_Ambientes'] = ImputarSupTotalAmbientes(data,5)
data.superficie_total_imputada_Ambientes.update(data.superficie_total_imputada_Cubierta)
data.superficie_total_imputada_Ambientes.update(data.surface_total_in_m2)
return data.superficie_total_imputada_Ambientes
def ImputarTotalMenorCubierta(p_data):
data = p_data
    # when total surface < covered surface, replace with covered surface + garden/terrace
superficie_jardin_imputada_ceros = data.superficieJardines.fillna(0)
superficie_terraza_imputada_ceros = data.superficieTerraza.fillna(0)
sup_terraza_jardin_imputada_ceros = data.superficieJarTer.fillna(0)
data.surface_total_in_m2.loc[data.surface_total_in_m2-data.surface_covered_in_m2<0] = data.surface_covered_in_m2 + superficie_jardin_imputada_ceros + superficie_terraza_imputada_ceros + sup_terraza_jardin_imputada_ceros
return data.surface_total_in_m2
def generarSupJardines(p_data):
data = p_data
    ## detect gardens and terraces
booleanos_jardines =(data.description.str.contains('parquizado'))|(data.description.str.contains('patio'))|(data.description.str.contains('jardin'))
booleanos_terraza = (data.description.str.contains('terraza'))|(data.description.str.contains('quincho'))
    ## flag garden listings (without a terrace)
serie_jardines = (booleanos_jardines) & (~booleanos_terraza)
data['superficies_jardines'] = serie_jardines
return data.superficies_jardines
def generarSupTerrazas(p_data):
data = p_data
    ## detect gardens and terraces
booleanos_jardines =(data.description.str.contains('parquizado'))|(data.description.str.contains('patio'))|(data.description.str.contains('jardin'))
booleanos_terraza = (data.description.str.contains('terraza'))|(data.description.str.contains('quincho'))
    ## flag terrace listings (without a garden)
serie_terraza = (booleanos_terraza) & (~booleanos_jardines)
data['superficie_terraza'] = serie_terraza
return data.superficie_terraza
def generarSupJarTer(p_data):
data = p_data
    ## detect gardens and terraces
booleanos_jardines =(data.description.str.contains('parquizado'))|(data.description.str.contains('patio'))|(data.description.str.contains('jardin'))
booleanos_terraza = (data.description.str.contains('terraza'))|(data.description.str.contains('quincho'))
    ## flag listings with both terrace and garden
serie_terraza_jardin = (booleanos_terraza) & (booleanos_jardines)
data['superficie_terraza_jardin'] = serie_terraza_jardin
return data.superficie_terraza_jardin
def quitarMayusculasAcentos(p_data):
data = p_data
    ## lowercase the description and title columns and strip accents
data.description = data.description.astype(str).apply(uni.unidecode).str.lower()
data.title = data.title.astype(str).apply(uni.unidecode).str.lower()
return data
def generoAmbientes(p_data):
data = p_data
    ## room counts mentioned in the description field
    un_ambiente = data[data.rooms<=7].description.str.contains("ambiente ") | data.description.str.contains(r"amb\.|amb ") & data.description.str.contains("1 amb")
dos_o_mas_ambientes = data.description.str.contains("ambientes") | data.description.str.contains("2 amb")
data["un_ambiente"]=un_ambiente
    ## room counts parsed from the title and description fields
cant_ambientes_old_desc = data[data.rooms<=7].description.astype(str).apply(obtengo_ambiente)
cant_ambientes_old_title = data[data.rooms<=7].title.astype(str).apply(obtengo_ambiente)
cant_ambientes_desc = cant_ambientes_old_desc.str.extract(r'(\d+)')
cant_ambientes_title = cant_ambientes_old_title.str.extract(r'(\d+)')
data['cantidad_ambientes_desc'] = cant_ambientes_desc
data['cantidad_ambientes_title'] = cant_ambientes_title
    ## one-room listings detected from descriptions
data['un_ambiente'] = data.un_ambiente
data['monoambiente'] = data[data.rooms<=7].description.str.contains('monoambiente') | data.description.str.contains('mono ambiente') | data.title.str.contains('monoambiente') | data.title.str.contains('mono ambiente')
data['ambientes'] = data.rooms[data.rooms.fillna(100).astype(int)<6].astype(int)
    ## merge the one-room flags into the counts already found
var_un_ambiente = data.un_ambiente.apply(devolver_un_ambiente)
var_monoambiente = data.monoambiente.apply(devolver_un_ambiente)
#data.cantidad_ambientes_title.update(data.cantidad_ambientes_desc)
data.ambientes.update(data.cantidad_ambientes_title)
data.ambientes.update(data.cantidad_ambientes_desc)
    ## consolidate all the room counts
data['var_un_ambiente'] = var_un_ambiente
data['var_monoambiente'] = var_monoambiente
data.var_un_ambiente.update(data.ambientes)
data.var_monoambiente.update(data.var_un_ambiente)
    ## store the room-count column in data.nuevos_ambientes
data['nuevos_ambientes'] = data.var_monoambiente
data['ambientes_ceros'] = data.nuevos_ambientes.fillna(0).astype(int)
return data.nuevos_ambientes
def TransformacionData(p_data):
data = p_data
    # drop rows with duplicated descriptions
data = data.drop_duplicates(subset=['description'], keep='first')
    # drop rows with covered surface under 16
data = data[(data.surface_covered_in_m2>16)|(data.surface_covered_in_m2.isnull())]
    # drop rows with total surface under 16
data = data[(data.surface_total_in_m2>16)|(data.surface_total_in_m2.isnull())]
    # drop houses with covered surface under 50
data = data[(~((data.surface_covered_in_m2<50)&(data.property_type.str.contains('house'))))]
    # drop houses with total surface under 50
data = data[(~((data.surface_total_in_m2<50)&(data.property_type.str.contains('house'))))]
    # null covered surface values where covered surface > total surface
data.surface_covered_in_m2[data.surface_covered_in_m2>data.surface_total_in_m2] = np.nan
#data.surface_total_in_m2.update(data.surface_covered_in_m2)
return data
def devolver_un_ambiente (x):
if x :
return 1
def obtengo_ambiente(x):
    v_1 = x.lower() # lowercase the text
    v_2 = v_1.find('amb') # position of "amb"
    if v_2<0:
        return -1
    else:
        v_3 = v_2-2 # two chars before "amb": where the room-count digit sits
        v_4 = v_2-1 # one char before "amb"
v_5 = v_1[v_3:v_4]
return v_5
def OutliersSupTotal(p_data, Desviacion):
data_modificada = p_data
    # column of means grouped by state, neighborhood, property type
    media__ = data_modificada.groupby(['state_name', 'place_name', 'property_type'])['surface_total_in_m2'].transform('mean')
    # column of standard deviations grouped by state, neighborhood, property type
    str__ = data_modificada.groupby(['state_name','place_name','property_type'])['surface_total_in_m2'].transform('std')
    # Chauvenet-style z-score column used to flag outliers
    criterio_cheuvenet__ = (abs(data_modificada.surface_total_in_m2-media__))/(str__)
#guardar_datos_outliers_originales = pd.DataFrame(data_modificada.surface_total_in_m2.loc[criterio_cheuvenet__>Desviacion])
#data.merge(guardar_datos_outliers_originales, how='left', left_index=True, right_index=True)['']
data_modificada.surface_total_in_m2.loc[criterio_cheuvenet__>Desviacion] = np.nan
return data_modificada
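# Standalone illustration of the z-score rule above (toy series; a threshold
# of 1 is for demonstration only, the callers pass alpha):
#   s = pd.Series([100.0, 110.0, 105.0, 98.0, 5000.0])
#   z = (s - s.mean()).abs() / s.std()
#   s.loc[z > 1] = np.nan  # only the 5000.0 outlier is nulled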
def OutliersSupCubierta(p_data, Desviacion):
data_modificada = p_data
    # column of means grouped by state, neighborhood, property type
    media__ = data_modificada.groupby(['state_name', 'place_name', 'property_type'])['surface_covered_in_m2'].transform('mean')
    # column of standard deviations grouped by state, neighborhood, property type
    str__ = data_modificada.groupby(['state_name','place_name','property_type'])['surface_covered_in_m2'].transform('std')
    # Chauvenet-style z-score column used to flag outliers
    criterio_cheuvenet__ = (abs(data_modificada.surface_covered_in_m2-media__))/(str__)
data_modificada.surface_covered_in_m2.loc[criterio_cheuvenet__>Desviacion] = np.nan
return data_modificada
def OutliersPrecioUSD(p_data, Desviacion):
data_modificada = p_data
    # column of means grouped by state, neighborhood, property type
    media__ = data_modificada.groupby(['state_name', 'place_name', 'property_type'])['price_aprox_usd'].transform('mean')
    # column of standard deviations grouped by state, neighborhood, property type
    str__ = data_modificada.groupby(['state_name','place_name','property_type'])['price_aprox_usd'].transform('std')
    # Chauvenet-style z-score column used to flag outliers
    criterio_cheuvenet__ = (abs(data_modificada.price_aprox_usd-media__))/(str__)
data_modificada.price_aprox_usd.loc[criterio_cheuvenet__>Desviacion] = np.nan
return data_modificada
def OutliersPrecioM2(p_data, Desviacion):
data_modificada = p_data
    # column of means grouped by state, neighborhood, property type
    media__ = data_modificada.groupby(['state_name', 'place_name', 'property_type'])['price_usd_per_m2'].transform('mean')
    # column of standard deviations grouped by state, neighborhood, property type
    str__ = data_modificada.groupby(['state_name','place_name','property_type'])['price_usd_per_m2'].transform('std')
    # Chauvenet-style z-score column used to flag outliers
    criterio_cheuvenet__ = (abs(data_modificada.price_usd_per_m2-media__))/(str__)
data_modificada.price_usd_per_m2.loc[criterio_cheuvenet__>Desviacion] = np.nan
return data_modificada
def ImputarAmbientesProceso(p_data, rango):
data = p_data
data['imputar_ambientes'] = np.nan
for i in range(1,rango):
        # bucket covered surfaces into quantile groups
data['categorias_sup_cubierta_por_m2'] = pd.qcut(data[data.surface_covered_in_m2>10].surface_covered_in_m2,i)
        # mean room count per group
dfImputacionesAmbientes = pd.DataFrame(data[data.ambientes_ceros!=0].groupby(['state_name','place_name','categorias_sup_cubierta_por_m2'])['ambientes_ceros'].mean())
serie_imputaciones_ambientes = data.merge(dfImputacionesAmbientes,how='left',left_on=['state_name','place_name','categorias_sup_cubierta_por_m2'],right_on=['state_name','place_name','categorias_sup_cubierta_por_m2'])['ambientes_ceros_y']
data.imputar_ambientes.update(serie_imputaciones_ambientes)
break
data.rooms[data.rooms>7] = np.nan
data.imputar_ambientes.update(data.rooms)
return data.imputar_ambientes
def ImputarSupCubiertaProceso(p_data, rango):
data = p_data
data['imputando_superficies_cubiertas'] = np.nan
data.imputar_ambientes = data.imputar_ambientes.fillna(0).astype(int)
for i in range(1,rango):
        # bucket imputed room counts into quantile groups
data['ambientes_imputados_ceros'] = data.ambientesImputados.fillna(0).astype(float)
data['categorias_ambientes'] = pd.qcut(data.imputar_ambientes,i)
        # mean covered surface per group
df_superficies_imput = pd.DataFrame(data[data.ambientes_imputados_ceros>=1].groupby(['state_name','place_name','property_type','categorias_ambientes'])['surface_covered_in_m2'].mean())
imputar_serie = data.merge(df_superficies_imput,how='left',left_on=['state_name','place_name','property_type','categorias_ambientes'],right_on=['state_name','place_name','property_type','categorias_ambientes'])['surface_covered_in_m2_y']
data.imputando_superficies_cubiertas.update(imputar_serie)
break
for i in range (1,rango):
        # bucket total surfaces into quantile groups
data['categorias_sup_total'] = pd.qcut(data.surface_total_in_m2,rango)
imputar_serie_Cubierta_con_total = data.groupby(['state_name','place_name','property_type','categorias_sup_total'])['surface_covered_in_m2'].transform('mean')
data.imputando_superficies_cubiertas.update(imputar_serie_Cubierta_con_total)
break
return data.imputando_superficies_cubiertas
def ImputarSupTotalCubierta(p_data, rango):
data = p_data
data['imputando_superficies_total'] = np.nan
for i in range(1,rango):
        # bucket covered surfaces into quantile groups
data['categorias_sup_cubierta_por_m2'] = pd.qcut(data.surface_covered_in_m2,i)
        # mean total surface per group
dfImputarTotal = pd.DataFrame(data.groupby(['state_name','place_name','categorias_sup_cubierta_por_m2'])['surface_total_in_m2'].mean())
serie_imputaciones_ambientes = data.merge(dfImputarTotal,how='left',left_on=['state_name','place_name','categorias_sup_cubierta_por_m2'],right_on=['state_name','place_name','categorias_sup_cubierta_por_m2'])['surface_total_in_m2_y']
data.imputando_superficies_total.update(serie_imputaciones_ambientes)
break
return data.imputando_superficies_total
def ImputarSupTotalAmbientes(p_data, rango):
data = p_data
data['imputando_superficies_total'] = np.nan
data.imputar_ambientes = data.imputar_ambientes.fillna(0).astype(int)
for i in range(1, rango):
data['categorias_ambientes'] = pd.qcut(data.imputar_ambientes,i)
        # mean total surface grouped by room count
dfImputarTotal = pd.DataFrame(data.groupby(['state_name','place_name','categorias_ambientes'])['surface_total_in_m2'].mean())
serie_imputaciones_ambientes = data.merge(dfImputarTotal,how='left',left_on=['state_name','place_name','categorias_ambientes'],right_on=['state_name','place_name','categorias_ambientes'])['surface_total_in_m2_y']
data.imputando_superficies_total.update(serie_imputaciones_ambientes)
break
return data.imputando_superficies_total
def imputarPrecio(p_data):
data = p_data
#data['imputandoPrecioSupTotalJarTer'] = ImputarPrecioJarTer(data)
data['imputandoPrecioSupTotal'] = ImputarPrecioSupTotal(data)
#data.imputandoPrecioSupTotalJarTer.update(data.imputandoPrecioSupTotal)
data.imputandoPrecioSupTotal.update(data.price_aprox_usd)
return data.imputandoPrecioSupTotal
def ImputarPrecioSupTotal(p_data, rango=5):
data = p_data
data['imputar_precios_usd'] = np.nan
for i in range(1,rango):
data['categorias_superficie_total_m2'] = pd.qcut(data.surface_total_in_m2,i)
df_precio_sup_total = pd.DataFrame(data.groupby(['state_name','place_name','property_type','categorias_superficie_total_m2'])['price_aprox_usd'].mean())
serie_imputada_precio_sup_total = data.merge(df_precio_sup_total,how='left',left_on=['state_name','place_name','property_type','categorias_superficie_total_m2'], right_on=['state_name','place_name','property_type','categorias_superficie_total_m2'])['price_aprox_usd_y']
data.imputar_precios_usd.update(serie_imputada_precio_sup_total)
break
return data.imputar_precios_usd
def ImputarPrecioJarTer(p_data, rango=5):
data = p_data
data['imputar_precios_usd'] = np.nan
data['categoria_superficie_cubierta_imputada'] = pd.qcut(data.surface_covered_in_m2,5)
for i in range(1,rango):
data['categorias_superficie_terraza'] = pd.qcut(data.superficieTerraza,i)
        ## impute prices for listings with a terrace
df_terraza = pd.DataFrame(data[data.categorias_superficie_terraza.notnull()].groupby(['state_name','place_name','property_type','categoria_superficie_cubierta_imputada','categorias_superficie_terraza'])['price_aprox_usd'].mean())
data['imputar_precios_terraza'] = data.merge(df_terraza, how='left', left_on=['state_name','place_name','property_type','categoria_superficie_cubierta_imputada','categorias_superficie_terraza'],right_on=['state_name','place_name','property_type','categoria_superficie_cubierta_imputada','categorias_superficie_terraza'])['price_aprox_usd_y']
data.imputar_precios_usd.update(data.imputar_precios_terraza)
data['categorias_superficie_jardines'] = pd.qcut(data.superficieJardines,i)
        ## impute prices for listings with a garden
df_jardin = pd.DataFrame(data[data.categorias_superficie_jardines.notnull()].groupby(['state_name','place_name','property_type','categoria_superficie_cubierta_imputada','categorias_superficie_jardines'])['price_aprox_usd'].mean())
data['imputar_precios_jardines'] = data.merge(df_jardin,how='left',left_on=['state_name','place_name','property_type','categoria_superficie_cubierta_imputada','categorias_superficie_jardines'], right_on=['state_name','place_name','property_type','categoria_superficie_cubierta_imputada','categorias_superficie_jardines'])['price_aprox_usd_y']
data.imputar_precios_usd.update(data.imputar_precios_jardines)
data['categorias_superficie_terraza_jardin'] = pd.qcut(data.superficieJarTer,i)
        ## impute prices for listings with both terrace and garden
df_terraza_jardin = pd.DataFrame(data[data.categorias_superficie_terraza_jardin.notnull()].groupby(['state_name','place_name','property_type','categoria_superficie_cubierta_imputada','categorias_superficie_terraza_jardin'])['price_aprox_usd'].mean())
data['imputar_precios_terraza_jardin'] = data.merge(df_terraza_jardin, how='left', left_on=['state_name','place_name','property_type','categoria_superficie_cubierta_imputada','categorias_superficie_terraza_jardin'], right_on=['state_name','place_name','property_type','categoria_superficie_cubierta_imputada','categorias_superficie_terraza_jardin'])['price_aprox_usd_y']
data.imputar_precios_usd.update(data.imputar_precios_terraza_jardin)
break
return data.imputar_precios_usd
def imputarPrecioM2(p_data):
data = p_data
data['nuevos_precios_m2'] = data.price_aprox_usd/data.surface_total_in_m2
data.nuevos_precios_m2[data.nuevos_precios_m2<500] = np.nan
data.nuevos_precios_m2[data.nuevos_precios_m2>6000] = np.nan
#data['nuevos_precios_m2'].update(data.price_usd_per_m2)
return data.nuevos_precios_m2
#def imputarPrecioM2(p_data):
#data = p_data
#data['imputar_precios_m2'] = np.nan
#data['categorias_superficie_total_m2'] = pd.qcut(data.surface_total_in_m2,20)
#data['categorias_precios_aprox_usd'] = pd.qcut(data.price_aprox_usd,20)
#df_precio_sup_total = pd.DataFrame(data.groupby(['state_name','place_name','property_type','surface_total_in_m2','price_aprox_usd'])['price_usd_per_m2'].mean())
#serie_imputada_precio_sup_total = data.merge(df_precio_sup_total,how='left',left_on=['state_name','place_name','property_type','surface_total_in_m2','price_aprox_usd'], right_on=['state_name','place_name','property_type','surface_total_in_m2','price_aprox_usd'])['price_usd_per_m2_y']
#data.imputar_precios_m2.update(serie_imputada_precio_sup_total)
#data.imputar_precios_m2.update(data.price_usd_per_m2)
#return data.imputar_precios_m2
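# Hypothetical end-to-end driver (an assumption for illustration; the CSV path
# and columns must match the properati-style dataset this module expects):
#   data = pd.read_csv('properatti.csv')
#   data = limpiarDatos(data, alpha=1)
#   matriz = generarDummies(GenerarMatriz(data))
#   modelo = modelo_regresion_lineal(eliminar_features(matriz))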
|
[
"baqueroemanuel@gmail.com"
] |
baqueroemanuel@gmail.com
|
ec4aa967893296c95ba645ee12fe5e385358928c
|
5456502f97627278cbd6e16d002d50f1de3da7bb
|
/chrome/common/extensions/docs/server2/schema_processor_test.py
|
f7862583b704fe17595d6b4258eff8a9acf0d5c9
|
[
"BSD-3-Clause"
] |
permissive
|
TrellixVulnTeam/Chromium_7C66
|
72d108a413909eb3bd36c73a6c2f98de1573b6e5
|
c8649ab2a0f5a747369ed50351209a42f59672ee
|
refs/heads/master
| 2023-03-16T12:51:40.231959
| 2017-12-20T10:38:26
| 2017-12-20T10:38:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,584
|
py
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from copy import deepcopy
from schema_processor import SchemaProcessor
from future import Future
from object_store_creator import ObjectStoreCreator
from host_file_system_provider import HostFileSystemProvider
from compiled_file_system import CompiledFileSystem
class _FakeReferenceResolver():
def GetRefModel(self, ref, api_list):
return None, None
class _FakeAPIModels():
def GetNames(self):
return []
class _FakeFeaturesBundle():
def GetAPIFeatures(self):
return Future(value={})
class SchemaUtilTest(unittest.TestCase):
def testRemoveNoDocs(self):
expected_nodoc = [
{
'name': 'B',
'list': [
{
'name': 'B2'
}
]
},
{
'name': 'D',
'nodoc': False
},
{
'name': 'E',
'items1': [
{
'name': 'E1',
'items': [
{
'name': 'E1.3'
}
]
},
{
'name': 'E2'
}
]
}
]
nodoc_data = [
{
'name': 'A',
'nodoc': True
},
{
'name': 'B',
'list': [
{
'name': 'B1',
'nodoc': True
},
{
'name': 'B2'
},
{
'name': 'B3',
'nodoc': True
}
]
},
{
'name': 'C',
'nodoc': True
},
{
'name': 'D',
'nodoc': False
},
{
'name': 'E',
'dict': {
'name': 'Ed',
'nodoc': True
},
'items1': [
{
'name': 'E1',
'items': [
{
'name': 'E1.1',
'nodoc': True
},
{
'name': 'E1.2',
'nodoc': True
},
{
'name': 'E1.3'
}
]
},
{
'name': 'E2'
},
{
'name': 'E3',
'nodoc': True
}
]
}
]
object_store_creator = ObjectStoreCreator(start_empty=False)
host_file_system_provider = HostFileSystemProvider(object_store_creator)
schema_processor = SchemaProcessor(_FakeReferenceResolver(),
_FakeAPIModels(),
_FakeFeaturesBundle(),
CompiledFileSystem.Factory(
object_store_creator),
host_file_system_provider.GetMaster(),
True)
schema_processor._RemoveNoDocs(nodoc_data)
    self.assertEqual(expected_nodoc, nodoc_data)
def testInlineDocs(self):
schema = {
'namespace': 'storage',
'properties': {
'key2': {
'description': 'second key',
'$ref': 'Key'
},
'key1': {
'description': 'first key',
'$ref': 'Key'
}
},
'types': [
{
'inline_doc': True,
'type': 'string',
'id': 'Key', # Should be inlined into both properties and be removed
# from types.
'description': 'This is a key.', # This description should disappear.
'marker': True # This should appear three times in the output.
},
{
'items': {
'$ref': 'Key'
},
'type': 'array',
'id': 'KeyList',
'description': 'A list of keys'
}
]
}
expected_schema = {
'namespace': 'storage',
'properties': {
'key2': {
'marker': True,
'type': 'string',
'description': 'second key'
},
'key1': {
'marker': True,
'type': 'string',
'description': 'first key'
}
},
'types': [
{
'items': {
'marker': True,
'type': 'string'
},
'type': 'array',
'id': 'KeyList',
'description': 'A list of keys'
}
]
}
object_store_creator = ObjectStoreCreator(start_empty=False)
host_file_system_provider = HostFileSystemProvider(object_store_creator)
schema_processor = SchemaProcessor(_FakeReferenceResolver(),
_FakeAPIModels(),
_FakeFeaturesBundle(),
CompiledFileSystem.Factory(
object_store_creator),
host_file_system_provider.GetMaster(),
False)
inlined_schema = deepcopy(schema)
schema_processor._InlineDocs(inlined_schema)
self.assertEqual(expected_schema, inlined_schema)
def testDetectInline(self):
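    # _DetectInlineableTypes should mark a type referenced exactly once (here
    # 'Value') as inlineable, so the following _InlineDocs pass folds it into
    # its referrer and drops it from 'types'.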
schema = {
'types': [
{
'id': 'Key',
'items': {
'$ref': 'Value'
}
},
{
'id': 'Value',
'marker': True
}
]
}
expected_schema = {
'types': [
{
'id': 'Key',
'items': {
'marker': True,
}
}
]
}
object_store_creator = ObjectStoreCreator(start_empty=False)
host_file_system_provider = HostFileSystemProvider(object_store_creator)
schema_processor = SchemaProcessor(_FakeReferenceResolver(),
_FakeAPIModels(),
_FakeFeaturesBundle(),
CompiledFileSystem.Factory(
object_store_creator),
host_file_system_provider.GetMaster(),
False)
schema_processor._DetectInlineableTypes(schema)
schema_processor._InlineDocs(schema)
self.assertEqual(expected_schema, schema)
if __name__ == '__main__':
unittest.main()
|
[
"lixiaodonglove7@aliyun.com"
] |
lixiaodonglove7@aliyun.com
|
43f79234fced9ed3c907da15fc09b79d0db3f181
|
c2b0ee4ef8fb8e933966c7219a6cac6484ce03fb
|
/crawl/migrations/0001_initial.py
|
d57e478119a73813b3396ded1ae1f9ae44953d00
|
[
"MIT"
] |
permissive
|
dukuaris/Django
|
0b40e79d9e461c28064a83cc42d7710b49b43a19
|
d34f3e3f09028511e96b99cae7faa1b46458eed1
|
refs/heads/master
| 2022-12-09T04:05:09.329256
| 2020-03-21T02:17:20
| 2020-03-21T02:17:20
| 236,935,131
| 0
| 0
|
MIT
| 2022-12-08T01:51:39
| 2020-01-29T08:24:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,782
|
py
|
# Generated by Django 2.2.6 on 2020-03-16 15:36
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50, verbose_name='TITLE')),
('description', models.CharField(blank=True, help_text='simple description text.', max_length=100, verbose_name='DESCRIPTION')),
('price', models.CharField(max_length=20, null=True, verbose_name='PRICE')),
('review', models.CharField(max_length=20, null=True, verbose_name='NUMBER_OF_REVIEWS')),
('score', models.CharField(max_length=20, null=True, verbose_name='SCORE')),
('image', models.ImageField(blank=True, null=True, upload_to='ProductImages', verbose_name='IMAGE')),
('rank', models.CharField(blank=True, max_length=10, null=True, verbose_name='RANK')),
('create_dt', models.DateTimeField(auto_now_add=True, verbose_name='CREATE DATE')),
('owner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='OWNER')),
],
options={
'verbose_name': 'product',
'verbose_name_plural': 'products',
'db_table': 'crawl_products',
'ordering': ('-rank',),
},
),
]
|
[
"dukuaris@gmail.com"
] |
dukuaris@gmail.com
|
bcde15fdabb4ae9833286607ebfe97eb4db771fa
|
32976cfb755fcc4ffabc47615b82122b3424eaaf
|
/manage.py
|
3838714a7d44389c739f0e9b9b511d8afa1dc506
|
[] |
no_license
|
henriquemsouza/Desafio-vizir
|
2664ee55be11a5658a0fe2f471da0ee5db04cb77
|
9eab2e662c08220fc2a03a7a2c5c183241bc7c52
|
refs/heads/master
| 2021-07-10T17:59:27.057189
| 2017-10-13T11:57:13
| 2017-10-13T11:57:13
| 105,031,556
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 256
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "showmethecode.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"henrique.m_s@hotmail.com"
] |
henrique.m_s@hotmail.com
|
2f4419826b5f78871ab89e3e2320477118c049cc
|
4b44a299bafbd4ca408ce1c89c9fe4a449632783
|
/python2/10_Modules/03_random/05_password_generator.py
|
1ffd148580e202e7cef103b05984f9dd1c8822c4
|
[] |
no_license
|
umunusb1/PythonMaterial
|
ecd33d32b2de664eaaae5192be7c3f6d6bef1d67
|
1e0785c55ccb8f5b9df1978e1773365a29479ce0
|
refs/heads/master
| 2023-01-23T23:39:35.797800
| 2020-12-02T19:29:00
| 2020-12-02T19:29:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 529
|
py
|
import string
from random import choice, randint, randrange
print 'string.ascii_letters :', string.ascii_letters
print 'string.digits :', string.digits
print 'string.punctuation :', string.punctuation
characters = string.ascii_letters + string.punctuation \
+ string.digits
password1 = "".join(choice(characters) for x in range(randint(8, 16)))
print 'password1 :', password1
password2 = "".join(choice(characters) for x in range(randrange(8, 16)))
print 'password2 :', password2
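# Note: randint(8, 16) is inclusive of both ends while randrange(8, 16)
# excludes 16, so password1 can reach 16 characters but password2 at most 15.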
|
[
"uday3prakash@gmail.com"
] |
uday3prakash@gmail.com
|
9ee3283e8325a1a2a86e00956634bf487004910e
|
80ab528ecba746cd89666a2674adb9f9cc7f16a6
|
/app/dishes/views.py
|
4917352b82fc8d5965dba6c5bc7a478e46f51643
|
[] |
no_license
|
caffeinecodes/restaurantBooking
|
ce9da58329492bdd3bd4d39b1aca9c1e5c55427f
|
3e82ec55fd9bcd78fe7042190cbca2a68c17ec5e
|
refs/heads/master
| 2020-03-25T18:52:42.014722
| 2018-08-15T10:34:29
| 2018-08-15T10:34:29
| 144,053,347
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,191
|
py
|
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import AllowAny
from app.dishes.serializer import CategorySerializer, DishSerializer
from app.dishes.models import Category, Dish
@api_view(['GET', 'POST'])
@permission_classes((AllowAny, ))
def dish_list(request):
if request.method == 'GET':
dish = Dish.objects.all()
dish_serializer = DishSerializer(dish, many=True)
return Response(
{
'code': 200,
'message': 'Dish List',
'data': dish_serializer.data
},
status=status.HTTP_200_OK)
elif request.method == 'POST':
data = request.data
dish_name = data.get("name")
category_id = data.get("category_id")
profile_id = 1
mrp = data.get("mrp")
offer_price = data.get("offer_price")
is_veg = data.get("is_veg")
time_slot = data.get("time_slot", [])
description = data.get("description")
dish, created = Dish.objects.get_or_create(
name=dish_name,
category_id=category_id,
profile_id=profile_id,
mrp=mrp,
offer_price=offer_price,
is_veg=is_veg,
time_slot=time_slot,
description=description)
dish_serializer = DishSerializer(dish, many=False)
return Response(
{
'code': 201 if created else 200,
                'message': 'Dish created successfully',
                'data': dish_serializer.data
            },
            status=status.HTTP_201_CREATED if created else status.HTTP_200_OK)
@api_view(['GET', 'POST'])
@permission_classes((AllowAny, ))
def categories(request):
if request.method == 'GET':
response_data = {}
data = request.query_params
show_products = data.get("products", 0)
selectedCategory = data.get("selectedCategory", None)
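        # Illustrative request (the URL path is an assumption based on typical
        # routing, not taken from this project's urls.py):
        #   GET /categories/?products=1&selectedCategory=3
        # returns only category 3, with its dishes serialized inline.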
        if selectedCategory:
            category = Category.objects.filter(id=selectedCategory)
        else:
            category = Category.objects.all()
response_data["categories"] = CategorySerializer(
category,
many=True,
context={
'list_dishes': True if show_products else False
}).data
return Response(
{
'code': 200,
'message': 'Category list fetched successfully',
'data': response_data
},
status=status.HTTP_200_OK)
elif request.method == 'POST':
data = request.data
category_name = data.get("name", None)
        category, created = Category.objects.get_or_create(name=category_name)
category_serializer = CategorySerializer(category, many=False)
return Response(
{
'code': 201 if created else 200,
'message': 'Category created successfully',
'data': category_serializer.data
},
            status=status.HTTP_201_CREATED if created else status.HTTP_200_OK)
@api_view(['PUT'])
@permission_classes((AllowAny, ))
def dish_quantity(request):
data = request.data
dish_id = data.get("dish_id")
quantity = data.get("quantity", 0)
try:
dish = Dish.objects.get(id=dish_id)
dish.quantity = quantity
dish.save()
except Dish.DoesNotExist:
return Response(
{
                'code': 404,
'message': 'Menu item not found',
'data': None
},
status=status.HTTP_404_NOT_FOUND)
dish_serializer = DishSerializer(dish, many=False)
return Response(
{
'code': 200,
'message': 'Quantity updated successfully',
'data': dish_serializer.data
},
status=status.HTTP_200_OK)
|
[
"write2afsal@gmail.com"
] |
write2afsal@gmail.com
|
1f6272dd78779c03efd2ad46f34e0a75950c930f
|
85c0ac04965a93864aadb311c3004dce7f62ae85
|
/new_controller_api.py
|
1349b0afe07bf3de0c6aa8da9f70e45818d3722f
|
[] |
no_license
|
azriel94530/SpectroCCDTools
|
d33db06b25afcece7c0490e465740c446581ee89
|
e7d63bbcc5c2e4dda4cfb216953c24f99d744d0f
|
refs/heads/master
| 2020-12-13T23:31:37.163081
| 2016-06-28T00:00:45
| 2016-06-28T00:00:45
| 48,209,618
| 0
| 0
| null | 2015-12-18T02:23:39
| 2015-12-18T02:23:39
| null |
UTF-8
|
Python
| false
| false
| 6,582
|
py
|
import socket
import struct
from new_controller_constants import *
from new_controller_messaging import mysocket
# messages to the controller mirror this C struct on the server:
# typedef struct {
# int cmd;
# int nw;
# char strmsg[128];
# lbnldata_t data[MAXCMDWORDS]; MAXCMDWORDS=12 (unsigned ints)
# 4-bytes per int (signed or unsigned)
#} cmdstruct_t;
# messages from the controller mirror this C struct on the server:
# typedef struct {
# char strmsg[128];
# lbnldata_t data[MAXRESPWORDS];
# int status;
#} respstruct_t;
# messages TO the controller use the format:
format_ = "ii128s12i"
# messages FROM the controller use the format:
format_from_controller_ = "128s12ii"
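# A minimal round-trip sketch (illustrative assumption, not taken from the
# original code): a command packs into a fixed-size buffer, and a raw reply
# unpacks back into its named fields, e.g.
#   msg = struct.pack(format_, LBNL_OPEN, 0, "", *([0] * 12))
#   assert len(msg) == struct.calcsize(format_)
#   fields = struct.unpack(format_from_controller_, reply_bytes)  # reply_bytes is a hypothetical raw reply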
cmd = 0
nw = 0
string_message = ""
d00 = 0; d01 = 10; d02 = 20; d03 = 30
d04 = 40; d05 = 50; d06 = 60; d07 = 70
d08 = 80; d09 = 90; d10 = 100; d11 = 110
# maybe nice to create a class for the controller as well
sock = mysocket()
def connect_socket_to_controller():
sock.connect('ccd-spectro.dhcp.lbl.gov', 15001)
def open_controller():
""" Establishes a lock and a driver file descriptor, method to take ownership of controller """
cmd = LBNL_OPEN
tuple_to_send = (cmd ,nw, string_message, d00, d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11)
string_to_send = struct.pack(format_, *tuple_to_send)
sock.mysend(string_to_send)
# receive the reply from controller
recv_message = sock.myreceive()
msg_str,dr00,dr01,dr02,dr03,dr04,dr05,dr06,dr07,dr08,dr09,dr10,dr11,r_status = struct.unpack(format_from_controller_,recv_message)
if (r_status != 0): print msg_str
return r_status
def close_controller():
""" Deletes lock and resets driver file descriptor, releases ownership of controller """
cmd = LBNL_CLOSE
tuple_to_send = (cmd ,nw, string_message, d00, d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11)
string_to_send = struct.pack(format_, *tuple_to_send)
sock.mysend(string_to_send)
# receive the reply from controller
recv_message = sock.myreceive()
msg_str,dr00,dr01,dr02,dr03,dr04,dr05,dr06,dr07,dr08,dr09,dr10,dr11,r_status = struct.unpack(format_from_controller_,recv_message)
if (r_status != 0): print msg_str
return r_status
def controller_analog_power_on():
""" Turn ON Analog power on controller (VDD, VReset.. DC-DCs) """
cmd = LBNL_POWER
d00 = 1
tuple_to_send = (cmd ,nw, string_message, d00, d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11)
string_to_send = struct.pack(format_, *tuple_to_send)
sock.mysend(string_to_send)
# receive the reply from controller
recv_message = sock.myreceive()
msg_str,dr00,dr01,dr02,dr03,dr04,dr05,dr06,dr07,dr08,dr09,dr10,dr11,r_status = struct.unpack(format_from_controller_,recv_message)
if (r_status != 0): print msg_str
return r_status
def controller_analog_power_off():
""" Turn ON Analog power on controller (VDD, VReset.. DC-DCs) """
cmd = LBNL_POWER
d00 = 0
tuple_to_send = (cmd ,nw, string_message, d00, d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11)
string_to_send = struct.pack(format_, *tuple_to_send)
sock.mysend(string_to_send)
# receive the reply from controller
recv_message = sock.myreceive()
msg_str,dr00,dr01,dr02,dr03,dr04,dr05,dr06,dr07,dr08,dr09,dr10,dr11,r_status = struct.unpack(format_from_controller_,recv_message)
if (r_status != 0): print msg_str
return r_status
def set_image_size(nx,ny):
""" Set the ccd image size, with nx pixels in x and ny pixels in y """
cmd = LBNL_IMSIZE
d00 = nx
d01 = ny
tuple_to_send = (cmd ,nw, string_message, d00, d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11)
string_to_send = struct.pack(format_, *tuple_to_send)
sock.mysend(string_to_send)
# receive the reply from controller
recv_message = sock.myreceive()
msg_str,dr00,dr01,dr02,dr03,dr04,dr05,dr06,dr07,dr08,dr09,dr10,dr11,r_status = struct.unpack(format_from_controller_,recv_message)
if (r_status != 0): print msg_str
return r_status
def set_artificial_data(artificial):
""" Set the flag for artificial data, artificial=0 is real image data, non-zero is artificial """
cmd = LBNL_ARTIF_DATA
d00 = artificial
tuple_to_send = (cmd ,nw, string_message, d00, d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11)
string_to_send = struct.pack(format_, *tuple_to_send)
sock.mysend(string_to_send)
# receive the reply from controller
recv_message = sock.myreceive()
msg_str,dr00,dr01,dr02,dr03,dr04,dr05,dr06,dr07,dr08,dr09,dr10,dr11,r_status = struct.unpack(format_from_controller_,recv_message)
if (r_status != 0): print msg_str
return r_status
def get_image_size():
""" Get the ccd image size, with nx pixels in x and ny pixels in y """
cmd = LBNL_GET_IMSIZE
tuple_to_send = (cmd ,nw, string_message, d00, d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11)
string_to_send = struct.pack(format_, *tuple_to_send)
sock.mysend(string_to_send)
# receive the reply from controller
recv_message = sock.myreceive()
msg_str,dr00,dr01,dr02,dr03,dr04,dr05,dr06,dr07,dr08,dr09,dr10,dr11,r_status = struct.unpack(format_from_controller_,recv_message)
nx = -1
ny = -1
if (r_status != 0):
print msg_str
else:
nx = dr00
ny = dr01
print nx,ny
return r_status, nx, ny
def read_register(reg_add0, reg_add1):
""" Read a register value. Input is ... Output is a 32bit unsigned """
cmd = LBNL_GET_REG
d00 = reg_add0
d01 = reg_add1
tuple_to_send = (cmd ,nw, string_message, d00, d01, d02, d03, d04, d05, d06, d07, d08, d09, d10, d11)
string_to_send = struct.pack(format_, *tuple_to_send)
sock.mysend(string_to_send)
# receive the reply from controller
recv_message = sock.myreceive()
msg_str,dr00,dr01,dr02,dr03,dr04,dr05,dr06,dr07,dr08,dr09,dr10,dr11,r_status = struct.unpack(format_from_controller_,recv_message)
reg_value = -9999
if (r_status != 0):
print msg_str
else:
reg_value = dr00
print "Register: "+str(reg_add0)+" "+str(reg_add1)+" is: "+str(reg_value)
return r_status, reg_value
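# Minimal usage sketch (the call order is an assumption inferred from the
# functions above; host/port are hard-coded in connect_socket_to_controller):
#   connect_socket_to_controller()
#   if open_controller() == 0:
#       set_image_size(2048, 4096)
#       controller_analog_power_on()
#       close_controller()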
|
[
"azriel.goldschmidt@gmail.com"
] |
azriel.goldschmidt@gmail.com
|
466a40a7cef03f6066e86be88110b116a3c6c2a1
|
d25cfc9256c18c53b3f6b36657f4b0fb5f8f6ffa
|
/metrix_ml/decisiontree_randomsearch_normed.py
|
e83e3cd46e9a8144091e63accffd984bf15a04ad
|
[
"BSD-3-Clause"
] |
permissive
|
mevol/metrix_ml
|
becd012312a2f82f5a7815f2ae7c2d3b550446b3
|
f09ef35eeec8fe64fce83bb238f1ba75362856ce
|
refs/heads/master
| 2022-03-09T06:36:02.623092
| 2022-02-21T14:51:01
| 2022-02-21T14:51:01
| 140,827,932
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,786
|
py
|
###############################################################################
#
# imports and set up environment
#
###############################################################################
'''Defining the environment for this class'''
import argparse
import pandas as pd
import os
import numpy as np
import joblib
import logging
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import RandomizedSearchCV
from sklearn.preprocessing import MinMaxScaler
from datetime import datetime
from scipy.stats import randint
from scipy.stats import uniform
from tbx import get_confidence_interval, feature_importances_best_estimator
from tbx import feature_importances_error_bars, confusion_matrix_and_stats
from tbx import training_cv_stats, testing_predict_stats, plot_hist_pred_proba
from tbx import plot_precision_recall_vs_threshold, plot_roc_curve, evaluate_threshold
from tbx import calibrate_classifier, plot_radar_chart, print_to_consol
def make_output_folder(outdir):
'''A small function for making an output directory
    Args:
        outdir (str): user provided directory where the output directory will be created
    Returns:
        output_dir (str): the newly created output directory, named
        "decisiontree_randomsearch_normed"
    '''
output_dir = os.path.join(outdir, 'decisiontree_randomsearch_normed')
os.makedirs(output_dir, exist_ok=True)
return output_dir
class TreeRandSearch():
''' A class to conduct a randomised search and training for best parameters for a
decision tree; the following steps are executed:
* loading input data in CSV format
* creating output directory to write results files to
* set up a log file to keep note of stats and processes
    * prepare the input data by splitting it into calibration (5%), testing (20%) and
      training (80%) sets and applying MinMaxScaling
* conduct randomised search to find best parameters for the best predictor
* save model to disk
* get 95% confidence interval for uncalibrated classifier
* get feature importances
* get statistics for training using 3-fold cross-validation and testing
* get more detailed statistics and plots for prediction performances on the testing
set; this includes a confusion matrix, histogram of prediction probabilities,
precision-recall curve and ROC curve
* explore sensitivity/specificity trade-off when using different probability
thresholds
* calibrate the predictor and write the calibrated version to disk
* get 95% confidence interval for calibrated classifier
Args:
data (str): file path to the input CSV file
directory (str): target output directory where an output folder will be created
and all results will be written to
numf (int): maximum number of features to use in training; default = 10
numc (int): number of search cycles for randomised search; default = 500
cv (int): number of cross-validation cycles to use during training; default = 3
        bootiter (int): number of bootstrap cycles to use for getting confidence
intervals; default = 1000
Yields:
trained predictor: "best_predictor_<date>.pkl"
trained and calibrated predictor: "best_predictor_calibrated_<date>.pkl"
logfile: "decisiontree_randomsearch.log"
plots: "bootstrap_hist_uncalibrated_<date>.png"
"feature_importances_best_bar_plot_<date>.png"
"feature_importances_all_error_bars_<date>.png"
"confusion_matrix_for_test_set_<date>.png"
"hist_pred_proba_<date>.png"
"Precision_Recall_<date>.png"
"ROC_curve_<date>.png"
"bootstrap_hist_calibrated_<date>.png"
"radar_plot_prediction_metrics<date>.png"
'''
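    # Minimal usage sketch (file and directory names are illustrative
    # assumptions); instantiating the class runs the whole pipeline:
    #   TreeRandSearch(data='metrix.csv', directory='results',
    #                  numf=10, numc=500, cv=3, bootiter=1000)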
def __init__(self, data, directory, numf, numc, cv, bootiter):
self.numf = numf
self.numc = numc
self.cv = cv
self.bootiter = bootiter
self.data = pd.read_csv(data)
self.directory = make_output_folder(directory)
logging.basicConfig(level=logging.INFO, filename=os.path.join(self.directory,
'decisiontree_randomsearch.log'), filemode='w')
logging.info(f'Loaded input data \n'
f'Created output directories at {self.directory} \n')
self.start = datetime.now()
self.prepare_data()
self.randomised_search()
self.get_training_testing_prediction_stats()
self.detailed_analysis()
###############################################################################
#
# prepare input data
#
###############################################################################
def prepare_data(self):
        print_to_consol('Preparing input data and splitting into train/test/calibration sets')
for name in self.data.columns:
if 'success' in name or "ground_truth" in name:
y = self.data[name]
X = self.data.drop([name, 'Unnamed: 0'], axis=1).select_dtypes(
exclude=['object'])
# create a 5% calibration set if needed
X_temp, X_cal, y_temp, self.y_cal = train_test_split(X, y, test_size=0.05,
random_state=42)
# use the remaining data for 80/20 train-test split
X_train, X_test, self.y_train, self.y_test = train_test_split(X_temp,
y_temp,
test_size=0.2,
random_state=100)
scaler = MinMaxScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
X_cal_scaled = scaler.transform(X_cal)
self.X_train_scaled = pd.DataFrame(data=X_train_scaled,
index=X_train.index,
columns=X_train.columns)
self.X_test_scaled = pd.DataFrame(data=X_test_scaled,
index=X_test.index,
columns=X_test.columns)
self.X_cal_scaled = pd.DataFrame(data=X_cal_scaled,
index=X_cal.index,
columns=X_cal.columns)
        logging.info(f'Created train, test and calibration sets \n'
                     f'Normalizing the train set and applying the scaler to the test and calibration sets \n')
###############################################################################
#
# randomized search
#
###############################################################################
def randomised_search(self):
print_to_consol('Running randomized search to find best classifier')
#create the decision forest
clf1 = DecisionTreeClassifier(random_state=20,
class_weight='balanced',
max_features = self.numf)
logging.info(f'Initialised classifier')
#set up randomized search
param_dict = {
'criterion': ['gini', 'entropy'],
'min_samples_split': randint(2, 20),
'max_depth': randint(1, 10),
'min_samples_leaf': randint(1, 20),
'max_leaf_nodes': randint(10, 20)}
logging.info(f'Following parameters will be explored in randomized search \n'
f'{param_dict} \n')
#building and running the randomized search
rand_search = RandomizedSearchCV(clf1, param_dict, random_state=5,
cv=self.cv, n_iter=self.numc,
scoring='accuracy', n_jobs=-1)
rand_search_fitted = rand_search.fit(self.X_train_scaled,
self.y_train)
best_parameters = rand_search_fitted.best_params_
best_scores = rand_search_fitted.best_score_
        logging.info(f'Running randomised search for best parameters of classifier \n'
f'Best parameters found: {best_parameters} \n'
f'Best accuracy scores found: {best_scores} \n')
self.model = rand_search_fitted.best_estimator_
datestring = datetime.strftime(datetime.now(), '%Y%m%d_%H%M')
joblib.dump(self.model, os.path.join(self.directory,
'best_predictor_'+datestring+'.pkl'))
logging.info(f'Writing best classifier to disk in {self.directory} \n')
print_to_consol('Getting 95% confidence interval for uncalibrated classifier')
alpha, upper, lower = get_confidence_interval(self.X_train_scaled, self.y_train,
self.X_test_scaled, self.y_test,
self.model, self.directory,
self.bootiter, 'uncalibrated')
logging.info(f'{alpha}% confidence interval {upper}% and {lower}% \n'
f'for uncalibrated classifier. \n')
print_to_consol('Getting feature importances for best classifier')
best_clf_feat_import = self.model.feature_importances_
best_clf_feat_import_sorted = sorted(zip(best_clf_feat_import,
self.X_train_scaled.columns),
reverse=True)
logging.info(f'Feature importances for best classifier {best_clf_feat_import_sorted} \n')
print_to_consol('Plotting feature importances for best classifier')
feature_importances_best_estimator(best_clf_feat_import_sorted, self.directory)
logging.info(f'Plotting feature importances for best classifier in decreasing order \n')
###############################################################################
#
# get training and testing stats
#
###############################################################################
def get_training_testing_prediction_stats(self):
print_to_consol('Getting basic stats for training set and cross-validation')
training_stats, y_train_pred, y_train_pred_proba = training_cv_stats(
self.model, self.X_train_scaled,
self.y_train, self.cv)
logging.info(f'Basic stats achieved for training set and 3-fold CV \n'
f'Accuracy for each individual fold of 3 CV folds: {training_stats["acc_cv"]} \n'
f'Accuracy across all 3 CV-folds: {training_stats["acc"]} \n'
f'ROC_AUC across all 3 CV-folds: {training_stats["roc_auc"]} \n'
f'Recall across all 3 CV-folds: {training_stats["recall"]} \n'
f'Precision across all 3 CV-folds: {training_stats["precision"]} \n'
f'F1 score across all 3 CV-folds: {training_stats["f1-score"]} \n'
f'Storing cross-validated y_train classes in y_train_pred \n'
f'Storing cross-validated y_train probabilities in y_train_pred_proba \n')
print_to_consol('Getting class predictions and probabilities for test set')
test_stats, self.y_pred, self.y_pred_proba = testing_predict_stats(
self.model, self.X_test_scaled, self.y_test)
logging.info(f'Predicting on the test set. \n'
f'Storing classes in y_pred and probabilities in y_pred_proba \n')
print_to_consol(
'Calculate prediction stats for y_pred and y_pred_proba of test set')
logging.info(f'Basic stats on the test set. \n'
f'Prediction accuracy on the test set: {test_stats["predict_acc"]} \n'
                     f'Class distribution in the test set: {test_stats["class_distribution"]} \n'
f'Matthews Correlation Coefficient: {test_stats["mcc"]} \n'
f'Average number of class 1 samples: {test_stats["class_one"]} \n'
f'Average number of class 0 samples: {test_stats["class_zero"]} \n'
f'Null accuracy: {test_stats["null_acc"]} \n')
print_to_consol(
'Plotting histogram for class 1 prediction probabilities for test set')
#store the predicted probabilities for class 1 of test set
self.y_pred_proba_ones = self.y_pred_proba[:, 1]
plot_hist_pred_proba(self.y_pred_proba_ones, self.directory)
logging.info(
f'Plotting prediction probabilities for class 1 in test set in histogram. \n')
###############################################################################
#
# get more detailed stats and plots
#
###############################################################################
def detailed_analysis(self):
print_to_consol('Making a confusion matrix for test set classification outcomes')
matrix_stats = confusion_matrix_and_stats(self.y_test, self.y_pred,
'before_cal', self.directory)
logging.info(f'Detailed analysis of confusion matrix for test set. \n'
f'True positives: {matrix_stats["TP"]} \n'
f'True negatives: {matrix_stats["TN"]} \n'
f'False positives: {matrix_stats["FP"]} \n'
f'False negatives: {matrix_stats["FN"]} \n'
f'Classification accuracy: {matrix_stats["acc"]} \n'
f'Classification error: {matrix_stats["err"]} \n'
f'Sensitivity: {matrix_stats["sensitivity"]} \n'
f'Specificity: {matrix_stats["specificity"]} \n'
f'False positive rate: {matrix_stats["FP-rate"]} \n'
f'False negative rate: {matrix_stats["FN-rate"]} \n'
f'Precision: {matrix_stats["precision"]} \n'
f'F1-score: {matrix_stats["F1-score"]} \n')
print_to_consol(
'Plotting precision recall curve for test set class 1 probabilities')
logging.info(
f'Plotting precision recall curve for class 1 in test set probabilities. \n')
plot_precision_recall_vs_threshold(self.y_test, self.y_pred_proba_ones,
self.directory)
print_to_consol(
            'Plotting ROC curve and calculating AUC for test set class 1 probabilities')
logging.info(
f'Plotting ROC curve for class 1 in test set probabilities. \n')
self.fpr, self.tpr, self.thresholds = plot_roc_curve(self.y_test,
self.y_pred_proba_ones, self.directory)
AUC = round(roc_auc_score(self.y_test, self.y_pred_proba_ones) * 100, 2)
logging.info(
f'Calculating AUC for ROC curve for class 1 in test set probabilities: {AUC} \n')
print_to_consol('Make a radar plot for performance metrics')
radar_dict = {'Classification accuracy' : matrix_stats["acc"],
'Classification error' : matrix_stats["err"],
'Sensitivity' : matrix_stats["sensitivity"],
'Specificity' : matrix_stats["specificity"],
'False positive rate' : matrix_stats["FP-rate"],
'False negative rate' : matrix_stats["FN-rate"],
'Precision' : matrix_stats["precision"],
'F1-score' : matrix_stats["F1-score"],
'ROC AUC' : AUC}
plot_radar_chart(radar_dict, self.directory)
print_to_consol(
'Exploring probability thresholds, sensitivity, specificity for class 1')
threshold_dict = evaluate_threshold(self.tpr, self.fpr, self.thresholds)
logging.info(
f'Exploring different probability thresholds and sensitivity-specificity trade-offs. \n'
f'Threshold 0.2: {threshold_dict["0.2"]} \n'
f'Threshold 0.3: {threshold_dict["0.3"]} \n'
f'Threshold 0.4: {threshold_dict["0.4"]} \n'
f'Threshold 0.5: {threshold_dict["0.5"]} \n'
f'Threshold 0.6: {threshold_dict["0.6"]} \n'
f'Threshold 0.7: {threshold_dict["0.7"]} \n'
f'Threshold 0.8: {threshold_dict["0.8"]} \n'
f'Threshold 0.9: {threshold_dict["0.9"]} \n')
print_to_consol(
'Calibrating classifier and writing to disk; getting new accuracy')
self.calibrated_clf, clf_acc = calibrate_classifier(self.model, self.X_cal_scaled,
self.y_cal)
date = datetime.strftime(datetime.now(), '%Y%m%d_%H%M')
joblib.dump(self.calibrated_clf, os.path.join(self.directory,
'best_calibrated_predictor_'+date+'.pkl'))
logging.info(
f'Calibrated the best classifier with X_cal and y_cal and new accuracy {clf_acc}\n'
f'Writing file to disk disk in {self.directory} \n')
print_to_consol('Getting 95% confidence interval for calibrated classifier')
alpha, upper, lower = get_confidence_interval(self.X_train_scaled, self.y_train,
self.X_test_scaled, self.y_test,
self.calibrated_clf, self.directory,
self.bootiter, 'calibrated')
logging.info(f'{alpha}% confidence interval {upper}% and {lower}% \n'
f'for calibrated classifier. \n')
print_to_consol('Running prediction for calibrated classifier')
print_to_consol(
'Getting class predictions and probabilities for test set with calibrated classifier')
test_stats_cal, self.y_pred_cal, self.y_pred_proba_cal = testing_predict_stats(
self.calibrated_clf,
self.X_test_scaled, self.y_test)
logging.info(
f'Predicting on the test set with calibrated classifier. \n'
f'Storing classes for calibrated classifier in y_pred and probabilities in y_pred_proba. \n')
print_to_consol(
'Calculate prediction stats for y_pred and y_pred_proba of test set with calibrated classifier')
        logging.info(f'Basic stats on the test set with the calibrated classifier. \n'
                     f'Prediction accuracy on the test set: {test_stats_cal["predict_acc"]} \n'
                     f'Class distribution in the test set: {test_stats_cal["class_distribution"]} \n'
f'Matthews Correlation Coefficient: {test_stats_cal["mcc"]} \n'
f'Average number of class 1 samples: {test_stats_cal["class_one"]} \n'
f'Average number of class 0 samples: {test_stats_cal["class_zero"]} \n'
f'Null accuracy: {test_stats_cal["null_acc"]} \n')
print_to_consol(
'Plotting histogram for class 1 prediction probabilities for test set')
#store the predicted probabilities for class 1 of test set
self.y_pred_proba_cal_ones = self.y_pred_proba_cal[:, 1]
plot_hist_pred_proba(self.y_pred_proba_cal_ones, self.directory)
logging.info(
f'Plotting prediction probabilities for class 1 in test set in histogram for calibrated classifier. \n')
print_to_consol(
'Making a confusion matrix for test set classification outcomes with calibrated classifier')
matrix_stats_cal = confusion_matrix_and_stats(self.y_test, self.y_pred_cal,
'after_cal', self.directory)
logging.info(f'Detailed analysis of confusion matrix for test set with calibrated classifier. \n'
f'True positives: {matrix_stats_cal["TP"]} \n'
f'True negatives: {matrix_stats_cal["TN"]} \n'
f'False positives: {matrix_stats_cal["FP"]} \n'
f'False negatives: {matrix_stats_cal["FN"]} \n'
f'Classification accuracy: {matrix_stats_cal["acc"]} \n'
f'Classification error: {matrix_stats_cal["err"]} \n'
f'Sensitivity: {matrix_stats_cal["sensitivity"]} \n'
f'Specificity: {matrix_stats_cal["specificity"]} \n'
f'False positive rate: {matrix_stats_cal["FP-rate"]} \n'
f'False negative rate: {matrix_stats_cal["FN-rate"]} \n'
f'Precision: {matrix_stats_cal["precision"]} \n'
f'F1-score: {matrix_stats_cal["F1-score"]} \n')
print_to_consol(
'Plotting precision recall curve for test set class 1 probabilities with calibrated classifier')
logging.info(
f'Plotting precision recall curve for class 1 in test set probabilities with calibrated classifier. \n')
plot_precision_recall_vs_threshold(self.y_test, self.y_pred_proba_cal_ones,
self.directory)
print_to_consol(
            'Plotting ROC curve and calculating AUC for test set class 1 probabilities with calibrated classifier')
logging.info(
f'Plotting ROC curve for class 1 in test set probabilities with calibrated classifier. \n')
self.fpr_cal, self.tpr_cal, self.thresholds_cal = plot_roc_curve(self.y_test,
self.y_pred_proba_cal_ones, self.directory)
AUC_cal = round(roc_auc_score(self.y_test, self.y_pred_proba_cal_ones) * 100, 2)
logging.info(
f'Calculating AUC for ROC curve for class 1 in test set probabilities with calibrated classifier: {AUC_cal} \n')
print_to_consol('Make a radar plot for performance metrics with calibrated classifier')
radar_dict_cal = {'Classification accuracy' : matrix_stats_cal["acc"],
'Classification error' : matrix_stats_cal["err"],
'Sensitivity' : matrix_stats_cal["sensitivity"],
'Specificity' : matrix_stats_cal["specificity"],
'False positive rate' : matrix_stats_cal["FP-rate"],
'False negative rate' : matrix_stats_cal["FN-rate"],
'Precision' : matrix_stats_cal["precision"],
'F1-score' : matrix_stats_cal["F1-score"],
'ROC AUC' : AUC_cal}
plot_radar_chart(radar_dict_cal, self.directory)
print_to_consol(
'Exploring probability thresholds, sensitivity, specificity for class 1 with calibrated classifier')
threshold_dict_cal = evaluate_threshold(self.tpr_cal, self.fpr_cal, self.thresholds_cal)
logging.info(
f'Exploring different probability thresholds and sensitivity-specificity trade-offs \n'
f'for calibrated classifier. \n'
f'Threshold 0.2: {threshold_dict_cal["0.2"]} \n'
f'Threshold 0.3: {threshold_dict_cal["0.3"]} \n'
f'Threshold 0.4: {threshold_dict_cal["0.4"]} \n'
f'Threshold 0.5: {threshold_dict_cal["0.5"]} \n'
f'Threshold 0.6: {threshold_dict_cal["0.6"]} \n'
f'Threshold 0.7: {threshold_dict_cal["0.7"]} \n'
f'Threshold 0.8: {threshold_dict_cal["0.8"]} \n'
f'Threshold 0.9: {threshold_dict_cal["0.9"]} \n')
end = datetime.now()
duration = end - self.start
        logging.info(f'Training lasted {duration} \n')
logging.info(f'Training completed \n')
print_to_consol('Training completed')
def run(input_csv, output_dir, features, cycles, cv, boot_iter):
    TreeRandSearch(input_csv, output_dir, features, cycles, cv, boot_iter)
def main():
    '''defining the command line input to make it runnable'''
parser = argparse.ArgumentParser(description='DecisionTree randomized search')
parser.add_argument(
'--input',
type=str,
dest='input',
default='',
help='The input CSV file')
parser.add_argument(
'--outdir',
type=str,
dest='outdir',
default='',
help='Specify output directory')
parser.add_argument(
'--num_features',
type=int,
dest='num_features',
default=10,
help='Number of features to look for')
parser.add_argument(
'--num_cycles',
type=int,
dest='num_cycles',
default=500,
help='Number of randomized search cycles')
parser.add_argument(
'--cv',
type=int,
dest='cv',
default=3,
help='Number of cross-validation repeats to use during training')
parser.add_argument(
'--boot_iter',
type=int,
dest='boot_iter',
default=1000,
help='Number of bootstrap cycles')
args = parser.parse_args()
if args.input == '':
parser.print_help()
exit(0)
run(args.input,
args.outdir,
args.num_features,
args.num_cycles,
args.cv,
args.boot_iter)
if __name__ == "__main__":
main()
|
[
"melanie.vollmar@diamond.ac.uk"
] |
melanie.vollmar@diamond.ac.uk
|
63e498a3b73548e8213de06933bc5cb6abb190f5
|
ba3556d1c76a04e6a978a7aa656bb24f48cf18d0
|
/no.0_liner_model.py
|
c62baa9eb14eed6b11c8e86b2d10cf6333c375fd
|
[] |
no_license
|
AugusterHub/Pytorch
|
f763d134b5a027123a6328915fbcd70f2379bcdf
|
a139288c95af672c029bfb277d45f926473c5d5e
|
refs/heads/master
| 2023-07-09T03:49:16.461324
| 2021-08-15T08:03:42
| 2021-08-15T08:03:42
| 396,273,316
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 768
|
py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
""" 0 线性模型 """
import numpy as np
import matplotlib.pyplot as plt
x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]
def forward(x):
    # forward pass
return x * w
def loss(x, y):
    # define the squared-error loss
y_pred = forward(x)
return (y_pred - y) * (y_pred - y)
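# Since y_data is exactly 2 * x_data, the swept curve
#   MSE(w) = (1/3) * sum_i (w * x_i - y_i)^2
# reaches its minimum of 0 at w = 2.0.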
w_list = []
mse_list = []
for w in np.arange(0.0, 4.1, 0.1):
print('w=', w)
l_sum = 0
for x_val, y_val in zip(x_data, y_data):
y_pred_val = forward(x_val)
loss_val = loss(x_val, y_val)
l_sum += loss_val
print('\t', x_val, y_val, y_pred_val, loss_val)
print('MSE=', l_sum / 3)
w_list.append(w)
mse_list.append(l_sum / 3)
plt.plot(w_list, mse_list)
plt.ylabel('loss')
plt.xlabel('w')
plt.show()
|
[
"zhaipeng@topscomm.com"
] |
zhaipeng@topscomm.com
|