blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
084af231761d48ccdf9950ed5fbab1a7a44f86ab | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/SjSzRZMR/YW_RZMR_SZSJ_150.py | 60204aa1a24e2cc122eacbff5931e061d9482cba | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,334 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
import json
sys.path.append("/home/yhl2/workspace/xtp_test")
from xtp.api.xtp_test_case import xtp_test_case, Api, unittest
from service.ServiceConfig import *
from financing.service.mainService import ParmIni, serviceTest
from financing.service.QueryStkPriceQty import QueryStkPriceQty
from service.log import *
from financing.service.CaseParmInsertMysql import *
from mysql.QueryOrderErrorMsg import queryOrderErrorMsg
reload(sys)
sys.setdefaultencoding('utf-8')
class YW_RZMR_SZSJ_150(xtp_test_case):
    """Margin-buy test case: reverse-best-limit order on Shenzhen with an
    invalid price (1 billion); the expected final state is fully filled."""
    def test_YW_RZMR_SZSJ_150(self):
        title = '对方最优转限价买——错误的价格(价格10亿)'
        # Expected values for this test case.
        # Possible order states: initial, unfilled, partially filled, fully
        # filled, partial-cancel reported, partially cancelled, reported
        # awaiting cancel, cancelled, rejected, cancel-rejected, internal cancel.
        # xtp_ID and cancel_xtpID default to 0 and need no change.
        case_goal = {
            '期望状态': '全成',
            'errorID': 0,
            'errorMSG': '',
            '是否生成报单': '是',
            '是否是撤废': '否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        # Build the order parameters.
        # Arguments: ticker, market, security type, security status,
        # trading status, side (B = buy / S = sell), expected state, Api.
        stkparm = QueryStkPriceQty('999999', '2', '0', '2', '0', 'B', case_goal['期望状态'], Api)
        # If fetching the order parameters failed, the case fails immediately.
        if stkparm['返回结果'] is False:
            rs = {
                '用例测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            self.assertEqual(rs['用例测试结果'], True)
        else:
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_MARGIN'],
                'order_client_id':2,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_MARGIN_TRADE'],
                'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_REVERSE_BEST_LIMIT'],
                'price': 1000000000,
                'quantity': 200
            }
            ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            CaseParmInsertMysql(case_goal, wt_reqs)
            rs = serviceTest(Api, case_goal, wt_reqs)
            if rs['用例测试结果']:
                logger.warning('执行结果为{0}'.format(str(rs['用例测试结果'])))
            else:
                logger.warning('执行结果为{0},{1},{2}'.format(
                    str(rs['用例测试结果']), str(rs['用例错误源']),
                    json.dumps(rs['用例错误原因'], encoding='UTF-8', ensure_ascii=False)))
            self.assertEqual(rs['用例测试结果'], True)
if __name__ == '__main__':
unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
b7cc2f316f58f8c410aa9035f28402e0155c148e | 774b249ab38b925741a28daf552dd71e706ccdfe | /mysite/settings.py | 32f0595591775a11fa64de72e7e14e3c787a6b1e | [] | no_license | LucieGal/mon-nouveau-blog | ab1de4b5abfac4c06baf9e123ec604ece7e3c95b | bacf1bb2325b02e4b43a0cc94d676e3f68ab3604 | refs/heads/master | 2023-01-04T21:10:17.665230 | 2020-10-18T17:36:16 | 2020-10-18T17:36:16 | 304,846,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,300 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.16.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository -- rotate it and load
# it from an environment variable before any production deployment.
SECRET_KEY = 'lfs^$7uqn^^yh+0nlnx4gbwx#izrpimhg#7@h$*a8v^e@x5-)h'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Hosts this site may serve (a leading dot matches any subdomain).
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Where users are redirected after a successful login.
LOGIN_REDIRECT_URL ='/'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog.apps.BlogConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'fr-fr'
TIME_ZONE = 'Europe/Paris'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media') | [
"galea.lucie@gmail.com"
] | galea.lucie@gmail.com |
f4a7e68daf70584049de24fcf0e3d0d1aa07c352 | 0ff99fc75b1f42811f72aa86f4b32d1e3f8b6b48 | /PR_inference/maskrcnn_benchmark/data/datasets/__init__.py | a9fdd2e7e4b7c500fbf9a66017cd0a5759e6581a | [
"MIT"
] | permissive | alwc/buildings2vec | f95a4468a0d5c21f2732c177474350e767d459f7 | bd5121e715bc28f6e88163f18407a762736d38d5 | refs/heads/master | 2022-04-25T05:26:50.053219 | 2020-04-28T20:49:49 | 2020-04-28T20:49:49 | 263,258,064 | 0 | 1 | null | 2020-05-12T06:57:24 | 2020-05-12T06:57:23 | null | UTF-8 | Python | false | false | 302 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from .coco import COCODataset
from .voc import PascalVOCDataset
from .buildings import BuildingsDataset
from .concat_dataset import ConcatDataset
__all__ = ["COCODataset", "ConcatDataset", "PascalVOCDataset", "BuildingsDataset"]
| [
"ennauata@gmail.com"
] | ennauata@gmail.com |
4bac95965a18f0f6ed8d54cd5350c5180c7b129b | e972ebd59711a786780f4178f5b717fe245fd0a3 | /Python/11thoct-6.py | 08d9abdf498235c0a0ef9b876d6710b56b781b4e | [] | no_license | sai-varshith/make-pull-request | a46ed30803070fc28fd0b70a152779e53f98bcad | 8e1faa342fae423731090d271314cd5b9e23a9db | refs/heads/master | 2023-08-14T11:25:41.460714 | 2021-10-02T05:00:27 | 2021-10-02T05:00:27 | 412,333,564 | 3 | 0 | null | 2021-10-01T05:02:53 | 2021-10-01T05:02:52 | null | UTF-8 | Python | false | false | 314 | py |
def Fibonacci(n):
    """Return the nth Fibonacci number, 1-indexed (Fibonacci(1) == 0,
    Fibonacci(2) == 1).

    Prints an error and returns None for n <= 0, matching the original
    contract for invalid input.
    """
    if n <= 0:
        print("Incorrect input")
        return None
    # Iterative pairwise update: O(n) time and O(1) space instead of the
    # exponential-time naive double recursion of the original.
    a, b = 0, 1
    for _ in range(n - 1):
        a, b = b, a + b
    return a
n=int(input("enter number of term"))  # 1-indexed position in the sequence
print(Fibonacci(n))
| [
"tushar2525252@gmail.com"
] | tushar2525252@gmail.com |
079db36b4b59f01d928e4d88533b54c93ff7df80 | e0ce33a90969747f57db754bc66072ae76ea3db2 | /user/admin.py | f03e58454c6f245b50de4fec01ac02d400f2e1be | [] | no_license | czemiello/biblioteka | f342bc998c215c3b34669dc3a2424dd4a79b4723 | 16a1860d31689f8c1cbdd5f719b047975f72ffc2 | refs/heads/master | 2021-01-11T12:01:20.177278 | 2017-01-21T10:32:26 | 2017-01-21T10:32:26 | 79,638,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
from user.models import SiteUser
# Inline admin descriptor for the SiteUser profile model, which acts a bit
# like a singleton per User. (The original comment said "Employee"; the
# model registered here is SiteUser.)
class SiteUserInline(admin.StackedInline):
    model = SiteUser
    can_delete = False
    verbose_name_plural = 'site_user'
# Extend the stock Django User admin so the SiteUser profile is edited inline.
class UserAdmin(BaseUserAdmin):
    inlines = (SiteUserInline, )
# Re-register the built-in User model with the extended admin.
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
| [
"aexol@aexol.com"
] | aexol@aexol.com |
75925e15b82a2124fd11f9ee64e75dec38efa6b2 | 3b7866902cba9c97998751a1bf3cac08d302031e | /Ch03/3-2.py | 35ba30f991a286d6ed4dc74a69123f1f651bfd7c | [] | no_license | tsbslteisrhy/Python | 7da07b8f5e3e10c86fd35522a3570788a9122adc | 95a6083c3d584c9df6b3bbf6c41ad33ca3419ad6 | refs/heads/master | 2022-12-22T03:05:57.696017 | 2020-09-21T08:31:03 | 2020-09-21T08:31:03 | 274,085,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | """
날짜 : 2020/06/23
이름 : 유효진
내용 : while문 실습하기 교재 p130
"""
# while
num = 1
while num < 5:
print('num이 5보다 작다.')
num = num + 1
# 1부터 10까지 합
total = 0
start = 1
while start <= 10:
total += start
start += 1
print('1부터 10까지 합 :', total)
# 리스트 합
scores = [80, 90, 92, 78, 62]
tot = i = 0
while i < len(scores):
tot += scores[i]
i += 1
print('리스트 scores 전체 합 :', tot)
# break
num = 1
while True:
if num % 5 == 0 and num % 7 == 0:
break
num += 1
print('5와 7의 최소공배수 :', num)
# continue
sum = 0
k = 0
while k <= 10:
k += 1
if k % 2 == 1:
continue
sum += k
print('1부터 10까지 짝수합 :', sum) | [
"tsbslteisrhy@naver.com"
] | tsbslteisrhy@naver.com |
bb219f4b5d7945d52abc467136cd1614c4bdbee4 | a2908938c39e1c233760eb3666089ad32f32e9dd | /ballGame.py | 9efc44ff435e8739160985091f05fcbc21c1acaf | [] | no_license | ParthSSharma/bouncyBall | 14223e80b4071d2f43581500cb025c0ccf399b28 | 7d7053ddb135dc5de51947881094c0c6c769fb79 | refs/heads/master | 2020-06-28T21:07:27.094894 | 2019-08-03T06:35:03 | 2019-08-03T06:35:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,112 | py | import numpy as np
import cv2
from directKeys import click, queryMousePosition
import time
from PIL import ImageGrab
gameCoords = [0, 25, 500, 525]  # capture box: [left, top, right, bottom] in screen pixels
exitGame = 0  # set to 1 by shootSomeBalls() when the scan suggests game over

def shootSomeBalls(screen):
    """Scan the captured frame and click the first ball-coloured pixel.

    Walks the frame top-to-bottom in 5-pixel steps and clicks the first pixel
    whose channel 0 is 255 and channel 1 is 0 (presumably the ball colour --
    confirm against the game's palette). Sets the global exitGame flag once a
    full scan reaches the bottom of the play area.
    """
    global gameCoords, exitGame
    state = 1
    for y in range(gameCoords[1], gameCoords[3] - 25, 5):
        for x in range(gameCoords[0], gameCoords[2], 5):
            if(screen[y][x][0] == 255) and (screen[y][x][1] == 0):
                # +1 / +26 convert frame coordinates back to absolute screen
                # coordinates (the capture box starts at y = 25).
                click(x + 1, y + 26)
                print("Clicked!")
                state = 0
                break
        if(not state):
            break
        if(y > gameCoords[3] - 31):
            # Scan reached the bottom rows without a click -- assume the
            # game is over and signal the main loop to stop.
            exitGame = 1
            print("Alright, let's move out!")

while True:
    # Arm the bot: wait for the mouse to move left of x = 500.
    mousePos = queryMousePosition()
    if mousePos.x <= 500:
        break
print("GO, GO, GO!")
time.sleep(2)
while True:
    mousePos = queryMousePosition()
    # Only act while the cursor stays inside the game region.
    if((gameCoords[0] < mousePos.x < gameCoords[2]) and (gameCoords[1] < mousePos.y < gameCoords[3])):
        screen = np.array(ImageGrab.grab(bbox = gameCoords))
        shootSomeBalls(screen)
    if exitGame:
        print("I'm done!")
        break
| [
"53370354+ParthTatsuki@users.noreply.github.com"
] | 53370354+ParthTatsuki@users.noreply.github.com |
4fdd0d64d5e0afc3a45079ae2b9ccd03a8bc552d | a6dafde76d4cf3408e61828acb9ba9d6200363ef | /testdir/Students.py | 6dd66075fc942ddb20e1c1bc2351f0749d65998a | [] | no_license | gubanovpm/C01-019_Gubanov_python | b55a3cb2724141450cd5d9e9834b65e711b57426 | 71795854b22e3e713ee16de695e7b7b9aa20d877 | refs/heads/main | 2023-01-31T19:09:50.436433 | 2020-12-13T22:14:45 | 2020-12-13T22:14:45 | 313,232,096 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | learners_french = input().split()
pianists = input().split()
swimmers = input().split()
A = set()
B = set()
C = set()
for x in learners_french:
A.add(x)
for x in pianists:
B.add(x)
for x in swimmers:
C.add(x)
D = set()
D = (B.intersection(C)).difference(A)
result = []
for x in D:
result.append(x)
print(*result)
#example
#Savior tywok LimberG
#tywok BryanChen The_Hedgehog
#tywok BryanChen The_Hedgehog
#
#correct answer: BryanChen The_Hedgehog
#my answer:BryanChen The_Hedgehog
| [
"noreply@github.com"
] | gubanovpm.noreply@github.com |
4ce317d08e1c724135be938452d22fe612cfa189 | 763a9816046bcfec4958ae593cb7cb58696b9dc8 | /rules/rnammer.smk | 981f4e9347324dd8eb8b495a112fdfd35eeffca0 | [] | no_license | davidecarlson/snakemake-trinotate | ad877563668d173e288cdb1b3b4d55a6e95bd432 | 45f7e2f92de144c10be29b47c90e1335f2f6abd7 | refs/heads/master | 2023-02-17T03:38:44.069123 | 2021-01-15T20:29:26 | 2021-01-15T20:29:26 | 280,492,880 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 666 | smk | RNAMMERTRANS=config['RNAMMERTRANS']
RNAMMER=config['RNAMMER']
rule rnammer:
input:
assembly=INPUT + "/{sample}_trinity.Trinity.fasta"
output:
results=RESULTS + "/rnammer/{sample}/{sample}_trinity.Trinity.fasta.rnammer.gff"
threads:
1
params:
wrapper=RNAMMERTRANS + "/RnammerTranscriptome.pl",
rnammer=RNAMMER + "/rnammer",
indir=RESULTS + "/rnammer/{sample}"
log:
RESULTS + "/logs/rnammer/{sample}.rnammer.log"
shell:
"cd {params.indir} && "
"{params.wrapper} --transcriptome {input.assembly} "
"--path_to_rnammer {params.rnammer} 2> {log} "
" && cd -"
| [
"david.carlson@stonybrook.edu"
] | david.carlson@stonybrook.edu |
33bb2b026299cda1ef4cc5c15c5b013e87879902 | 45e65a396dc324b84a8cf602baed86353bcec5ae | /fibonacci.py | afda3b104e154766c6f2ac70db100824b4d596c5 | [] | no_license | knappeddy55/cmpt120JarosiewitzKnapp | f603d2ac6cb18be7bff4185c9cc9174bff2890d2 | 7172bce717fcba352f1d5b55769ae690770b5f82 | refs/heads/master | 2021-05-09T15:12:22.273991 | 2018-02-27T03:55:47 | 2018-02-27T03:55:47 | 119,086,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | # This program will find out the nth number in the fibonacci sequence obtained by the user
def fib(n):
    """Return the nth Fibonacci number, 1-indexed (fib(1) == fib(2) == 1).

    Iterative O(n)/O(1) implementation; the original naive double recursion
    was exponential and recursed forever (RecursionError) for n < 1.
    """
    a, b = 1, 1
    for _ in range(n - 1):
        a, b = b, a + b
    return a
def main():
    """Prompt the user for n and print the nth Fibonacci number."""
    print("This program computes the nth Fibonacci number where n is a value input by the user")
    # int() instead of eval(): eval() would execute arbitrary code typed by
    # the user; int() accepts exactly the integers this program needs.
    n = int(input("Enter a value for n:"))
    print(fib(n))

main()
| [
"edward.jarosiewitzknap1@marist.edu"
] | edward.jarosiewitzknap1@marist.edu |
956d9fa0e6e3eacbff9d77dab2253bd585bbbf87 | e8d2c1f5d3d508dd9a60238ae0a5db0b584a9454 | /modules/ReplayMemory.py | 7038ddf8d5e9809aeb4e86778b8ba5b2dfd20bad | [] | no_license | WeichangWang195/Improved_3MRL | bc667eb98e186e99b21fbf2d6dfbc163480d5442 | 32a13e9333d3f891f236c76c1447fed523060150 | refs/heads/master | 2023-06-24T04:06:25.578776 | 2021-07-25T21:23:06 | 2021-07-25T21:23:06 | 362,308,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | import random
from collections import namedtuple
class ReplayMemory(object):
def __init__(self, capacity):
self.capacity = capacity
self.memory = []
self.position = 0
def push(self, item):
"""Saves a transition."""
if len(self.memory) < self.capacity:
self.memory.append(None)
self.memory[self.position] = item
self.position = (self.position + 1) % self.capacity
def sample(self, batch_size):
if len(self.memory) < batch_size:
return self.memory
else:
return random.sample(self.memory, batch_size)
def clear_memory(self):
self.memory = []
self.position = 0
def return_all(self):
return self.memory
def return_len(self):
return len(self.memory)
def __len__(self):
return len(self.memory) | [
"wwang195@hotmail.com"
] | wwang195@hotmail.com |
14f37638e65febf11f9114def8c5f318cb8713d6 | 328630193505577e71fdd52aa2b04b5f98ac338b | /src/app.py | e2e866e986b36060d928689c692f69c55f252de4 | [] | no_license | Pawel-Matuszny/flask-crud-app | 310eaf426624168af7c0d40ce87139f9d22d8ef7 | dc2bd44c76a2b56f879d0a6a7d72dae3a86d65c0 | refs/heads/master | 2023-06-30T04:02:19.410600 | 2021-07-30T11:46:43 | 2021-07-30T11:46:43 | 391,044,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,237 | py | from flask import Flask, jsonify, request, make_response
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
import uuid
from werkzeug.security import generate_password_hash, check_password_hash
import jwt
import datetime
from functools import wraps
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
# Error monitoring: report unhandled exceptions and traces to Sentry.
# NOTE(review): the DSN is hard-coded -- consider moving it to configuration.
sentry_sdk.init(
    dsn="https://233dada331c140fb971a819e5a05bd71@o933564.ingest.sentry.io/5882670",
    integrations=[FlaskIntegration()],
    traces_sample_rate=1.0
)
import os
# Database connection settings. All of these environment variables are
# required at startup; a missing one raises KeyError and aborts the import.
env_username = os.environ['DB_USER']
env_password = os.environ['DB_PASS']
env_host = os.environ['DB_HOST']
env_db_name = os.environ['DB_NAME']
env_port = os.environ['DB_PORT']
app = Flask(__name__)
# PostgreSQL DSN with client-side SSL material referenced via query params.
sql_url ='postgresql+psycopg2://%s:%s@%s:%s/%s?sslmode=prefer&sslrootcert=/app/docker/ssl/server-ca.pem&sslcert=/app/docker/ssl/client-cert.pem&sslkey=/app/docker/ssl/client-key.pem' % (env_username, env_password, env_host, env_port, env_db_name)
app.config['SECRET_KEY'] = os.environ['SECRET_KEY']  # used to sign/verify JWTs
app.config['SQLALCHEMY_DATABASE_URI'] = sql_url
db = SQLAlchemy(app)
migrate = Migrate(app, db,compare_type=True)
class User(db.Model):
    """Application account; public_id (a UUID4 string) is the identifier
    exposed to API clients and embedded in JWTs."""
    id = db.Column(db.Integer, primary_key=True)
    public_id = db.Column(db.String(50), unique=True)
    name = db.Column(db.String(50))
    # Holds a werkzeug password hash, never the plain password.
    password = db.Column(db.String(100))
    admin = db.Column(db.Boolean)
class Articles(db.Model):
    """A user-owned article; user_public_id links it to its owning User."""
    id = db.Column(db.Integer, primary_key=True)
    text = db.Column(db.Text)
    # Pass the callable itself, not its result: `utcnow()` was evaluated once
    # at import time, stamping every row with the server's start time instead
    # of the row's creation time.
    date_created = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    is_finished = db.Column(db.Boolean)
    user_public_id = db.Column(db.String(50))
def token_required(f):
    """Decorator: require a valid JWT in the `x-access-token` header.

    On success the authenticated User is passed to the wrapped view as its
    first positional argument; otherwise a 401 JSON response is returned.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        token = request.headers.get('x-access-token')
        if not token:
            return jsonify({'message' : 'Token is missing'}), 401
        try:
            # Pin the accepted algorithm: PyJWT >= 2 requires `algorithms`,
            # and pinning prevents algorithm-confusion attacks. (login()
            # signs with the PyJWT default, HS256.)
            data = jwt.decode(token, app.config['SECRET_KEY'], algorithms=['HS256'])
            current_user = User.query.filter_by(public_id=data['public_id']).first()
        except Exception:
            # Expired/forged token or malformed payload.
            return jsonify({'message' : 'Token is invalid'}), 401
        if current_user is None:
            # Token was signed for a user that no longer exists.
            return jsonify({'message' : 'Token is invalid'}), 401
        return f(current_user, *args, **kwargs)
    return decorated
@app.route("/user", methods=['POST'])
@token_required
def create_user(current_user):
if not current_user.admin:
return jsonify({'message' : 'Cannot perform that function'})
data = request.get_json()
hashed_password = generate_password_hash(data['password'], method='sha256')
new_user = User(public_id=str(uuid.uuid4()), name=data['name'], password=hashed_password, admin=False)
db.session.add(new_user)
db.session.commit()
return jsonify({'message' : 'New user created'})
@app.route('/user/<public_id>', methods=['PUT'])
@token_required
def promote_user(current_user, public_id):
    """Admin-only: grant admin rights to the user identified by *public_id*.

    `current_user` must be the first parameter: token_required invokes the
    view as f(current_user, **view_args). With the original
    (public_id, current_user) order, Flask's public_id keyword collided with
    the positional current_user and every request raised TypeError.
    """
    if not current_user.admin:
        return jsonify({'message' : 'Cannot perform that function'})
    user = User.query.filter_by(public_id=public_id).first()
    if not user:
        return jsonify({'message' : 'No user found'})
    user.admin = True
    db.session.commit()
    # NOTE(review): response key typo 'mesage' kept for API compatibility.
    return jsonify({'mesage' : 'The user has been promoted'})
@app.route('/user/<public_id>', methods=['GET'])
@token_required
def get_one_user(current_user, public_id):
    """Admin-only: fetch one user by *public_id*.

    `current_user` must come first: token_required calls the view as
    f(current_user, **view_args), so the original (public_id, current_user)
    order made Flask's public_id keyword collide with the positional
    argument and raised TypeError on every request.
    """
    if not current_user.admin:
        return jsonify({'message' : 'Cannot perform that function'})
    user = User.query.filter_by(public_id=public_id).first()
    if not user:
        return jsonify({'message' : 'No user found'})
    user_data = {}
    user_data['public_id'] = user.public_id
    user_data['name'] = user.name
    user_data['password'] = user.password
    user_data['admin'] = user.admin
    return jsonify({'user' : user_data})
@app.route('/user', methods=['GET'])
@token_required
def get_all_users(current_user):
    """Admin-only: list every registered user."""
    if not current_user.admin:
        return jsonify({'message' : 'Cannot perform that function'})
    output = [
        {
            'public_id': u.public_id,
            'name': u.name,
            'password': u.password,
            'admin': u.admin,
        }
        for u in User.query.all()
    ]
    return jsonify({'users' : output})
@app.route('/user/<public_id>', methods=['DELETE'])
@token_required
def delete_user(current_user, public_id):
    """Admin-only: delete the user identified by *public_id*.

    `current_user` must be the first parameter (token_required calls the
    view as f(current_user, **view_args)); the original
    (public_id, current_user) order raised TypeError on every request.
    """
    if not current_user.admin:
        return jsonify({'message' : 'Cannot perform that function'})
    user = User.query.filter_by(public_id=public_id).first()
    if not user:
        return jsonify({'message' : 'No user found'})
    db.session.delete(user)
    db.session.commit()
    # NOTE(review): response key typo 'mesage' kept for API compatibility.
    return jsonify({'mesage' : 'The user has been deleted'})
@app.route('/article', methods=['GET'])
@token_required
def get_all_articles(current_user):
    """List every article owned by the authenticated user."""
    rows = Articles.query.filter_by(user_public_id=current_user.public_id).all()
    output = [
        {
            'id': row.id,
            'text': row.text,
            'is_finished': row.is_finished,
            'date_created': row.date_created,
        }
        for row in rows
    ]
    return jsonify({'articles' : output})
@app.route('/article/<article_id>', methods=['GET'])
@token_required
def get_one_article(current_user, article_id):
    """Fetch a single article owned by the authenticated user."""
    article = Articles.query.filter_by(
        id=article_id, user_public_id=current_user.public_id).first()
    if article is None:
        return jsonify({'message' : 'No article found'})
    return jsonify({
        'id': article.id,
        'text': article.text,
        'is_finished': article.is_finished,
        'date_created': article.date_created,
    })
@app.route('/article', methods=['POST'])
@token_required
def create_article(current_user):
    """Create a new, unfinished article for the authenticated user."""
    payload = request.get_json()
    article = Articles(
        text=payload['text'],
        is_finished=False,
        user_public_id=current_user.public_id,
    )
    db.session.add(article)
    db.session.commit()
    return jsonify({'message' : "Article created"})
@app.route('/article/<article_id>', methods=['PUT'])
@token_required
def finish_article(current_user, article_id):
    """Mark one of the authenticated user's articles as finished."""
    article = Articles.query.filter_by(id=article_id, user_public_id=current_user.public_id).first()
    if not article:
        return jsonify({'message' : 'No article found'})
    article.is_finished = True
    db.session.commit()
    return jsonify({'message' : 'Article has been finished'})
@app.route('/article/<article_id>', methods=['DELETE'])
@token_required
def delete_article(current_user, article_id):
    """Delete one of the authenticated user's articles."""
    article = Articles.query.filter_by(id=article_id, user_public_id=current_user.public_id).first()
    if not article:
        return jsonify({'message' : 'No article found'})
    db.session.delete(article)
    db.session.commit()
    return jsonify({'message' : 'Article has been deleted'})
@app.route('/login')
def login():
    """HTTP Basic login endpoint; returns a short-lived JWT on success."""
    auth = request.authorization
    if not auth or not auth.username or not auth.password:
        return make_response('Could not verify', 401, {'WWW-Authenticate' : 'Basic realm="Login required!"'})
    user = User.query.filter_by(name=auth.username).first()
    if not user:
        return make_response('Could not verify', 401, {'WWW-Authenticate' : 'Basic realm="Login required!"'})
    if check_password_hash(user.password, auth.password):
        expiry = datetime.datetime.utcnow() + datetime.timedelta(
            minutes=int(os.environ['TOKEN_EXPIRATION_TIME_IN_MINUTES']))
        token = jwt.encode({'public_id' : user.public_id, 'exp' : expiry}, app.config['SECRET_KEY'])
        # PyJWT 1.x returns bytes, PyJWT >= 2 returns str; the original
        # unconditional .decode('UTF-8') crashes on PyJWT >= 2.
        if isinstance(token, bytes):
            token = token.decode('UTF-8')
        return jsonify({'token' : token})
    return make_response('Could not verify', 401, {'WWW-Authenticate' : 'Basic realm="Login required!"'})
@app.route('/status')
def health_check():
    """Report whether the database currently accepts connections."""
    try:
        db.engine.connect().close()
    except Exception:
        return jsonify({'database_status' : 'offline'})
    return jsonify({'database_status' : 'online'})
@app.route('/http-health')
def http_health():
    """Liveness probe: returns OK whenever the process is serving HTTP."""
    return "OK"
if __name__ == "__main__":
    app.run()
"pm@katarti.io"
] | pm@katarti.io |
65001c22614b411a3aa525a2a14d83e9c74fd77f | 257192c02c6967ff08b03c124764532273f5198e | /服务器脚本(自己放服务器去)/scripts/cell/NPC.py | e51cde5e5781b50f2ae31eb19784cd1266b8c79a | [] | no_license | flodoo/kbengine_warring_upgrade | a17c8affea70a4b1718358adc7ca0574f472f01c | df6e6ede3048d509c9af0ea9f3c3974cc1908daa | refs/heads/master | 2020-05-19T20:55:46.895144 | 2015-04-10T07:39:31 | 2015-04-10T07:39:31 | 33,582,982 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | # -*- coding: utf-8 -*-
import random
import math
import time
import KBEngine
from KBEDebug import *
from interfaces.NPCObject import NPCObject
from interfaces.Motion import Motion
class NPC(KBEngine.Entity, NPCObject, Motion):
    """Server-side non-player character: a KBEngine entity combining the
    NPCObject data mixin with the Motion movement mixin."""
    def __init__(self):
        KBEngine.Entity.__init__(self)
        NPCObject.__init__(self)
        Motion.__init__(self)
    def onDestroy(self):
        """
        Entity is being destroyed; delegate cleanup to NPCObject.
        """
        NPCObject.onDestroy(self)
    def isNPC(self):
        """
        virtual method.
        """
        return True
# Merge the timer-callback tables contributed by each mixin into one map.
NPC._timermap = {}
NPC._timermap.update(NPCObject._timermap)
NPC._timermap.update(Motion._timermap)
"liquidx@163.com"
] | liquidx@163.com |
b79bfa210ba5980a546701abc154cefdccc86ce5 | b607fd95712f9d6be753052db2449174b60de5e9 | /docker/features/installer/dependency_check.py | 6ea4ce53aef232d98e6d7b54377f97cb246d866b | [
"Apache-2.0"
] | permissive | krdpk17/twitter-neo4j | bb84d7bc3210587dd5df125958f8b96bc28db1ac | bb7e62743651082726db373d118dcc90cce48532 | refs/heads/master | 2021-05-19T08:24:03.772900 | 2021-04-26T07:40:06 | 2021-04-26T07:40:06 | 251,603,624 | 1 | 1 | Apache-2.0 | 2020-03-31T12:59:17 | 2020-03-31T12:59:16 | null | UTF-8 | Python | false | false | 311 | py | import pdb
import pip3
print("checking dependency and performing installation")
def install(package):
    """Install *package* through pip's programmatic entry point.

    Supports both old pip (public ``pip.main``) and newer releases where the
    entry point moved to ``pip._internal.main``.
    """
    # Local import: the module-level `import pip3` at the top of this file is
    # broken (there is no module named pip3; the package is `pip`), and the
    # original body referenced a `pip` name that was never imported.
    import pip
    if hasattr(pip, 'main'):
        pip.main(['install', package])
    else:
        pip._internal.main(['install', package])
install('python-dotenv')
install('oauth2')
install('py2neo')
install('retrying')
| [
"62544105+krdpk1717@users.noreply.github.com"
] | 62544105+krdpk1717@users.noreply.github.com |
64969951f65d3112409366d438d7f5d161123c2f | 1bd2d7189d6ec4b6a1289e136098e947f7fd5ca3 | /luisapi/asgi.py | fc869631afe74d2993b99aee7aa39a6ba8db07a3 | [] | no_license | Piotr1103/LUIS-API | 5590357607f903575a89a3eef4139f6b658109f2 | a16c9b6188b7ea86b1ad10bf8c69cdae5777e56b | refs/heads/main | 2023-08-01T17:47:19.157634 | 2021-09-27T11:09:15 | 2021-09-27T11:09:15 | 410,850,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
ASGI config for luisapi project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before building the ASGI callable.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'luisapi.settings')
application = get_asgi_application()
| [
"manjudamin@gmail.com"
] | manjudamin@gmail.com |
7ebaf958ee53b6084d2a254324bd9f469cfd76b0 | 3e79bfd23dd2dbfe3b057ba3a879b4a99ba7f66e | /Pizza Crust.py | 26c764309d50948a0508a59bf32e8db9c741984a | [] | no_license | parhamgh2020/kattis | 1b701d1f768e2337d2477ea12be56399b3ed1003 | 40a0969e347be3df9ab23cbb3fd8272b40ffa16f | refs/heads/main | 2023-03-19T22:51:54.643201 | 2021-03-14T09:30:59 | 2021-03-14T09:30:59 | 347,017,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | from math import pi
radius, crust_width = map(int, input().split())
# Percentage of the pizza's area occupied by the crust-free interior.
# The pi factors are kept so the floating-point result matches exactly.
inner_radius = radius - crust_width
percentage = ((pi * inner_radius ** 2) / (pi * radius ** 2)) * 100
print(percentage)
| [
"parham.ghashghaee@gmail.com"
] | parham.ghashghaee@gmail.com |
b9e6b4a89933c10efc3d61dd4e0bd03bc076a9b1 | d1cb5b13bf134f51cb03a292870f1486a4aab840 | /abc/160/e_.py | f64501f27f3b75a5f6d6c8965a5aa3a2119729a2 | [] | no_license | tanimutomo/atcoder | 73717646cff68b6bc3b42a0e1e3e48a1d61c2cb6 | cc270a2794aa351eedc78707db379f4f803439d6 | refs/heads/master | 2021-06-28T20:27:37.648870 | 2020-11-29T12:13:08 | 2020-11-29T12:13:08 | 193,248,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | x, y, a, b, c = map(int, input().split())
p = list(map(int, input().split()))
q = list(map(int, input().split()))
r = list(map(int, input().split()))
p = sorted(p, reverse=True)[:x]
q = sorted(q, reverse=True)[:y]
r = sorted(r+p+q, reverse=True)[:x+y]
print(sum(r)) | [
"tanimutomo@gmail.com"
] | tanimutomo@gmail.com |
92e7c14d496c1f0fc8a4ad15849cd49159029b26 | de6207e901110ff060dec96748f2e88855388f6a | /crawler/venv/bin/pip | 93f0d5d3d77dc01a7fbfe016aa9bfb4df21af2ca | [] | no_license | arnobmonir/Local_Net_Tools | d618b77226c30d09176975f8d11b45c6e75573a0 | 12e34f6834e756081d63b476facead2d32274904 | refs/heads/master | 2020-09-11T00:11:41.039230 | 2019-11-14T12:15:12 | 2019-11-14T12:15:12 | 221,875,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | #!/root/PycharmProjects/crawler/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
| [
"aarnob.monir@gmail.com"
] | aarnob.monir@gmail.com | |
33d8a3f172e02a3326ddf12f3ac7502ff17d2f00 | 557c70397b350aa85933180dc3648c9be5c9db39 | /interpretNN/sequence_attribution.py | 05474583f2639e4d9a558940661f733a78f4a328 | [] | no_license | DivyanshiSrivastava/chromNN | 68e41f1489c4fcefe576c7c7e80eb6ebd600a91e | 32541b3d6cfe00972116b63dfe1f82cb099c3478 | refs/heads/master | 2020-04-19T01:33:16.879436 | 2019-01-28T01:13:27 | 2019-01-28T01:13:27 | 167,875,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,047 | py | import numpy as np
from keras import backend as K
# The GradientSaliency class is modified from:
# https://github.com/experiencor/deep-viz-keras/blob/master/saliency.py
class GradientSaliency(object):
    """Vanilla gradient saliency for a two-input (sequence + chromatin)
    Keras model: d(output)/d(sequence input)."""
    def __init__(self, model, output_index=0):
        # Build a backend function mapping (sequence, chromatin,
        # learning_phase) to the gradient of the chosen output w.r.t. the
        # sequence input (model.layers[0]); chromatin is held fixed.
        input_tensors = [model.layers[0].input,  # sequence input placeholder
                         model.layers[4].input,  # chromatin input placeholder
                         K.learning_phase(),     # 0 = test mode, 1 = train mode
                         ]
        gradients = model.optimizer.get_gradients(model.output[0][output_index], model.layers[0].input)
        self.compute_gradients = K.function(inputs=input_tensors, outputs=gradients)
    def get_mask(self, input_sequence, chrom_value):
        """Return the raw gradient w.r.t. *input_sequence*.

        Assumes input_sequence is a single un-batched one-hot sequence
        (looks like (500, 4) from random_baseline_attribution -- confirm).
        """
        # Add a batch dimension of 1 before feeding the network.
        x_value = np.expand_dims(input_sequence, axis=0)
        c_value = chrom_value
        # Trailing 0 selects test mode for K.learning_phase().
        gradients = self.compute_gradients([x_value, c_value, 0])[0][0]
        return gradients
class IntegratedGradients(GradientSaliency):
    """Integrated gradients: average the vanilla gradients along a straight
    path from a baseline to the actual input, scaled by (input - baseline)."""
    def GetMask(self, input_sequence, chrom_value, input_baseline=None, nsamples=10):
        """Return the integrated-gradients attribution for one sequence."""
        if input_baseline is None:
            # Default baseline: the all-zeros sequence.
            input_baseline = np.zeros_like(input_sequence)
        assert input_baseline.shape == input_sequence.shape
        input_diff = input_sequence - input_baseline
        # Accumulator for the gradients sampled along the path.
        total_gradients = np.zeros_like(input_sequence)
        # nsamples evenly spaced interpolation points, alpha in [0, 1].
        for alpha in np.linspace(0, 1, nsamples):
            input_step_sequence = input_baseline + alpha * input_diff
            input_step_sequence = input_step_sequence.astype('float64')
            step_gradients = super(IntegratedGradients, self).get_mask(input_step_sequence, chrom_value)
            np.add(total_gradients, step_gradients, out=total_gradients, casting='unsafe')
        return total_gradients * input_diff
def random_baseline_attribution(gs, boundX, boundC):
    """Compute per-position attribution scores for every bound example.

    gs     -- an IntegratedGradients-like object exposing GetMask().
    boundX -- one-hot sequences, shape (n_examples, seq_len, 4).
    boundC -- chromatin features; each row is reshaped to (-1, 130).

    Returns an (n_examples, seq_len) array: the one-hot axis of each
    gradient mask is summed into a single score per sequence position.
    """
    # Uniform-background baseline (0.25 per base).  Loop-invariant, so build
    # it once; the original rebuilt a full-dataset-sized array on every
    # iteration and only ever used its first row (baseline[0]).
    baseline = np.zeros_like(boundX[0]) + 0.25
    system_attribution = []
    for idx in range(boundX.shape[0]):
        print(idx)  # progress indicator (print() works on Python 2 and 3)
        grads = gs.GetMask(boundX[idx], boundC[idx].reshape(-1, 130),
                           input_baseline=baseline)
        # Collapse the one-hot axis: one attribution value per position.
        attribution = np.sum(grads, axis=1)
        system_attribution.append(attribution)
    return np.array(system_attribution)
def get_sequence_attribution(datapath, model, input_data):
    """Return attribution scores for each bound example in input_data.

    datapath   -- accepted for interface compatibility; not used here.
    model      -- trained Keras model to explain.
    input_data -- (sequences, chromatin) pair of arrays.
    """
    sequences, chromatin = input_data
    return random_baseline_attribution(IntegratedGradients(model),
                                       sequences, chromatin)
| [
"s.divyanshi91@gmail.com"
] | s.divyanshi91@gmail.com |
346f9be15be33ed6d7a104aea444b4a2dc9ba754 | dccd1058e723b6617148824dc0243dbec4c9bd48 | /atcoder/abc083/c.py | 3bda8a9bf4626f9a95318a39ab49b5aaae251926 | [] | no_license | imulan/procon | 488e49de3bcbab36c624290cf9e370abfc8735bf | 2a86f47614fe0c34e403ffb35108705522785092 | refs/heads/master | 2021-05-22T09:24:19.691191 | 2021-01-02T14:27:13 | 2021-01-02T14:27:13 | 46,834,567 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | x,y = map(int,input().split())
# Count how many doublings keep x within the bound y
# (x and y are read from stdin on the preceding line).
doublings = 0
while x <= y:
    doublings += 1
    x *= 2
print(doublings)
| [
"k0223.teru@gmail.com"
] | k0223.teru@gmail.com |
fdbfd91b1ecd4517cbd0450d6ef0269a24e7a240 | 579526149437ef74cd870bfbd96a06d07b1cf25e | /formula/migrations/0001_initial.py | 5bf61c8ea28d2aedf671971d4fdddcbd5b59b074 | [] | no_license | khalilashkar/scm | 22461b896c585df34c319df74688d673570ff6d6 | 31a37450c9e55c22416b1123945c38f85706b243 | refs/heads/master | 2022-12-03T15:22:22.693671 | 2020-08-17T02:13:54 | 2020-08-17T02:13:54 | 288,055,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,611 | py | # Generated by Django 3.0.4 on 2020-06-11 14:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
import formula.validators
class Migration(migrations.Migration):
    """Initial schema for the ``formula`` app (generated by Django 3.0.4).

    NOTE(review): migrations are generated artifacts that must stay in sync
    with the recorded migration state, so apart from comments the operations
    below are kept exactly as ``makemigrations`` produced them.
    """
    initial = True
    dependencies = [
        # The created_by FK below targets the (swappable) auth user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Main intake form: one row per applicant / support request.
        migrations.CreateModel(
            name='Registration',
            fields=[
                ('Application_number', models.AutoField(default=None, primary_key=True, serialize=False, unique=True)),
                ('Application_date', models.DateField(auto_now_add=True, null=True, verbose_name='application_date ')),
                ('gender', models.CharField(choices=[('0', 'Female'), ('1', 'Male'), ('2', 'prefer not to say')], max_length=30, null=True, verbose_name='الجنس')),
                ('first_name', models.CharField(max_length=30, verbose_name='الاسم الاول')),
                ('nick_name', models.CharField(blank=True, max_length=255, null=True, verbose_name='الاسم المستعار')),
                ('last_name', models.CharField(max_length=255, null=True, verbose_name='الاسم الاخير')),
                ('medical_state_inf', models.CharField(blank=True, choices=[('0', 'نعم'), ('1', 'لا')], max_length=255, null=True, verbose_name='هل لديك وضع صحي خاص ')),
                ('medical_note_inf', models.CharField(blank=True, max_length=255, null=True, verbose_name='شرح مختصر لحالتك الصحية ')),
                ('birth_date', models.DateField(verbose_name='تاريخ الميلاد ')),
                ('birth_place', models.CharField(max_length=255, verbose_name='مكان الولادة')),
                ('country', django_countries.fields.CountryField(max_length=255, verbose_name=' الدولة')),
                ('city', models.CharField(max_length=255, null=True, verbose_name='المدينة')),
                ('country_rec', django_countries.fields.CountryField(default='some_value', max_length=255, verbose_name='الدولة الحالية')),
                ('current_city', models.CharField(default='', max_length=255, verbose_name='المدينة الحالية?')),
                ('mail', models.EmailField(max_length=255, verbose_name='البريدالالكتروني')),
                ('phone', models.CharField(blank=True, max_length=255, null=True, verbose_name='الهاتف ')),
                ('educatton_level', models.CharField(blank=True, choices=[('0', 'الثانوي'), ('1', 'الجامعي'), ('2', 'ما بعد جامعي'), ('3', 'مادون الثانوي ')], max_length=255, null=True, verbose_name='التحصيل العل ')),
                ('job', models.CharField(blank=True, max_length=255, null=True, verbose_name='المهنة ')),
                ('start_date', models.DateField(null=True, verbose_name='تاريخ بدء العمل')),
                ('document_1', models.FileField(blank=True, null=True, upload_to='documents/', validators=[formula.validators.validate_file_extensison], verbose_name='Document_1')),
                ('document_2', models.FileField(blank=True, null=True, upload_to='documents/', validators=[formula.validators.validate_file_extensison], verbose_name='Document_2')),
                ('current_org_comp', models.CharField(blank=True, max_length=255, null=True, verbose_name='Current employer ')),
                ('Previous_employers', models.CharField(blank=True, max_length=255, null=True, verbose_name='Previous employers ')),
                ('org_memeber', models.CharField(blank=True, max_length=255, null=True, verbose_name='هل كنت عضوا في أحد المجمعات الصحفية')),
                ('paid_job', models.CharField(blank=True, choices=[('0', 'نعم'), ('1', 'لا')], max_length=255, null=True, verbose_name='هل عملت بإجر مسبقاُ')),
                ('name_of_company_paid', models.CharField(blank=True, max_length=255, null=True, verbose_name='اذكر اسم الجهة او المنظمة ')),
                ('details', models.CharField(blank=True, max_length=255, null=True, verbose_name='يرجى ذكر التفاصيل ')),
                ('training_media', models.CharField(blank=True, max_length=255, null=True, verbose_name='هل سبق أن شاركت بأي ورشات أو دورات لتطوير الخبرات الإعلامية أو الحقوقية؟')),
                ('details_traning_media', models.CharField(blank=True, max_length=255, null=True, verbose_name='يرجى ذكر التفاصيل ')),
                ('violations', models.CharField(blank=True, max_length=255, null=True, verbose_name=' هل سبق أن تعرضت لأي نوع من أنواع الانتهاكات؟')),
                ('kind_of_violation', models.CharField(blank=True, max_length=255, null=True, verbose_name=' لأي نوع من أنواع الانتهاكات؟ ')),
                ('date_of_violations', models.DateField(blank=True, null=True, verbose_name='ما تاريخ حدوث الانتهاك؟ ')),
                ('relation_with_org', models.CharField(blank=True, max_length=255, null=True, verbose_name='هل لديك أي ارتباطات تنظيمية مع أي فصيل عسكري أو ديني أو تجمع سياسي أو ديني؟')),
                ('summary_of_relations', models.TextField(blank=True, null=True, verbose_name='الرجاء الإجابة مع ذكر التفاصيل')),
                ('articls_link_1', models.CharField(blank=True, max_length=200, null=True, verbose_name='روابط إعلامية منشورة باسمك الصريح أو المستعار')),
                ('articls_link_2', models.CharField(blank=True, max_length=200, null=True, verbose_name='روابط إعلامية منشورة باسمك الصريح أو المستعار')),
                ('summary_of_your_state', models.TextField(blank=True, null=True, verbose_name='اكتب ملخص عن حالتك')),
                ('type_of_dmande', models.CharField(blank=True, choices=[('0', 'دعم معيشي'), ('1', 'إيجاد فرصة عمل'), ('2', 'خروج آمن'), ('3', 'دعم ملف اللجوء - تأشيرات خروج'), ('4', 'دعم تقني وبطاقات صحفية'), ('5', 'دعم طبي'), ('6', 'غير ذلك')], max_length=30, null=True, verbose_name='طبيعة المساعدة المطلوبة')),
                ('resaon_for_help', models.TextField(blank=True, null=True, verbose_name='رجاء اكتب ملخص لسبب طلب المساعدة ')),
                ('list_of_tools', models.CharField(blank=True, max_length=255, null=True, verbose_name='إن كان الدعم المطلوب متعلق بمستلزمات خاصة بالعمل، نرجو تزويدنا بقائمة الأسعار')),
                ('last_job_salary', models.CharField(blank=True, max_length=255, null=True, verbose_name='يرجى ذكر آخر عمل تقاضيت منه أجر وقيمة الأجر')),
                ('reason_stopping_job', models.CharField(blank=True, max_length=255, null=True, verbose_name='لماذا لا تستطيع أن تعمل بأجر في الوقت الحالي؟')),
                ('family_state', models.CharField(choices=[('0', 'متزوج/ة'), ('1', 'عازب/ة')], max_length=30, null=True, verbose_name='الوضع العائلي')),
                ('have_kids', models.CharField(blank=True, choices=[('0', 'نعم'), ('1', 'لا')], max_length=255, null=True, verbose_name='هل لديك أولاد؟')),
                ('number_kids', models.CharField(blank=True, max_length=255, null=True, verbose_name='كم عدد أفراد العائلة')),
                ('summary_of_recsituation', models.TextField(blank=True, null=True, verbose_name='يرجى شرح الوضع الحالي')),
                ('other_org_demand', models.CharField(blank=True, max_length=255, null=True, verbose_name='هل تقدمت بطلب مساعدة لأي منظمة سابقاً؟ ')),
                ('name_org', models.CharField(blank=True, max_length=255, null=True, verbose_name='ما هي المنظمة أو المنظمات')),
                ('date_of_demand_org', models.DateField(blank=True, null=True, verbose_name='نرجو معرفة تاريخ تقديم الطلب ؟ ')),
                ('tyoe_of_demand_other_org', models.CharField(blank=True, max_length=255, null=True, verbose_name='ما هي طبيعة الطلب؟')),
                ('result_of_demand_other_org', models.CharField(blank=True, max_length=255, null=True, verbose_name='ما هي نتيجة الطلب؟')),
                ('recmond_1', models.CharField(blank=True, max_length=255, null=True, verbose_name='مصدر1 للتثبت من عملك')),
                ('phon_1', models.CharField(blank=True, max_length=255, null=True, verbose_name='رقم هاتف للمصدر الاول ')),
                ('email_1', models.EmailField(blank=True, max_length=255, null=True, verbose_name='بريد الكتروني للمصدر الاول')),
                ('recmond_2', models.CharField(blank=True, max_length=255, null=True, verbose_name='مصدر 2 للتثبت من عملك')),
                ('phon_2', models.CharField(blank=True, max_length=255, null=True, verbose_name='رقم هاتف للمصدر الثاني ')),
                ('email_2', models.EmailField(blank=True, max_length=255, null=True, verbose_name='بريد الكتروني للمصدر الثاني')),
                ('state_step', models.CharField(blank=True, choices=[('0', 'لم يتم البدء بالمعالجة'), ('1', 'الخطوة الاولى'), ('2', 'الخطوة الثانية'), ('3', 'الخطوة الثالثة'), ('4', 'تمت المعالجة'), ('5', 'تحميل ملفات مرفقة')], max_length=30, null=True, verbose_name='المعالجة')),
                ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Lookup table describing a supporting organisation.
        migrations.CreateModel(
            name='Support_descrption',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('suppo', models.CharField(blank=True, max_length=255, null=True, verbose_name='اسم الجهة الداعمة ')),
                ('suppo_description', models.CharField(blank=True, max_length=255, null=True, verbose_name='وصف حول الجهة ')),
            ],
        ),
        # Per-registration referral lines to specific donor organisations.
        migrations.CreateModel(
            name='SupportOrgchild',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('support1', models.CharField(blank=True, choices=[('0', 'مراسلون بلا حدود | RSF'), ('1', 'فري بريس أنليميتيد | FPU'), ('2', 'مؤسسة الإعلام النسوي الدولية | IWMF'), ('3', 'مؤسسة كاليتي | Kality Foundation'), ('4', 'لايف لاين | Lifeline')], max_length=255, null=True, verbose_name='الجهة الداعمة ')),
                ('cost', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True, verbose_name='التكلفة مقدرة باليورو')),
                ('support', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='formula.Registration')),
            ],
        ),
        # One referral outcome per registration (OneToOne).
        migrations.CreateModel(
            name='SupportOrg',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_of_response', models.DateField(verbose_name='تاريخ الإحالة ')),
                ('result_of_org', models.CharField(choices=[('0', 'مقبول'), ('1', 'مرفوض')], default=False, max_length=255, null=True, verbose_name='النتيجة')),
                ('date_of_result', models.DateField(verbose_name='تاريخ الإحالة ')),
                ('support', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='formula.Registration')),
            ],
        ),
        # Optional link from each registration to a supporting organisation.
        migrations.AddField(
            model_name='registration',
            name='support_org_state_1',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='formula.Support_descrption'),
        ),
        # Verification / vetting record, one per registration (OneToOne).
        migrations.CreateModel(
            name='Checking',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_of_updat', models.DateField(auto_now=True, null=True, verbose_name='تاريخ اخر تحديث ')),
                ('tiitle_of_state', models.CharField(blank=True, max_length=255, null=True, verbose_name='عنوان الحالة')),
                ('urg_mark', models.CharField(blank=True, max_length=255, null=True, verbose_name='درجة الطوارئ ')),
                ('confirm_stat', models.CharField(blank=True, max_length=255, null=True, verbose_name='نوع الحالة ')),
                ('verfication_method', models.CharField(blank=True, max_length=255, null=True, verbose_name='آلية التحقق ')),
                ('total_of_note', models.CharField(blank=True, default=0, max_length=255, null=True, verbose_name=' مجموع النقاط')),
                ('family_state_1', models.CharField(blank=True, max_length=30, null=True, verbose_name='الوضع العائلي')),
                ('medical_state', models.CharField(blank=True, max_length=30, null=True, verbose_name='-الوضع الطبي ')),
                ('medical_state_note', models.CharField(blank=True, max_length=30, null=True, verbose_name='تقيم الوضع الصحي')),
                ('educatton_level_1', models.CharField(blank=True, max_length=255, null=True, verbose_name='التحصيل العلمي')),
                ('cruntly_adre', models.CharField(blank=True, max_length=30, null=True, verbose_name='\tتقيم خطورة مكان الإقامة الحالي ')),
                ('traning_partcipate', models.CharField(blank=True, max_length=30, null=True, verbose_name='\tالمشاركة بورشات سابقة')),
                ('member_in_journal', models.CharField(blank=True, max_length=30, null=True, verbose_name='هل هو عضو في مجمع صحفي')),
                ('hase_violants', models.CharField(blank=True, default=1, max_length=30, null=True, verbose_name='تعرّض مُقدّم الطلب لأيّ انتهاكات ')),
                ('is_related_with_media', models.CharField(blank=True, choices=[('0', 'نعم'), ('1', 'لا')], max_length=255, null=True, verbose_name='هل طلب الدعم مرتبط بالعمل الصحفي')),
                ('number_of_year_exprince', models.CharField(blank=True, choices=[('0', 'أقل من عامين'), ('1', 'عامين إلى خمسة'), ('2', 'أكثر من خمسة ')], max_length=255, null=True, verbose_name='عدد سنوات الخبرة في العمل')),
                ('note_of_year_experince', models.CharField(blank=True, default=0, max_length=255, null=True, verbose_name='تقيم عدد سنوات الخبرة في العمل')),
                ('note_paid_job', models.CharField(blank=True, max_length=255, null=True, verbose_name='تقيم العمل بإجر')),
                ('manitry_realtion', models.CharField(blank=True, choices=[('0', 'نعم'), ('1', 'لا')], max_length=255, null=True, verbose_name='هل لديه ارتباطات عسكرية')),
                ('note_manitry_realtion', models.CharField(blank=True, max_length=255, null=True, verbose_name='ملاحظة حول لارتباطات العسكرية')),
                ('is_thier_info_correct', models.CharField(blank=True, choices=[('0', 'نعم'), ('1', 'لا')], max_length=255, null=True, verbose_name='هل قدم معلومات صحيحة ضمن طلب الدعم ')),
                ('is_thier_heate_speech', models.CharField(blank=True, choices=[('0', 'نعم'), ('1', 'لا')], max_length=255, null=True, verbose_name='-\tهل يُحرّض على العنف والكراهية؟ أو الإرهاب أو الطائفيّة؟ ')),
                ('is_thier_heate_speech_note', models.CharField(blank=True, max_length=255, null=True, verbose_name='شرح لتقيم التحريض ')),
                ('type_heate_speech', models.CharField(blank=True, choices=[('0', 'نعم'), ('1', 'لا')], max_length=255, null=True, verbose_name='هل هو خطاب تميّزي على أساس العرق أو الدين أو أو النوع الجندري أو الطائفة أو القوميّة؟ ')),
                ('note_type_heate_speech', models.CharField(blank=True, max_length=255, null=True, verbose_name='هل هو خطاب تميّزي على أساس العرق أو الدين أو أو النوع الجندري أو الطائفة أو القوميّة؟ ')),
                ('rspect_legal_coppyright', models.CharField(blank=True, choices=[('0', 'نعم'), ('1', 'لا')], max_length=255, null=True, verbose_name='-هل يُراعي الحق في الخصوصيّة والصور؟ ')),
                ('note_rspect_legal_coppyright', models.CharField(blank=True, max_length=255, null=True, verbose_name='شرح لمراعاة خصوصية الصور ')),
                ('mark_rspect_legal_coppyright', models.CharField(blank=True, default=0, max_length=255, null=True, verbose_name='-تقيم الحق في الخصوصيّة والصور؟')),
                ('rspect_coppyright', models.CharField(blank=True, choices=[('0', 'نعم'), ('1', 'لا')], max_length=255, null=True, verbose_name='هل يُراعي حقوق الملكية الفكرية؟ ')),
                ('note_rspect_coppyright', models.CharField(blank=True, max_length=255, null=True, verbose_name='شرح لاحترام حقوق النشر ')),
                ('mark_rspect_coppyright', models.CharField(blank=True, default=0, max_length=255, null=True, verbose_name=' تقيم شرح لاحترام حقوق النشر ')),
                ('rspect_right_human', models.CharField(blank=True, choices=[('0', 'نعم'), ('1', 'لا')], max_length=255, null=True, verbose_name='هل يُراعي شرعة حقوق الإنسان؟ ')),
                ('note_rspect_right_human', models.CharField(blank=True, max_length=255, null=True, verbose_name='شرح لتقيم احترام حقوق الانسان ')),
                ('mark_rspect_right_human', models.CharField(blank=True, default=0, max_length=255, null=True, verbose_name='تقيم احترام حقوق الانسان ')),
                ('prof_media', models.CharField(blank=True, choices=[('0', '0'), ('1', '1')], max_length=255, null=True, verbose_name='المهنية في صياغة الاخبار')),
                ('first_recmond_name', models.CharField(blank=True, max_length=255, null=True, verbose_name='اسم المعرف الاول')),
                ('here_speech_1', models.CharField(blank=True, choices=[('0', 'التنيجة ايجابية'), ('1', 'النتيجة سلبية')], max_length=255, null=True, verbose_name='شهادة المعرف')),
                ('recmond_1att', models.CharField(blank=True, max_length=255, null=True, verbose_name='اثبات شهادة المعرف الأول')),
                ('second_recmond_name', models.CharField(blank=True, max_length=255, null=True, verbose_name='اسم المعرف الثاني')),
                ('here_speech_2', models.CharField(blank=True, choices=[('0', 'التنيجة ايجابية'), ('1', 'النتيجة سلبية')], max_length=255, null=True, verbose_name='شهادة المعرف')),
                ('recmond_2_att', models.CharField(blank=True, max_length=255, null=True, verbose_name='اثبات شهادة المعرف الثاني')),
                ('check_responsabl_group_opnion', models.TextField(blank=True, max_length=255, null=True, verbose_name='تحقق مسؤول التواصل أو المتعاونين تسجيل المعلومات الواردة حول طالب الدعم ')),
                ('date_of_verficaton', models.DateField(blank=True, null=True, verbose_name='تاريخ الانتهاء من التحقق ')),
                ('result_of_verfication', models.CharField(blank=True, choices=[('0', 'مقبولة'), ('1', 'مرفوضة'), ('2', 'بحث عن مانحين')], max_length=255, null=True, verbose_name='نتيجة التحقق ')),
                ('sumary_of_study', models.TextField(blank=True, max_length=255, null=True, verbose_name='ملاحظات إضافية تتضمن أية ملاحظات حول الحالة ')),
                ('registration', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='formula.Registration')),
            ],
        ),
        # Arbitrary file attachments for a case.
        migrations.CreateModel(
            name='CaseFile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('file', models.FileField(blank=True, null=True, upload_to='documents/')),
                ('descrpiton', models.CharField(max_length=255, null=True, verbose_name='وصف الملف المرفق ')),
                ('case', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='formula.Registration')),
            ],
        ),
    ]
| [
"jh.programmer@scm.bz"
] | jh.programmer@scm.bz |
f8346c4580ceeac53566875cd0d61c1958b8a7b8 | 720621eb58129414eb7b4717d4b2d40b04ec1f01 | /reto3.py | e26dba7a936060dd69954adf7cc17a8cda20eafe | [] | no_license | Jorgemacias91/ejercicios-python | 4a5faa826864356513b85bcfa5156a2d43c7f38e | cb7299abb7486309bd3581aae486b930b2253e3b | refs/heads/master | 2023-06-29T02:06:11.732480 | 2021-07-12T16:26:09 | 2021-07-12T16:26:09 | 385,308,244 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 413 | py |
n = int(input())
listas = []
for i in range(n):
valor = input().split()
listas.append(valor)
bandera = False
for i in range(len(listas)):
if(
int(listas[i][0]) >= 3
and int(listas[i][1]) >= 4
and int(listas[i][2]) <= 24
and int(listas[i][3]) <= 10
):
bandera = True
print(int(listas[i][4]))
if(bandera == False):
print('NO DISPONIBLE')
| [
"jmaciasvesga@gmail.com"
] | jmaciasvesga@gmail.com |
c92eb73452c18a2297bb716fa73b4aeb74d7822b | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/223/users/4182/codes/1644_2703.py | 494d16f9af8086445803a09cef9f1f6afbb7ded8 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | idade = int(input())
# Voting eligibility: 18 or older counts as an elector
# (idade is read from stdin on the preceding line).
mensagem = "eleitor" if idade >= 18 else "nao_eleitor"
print(mensagem) | [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
4b18745174f3d59dffd1c39b71f47792fc415a53 | 897f53f73dde4f94a4292fa826a1e65494fe082b | /Zprime_UFO/parameters.py | f7f9720f638576a45a17216edc5a41650d2f8ca1 | [] | no_license | TemplateJetSubstructure/EventGeneration | b35dc875c944d3ef067d418748830d1a524ec3ef | 5f79e735942c606a8cedcff6b2dbd4e637259ab3 | refs/heads/master | 2021-07-05T08:48:57.981621 | 2017-09-17T05:46:56 | 2017-09-17T05:46:56 | 103,061,979 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,019 | py | # This file was automatically created by FeynRules $Revision: 595 $
# Mathematica version: 7.0 for Mac OS X x86 (64-bit) (February 19, 2009)
# Date: Fri 27 Jul 2012 19:56:48
from object_library import all_parameters, Parameter
from function_library import complexconjugate, re, im, csc, sec, acsc, asec
# This is a default parameter object representing 0.
# NOTE(review): this module is FeynRules-generated UFO model data; parameter
# values, LHA block/code assignments and declaration order are kept exactly
# as generated.  Only comments are added here.
ZERO = Parameter(name = 'ZERO',
                 nature = 'internal',
                 type = 'real',
                 value = '0.0',
                 texname = '0')
# User-defined parameters.
# External inputs read from the param card: Cabibbo angle, SM inputs
# (alpha_EW^-1, G_F, alpha_s) and Yukawa masses.
cabi = Parameter(name = 'cabi',
                 nature = 'external',
                 type = 'real',
                 value = 0.227736,
                 texname = '\\theta _c',
                 lhablock = 'CKMBLOCK',
                 lhacode = [ 1 ])
aEWM1 = Parameter(name = 'aEWM1',
                  nature = 'external',
                  type = 'real',
                  value = 127.9,
                  texname = '\\text{aEWM1}',
                  lhablock = 'SMINPUTS',
                  lhacode = [ 1 ])
Gf = Parameter(name = 'Gf',
               nature = 'external',
               type = 'real',
               value = 0.0000116637,
               texname = 'G_f',
               lhablock = 'SMINPUTS',
               lhacode = [ 2 ])
aS = Parameter(name = 'aS',
               nature = 'external',
               type = 'real',
               value = 0.1184,
               texname = '\\text{aS}',
               lhablock = 'SMINPUTS',
               lhacode = [ 3 ])
ymdo = Parameter(name = 'ymdo',
                 nature = 'external',
                 type = 'real',
                 value = 0.00504,
                 texname = '\\text{ymdo}',
                 lhablock = 'YUKAWA',
                 lhacode = [ 1 ])
ymup = Parameter(name = 'ymup',
                 nature = 'external',
                 type = 'real',
                 value = 0.0025499999999999997,
                 texname = '\\text{ymup}',
                 lhablock = 'YUKAWA',
                 lhacode = [ 2 ])
yms = Parameter(name = 'yms',
                nature = 'external',
                type = 'real',
                value = 0.101,
                texname = '\\text{yms}',
                lhablock = 'YUKAWA',
                lhacode = [ 3 ])
ymc = Parameter(name = 'ymc',
                nature = 'external',
                type = 'real',
                value = 1.27,
                texname = '\\text{ymc}',
                lhablock = 'YUKAWA',
                lhacode = [ 4 ])
ymb = Parameter(name = 'ymb',
                nature = 'external',
                type = 'real',
                value = 4.7,
                texname = '\\text{ymb}',
                lhablock = 'YUKAWA',
                lhacode = [ 5 ])
ymt = Parameter(name = 'ymt',
                nature = 'external',
                type = 'real',
                value = 172.,
                texname = '\\text{ymt}',
                lhablock = 'YUKAWA',
                lhacode = [ 6 ])
yme = Parameter(name = 'yme',
                nature = 'external',
                type = 'real',
                value = 0.0005110000000000001,
                texname = '\\text{yme}',
                lhablock = 'YUKAWA',
                lhacode = [ 11 ])
ymm = Parameter(name = 'ymm',
                nature = 'external',
                type = 'real',
                value = 0.10566,
                texname = '\\text{ymm}',
                lhablock = 'YUKAWA',
                lhacode = [ 13 ])
ymtau = Parameter(name = 'ymtau',
                  nature = 'external',
                  type = 'real',
                  value = 1.777,
                  texname = '\\text{ymtau}',
                  lhablock = 'YUKAWA',
                  lhacode = [ 15 ])
# Model-specific Z' coupling strength (FRBlock).
gz = Parameter(name = 'gz',
               nature = 'external',
               type = 'real',
               value = 0.2,
               texname = '\\text{gz}',
               lhablock = 'FRBlock',
               lhacode = [ 1 ])
# Particle masses (LHA block MASS); 10030/10031 are the heavy/light Z'.
Me = Parameter(name = 'Me',
               nature = 'external',
               type = 'real',
               value = 0.0005110000000000001,
               texname = '\\text{Me}',
               lhablock = 'MASS',
               lhacode = [ 11 ])
MM = Parameter(name = 'MM',
               nature = 'external',
               type = 'real',
               value = 0.10566,
               texname = '\\text{MM}',
               lhablock = 'MASS',
               lhacode = [ 13 ])
MTA = Parameter(name = 'MTA',
                nature = 'external',
                type = 'real',
                value = 1.777,
                texname = '\\text{MTA}',
                lhablock = 'MASS',
                lhacode = [ 15 ])
MU = Parameter(name = 'MU',
               nature = 'external',
               type = 'real',
               value = 0.0025499999999999997,
               texname = 'M',
               lhablock = 'MASS',
               lhacode = [ 2 ])
MC = Parameter(name = 'MC',
               nature = 'external',
               type = 'real',
               value = 1.27,
               texname = '\\text{MC}',
               lhablock = 'MASS',
               lhacode = [ 4 ])
MT = Parameter(name = 'MT',
               nature = 'external',
               type = 'real',
               value = 172,
               texname = '\\text{MT}',
               lhablock = 'MASS',
               lhacode = [ 6 ])
MD = Parameter(name = 'MD',
               nature = 'external',
               type = 'real',
               value = 0.00504,
               texname = '\\text{MD}',
               lhablock = 'MASS',
               lhacode = [ 1 ])
MS = Parameter(name = 'MS',
               nature = 'external',
               type = 'real',
               value = 0.101,
               texname = '\\text{MS}',
               lhablock = 'MASS',
               lhacode = [ 3 ])
MB = Parameter(name = 'MB',
               nature = 'external',
               type = 'real',
               value = 4.7,
               texname = '\\text{MB}',
               lhablock = 'MASS',
               lhacode = [ 5 ])
MZ = Parameter(name = 'MZ',
               nature = 'external',
               type = 'real',
               value = 91.1876,
               texname = '\\text{MZ}',
               lhablock = 'MASS',
               lhacode = [ 23 ])
MZph = Parameter(name = 'MZph',
                 nature = 'external',
                 type = 'real',
                 value = 600,
                 texname = '\\text{MZph}',
                 lhablock = 'MASS',
                 lhacode = [ 10030 ])
MZpl = Parameter(name = 'MZpl',
                 nature = 'external',
                 type = 'real',
                 value = 300,
                 texname = '\\text{MZpl}',
                 lhablock = 'MASS',
                 lhacode = [ 10031 ])
MH = Parameter(name = 'MH',
               nature = 'external',
               type = 'real',
               value = 120,
               texname = '\\text{MH}',
               lhablock = 'MASS',
               lhacode = [ 25 ])
# Decay widths (LHA block DECAY).
WT = Parameter(name = 'WT',
               nature = 'external',
               type = 'real',
               value = 1.50833649,
               texname = '\\text{WT}',
               lhablock = 'DECAY',
               lhacode = [ 6 ])
WZ = Parameter(name = 'WZ',
               nature = 'external',
               type = 'real',
               value = 2.4952,
               texname = '\\text{WZ}',
               lhablock = 'DECAY',
               lhacode = [ 23 ])
WW = Parameter(name = 'WW',
               nature = 'external',
               type = 'real',
               value = 2.085,
               texname = '\\text{WW}',
               lhablock = 'DECAY',
               lhacode = [ 24 ])
WH = Parameter(name = 'WH',
               nature = 'external',
               type = 'real',
               value = 0.00575308848,
               texname = '\\text{WH}',
               lhablock = 'DECAY',
               lhacode = [ 25 ])
# Internal parameters, derived from the external inputs above (the 'value'
# strings are evaluated by the UFO reader with the external values in scope).
aEW = Parameter(name = 'aEW',
                nature = 'internal',
                type = 'real',
                value = '1/aEWM1',
                texname = '\\text{aEW}')
G = Parameter(name = 'G',
              nature = 'internal',
              type = 'real',
              value = '2*cmath.sqrt(aS)*cmath.sqrt(cmath.pi)',
              texname = 'G')
# Cabibbo-only CKM matrix (third generation decoupled).
CKM11 = Parameter(name = 'CKM11',
                  nature = 'internal',
                  type = 'complex',
                  value = 'cmath.cos(cabi)',
                  texname = '\\text{CKM11}')
CKM12 = Parameter(name = 'CKM12',
                  nature = 'internal',
                  type = 'complex',
                  value = 'cmath.sin(cabi)',
                  texname = '\\text{CKM12}')
CKM13 = Parameter(name = 'CKM13',
                  nature = 'internal',
                  type = 'complex',
                  value = '0',
                  texname = '\\text{CKM13}')
CKM21 = Parameter(name = 'CKM21',
                  nature = 'internal',
                  type = 'complex',
                  value = '-cmath.sin(cabi)',
                  texname = '\\text{CKM21}')
CKM22 = Parameter(name = 'CKM22',
                  nature = 'internal',
                  type = 'complex',
                  value = 'cmath.cos(cabi)',
                  texname = '\\text{CKM22}')
CKM23 = Parameter(name = 'CKM23',
                  nature = 'internal',
                  type = 'complex',
                  value = '0',
                  texname = '\\text{CKM23}')
CKM31 = Parameter(name = 'CKM31',
                  nature = 'internal',
                  type = 'complex',
                  value = '0',
                  texname = '\\text{CKM31}')
CKM32 = Parameter(name = 'CKM32',
                  nature = 'internal',
                  type = 'complex',
                  value = '0',
                  texname = '\\text{CKM32}')
CKM33 = Parameter(name = 'CKM33',
                  nature = 'internal',
                  type = 'complex',
                  value = '1',
                  texname = '\\text{CKM33}')
# Z' widths computed from the coupling and masses.
WZph = Parameter(name = 'WZph',
                 nature = 'internal',
                 type = 'real',
                 value = '(gz**2*MZph*(5 + cmath.sqrt(1 - (4*MT**2)/MZph**2)))/(144.*cmath.pi)',
                 texname = '\\text{WZph}')
WZpl = Parameter(name = 'WZpl',
                 nature = 'internal',
                 type = 'real',
                 value = '(5*gz**2*MZpl)/(144.*cmath.pi)',
                 texname = '\\text{WZpl}')
# Electroweak sector derived quantities (MW from MZ, alpha_EW and G_F).
MW = Parameter(name = 'MW',
               nature = 'internal',
               type = 'real',
               value = 'cmath.sqrt(MZ**2/2. + cmath.sqrt(MZ**4/4. - (aEW*cmath.pi*MZ**2)/(Gf*cmath.sqrt(2))))',
               texname = 'M_W')
ee = Parameter(name = 'ee',
               nature = 'internal',
               type = 'real',
               value = '2*cmath.sqrt(aEW)*cmath.sqrt(cmath.pi)',
               texname = 'e')
sw2 = Parameter(name = 'sw2',
                nature = 'internal',
                type = 'real',
                value = '1 - MW**2/MZ**2',
                texname = '\\text{sw2}')
cw = Parameter(name = 'cw',
               nature = 'internal',
               type = 'real',
               value = 'cmath.sqrt(1 - sw2)',
               texname = 'c_w')
sw = Parameter(name = 'sw',
               nature = 'internal',
               type = 'real',
               value = 'cmath.sqrt(sw2)',
               texname = 's_w')
g1 = Parameter(name = 'g1',
               nature = 'internal',
               type = 'real',
               value = 'ee/cw',
               texname = 'g_1')
gw = Parameter(name = 'gw',
               nature = 'internal',
               type = 'real',
               value = 'ee/sw',
               texname = 'g_w')
v = Parameter(name = 'v',
              nature = 'internal',
              type = 'real',
              value = '(2*MW*sw)/ee',
              texname = 'v')
lam = Parameter(name = 'lam',
                nature = 'internal',
                type = 'real',
                value = 'MH**2/(2.*v**2)',
                texname = '\\text{lam}')
# Yukawa couplings derived from the Yukawa masses and the Higgs vev.
yb = Parameter(name = 'yb',
               nature = 'internal',
               type = 'real',
               value = '(ymb*cmath.sqrt(2))/v',
               texname = '\\text{yb}')
yc = Parameter(name = 'yc',
               nature = 'internal',
               type = 'real',
               value = '(ymc*cmath.sqrt(2))/v',
               texname = '\\text{yc}')
ydo = Parameter(name = 'ydo',
                nature = 'internal',
                type = 'real',
                value = '(ymdo*cmath.sqrt(2))/v',
                texname = '\\text{ydo}')
ye = Parameter(name = 'ye',
               nature = 'internal',
               type = 'real',
               value = '(yme*cmath.sqrt(2))/v',
               texname = '\\text{ye}')
ym = Parameter(name = 'ym',
               nature = 'internal',
               type = 'real',
               value = '(ymm*cmath.sqrt(2))/v',
               texname = '\\text{ym}')
ys = Parameter(name = 'ys',
               nature = 'internal',
               type = 'real',
               value = '(yms*cmath.sqrt(2))/v',
               texname = '\\text{ys}')
yt = Parameter(name = 'yt',
               nature = 'internal',
               type = 'real',
               value = '(ymt*cmath.sqrt(2))/v',
               texname = '\\text{yt}')
ytau = Parameter(name = 'ytau',
                 nature = 'internal',
                 type = 'real',
                 value = '(ymtau*cmath.sqrt(2))/v',
                 texname = '\\text{ytau}')
yup = Parameter(name = 'yup',
                nature = 'internal',
                type = 'real',
                value = '(ymup*cmath.sqrt(2))/v',
                texname = '\\text{yup}')
muH = Parameter(name = 'muH',
                nature = 'internal',
                type = 'real',
                value = 'cmath.sqrt(lam*v**2)',
                texname = '\\mu ')
| [
"bnachman@cern.ch"
] | bnachman@cern.ch |
de789c3f754d85ff308906be6e264806b95ff074 | 6a25f68bd4104fb564f21bde048015aafa489bf2 | /env/bin/easy_install-3.6 | d47aeac991e2678415e06787df91510f7510876e | [] | no_license | HustleCorp/Django-Microservice | c5aa4053f1001286d395a49be03e2569103712d3 | 7e54843190c72c9f66c157b8e62b85d19f83735c | refs/heads/master | 2023-01-07T18:56:43.608124 | 2019-12-09T17:29:09 | 2019-12-09T17:29:09 | 226,013,742 | 1 | 0 | null | 2023-01-04T13:31:19 | 2019-12-05T04:09:49 | JavaScript | UTF-8 | Python | false | false | 271 | 6 | #!/home/shegerking/Desktop/django-docker/env/bin/python
# -*- coding: utf-8 -*-
# Auto-generated console-script shim written by setuptools at install time
# for the 'easy_install-3.6' command; regenerated on reinstall, so only
# comments are added here.
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Strip a trailing "-script.py"/"-script.pyw"/".exe" so argv[0] shows the
    # bare command name in usage output (matters mainly on Windows).
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"bktotient@gmail.com"
] | bktotient@gmail.com |
409166fb8dd65259d5804eba3ea3d79e673f93bc | fe0e7985fb0c7d2b57dbf8e3fbe83b27b0a0cdeb | /ServiceApplication_SourceFiles/Mod_FlexmapImage.py | 08a14158f7b5d73796552db83814b39ac5ecbb9d | [
"MIT"
] | permissive | lynch829/FluoMV | 04e7516c3aa3d6ac2d485ce2d7c26abd0f1108ff | 6fc226ee0ca1427495f0ab483c63a2ca5195954e | refs/heads/master | 2022-11-23T06:04:09.812686 | 2020-08-03T15:05:22 | 2020-08-03T15:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,698 | py | # -*- coding: utf-8 -*-
import numpy as np
import sys
import os
#===============================================================================================================
class FlexmapImage:
    """Locate the ball-bearing (BB) marker in a flexmap MV image.

    Execute() returns the BB centre (x, y) in full-image pixel coordinates,
    each rounded to one decimal place.  The BB is expected inside the
    [462:562, 462:562] window of the image (array indexing is [y][x]).
    """

    def __init__(self, thisSettingsObj):
        self.Status = True
        self.SettingsObj = thisSettingsObj

    # --------------------------------------------------------------------
    def Execute(self, thisImArray):
        """Run the BB search on thisImArray; return (x, y) in pixels."""
        self.ImArrayFiltered = thisImArray
        # Window of the image where the ball bearing must lie.
        self.ImArrayBallBearing = self.ImArrayFiltered[462:562, 462:562]
        # Intensity of the 100% (open) radiation field.
        self.Normalisation = 0
        # Grey-level cut over the window: pixels darker than this belong
        # to the ball bearing.
        self.LowThreshold = 0
        self.BBXPosi = 0
        self.BBYPosi = 0
        self.GetThresholds()
        self.GetBallBearing()
        return np.around(self.BBXPosi, 1), np.around(self.BBYPosi, 1)

    # --------------------------------------------------------------------
    def GetThresholds(self):
        """Estimate self.LowThreshold and self.Normalisation from the window.

        A 10-bin histogram of the BB window separates the dark BB pixels
        (lowest bins) from the open-field pixels (highest bins).
        """
        # One vectorised pass instead of the original per-pixel Python loops;
        # 'bin_edges' also avoids shadowing the builtin 'bin'.
        counts, bin_edges = np.histogram(self.ImArrayBallBearing.ravel(), 10)
        # Upper edge of the second bin: approximation of the BB grey level.
        self.LowThreshold = bin_edges[2]
        # Midpoint of the last bin: grey level at the centre of the field.
        self.Normalisation = (bin_edges[-2] + bin_edges[-1]) * 0.5

    # --------------------------------------------------------------------
    def GetBallBearing(self):
        """Compute the BB centre (self.BBXPosi / self.BBYPosi) in pixels.

        Uses the mean of the thresholded pixel coordinates unless it
        disagrees with the median by more than one pixel (suggesting
        outliers), in which case the median is used instead.
        """
        # Coordinates of every window pixel dark enough to belong to the BB,
        # shifted by 462 back into full-image coordinates.  Vectorised
        # replacement for the original scan over range(462, 562) squared.
        ys, xs = np.nonzero(self.ImArrayBallBearing <= self.LowThreshold)
        xs = xs + 462
        ys = ys + 462
        # NOTE(review): if no pixel falls below the threshold these
        # statistics are NaN, exactly as in the original implementation.
        self.BBXPosiMedian = np.median(xs)
        self.BBYPosiMedian = np.median(ys)
        self.BBXPosiMoyen = np.mean(xs)
        self.BBYPosiMoyen = np.mean(ys)
        if abs(self.BBXPosiMedian - self.BBXPosiMoyen) <= 1:
            self.BBXPosi = self.BBXPosiMoyen
        else:
            self.BBXPosi = self.BBXPosiMedian
        if abs(self.BBYPosiMedian - self.BBYPosiMoyen) <= 1:
            self.BBYPosi = self.BBYPosiMoyen
        else:
            self.BBYPosi = self.BBYPosiMedian
#==============================================================================================================================================
| [
"mathieu.guillot@usherbrooke.ca"
] | mathieu.guillot@usherbrooke.ca |
ee4637d7c5f80fb4a059cda9c706b0e18d4033a1 | e9b04482febf819214a81f9da051d87ece89d19f | /PythonExercicios/ex010.py | a0c9ba185f95f4e933a817d9bc4c3d1479c7dfe1 | [
"MIT"
] | permissive | ViniciusQueiros16/Python | 6a20ba9874aadec7dc90eea7cad3fcdb4a685995 | c03fd08d4c204104bf0196b0bd129427fd2067ae | refs/heads/main | 2022-12-31T07:56:59.287860 | 2020-10-11T21:40:24 | 2020-10-11T21:40:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | import emoji
print('\033[33m=\033[m'*12,'\033[34mConversor de Moedas\033[m','\033[33m=\033[m'*12)
n = float(input('\033[1;31;43mQuanto dinheiro vc tem na carteira? R$\033[m'))
print('\033[32mCom R$\033[35m{}\033[m \033[32mvocê pode comprar US$\033[35m{:.2f}\033[m \033[32mou £\033[35m{:.2f}\033[35m'.format(n,(n/5.31), (n/5.77))) | [
"70244206+lordvinick@users.noreply.github.com"
] | 70244206+lordvinick@users.noreply.github.com |
71805448f7085ea893f12fd7723c5398abb76fa6 | e2ae5b6b7143987a3b8b64a4d502e89d50f807c9 | /SeparandoDigitosNum.py | 719399519490756b0af80769b724436ae570a11c | [] | no_license | welleson-lukas/python-projects | 7cc95f28a2268e7f090c073d8dd68600231bf943 | d8de21a52eb1a89e8312afc650fa3ec341bd31a2 | refs/heads/master | 2020-12-20T11:00:00.075766 | 2020-03-16T20:29:06 | 2020-03-16T20:29:06 | 236,051,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | #separando digitos de numeros ate 9999
#unidades, dezenas, centenas, milhares
num = int(input('Digite o numero: '))
u = num // 1 % 10
d = num // 10 % 10
c = num // 100 % 10
m = num // 1000 % 10
print('Analisando o número {}'.format(num))
print('Milhares {}'.format(m))
print('Centenas {}'.format(c))
print('Dezenas {}'.format(d))
print('Unidades {}'.format(u))
| [
"noreply@github.com"
] | welleson-lukas.noreply@github.com |
59aeeb5cfbbd52e95cf09691c8180bb4633af9c4 | 321b4ed83b6874eeb512027eaa0b17b0daf3c289 | /606/606.construct-string-from-binary-tree.234546044.Wrong-Answer.leetcode.py | 30bee179f09e3deb04c3c0ab49b7e971a008aac3 | [] | no_license | huangyingw/submissions | 7a610613bdb03f1223cdec5f6ccc4391149ca618 | bfac1238ecef8b03e54842b852f6fec111abedfa | refs/heads/master | 2023-07-25T09:56:46.814504 | 2023-07-16T07:38:36 | 2023-07-16T07:38:36 | 143,352,065 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | class Solution:
def tree2str(self, t):
if not t:
return ''
left = '({})'.format(self.tree2str(t.left)) if (t.left or t.right) else ''
right = '({})'.format(self.tree2str(t.right)) if t.right else ''
return '{}{}{}'.format(t.val, left, right)
def tree2str(self, t):
if not t:
return ""
subleft = "(%s)" % (self.tree2str(t.left) if t.left or t.right else "")
subright = "(%s)" % (self.tree2str(t.right) if t.right else "")
return ("%s%s%s" % (str(t.val), subleft, subright)).replace("()", "")
| [
"huangyingw@gmail.com"
] | huangyingw@gmail.com |
f4602cd7303062ababc188c3b723277d0c1806dc | 5cd8654fd6db6789d3be0db676ab2aa73b17417d | /lab5_Conditional_GANs/parser.py | 58fe7ceec8bd1601b1430c875813137188fd3059 | [] | no_license | bhbruce/DLP_Lab | be23936d2025cf155b3a71f86303dc11853d1c29 | dd28f9cd063044757ae97819099f68f44919de44 | refs/heads/master | 2022-12-30T16:41:30.991055 | 2020-09-17T11:30:15 | 2020-09-17T11:30:15 | 296,305,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,717 | py | from argparse import ArgumentParser
def argparser():
parser = ArgumentParser(description='PyTorch Conditional GAN')
parser.add_argument("--n_epochs", type=int, default=800, help="number of epochs of training")
parser.add_argument("--batch_size", type=int, default=64, help="size of the batches")
parser.add_argument("--lr", type=float, default=2e-4, help="adam: learning rate")
parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of first order momentum of gradient")
parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation")
parser.add_argument("--latent_dim", type=int, default=100, help="dimensionality of the latent space")
parser.add_argument("--n_classes", type=int, default=24, help="number of classes for dataset")
parser.add_argument("--img_size", type=int, default=64, help="size of each image dimension")
parser.add_argument("--channels", type=int, default=3, help="number of image channels")
parser.add_argument("--sample_interval", type=int, default=400, help="interval between image sampling")
parser.add_argument("--seed", type=int, default=666, help="specific seed")
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
return parser.parse_args()
if __name__ == '__main__':
args = argparser()
print(args)
| [
"bhbruce.cs07g@nctu.edu.tw"
] | bhbruce.cs07g@nctu.edu.tw |
1870dfbf850c058f2747b95d3dfcfb2717daf4ef | 2cd90c3056b5fb2031136e3d13d8aee3b52fda54 | /exevada/apps/exevada/migrations/0001_initial.py | b807af914f3e22bb74cd39d692b6d3dd754b7d67 | [
"Apache-2.0"
] | permissive | C3S-attribution-service/exevada | b706e9ac7aacfedbf78648ae437edc60be8ffb37 | db121cb61fdd82e0d767955d7df8b25cb041f05d | refs/heads/master | 2023-04-23T12:23:21.194046 | 2021-05-19T14:43:02 | 2021-05-19T14:43:02 | 269,430,055 | 2 | 0 | Apache-2.0 | 2021-05-19T14:43:02 | 2020-06-04T18:04:03 | HTML | UTF-8 | Python | false | false | 18,827 | py | # Generated by Django 3.1.1 on 2020-12-17 22:30
import django.contrib.gis.db.models.fields
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Attribution',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('PR', models.FloatField(default=0.0, help_text='Probability ratio', validators=[django.core.validators.MinValueValidator(0.0)])),
('PR_min', models.FloatField(blank=True, help_text='Probability ratio lower bound', null=True)),
('PR_max', models.FloatField(blank=True, help_text='Probability ratio upper bound', null=True)),
('Delta_I', models.FloatField(default=0.0, help_text='Intensity change', validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)])),
('Delta_I_min', models.FloatField(blank=True, help_text='Intensity change lower bound', null=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)])),
('Delta_I_max', models.FloatField(blank=True, help_text='Intensity change upper bound', null=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)])),
('comments', models.TextField(blank=True, help_text='Remarks')),
('attribution_request', models.TextField(blank=True, help_text='Request for attribution')),
('description', models.CharField(help_text='Short descriptive string', max_length=256, unique=True)),
('return_period', models.PositiveIntegerField(help_text='Rounded return period (yr)', null=True, validators=[django.core.validators.MinValueValidator(0.0)])),
('conclusions', models.TextField(blank=True, help_text='Synthesis conclusions')),
('contact', models.CharField(blank=True, help_text='Contact email adress', max_length=1024)),
('webpage', models.URLField(blank=True, default='https://attribution.climate.copernicus.eu', help_text='Relevant web page', max_length=512)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='AttributionVariable',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('short_name', models.CharField(help_text='Abbreviated variable name', max_length=16)),
('long_name', models.CharField(blank=True, help_text='Full variable name', max_length=128)),
('description', models.TextField(blank=True, help_text='Description')),
('unit', models.CharField(blank=True, help_text='Unit of the variable', max_length=64)),
('unit_symbol', models.CharField(blank=True, help_text='Unit symbol of the variable', max_length=16)),
('delta_I_unit_symbol', models.CharField(blank=True, help_text='Unit symbol of intensity change', max_length=16)),
],
),
migrations.CreateModel(
name='DistributionType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Distribution name', max_length=128, unique=True)),
('abbreviation', models.CharField(blank=True, help_text='Abbreviation', max_length=32)),
('has_shape_parameter', models.BooleanField(help_text='Does the distribution contain tuneable shape parameter')),
],
),
migrations.CreateModel(
name='EventType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Event type', max_length=32, unique=True)),
('description', models.TextField(blank=True)),
('icon', models.ImageField(blank=True, upload_to='img/')),
],
),
migrations.CreateModel(
name='JournalPaper',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(help_text='Publication title', max_length=1024)),
('doi', models.CharField(blank=True, help_text='DOI (no URL) of related publication', max_length=256)),
('authors', models.CharField(help_text='Author list', max_length=1024)),
('date', models.DateField(help_text='Publication date')),
('url', models.URLField(blank=True, max_length=512)),
('journal', models.CharField(help_text='Journal', max_length=256)),
('issue', models.IntegerField(blank=True, help_text='Issue', null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Location name', max_length=256)),
('area', django.contrib.gis.db.models.fields.GeometryField(help_text='Geographic location or region', srid=4326)),
('description', models.TextField(blank=True)),
],
),
migrations.CreateModel(
name='PressCommunication',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(help_text='Publication title', max_length=1024)),
('doi', models.CharField(blank=True, help_text='DOI (no URL) of related publication', max_length=256)),
('authors', models.CharField(help_text='Author list', max_length=1024)),
('date', models.DateField(help_text='Publication date')),
('url', models.URLField(blank=True, max_length=512)),
('medium', models.CharField(help_text='Medium', max_length=256)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='StatisticalMethod',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Name of the method', max_length=32, unique=True)),
('description', models.TextField(blank=True, help_text='Short description of the method')),
('covariate', models.CharField(help_text='Proxy for anthropogenic forcing', max_length=32)),
('dispersion_fit', models.BooleanField(help_text='Does the method fit the dispersion (sigma/mu) or scale (sigma) parameter?')),
],
),
migrations.CreateModel(
name='ObservationDataSet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Observation dataset', max_length=256)),
('url', models.URLField(blank=True, max_length=512)),
('description', models.TextField(blank=True, help_text='Dataset description')),
('doi', models.CharField(blank=True, help_text='DOI of dataset', max_length=256)),
('papers', models.ManyToManyField(blank=True, help_text='Reviewed papers describing the attribution', to='exevada.JournalPaper')),
],
),
migrations.CreateModel(
name='ObservationAnalysis',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('PR', models.FloatField(default=0.0, help_text='Probability ratio', validators=[django.core.validators.MinValueValidator(0.0)])),
('PR_min', models.FloatField(blank=True, help_text='Probability ratio lower bound', null=True)),
('PR_max', models.FloatField(blank=True, help_text='Probability ratio upper bound', null=True)),
('Delta_I', models.FloatField(default=0.0, help_text='Intensity change', validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)])),
('Delta_I_min', models.FloatField(blank=True, help_text='Intensity change lower bound', null=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)])),
('Delta_I_max', models.FloatField(blank=True, help_text='Intensity change upper bound', null=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)])),
('comments', models.TextField(blank=True, help_text='Remarks')),
('sigma', models.FloatField(default=0.0, help_text='Fitted scale/dispersion parameter')),
('sigma_min', models.FloatField(blank=True, help_text='Scale/dispersion parameter lower bound', null=True)),
('sigma_max', models.FloatField(blank=True, help_text='Scale/dispersion parameter upper bound', null=True)),
('xi', models.FloatField(blank=True, help_text='Fitted shape parameter', null=True)),
('xi_min', models.FloatField(blank=True, help_text='Shape parameter lower bound', null=True)),
('xi_max', models.FloatField(blank=True, help_text='Shape parameter upper bound', null=True)),
('y_past', models.PositiveIntegerField(blank=True, help_text='Starting year of the analysis', null=True)),
('y_pres', models.PositiveIntegerField(blank=True, help_text='Ending year of the analysis', null=True)),
('trend', models.FloatField(blank=True, help_text='Calculated trend', null=True)),
('variable_value', models.FloatField(default=0.0, help_text='Variable value for this observation dataset')),
('T_return', models.PositiveIntegerField(help_text='Return period (yr)', validators=[django.core.validators.MinValueValidator(0.0)])),
('T_return_min', models.PositiveIntegerField(blank=True, help_text='Return period lower bound (yr)', null=True, validators=[django.core.validators.MinValueValidator(0.0)])),
('T_return_max', models.PositiveIntegerField(blank=True, help_text='Return period upper bound (yr)', null=True, validators=[django.core.validators.MinValueValidator(0.0)])),
('attribution', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='observations', to='exevada.attribution')),
('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exevada.observationdataset')),
],
options={
'verbose_name_plural': 'observation analyses',
},
),
migrations.CreateModel(
name='ModelDataSet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('model_name', models.CharField(help_text='Model output dataset', max_length=128)),
('model_description', models.TextField(blank=True, help_text='Model description')),
('experiment', models.CharField(help_text='Experiment', max_length=512)),
('experiment_description', models.TextField(blank=True, help_text='Experiment description')),
('url', models.URLField(blank=True, max_length=512)),
('doi', models.CharField(blank=True, help_text='DOI of dataset', max_length=256)),
('papers', models.ManyToManyField(blank=True, help_text='Reviewed papers describing the attribution', to='exevada.JournalPaper')),
],
),
migrations.CreateModel(
name='ModelAnalysis',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('PR', models.FloatField(default=0.0, help_text='Probability ratio', validators=[django.core.validators.MinValueValidator(0.0)])),
('PR_min', models.FloatField(blank=True, help_text='Probability ratio lower bound', null=True)),
('PR_max', models.FloatField(blank=True, help_text='Probability ratio upper bound', null=True)),
('Delta_I', models.FloatField(default=0.0, help_text='Intensity change', validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)])),
('Delta_I_min', models.FloatField(blank=True, help_text='Intensity change lower bound', null=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)])),
('Delta_I_max', models.FloatField(blank=True, help_text='Intensity change upper bound', null=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)])),
('comments', models.TextField(blank=True, help_text='Remarks')),
('sigma', models.FloatField(default=0.0, help_text='Fitted scale/dispersion parameter')),
('sigma_min', models.FloatField(blank=True, help_text='Scale/dispersion parameter lower bound', null=True)),
('sigma_max', models.FloatField(blank=True, help_text='Scale/dispersion parameter upper bound', null=True)),
('xi', models.FloatField(blank=True, help_text='Fitted shape parameter', null=True)),
('xi_min', models.FloatField(blank=True, help_text='Shape parameter lower bound', null=True)),
('xi_max', models.FloatField(blank=True, help_text='Shape parameter upper bound', null=True)),
('y_past', models.PositiveIntegerField(blank=True, help_text='Starting year of the analysis', null=True)),
('y_pres', models.PositiveIntegerField(blank=True, help_text='Ending year of the analysis', null=True)),
('trend', models.FloatField(blank=True, help_text='Calculated trend', null=True)),
('seasonal_cycle', models.CharField(choices=[('good', 'Good'), ('bad', 'Bad'), ('reasonable', 'Reasonable')], max_length=32)),
('spatial_pattern', models.CharField(choices=[('good', 'Good'), ('bad', 'Bad'), ('reasonable', 'Reasonable')], max_length=32)),
('attribution', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='models', to='exevada.attribution')),
('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exevada.modeldataset')),
],
options={
'verbose_name_plural': 'model analyses',
},
),
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Short, descriptive name or title for this event', max_length=128)),
('region', models.CharField(help_text='Geographic region where the event was observed', max_length=256)),
('start_date', models.DateField(help_text='Event starting date')),
('duration', models.PositiveIntegerField(help_text='Duration of the event (nr of days)')),
('season', models.CharField(choices=[('DJJ', 'Dec-Feb'), ('MAM', 'Mar-May'), ('JJA', 'Jun-Aug'), ('SON', 'Sep-Nov')], default='DJJ', help_text='Season', max_length=8)),
('deaths', models.PositiveIntegerField(blank=True, help_text='Number of deaths', null=True)),
('people_affected', models.PositiveIntegerField(blank=True, help_text='Number of people affected', null=True)),
('economical_loss', models.DecimalField(blank=True, decimal_places=2, help_text='Estimated economic loss in Meuro', max_digits=12, null=True)),
('comments', models.TextField(blank=True, help_text='Remarks')),
('image', models.ImageField(blank=True, upload_to='img/')),
('image_caption', models.TextField(blank=True, help_text='Image caption')),
('map_location', django.contrib.gis.db.models.fields.PointField(help_text='Geographic location of event (for map display)', null=True, srid=4326)),
('event_type', models.ForeignKey(help_text='Type of event', on_delete=django.db.models.deletion.CASCADE, to='exevada.eventtype')),
],
),
migrations.AddField(
model_name='attribution',
name='distribution',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exevada.distributiontype'),
),
migrations.AddField(
model_name='attribution',
name='event',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='attributions', to='exevada.event'),
),
migrations.AddField(
model_name='attribution',
name='location',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exevada.location'),
),
migrations.AddField(
model_name='attribution',
name='papers',
field=models.ManyToManyField(blank=True, help_text='Reviewed papers describing the attribution', to='exevada.JournalPaper'),
),
migrations.AddField(
model_name='attribution',
name='press_communication',
field=models.ManyToManyField(blank=True, help_text='Press communication related to the attribution', to='exevada.PressCommunication'),
),
migrations.AddField(
model_name='attribution',
name='statistical_method',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exevada.statisticalmethod'),
),
migrations.AddField(
model_name='attribution',
name='variable',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exevada.attributionvariable'),
),
]
| [
"g.vandenOord@gmail.com"
] | g.vandenOord@gmail.com |
a9e42f89d227ed48a392eb63989a8e4bac736bea | fc5eb35cb7ad53730cd385be2668a375b12fc95f | /feincms_template_content/admin.py | 6bdd6c0b7d5b59cd4f9b9aaad33250f519ede99e | [
"MIT"
] | permissive | RichardOfWard/feincms-template-content | c58aca52ece1e86e54c6b58244f29198e4f8e7a0 | b8dacb29b9e16e3a9c80093c2c2db3011f6bacb5 | refs/heads/master | 2021-01-23T16:35:25.600828 | 2014-05-06T14:48:32 | 2014-05-06T14:52:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | from feincms.admin.item_editor import FeinCMSInline
class BaseTemplateContentAdmin(FeinCMSInline):
def __init__(self, *args, **kwargs):
self.exclude = list(self.exclude or [])
if len(self.model.template_choices) <= 1:
self.exclude.append('template')
super(BaseTemplateContentAdmin,
self).__init__(*args, **kwargs)
| [
"richard@richard.ward.name"
] | richard@richard.ward.name |
05048d5f830df5ed0b5e43b5e6473d8c7b7d7246 | 0ff87e0a84dd8b9a198cebb59a5130fa7765b9dd | /tests/test_backtest.py | 606de235aebb5127e7941c9e643a0564ca164f4f | [
"Apache-2.0"
] | permissive | dxcv/moonshot | 470caf28cdb3bc5cd5864596e69875bf1810d05d | ca05aa347b061db05c0da221e80b125a5e9ddea1 | refs/heads/master | 2020-05-31T04:40:43.638058 | 2019-03-28T17:07:04 | 2019-03-28T17:07:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76,137 | py | # Copyright 2018 QuantRocket LLC - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To run: python3 -m unittest discover -s tests/ -p test_*.py -t . -v
import os
import unittest
from unittest.mock import patch
import glob
import pandas as pd
from moonshot import Moonshot
from moonshot.cache import TMP_DIR
class BacktestTestCase(unittest.TestCase):
def tearDown(self):
"""
Remove cached files.
"""
for file in glob.glob("{0}/moonshot*.pkl".format(TMP_DIR)):
os.remove(file)
def test_complain_if_prices_to_signals_not_implemented(self):
"""
Tests error handling when prices_to_signals hasn't been implemented.
"""
def mock_get_historical_prices(*args, **kwargs):
dt_idx = pd.DatetimeIndex(["2018-05-01","2018-05-02","2018-05-03"])
fields = ["Close","Volume"]
idx = pd.MultiIndex.from_product([fields, dt_idx], names=["Field", "Date"])
prices = pd.DataFrame(
{
12345: [
# Close
9,
11,
10.50,
# Volume
5000,
16000,
8800
],
23456: [
# Close
12,
11,
8.50,
# Volume
15000,
14000,
28800
],
},
index=idx
)
return prices
def mock_get_db_config(db):
return {
'vendor': 'ib',
'domain': 'main',
'bar_size': '1 day'
}
def mock_download_master_file(f, *args, **kwargs):
master_fields = ["Timezone", "Symbol", "SecType", "Currency", "PriceMagnifier", "Multiplier"]
securities = pd.DataFrame(
{
12345: [
"America/New_York",
"ABC",
"STK",
"USD",
None,
None
],
23456: [
"America/New_York",
"DEF",
"STK",
"USD",
None,
None,
]
},
index=master_fields
)
securities.columns.name = "ConId"
securities.T.to_csv(f, index=True, header=True)
f.seek(0)
with patch("moonshot.strategies.base.get_historical_prices", new=mock_get_historical_prices):
with patch("moonshot.strategies.base.download_master_file", new=mock_download_master_file):
with patch("moonshot.strategies.base.get_db_config", new=mock_get_db_config):
with self.assertRaises(NotImplementedError) as cm:
Moonshot().backtest()
self.assertIn("strategies must implement prices_to_signals", repr(cm.exception))
def test_basic_long_only_strategy(self):
"""
Tests that the resulting DataFrames are correct after running a basic
long-only strategy that largely relies on the default methods.
"""
class BuyBelow10(Moonshot):
"""
A basic test strategy that buys below 10.
"""
def prices_to_signals(self, prices):
signals = prices.loc["Close"] < 10
return signals.astype(int)
def mock_get_historical_prices(*args, **kwargs):
dt_idx = pd.DatetimeIndex(["2018-05-01","2018-05-02","2018-05-03", "2018-05-04"])
fields = ["Close","Volume"]
idx = pd.MultiIndex.from_product([fields, dt_idx], names=["Field", "Date"])
prices = pd.DataFrame(
{
12345: [
# Close
9,
11,
10.50,
9.99,
# Volume
5000,
16000,
8800,
9900
],
23456: [
# Close
9.89,
11,
8.50,
10.50,
# Volume
15000,
14000,
28800,
17000
],
},
index=idx
)
return prices
def mock_get_db_config(db):
return {
'vendor': 'ib',
'domain': 'main',
'bar_size': '1 day'
}
def mock_download_master_file(f, *args, **kwargs):
master_fields = ["Timezone", "Symbol", "SecType", "Currency", "PriceMagnifier", "Multiplier"]
securities = pd.DataFrame(
{
12345: [
"America/New_York",
"ABC",
"STK",
"USD",
None,
None
],
23456: [
"America/New_York",
"DEF",
"STK",
"USD",
None,
None,
]
},
index=master_fields
)
securities.columns.name = "ConId"
securities.T.to_csv(f, index=True, header=True)
f.seek(0)
with patch("moonshot.strategies.base.get_historical_prices", new=mock_get_historical_prices):
with patch("moonshot.strategies.base.download_master_file", new=mock_download_master_file):
with patch("moonshot.strategies.base.get_db_config", new=mock_get_db_config):
results = BuyBelow10().backtest()
self.assertSetEqual(
set(results.index.get_level_values("Field")),
{'Commission',
'AbsExposure',
'Signal',
'Return',
'Slippage',
'NetExposure',
'TotalHoldings',
'Turnover',
'AbsWeight',
'Weight'}
)
# replace nan with "nan" to allow equality comparisons
results = results.round(7)
results = results.where(results.notnull(), "nan")
signals = results.loc["Signal"].reset_index()
signals.loc[:, "Date"] = signals.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
signals.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
12345: [1.0,
0.0,
0.0,
1.0],
23456: [1.0,
0.0,
1.0,
0.0]}
)
weights = results.loc["Weight"].reset_index()
weights.loc[:, "Date"] = weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
weights.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
12345: [0.5,
0.0,
0.0,
1.0],
23456: [0.5,
0.0,
1.0,
0.0]}
)
abs_weights = results.loc["AbsWeight"].reset_index()
abs_weights.loc[:, "Date"] = abs_weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
abs_weights.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
12345: [0.5,
0.0,
0.0,
1.0],
23456: [0.5,
0.0,
1.0,
0.0]}
)
net_positions = results.loc["NetExposure"].reset_index()
net_positions.loc[:, "Date"] = net_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
net_positions.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
12345: ["nan",
0.5,
0.0,
0.0],
23456: ["nan",
0.5,
0.0,
1.0]}
)
abs_positions = results.loc["AbsExposure"].reset_index()
abs_positions.loc[:, "Date"] = abs_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
abs_positions.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
12345: ["nan",
0.5,
0.0,
0.0],
23456: ["nan",
0.5,
0.0,
1.0]}
)
total_holdings = results.loc["TotalHoldings"].reset_index()
total_holdings.loc[:, "Date"] = total_holdings.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
total_holdings.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
12345: [0,
1.0,
0,
0],
23456: [0,
1.0,
0,
1.0]}
)
turnover = results.loc["Turnover"].reset_index()
turnover.loc[:, "Date"] = turnover.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
turnover.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
12345: ["nan",
0.5,
0.5,
0.0],
23456: ["nan",
0.5,
0.5,
1.0]}
)
commissions = results.loc["Commission"].reset_index()
commissions.loc[:, "Date"] = commissions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
commissions.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
12345: [0.0,
0.0,
0.0,
0.0],
23456: [0.0,
0.0,
0.0,
0.0]}
)
slippage = results.loc["Slippage"].reset_index()
slippage.loc[:, "Date"] = slippage.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
slippage.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
12345: [0.0,
0.0,
0.0,
0.0],
23456: [0.0,
0.0,
0.0,
0.0]}
)
returns = results.loc["Return"]
returns = returns.reset_index()
returns.loc[:, "Date"] = returns.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertDictEqual(
returns.to_dict(orient="list"),
{'Date': [
'2018-05-01T00:00:00',
'2018-05-02T00:00:00',
'2018-05-03T00:00:00',
'2018-05-04T00:00:00'],
12345: [0.0,
0.0,
-0.0227273, # (10.50 - 11)/11 * 0.5
-0.0],
23456: [0.0,
0.0,
-0.1136364, # (8.50 - 11)/11 * 0.5
0.0]}
)
def test_basic_long_short_strategy(self):
    """
    Tests that the resulting DataFrames are correct after running a basic
    long-short strategy that largely relies on the default methods.
    """
    class BuyBelow10ShortAbove10(Moonshot):
        """
        A basic test strategy that buys below 10 and shorts above 10.
        """
        def prices_to_signals(self, prices):
            long_signals = prices.loc["Close"] <= 10
            short_signals = prices.loc["Close"] > 10
            # +1 where long, -1 where short
            signals = long_signals.astype(int).where(long_signals, -short_signals.astype(int))
            return signals

    def mock_get_historical_prices(*args, **kwargs):
        # Two conids over four days, Close and Volume fields
        dt_idx = pd.DatetimeIndex(["2018-05-01","2018-05-02","2018-05-03", "2018-05-04"])
        fields = ["Close","Volume"]
        idx = pd.MultiIndex.from_product([fields, dt_idx], names=["Field", "Date"])
        prices = pd.DataFrame(
            {
                12345: [
                    9, 11, 10.50, 9.99,        # Close
                    5000, 16000, 8800, 9900    # Volume
                ],
                23456: [
                    9.89, 11, 8.50, 10.50,     # Close
                    15000, 14000, 28800, 17000 # Volume
                ],
            },
            index=idx
        )
        return prices

    def mock_get_db_config(db):
        return {
            'vendor': 'ib',
            'domain': 'main',
            'bar_size': '1 day'
        }

    def mock_download_master_file(f, *args, **kwargs):
        # Minimal securities master: two US stocks in the same timezone
        master_fields = ["Timezone", "Symbol", "SecType", "Currency", "PriceMagnifier", "Multiplier"]
        securities = pd.DataFrame(
            {
                12345: ["America/New_York", "ABC", "STK", "USD", None, None],
                23456: ["America/New_York", "DEF", "STK", "USD", None, None],
            },
            index=master_fields
        )
        securities.columns.name = "ConId"
        securities.T.to_csv(f, index=True, header=True)
        f.seek(0)

    with patch("moonshot.strategies.base.get_historical_prices", new=mock_get_historical_prices):
        with patch("moonshot.strategies.base.download_master_file", new=mock_download_master_file):
            with patch("moonshot.strategies.base.get_db_config", new=mock_get_db_config):
                results = BuyBelow10ShortAbove10().backtest()

    self.assertSetEqual(
        set(results.index.get_level_values("Field")),
        {'Commission', 'AbsExposure', 'Signal', 'Return', 'Slippage',
         'NetExposure', 'TotalHoldings', 'Turnover', 'AbsWeight', 'Weight'}
    )

    # replace nan with "nan" to allow equality comparisons
    results = results.round(7)
    results = results.where(results.notnull(), "nan")

    signals = results.loc["Signal"].reset_index()
    signals.loc[:, "Date"] = signals.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        signals.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00', '2018-05-04T00:00:00'],
         12345: [1.0, -1.0, -1.0, 1.0],
         23456: [1.0, -1.0, 1.0, -1.0]}
    )

    weights = results.loc["Weight"].reset_index()
    weights.loc[:, "Date"] = weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        weights.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00', '2018-05-04T00:00:00'],
         12345: [0.5, -0.5, -0.5, 0.5],
         23456: [0.5, -0.5, 0.5, -0.5]}
    )

    abs_weights = results.loc["AbsWeight"].reset_index()
    abs_weights.loc[:, "Date"] = abs_weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        abs_weights.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00', '2018-05-04T00:00:00'],
         12345: [0.5, 0.5, 0.5, 0.5],
         23456: [0.5, 0.5, 0.5, 0.5]}
    )

    # positions lag signals by one day (default entry on next bar),
    # hence NaN on the first date
    net_positions = results.loc["NetExposure"].reset_index()
    net_positions.loc[:, "Date"] = net_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        net_positions.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00', '2018-05-04T00:00:00'],
         12345: ["nan", 0.5, -0.5, -0.5],
         23456: ["nan", 0.5, -0.5, 0.5]}
    )

    abs_positions = results.loc["AbsExposure"].reset_index()
    abs_positions.loc[:, "Date"] = abs_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        abs_positions.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00', '2018-05-04T00:00:00'],
         12345: ["nan", 0.5, 0.5, 0.5],
         23456: ["nan", 0.5, 0.5, 0.5]}
    )

    total_holdings = results.loc["TotalHoldings"].reset_index()
    total_holdings.loc[:, "Date"] = total_holdings.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        total_holdings.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00', '2018-05-04T00:00:00'],
         12345: [0, 1.0, 1.0, 1.0],
         23456: [0, 1.0, 1.0, 1.0]}
    )

    # turnover of 1.0 reflects flipping from long to short (0.5 out + 0.5 in)
    turnover = results.loc["Turnover"].reset_index()
    turnover.loc[:, "Date"] = turnover.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        turnover.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00', '2018-05-04T00:00:00'],
         12345: ["nan", 0.5, 1.0, 0.0],
         23456: ["nan", 0.5, 1.0, 1.0]}
    )

    # no commission/slippage classes configured, so both are zero
    commissions = results.loc["Commission"].reset_index()
    commissions.loc[:, "Date"] = commissions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        commissions.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00', '2018-05-04T00:00:00'],
         12345: [0.0, 0.0, 0.0, 0.0],
         23456: [0.0, 0.0, 0.0, 0.0]}
    )

    slippage = results.loc["Slippage"].reset_index()
    slippage.loc[:, "Date"] = slippage.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        slippage.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00', '2018-05-04T00:00:00'],
         12345: [0.0, 0.0, 0.0, 0.0],
         23456: [0.0, 0.0, 0.0, 0.0]}
    )

    returns = results.loc["Return"]
    returns = returns.reset_index()
    returns.loc[:, "Date"] = returns.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        returns.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00', '2018-05-04T00:00:00'],
         12345: [0.0,
                 0.0,
                 -0.0227273, # (10.50 - 11)/11 * 0.5
                 0.0242857], # (9.99 - 10.50)/10.50 * -0.5
         23456: [0.0,
                 0.0,
                 -0.1136364, # (8.50 - 11)/11 * 0.5
                 -0.1176471] # (10.50 - 8.50)/8.50 * -0.5
        }
    )
def test_long_short_strategy_override_methods(self):
    """
    Tests that the resulting DataFrames are correct after running a
    long-short strategy that overrides the major backtesting methods.
    """
    class BuyBelow10ShortAbove10Overnight(Moonshot):
        """
        A basic test strategy that buys below 10 and shorts above 10 and holds overnight.
        """
        def prices_to_signals(self, prices):
            # signal off the Open, not the Close
            long_signals = prices.loc["Open"] <= 10
            short_signals = prices.loc["Open"] > 10
            signals = long_signals.astype(int).where(long_signals, -short_signals.astype(int))
            return signals

        def signals_to_target_weights(self, signals, prices):
            # fixed 0.25 allocation per position
            weights = self.allocate_fixed_weights(signals, 0.25)
            return weights

        def target_weights_to_positions(self, weights, prices):
            # enter on close same day
            positions = weights.copy()
            return positions

        def positions_to_gross_returns(self, positions, prices):
            # hold on close till next day open
            closes = prices.loc["Close"]
            opens = prices.loc["Open"]
            pct_changes = (opens - closes.shift()) / closes.shift()
            gross_returns = pct_changes * positions.shift()
            return gross_returns

    def mock_get_historical_prices(*args, **kwargs):
        dt_idx = pd.DatetimeIndex(["2018-05-01","2018-05-02","2018-05-03", "2018-05-04"])
        fields = ["Close","Open","Volume"]
        idx = pd.MultiIndex.from_product([fields, dt_idx], names=["Field", "Date"])
        prices = pd.DataFrame(
            {
                12345: [
                    9.6, 10.45, 10.23, 8.67,   # Close
                    9, 11, 10.50, 9.99,        # Open
                    5000, 16000, 8800, 9900    # Volume
                ],
                23456: [
                    10.56, 12.01, 10.50, 9.80, # Close
                    9.89, 11, 8.50, 10.50,     # Open
                    15000, 14000, 28800, 17000 # Volume
                ],
            },
            index=idx
        )
        return prices

    def mock_get_db_config(db):
        return {
            'vendor': 'ib',
            'domain': 'main',
            'bar_size': '1 day'
        }

    def mock_download_master_file(f, *args, **kwargs):
        master_fields = ["Timezone", "Symbol", "SecType", "Currency", "PriceMagnifier", "Multiplier"]
        securities = pd.DataFrame(
            {
                12345: ["America/New_York", "ABC", "STK", "USD", None, None],
                23456: ["America/New_York", "DEF", "STK", "USD", None, None],
            },
            index=master_fields
        )
        securities.columns.name = "ConId"
        securities.T.to_csv(f, index=True, header=True)
        f.seek(0)

    with patch("moonshot.strategies.base.get_historical_prices", new=mock_get_historical_prices):
        with patch("moonshot.strategies.base.download_master_file", new=mock_download_master_file):
            with patch("moonshot.strategies.base.get_db_config", new=mock_get_db_config):
                results = BuyBelow10ShortAbove10Overnight().backtest()

    self.assertSetEqual(
        set(results.index.get_level_values("Field")),
        {'Commission', 'AbsExposure', 'Signal', 'Return', 'Slippage',
         'NetExposure', 'TotalHoldings', 'Turnover', 'AbsWeight', 'Weight'}
    )

    # replace nan with "nan" to allow equality comparisons
    results = results.round(7)
    results = results.where(results.notnull(), "nan")

    signals = results.loc["Signal"].reset_index()
    signals.loc[:, "Date"] = signals.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        signals.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00', '2018-05-04T00:00:00'],
         12345: [1.0, -1.0, -1.0, 1.0],
         23456: [1.0, -1.0, 1.0, -1.0]}
    )

    weights = results.loc["Weight"].reset_index()
    weights.loc[:, "Date"] = weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        weights.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00', '2018-05-04T00:00:00'],
         12345: [0.25, -0.25, -0.25, 0.25],
         23456: [0.25, -0.25, 0.25, -0.25]}
    )

    abs_weights = results.loc["AbsWeight"].reset_index()
    abs_weights.loc[:, "Date"] = abs_weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        abs_weights.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00', '2018-05-04T00:00:00'],
         12345: [0.25, 0.25, 0.25, 0.25],
         23456: [0.25, 0.25, 0.25, 0.25]}
    )

    # positions equal weights (same-day entry), so no NaN on the first date
    net_positions = results.loc["NetExposure"].reset_index()
    net_positions.loc[:, "Date"] = net_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        net_positions.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00', '2018-05-04T00:00:00'],
         12345: [0.25, -0.25, -0.25, 0.25],
         23456: [0.25, -0.25, 0.25, -0.25]}
    )

    abs_positions = results.loc["AbsExposure"].reset_index()
    abs_positions.loc[:, "Date"] = abs_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        abs_positions.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00', '2018-05-04T00:00:00'],
         12345: [0.25, 0.25, 0.25, 0.25],
         23456: [0.25, 0.25, 0.25, 0.25]}
    )

    total_holdings = results.loc["TotalHoldings"].reset_index()
    total_holdings.loc[:, "Date"] = total_holdings.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        total_holdings.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00', '2018-05-04T00:00:00'],
         12345: [1.0, 1.0, 1.0, 1.0],
         23456: [1.0, 1.0, 1.0, 1.0]}
    )

    turnover = results.loc["Turnover"].reset_index()
    turnover.loc[:, "Date"] = turnover.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        turnover.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00', '2018-05-04T00:00:00'],
         12345: ["nan", 0.5, 0.0, 0.5],
         23456: ["nan", 0.5, 0.5, 0.5]}
    )

    commissions = results.loc["Commission"].reset_index()
    commissions.loc[:, "Date"] = commissions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        commissions.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00', '2018-05-04T00:00:00'],
         12345: [0.0, 0.0, 0.0, 0.0],
         23456: [0.0, 0.0, 0.0, 0.0]}
    )

    slippage = results.loc["Slippage"].reset_index()
    slippage.loc[:, "Date"] = slippage.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        slippage.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00', '2018-05-04T00:00:00'],
         12345: [0.0, 0.0, 0.0, 0.0],
         23456: [0.0, 0.0, 0.0, 0.0]}
    )

    returns = results.loc["Return"]
    returns = returns.reset_index()
    returns.loc[:, "Date"] = returns.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        returns.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00', '2018-05-04T00:00:00'],
         12345: [0.0,
                 0.0364583,  # (11 - 9.6)/9.6 * 0.25
                 -0.0011962, # (10.50 - 10.45)/10.45 * -0.25
                 0.0058651], # (9.99 - 10.23)/10.23 * -0.25 (prior-day position was short)
         23456: [0.0,
                 0.0104167, # (11 - 10.56)/10.56 * 0.25
                 0.0730641, # (8.50 - 12.01)/12.01 * -0.25
                 0.0]       # (10.50 - 10.50)/10.50 * 0.25
        }
    )
def test_short_only_once_a_day_intraday_strategy(self):
    """
    Tests that the resulting DataFrames are correct after running a
    short-only intraday strategy.
    """
    class ShortAbove10Intraday(Moonshot):
        """
        A basic test strategy that shorts above 10 and holds intraday.
        """
        POSITIONS_CLOSED_DAILY = True  # positions are flattened at end of each day

        def prices_to_signals(self, prices):
            # signal off the 09:30 open price only
            morning_prices = prices.loc["Open"].xs("09:30:00", level="Time")
            short_signals = morning_prices > 10
            return -short_signals.astype(int)

        def signals_to_target_weights(self, signals, prices):
            weights = self.allocate_fixed_weights(signals, 0.25)
            return weights

        def target_weights_to_positions(self, weights, prices):
            # enter on same day
            positions = weights.copy()
            return positions

        def positions_to_gross_returns(self, positions, prices):
            # hold from the 09:30 close to the 15:30 close
            closes = prices.loc["Close"]
            entry_prices = closes.xs("09:30:00", level="Time")
            exit_prices = closes.xs("15:30:00", level="Time")
            pct_changes = (exit_prices - entry_prices) / entry_prices
            gross_returns = pct_changes * positions
            return gross_returns

    def mock_get_historical_prices(*args, **kwargs):
        # Three days x two intraday bars (09:30 and 15:30)
        dt_idx = pd.DatetimeIndex(["2018-05-01","2018-05-02","2018-05-03"])
        fields = ["Close","Open"]
        times = ["09:30:00", "15:30:00"]
        idx = pd.MultiIndex.from_product(
            [fields, dt_idx, times], names=["Field", "Date", "Time"])
        prices = pd.DataFrame(
            {
                12345: [
                    9.6, 10.45, 10.12, 15.45, 8.67, 12.30,  # Close
                    9.88, 10.34, 10.23, 16.45, 8.90, 11.30, # Open
                ],
                23456: [
                    10.56, 12.01, 10.50, 9.80, 13.40, 14.50, # Close
                    9.89, 11, 8.50, 10.50, 14.10, 15.60      # Open
                ],
            },
            index=idx
        )
        return prices

    def mock_get_db_config(db):
        # NOTE(review): intraday test but bar_size is '1 day' — confirm intentional
        return {
            'vendor': 'ib',
            'domain': 'main',
            'bar_size': '1 day'
        }

    def mock_download_master_file(f, *args, **kwargs):
        master_fields = ["Timezone", "Symbol", "SecType", "Currency", "PriceMagnifier", "Multiplier"]
        securities = pd.DataFrame(
            {
                12345: ["America/New_York", "ABC", "STK", "USD", None, None],
                23456: ["America/New_York", "DEF", "STK", "USD", None, None],
            },
            index=master_fields
        )
        securities.columns.name = "ConId"
        securities.T.to_csv(f, index=True, header=True)
        f.seek(0)

    with patch("moonshot.strategies.base.get_historical_prices", new=mock_get_historical_prices):
        with patch("moonshot.strategies.base.download_master_file", new=mock_download_master_file):
            with patch("moonshot.strategies.base.get_db_config", new=mock_get_db_config):
                results = ShortAbove10Intraday().backtest()

    self.assertSetEqual(
        set(results.index.get_level_values("Field")),
        {'Commission', 'AbsExposure', 'Signal', 'Return', 'Slippage',
         'NetExposure', 'TotalHoldings', 'Turnover', 'AbsWeight', 'Weight'}
    )

    # replace nan with "nan" to allow equality comparisons
    results = results.round(7)
    results = results.where(results.notnull(), "nan")

    signals = results.loc["Signal"].reset_index()
    signals.loc[:, "Date"] = signals.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        signals.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00'],
         12345: [0.0, -1.0, 0.0],
         23456: [0.0, 0.0, -1.0]}
    )

    weights = results.loc["Weight"].reset_index()
    weights.loc[:, "Date"] = weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        weights.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00'],
         12345: [0.0, -0.25, 0.0],
         23456: [0.0, 0.0, -0.25]}
    )

    abs_weights = results.loc["AbsWeight"].reset_index()
    abs_weights.loc[:, "Date"] = abs_weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        abs_weights.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00'],
         12345: [0.0, 0.25, 0.0],
         23456: [0.0, 0.0, 0.25]}
    )

    net_positions = results.loc["NetExposure"].reset_index()
    net_positions.loc[:, "Date"] = net_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        net_positions.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00'],
         12345: [0.0, -0.25, 0.0],
         23456: [0.0, 0.0, -0.25]}
    )

    abs_positions = results.loc["AbsExposure"].reset_index()
    abs_positions.loc[:, "Date"] = abs_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        abs_positions.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00'],
         12345: [0.0, 0.25, 0.0],
         23456: [0.0, 0.0, 0.25]}
    )

    total_holdings = results.loc["TotalHoldings"].reset_index()
    total_holdings.loc[:, "Date"] = total_holdings.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        total_holdings.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00'],
         12345: [0.0, 1.0, 0.0],
         23456: [0.0, 0.0, 1.0]}
    )

    turnover = results.loc["Turnover"].reset_index()
    turnover.loc[:, "Date"] = turnover.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        turnover.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00'],
         12345: [0.0, 0.5, 0.0],
         23456: [0.0, 0.0, 0.5]}
    )

    commissions = results.loc["Commission"].reset_index()
    commissions.loc[:, "Date"] = commissions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        commissions.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00'],
         12345: [0.0, 0.0, 0.0],
         23456: [0.0, 0.0, 0.0]}
    )

    slippage = results.loc["Slippage"].reset_index()
    slippage.loc[:, "Date"] = slippage.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        slippage.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00'],
         12345: [0.0, 0.0, 0.0],
         23456: [0.0, 0.0, 0.0]}
    )

    returns = results.loc["Return"]
    returns = returns.reset_index()
    returns.loc[:, "Date"] = returns.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        returns.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00'],
         12345: [0.0,
                 -0.13167, # (15.45 - 10.12)/10.12 * -0.25
                 0.0],
         23456: [0.0,
                 0.0,
                 -0.0205224] # (14.50 - 13.40)/13.40 * -0.25
        }
    )
def test_continuous_intraday_strategy(self):
    """
    Tests that the resulting DataFrames are correct after running a
    long-short continuous intraday strategy.
    """
    class BuyBelow10ShortAbove10ContIntraday(Moonshot):
        """
        A basic test strategy that buys below 10 and shorts above 10.
        """
        def prices_to_signals(self, prices):
            long_signals = prices.loc["Close"] <= 10
            short_signals = prices.loc["Close"] > 10
            # +1 where long, -1 where short, re-evaluated every bar
            signals = long_signals.astype(int).where(long_signals, -short_signals.astype(int))
            return signals

    def mock_get_historical_prices(*args, **kwargs):
        # Two days x three hourly bars per conid
        dt_idx = pd.DatetimeIndex(["2018-05-01","2018-05-02"])
        fields = ["Close"]
        times = ["10:00:00", "11:00:00", "12:00:00"]
        idx = pd.MultiIndex.from_product([fields, dt_idx, times], names=["Field", "Date", "Time"])
        prices = pd.DataFrame(
            {
                12345: [
                    9.6, 10.45, 10.12, 15.45, 8.67, 12.30,  # Close
                ],
                23456: [
                    10.56, 12.01, 10.50, 9.80, 13.40, 7.50, # Close
                ],
            },
            index=idx
        )
        return prices

    def mock_get_db_config(db):
        return {
            'vendor': 'ib',
            'domain': 'main',
            'bar_size': '1 hour'
        }

    def mock_download_master_file(f, *args, **kwargs):
        master_fields = ["Timezone", "Symbol", "SecType", "Currency", "PriceMagnifier", "Multiplier"]
        securities = pd.DataFrame(
            {
                12345: ["America/New_York", "ABC", "STK", "USD", None, None],
                23456: ["America/New_York", "DEF", "STK", "USD", None, None],
            },
            index=master_fields
        )
        securities.columns.name = "ConId"
        securities.T.to_csv(f, index=True, header=True)
        f.seek(0)

    with patch("moonshot.strategies.base.get_historical_prices", new=mock_get_historical_prices):
        with patch("moonshot.strategies.base.download_master_file", new=mock_download_master_file):
            with patch("moonshot.strategies.base.get_db_config", new=mock_get_db_config):
                results = BuyBelow10ShortAbove10ContIntraday().backtest()

    self.assertSetEqual(
        set(results.index.get_level_values("Field")),
        {'Commission', 'AbsExposure', 'Signal', 'Return', 'Slippage',
         'NetExposure', 'TotalHoldings', 'Turnover', 'AbsWeight', 'Weight'}
    )

    # replace nan with "nan" to allow equality comparisons
    results = results.round(7)
    results = results.where(results.notnull(), "nan")

    signals = results.loc["Signal"].reset_index()
    signals.loc[:, "Date"] = signals.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        signals.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-01T00:00:00', '2018-05-01T00:00:00',
                  '2018-05-02T00:00:00', '2018-05-02T00:00:00', '2018-05-02T00:00:00'],
         'Time': ['10:00:00', '11:00:00', '12:00:00',
                  '10:00:00', '11:00:00', '12:00:00'],
         12345: [1.0, -1.0, -1.0, -1.0, 1.0, -1.0],
         23456: [-1.0, -1.0, -1.0, 1.0, -1.0, 1.0]}
    )

    weights = results.loc["Weight"].reset_index()
    weights.loc[:, "Date"] = weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        weights.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-01T00:00:00', '2018-05-01T00:00:00',
                  '2018-05-02T00:00:00', '2018-05-02T00:00:00', '2018-05-02T00:00:00'],
         'Time': ['10:00:00', '11:00:00', '12:00:00',
                  '10:00:00', '11:00:00', '12:00:00'],
         12345: [0.5, -0.5, -0.5, -0.5, 0.5, -0.5],
         23456: [-0.5, -0.5, -0.5, 0.5, -0.5, 0.5]}
    )

    abs_weights = results.loc["AbsWeight"].reset_index()
    abs_weights.loc[:, "Date"] = abs_weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        abs_weights.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-01T00:00:00', '2018-05-01T00:00:00',
                  '2018-05-02T00:00:00', '2018-05-02T00:00:00', '2018-05-02T00:00:00'],
         'Time': ['10:00:00', '11:00:00', '12:00:00',
                  '10:00:00', '11:00:00', '12:00:00'],
         12345: [0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
         23456: [0.5, 0.5, 0.5, 0.5, 0.5, 0.5]}
    )

    # positions lag signals by one bar, hence NaN on the first bar
    net_positions = results.loc["NetExposure"].reset_index()
    net_positions.loc[:, "Date"] = net_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        net_positions.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-01T00:00:00', '2018-05-01T00:00:00',
                  '2018-05-02T00:00:00', '2018-05-02T00:00:00', '2018-05-02T00:00:00'],
         'Time': ['10:00:00', '11:00:00', '12:00:00',
                  '10:00:00', '11:00:00', '12:00:00'],
         12345: ['nan', 0.5, -0.5, -0.5, -0.5, 0.5],
         23456: ['nan', -0.5, -0.5, -0.5, 0.5, -0.5]}
    )

    abs_positions = results.loc["AbsExposure"].reset_index()
    abs_positions.loc[:, "Date"] = abs_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        abs_positions.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-01T00:00:00', '2018-05-01T00:00:00',
                  '2018-05-02T00:00:00', '2018-05-02T00:00:00', '2018-05-02T00:00:00'],
         'Time': ['10:00:00', '11:00:00', '12:00:00',
                  '10:00:00', '11:00:00', '12:00:00'],
         12345: ['nan', 0.5, 0.5, 0.5, 0.5, 0.5],
         23456: ['nan', 0.5, 0.5, 0.5, 0.5, 0.5]}
    )

    total_holdings = results.loc["TotalHoldings"].reset_index()
    total_holdings.loc[:, "Date"] = total_holdings.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        total_holdings.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-01T00:00:00', '2018-05-01T00:00:00',
                  '2018-05-02T00:00:00', '2018-05-02T00:00:00', '2018-05-02T00:00:00'],
         'Time': ['10:00:00', '11:00:00', '12:00:00',
                  '10:00:00', '11:00:00', '12:00:00'],
         12345: [0, 1.0, 1.0, 1.0, 1.0, 1.0],
         23456: [0, 1.0, 1.0, 1.0, 1.0, 1.0]}
    )

    # turnover of 1.0 reflects flipping direction (0.5 out + 0.5 in)
    turnover = results.loc["Turnover"].reset_index()
    turnover.loc[:, "Date"] = turnover.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        turnover.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-01T00:00:00', '2018-05-01T00:00:00',
                  '2018-05-02T00:00:00', '2018-05-02T00:00:00', '2018-05-02T00:00:00'],
         'Time': ['10:00:00', '11:00:00', '12:00:00',
                  '10:00:00', '11:00:00', '12:00:00'],
         12345: ['nan', 0.5, 1.0, 0.0, 0.0, 1.0],
         23456: ['nan', 0.5, 0.0, 0.0, 1.0, 1.0]}
    )

    commissions = results.loc["Commission"].reset_index()
    commissions.loc[:, "Date"] = commissions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        commissions.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-01T00:00:00', '2018-05-01T00:00:00',
                  '2018-05-02T00:00:00', '2018-05-02T00:00:00', '2018-05-02T00:00:00'],
         'Time': ['10:00:00', '11:00:00', '12:00:00',
                  '10:00:00', '11:00:00', '12:00:00'],
         12345: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
         23456: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]}
    )

    slippage = results.loc["Slippage"].reset_index()
    slippage.loc[:, "Date"] = slippage.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        slippage.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-01T00:00:00', '2018-05-01T00:00:00',
                  '2018-05-02T00:00:00', '2018-05-02T00:00:00', '2018-05-02T00:00:00'],
         'Time': ['10:00:00', '11:00:00', '12:00:00',
                  '10:00:00', '11:00:00', '12:00:00'],
         12345: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
         23456: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]}
    )

    returns = results.loc["Return"].reset_index()
    returns.loc[:, "Date"] = returns.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        returns.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-01T00:00:00', '2018-05-01T00:00:00',
                  '2018-05-02T00:00:00', '2018-05-02T00:00:00', '2018-05-02T00:00:00'],
         'Time': ['10:00:00', '11:00:00', '12:00:00',
                  '10:00:00', '11:00:00', '12:00:00'],
         12345: [0.0,
                 0.0,
                 -0.0157895, # (10.12-10.45)/10.45 * 0.5
                 -0.2633399, # (15.45-10.12)/10.12 * -0.5
                 0.2194175,  # (8.67-15.45)/15.45 * -0.5
                 -0.2093426  # (12.30-8.67)/8.67 * -0.5
                 ],
         23456: [0.0,
                 0.0,
                 0.0628643,  # (10.50-12.01)/12.01 * -0.5
                 0.0333333,  # (9.80-10.50)/10.50 * -0.5
                 -0.1836735, # (13.40-9.80)/9.80 * -0.5
                 -0.2201493  # (7.50-13.40)/13.40 * 0.5
                 ]}
    )
def test_pass_allocation(self):
    """
    Tests that the resulting DataFrames are correct after running a basic
    long-short strategy and passing an allocation.
    """
    class BuyBelow10ShortAbove10(Moonshot):
        """
        A basic test strategy that buys below 10 and shorts above 10.
        """
        def prices_to_signals(self, prices):
            long_signals = prices.loc["Close"] <= 10
            short_signals = prices.loc["Close"] > 10
            signals = long_signals.astype(int).where(long_signals, -short_signals.astype(int))
            return signals

    def mock_get_historical_prices(*args, **kwargs):
        dt_idx = pd.DatetimeIndex(["2018-05-01","2018-05-02","2018-05-03", "2018-05-04"])
        fields = ["Close","Volume"]
        idx = pd.MultiIndex.from_product([fields, dt_idx], names=["Field", "Date"])
        prices = pd.DataFrame(
            {
                12345: [
                    9, 11, 10.50, 9.99,        # Close
                    5000, 16000, 8800, 9900    # Volume
                ],
                23456: [
                    9.89, 11, 8.50, 10.50,     # Close
                    15000, 14000, 28800, 17000 # Volume
                ],
            },
            index=idx
        )
        return prices

    def mock_get_db_config(db):
        return {
            'vendor': 'ib',
            'domain': 'main',
            'bar_size': '1 day'
        }

    def mock_download_master_file(f, *args, **kwargs):
        master_fields = ["Timezone", "Symbol", "SecType", "Currency", "PriceMagnifier", "Multiplier"]
        securities = pd.DataFrame(
            {
                12345: ["America/New_York", "ABC", "STK", "USD", None, None],
                23456: ["America/New_York", "DEF", "STK", "USD", None, None],
            },
            index=master_fields
        )
        securities.columns.name = "ConId"
        securities.T.to_csv(f, index=True, header=True)
        f.seek(0)

    with patch("moonshot.strategies.base.get_historical_prices", new=mock_get_historical_prices):
        with patch("moonshot.strategies.base.download_master_file", new=mock_download_master_file):
            with patch("moonshot.strategies.base.get_db_config", new=mock_get_db_config):
                # allocation=3.0 scales weights/exposures/turnover/returns by 3x
                results = BuyBelow10ShortAbove10().backtest(allocation=3.0)

    self.assertSetEqual(
        set(results.index.get_level_values("Field")),
        {'Commission', 'AbsExposure', 'Signal', 'Return', 'Slippage',
         'NetExposure', 'TotalHoldings', 'Turnover', 'AbsWeight', 'Weight'}
    )

    # replace nan with "nan" to allow equality comparisons
    results = results.round(7)
    results = results.where(results.notnull(), "nan")

    signals = results.loc["Signal"].reset_index()
    signals.loc[:, "Date"] = signals.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        signals.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00', '2018-05-04T00:00:00'],
         12345: [1.0, -1.0, -1.0, 1.0],
         23456: [1.0, -1.0, 1.0, -1.0]}
    )

    # weights are 0.5 per position * 3.0 allocation = 1.5
    weights = results.loc["Weight"].reset_index()
    weights.loc[:, "Date"] = weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        weights.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00', '2018-05-04T00:00:00'],
         12345: [1.5, -1.5, -1.5, 1.5],
         23456: [1.5, -1.5, 1.5, -1.5]}
    )

    abs_weights = results.loc["AbsWeight"].reset_index()
    abs_weights.loc[:, "Date"] = abs_weights.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        abs_weights.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00', '2018-05-04T00:00:00'],
         12345: [1.5, 1.5, 1.5, 1.5],
         23456: [1.5, 1.5, 1.5, 1.5]}
    )

    net_positions = results.loc["NetExposure"].reset_index()
    net_positions.loc[:, "Date"] = net_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        net_positions.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00', '2018-05-04T00:00:00'],
         12345: ["nan", 1.5, -1.5, -1.5],
         23456: ["nan", 1.5, -1.5, 1.5]}
    )

    abs_positions = results.loc["AbsExposure"].reset_index()
    abs_positions.loc[:, "Date"] = abs_positions.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        abs_positions.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00', '2018-05-04T00:00:00'],
         12345: ["nan", 1.5, 1.5, 1.5],
         23456: ["nan", 1.5, 1.5, 1.5]}
    )

    # holding counts are unaffected by allocation
    total_holdings = results.loc["TotalHoldings"].reset_index()
    total_holdings.loc[:, "Date"] = total_holdings.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        total_holdings.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00', '2018-05-04T00:00:00'],
         12345: [0, 1.0, 1.0, 1.0],
         23456: [0, 1.0, 1.0, 1.0]}
    )

    turnover = results.loc["Turnover"].reset_index()
    turnover.loc[:, "Date"] = turnover.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        turnover.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00', '2018-05-04T00:00:00'],
         12345: ["nan", 1.5, 3.0, 0.0],
         23456: ["nan", 1.5, 3.0, 3.0]}
    )

    returns = results.loc["Return"]
    returns = returns.reset_index()
    returns.loc[:, "Date"] = returns.Date.dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    self.assertDictEqual(
        returns.to_dict(orient="list"),
        {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00',
                  '2018-05-03T00:00:00', '2018-05-04T00:00:00'],
         12345: [0.0,
                 0.0,
                 -0.0681818, # (10.50 - 11)/11 * 1.5
                 0.0728571], # (9.99 - 10.50)/10.50 * -1.5
         23456: [0.0,
                 0.0,
                 -0.3409091, # (8.50 - 11)/11 * 1.5
                 -0.3529412] # (10.50 - 8.50)/8.50 * -1.5
        }
    )
def test_label_conids(self):
    """
    Tests that the label_conids param causes symbols to be included in
    the resulting columns. For forex, symbol.currency should be used as
    the label.
    """
    class BuyBelow10(Moonshot):
        """
        A basic test strategy that buys below 10.
        """
        def prices_to_signals(self, prices):
            signals = prices.loc["Close"] < 10
            return signals.astype(int)

    def mock_get_historical_prices(*args, **kwargs):
        dt_idx = pd.DatetimeIndex(["2018-05-01","2018-05-02","2018-05-03", "2018-05-04"])
        fields = ["Close","Volume"]
        idx = pd.MultiIndex.from_product([fields, dt_idx], names=["Field", "Date"])
        prices = pd.DataFrame(
            {
                12345: [
                    9, 11, 10.50, 9.99,        # Close
                    5000, 16000, 8800, 9900    # Volume
                ],
                23456: [
                    9.89, 11, 8.50, 10.50,     # Close
                    15000, 14000, 28800, 17000 # Volume
                ],
            },
            index=idx
        )
        return prices

    def mock_get_db_config(db):
        return {
            'vendor': 'ib',
            'domain': 'main',
            'bar_size': '1 day'
        }

    def mock_download_master_file(f, *args, **kwargs):
        # One stock (labeled by symbol) and one forex pair
        # (labeled symbol.currency)
        master_fields = ["Timezone", "Symbol", "SecType", "Currency", "PriceMagnifier", "Multiplier"]
        securities = pd.DataFrame(
            {
                12345: ["America/New_York", "AAPL", "STK", "USD", None, None],
                23456: ["America/New_York", "EUR", "CASH", "JPY", None, None],
            },
            index=master_fields
        )
        securities.columns.name = "ConId"
        securities.T.to_csv(f, index=True, header=True)
        f.seek(0)

    # control: run without label_conids
    with patch("moonshot.strategies.base.get_historical_prices", new=mock_get_historical_prices):
        with patch("moonshot.strategies.base.download_master_file", new=mock_download_master_file):
            with patch("moonshot.strategies.base.get_db_config", new=mock_get_db_config):
                results = BuyBelow10().backtest()

    self.assertSetEqual(
        set(results.columns),
        {12345, 23456}
    )

    # experiment: run with label_conids
    with patch("moonshot.strategies.base.get_historical_prices", new=mock_get_historical_prices):
        with patch("moonshot.strategies.base.download_master_file", new=mock_download_master_file):
            with patch("moonshot.strategies.base.get_db_config", new=mock_get_db_config):
                results = BuyBelow10().backtest(label_conids=True)

    self.assertSetEqual(
        set(results.columns),
        {"AAPL(12345)", "EUR.JPY(23456)"}
    )
    def test_truncate_at_start_date(self):
        """
        Tests that the resulting DataFrames are truncated at the requested
        start date even if the data predates the start date due to lookback
        window.
        """
        class BuyBelow10(Moonshot):
            """
            A basic test strategy that buys below 10.
            """
            LOOKBACK_WINDOW = 10 # Due to mock, this isn't actually having any effect
            def prices_to_signals(self, prices):
                signals = prices.loc["Close"] < 10
                return signals.astype(int)
        # Price history intentionally begins 2018-05-01, i.e. before the
        # backtest start date requested below (2018-05-03).
        def mock_get_historical_prices(*args, **kwargs):
            dt_idx = pd.DatetimeIndex(["2018-05-01","2018-05-02","2018-05-03", "2018-05-04"])
            fields = ["Close","Volume"]
            idx = pd.MultiIndex.from_product([fields, dt_idx], names=["Field", "Date"])
            prices = pd.DataFrame(
                {
                    12345: [
                        # Close
                        9,
                        11,
                        10.50,
                        9.99,
                        # Volume
                        5000,
                        16000,
                        8800,
                        9900
                    ],
                    23456: [
                        # Close
                        9.89,
                        11,
                        8.50,
                        10.50,
                        # Volume
                        15000,
                        14000,
                        28800,
                        17000
                    ],
                },
                index=idx
            )
            return prices
        def mock_get_db_config(db):
            return {
                'vendor': 'ib',
                'domain': 'main',
                'bar_size': '1 day'
            }
        def mock_download_master_file(f, *args, **kwargs):
            master_fields = ["Timezone", "Symbol", "SecType", "Currency", "PriceMagnifier", "Multiplier"]
            securities = pd.DataFrame(
                {
                    12345: [
                        "America/New_York",
                        "ABC",
                        "STK",
                        "USD",
                        None,
                        None
                    ],
                    23456: [
                        "America/New_York",
                        "DEF",
                        "STK",
                        "USD",
                        None,
                        None,
                    ]
                },
                index=master_fields
            )
            securities.columns.name = "ConId"
            securities.T.to_csv(f, index=True, header=True)
            f.seek(0)
        with patch("moonshot.strategies.base.get_historical_prices", new=mock_get_historical_prices):
            with patch("moonshot.strategies.base.download_master_file", new=mock_download_master_file):
                with patch("moonshot.strategies.base.get_db_config", new=mock_get_db_config):
                    results = BuyBelow10().backtest(start_date="2018-05-03")
        self.assertSetEqual(
            set(results.index.get_level_values("Field")),
            {'Commission',
             'AbsExposure',
             'Signal',
             'Return',
             'Slippage',
             'NetExposure',
             'TotalHoldings',
             'Turnover',
             'AbsWeight',
             'Weight'}
        )
        # Rows dated before the requested start date must have been dropped.
        self.assertEqual(results.index.get_level_values("Date").min(), pd.Timestamp("2018-05-03"))
| [
"brian@quantrocket.com"
] | brian@quantrocket.com |
b21aab70f83a44383ba2584afdf1c8db013d0187 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/databoxedge/v20210201preview/get_role.py | 2b3f43979b4fd165e04ffde6de3dfaed95f863d9 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,584 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetRoleResult',
'AwaitableGetRoleResult',
'get_role',
'get_role_output',
]
warnings.warn("""Please use one of the variants: CloudEdgeManagementRole, IoTRole, KubernetesRole, MECRole.""", DeprecationWarning)
@pulumi.output_type
class GetRoleResult:
    """
    Compute role.
    """
    def __init__(__self__, id=None, kind=None, name=None, system_data=None, type=None):
        # Validate and store each output property. Every field must be a str
        # except system_data, which must be a dict; error messages match the
        # generated-SDK wording exactly.
        for prop, value, expected in (
                ("id", id, str),
                ("kind", kind, str),
                ("name", name, str),
                ("system_data", system_data, dict),
                ("type", type, str)):
            if value and not isinstance(value, expected):
                raise TypeError("Expected argument '%s' to be a %s" % (prop, expected.__name__))
            pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The path ID that uniquely identifies the object.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def kind(self) -> str:
        """
        Role type.
        """
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The object name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        Role configured on ASE resource
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The hierarchical type of the object.
        """
        return pulumi.get(self, "type")
class AwaitableGetRoleResult(GetRoleResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` marks this method as a generator function,
        # which is what makes instances awaitable; awaiting one immediately
        # completes with a plain GetRoleResult copy of this object's fields.
        if False:
            yield self
        return GetRoleResult(
            id=self.id,
            kind=self.kind,
            name=self.name,
            system_data=self.system_data,
            type=self.type)
def get_role(device_name: Optional[str] = None,
             name: Optional[str] = None,
             resource_group_name: Optional[str] = None,
             opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRoleResult:
    """
    Compute role.

    :param str device_name: The device name.
    :param str name: The role name.
    :param str resource_group_name: The resource group name.
    """
    # Deprecated entry point: the typed role variants should be used instead.
    pulumi.log.warn("""get_role is deprecated: Please use one of the variants: CloudEdgeManagementRole, IoTRole, KubernetesRole, MECRole.""")
    __args__ = dict()
    __args__['deviceName'] = device_name
    __args__['name'] = name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronously invoke the provider function and project the raw result
    # onto the awaitable wrapper type.
    __ret__ = pulumi.runtime.invoke('azure-native:databoxedge/v20210201preview:getRole', __args__, opts=opts, typ=GetRoleResult).value
    return AwaitableGetRoleResult(
        id=__ret__.id,
        kind=__ret__.kind,
        name=__ret__.name,
        system_data=__ret__.system_data,
        type=__ret__.type)
@_utilities.lift_output_func(get_role)
def get_role_output(device_name: Optional[pulumi.Input[str]] = None,
                    name: Optional[pulumi.Input[str]] = None,
                    resource_group_name: Optional[pulumi.Input[str]] = None,
                    opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetRoleResult]:
    """
    Compute role.

    :param str device_name: The device name.
    :param str name: The role name.
    :param str resource_group_name: The resource group name.
    """
    pulumi.log.warn("""get_role is deprecated: Please use one of the variants: CloudEdgeManagementRole, IoTRole, KubernetesRole, MECRole.""")
    # The Ellipsis body is intentional: lift_output_func wraps get_role to
    # provide the actual Output-returning implementation.
    ...
| [
"noreply@github.com"
] | bpkgoud.noreply@github.com |
b2890fee28b3469e99f5ae1c676d8500ba428280 | 48d1bdfe8ef88e9e24e26f05a07b61a220fd5663 | /tests/settings.py | d264de5677cb4acca69cc9729cd414a7b2c6905b | [
"MIT"
] | permissive | dejmail/django-data-wizard | b2680cf14564e4be3d74c5e63d17060665adfb8d | cfb4d00032c73d4b55abceb542b68563f3a79a05 | refs/heads/master | 2023-05-10T20:59:46.222978 | 2022-08-18T01:37:40 | 2022-08-18T01:37:40 | 278,087,179 | 0 | 0 | MIT | 2020-07-08T12:46:19 | 2020-07-08T12:46:19 | null | UTF-8 | Python | false | false | 2,248 | py | import os
# Which task backend and which app-combination variant the test suite runs with.
TEST_BACKEND = os.environ.get("TEST_BACKEND", "threading")
TEST_VARIANT = os.environ.get("TEST_VARIANT", "default")
WITH_WQDB = TEST_VARIANT == "wq.db"

SECRET_KEY = "1234"  # test-only value; never used in production

MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]

# Optional app groups, selected by TEST_VARIANT.
if TEST_VARIANT == "wq.db":
    WQ_APPS = (
        "wq.db.rest",
        "wq.db.rest.auth",
    )
else:
    WQ_APPS = tuple()
if TEST_VARIANT == "reversion":
    REVERSION_APPS = ("reversion",)
else:
    REVERSION_APPS = tuple()

INSTALLED_APPS = (
    (
        "django.contrib.contenttypes",
        "django.contrib.admin",
        "django.contrib.sessions",
        "django.contrib.staticfiles",
        "django.contrib.auth",
    )
    + WQ_APPS
    + REVERSION_APPS
    + (
        "data_wizard",
        "data_wizard.sources",
        "tests.data_app",
        "tests.naturalkey_app",
        "tests.eav_app",
        "tests.source_app",
    )
)

DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": "data_wizard_test.sqlite3",
    }
}

ROOT_URLCONF = "tests.urls"
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), "media")

# Celery needs a broker/result store only when it is the backend under test.
if TEST_BACKEND == "celery":
    CELERY_RESULT_BACKEND = BROKER_URL = "redis://localhost/0"
if TEST_VARIANT == "wq.db":
    from wq.db.default_settings import *  # noqa

DATA_WIZARD = {
    "BACKEND": f"data_wizard.backends.{TEST_BACKEND}",
}
STATIC_URL = "/static/"
DEBUG = True
| [
"andrew@wq.io"
] | andrew@wq.io |
02a3b3e63c958d1e43cecdd81f5fcc744ec2a602 | 8219120b00d2efcfe4a8ac698001c88032c66344 | /mysite/settings.py | 2c7cb1cc4f2906772f918a218264aa285a72aafc | [] | no_license | Potusujko/my-first-blog | 0ffc81dcf1970934e2be8c94fa2a9291709cab52 | 24b376675d2a56677b890f97119d2b415596aea9 | refs/heads/master | 2020-04-27T18:13:56.595832 | 2019-03-08T15:18:45 | 2019-03-08T15:18:45 | 174,560,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,211 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.13.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'mj-ghk4iq&&ie-yeu)j8=8_#6qweag(#zv$+$*csawt=q(kw0^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '<Потусуйся>.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static') | [
"potusujko@gmail.com"
] | potusujko@gmail.com |
32c070a67536348307110e67ace6f37c433a186e | 811f563a43b887da1e3710df9c38f8aa64633a0b | /manage.py | 37bdb1b6a10514844ead5f83de444b5ed8d26c13 | [] | no_license | harryphelps/larder-site | 6c81702f6850bb5e30ea6f0193c0e6c08f7f40f7 | 27a201eb796d403b646d9ef730062f203b11f7de | refs/heads/main | 2023-07-18T19:21:31.820643 | 2021-09-15T07:53:24 | 2021-09-15T07:53:24 | 374,751,383 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # NOTE(review): "larder-project" contains a hyphen, which is not a valid
    # Python package name — confirm this matches the actual settings package.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "larder-project.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch the CLI arguments (runserver, migrate, ...) to Django.
    execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
| [
"harry.phelps@octoenergy.com"
] | harry.phelps@octoenergy.com |
b4fea59b3b49975b22270fb41dea70e4b71350f3 | 3c4cba4eda8db2f88d3354e64abc8387bdd9e7ec | /xplan/__main__.py | 8e5f842355e84c6d7188b7af6c2eca7f048defaf | [] | no_license | shinhwagk/xplan | eb02ec715b9085c92e7fcd1276089d8b1706397c | adb5aa6079ba2f888cf8c9e6ed9081789acd3831 | refs/heads/master | 2021-07-15T11:05:07.542511 | 2020-05-18T03:12:55 | 2020-05-18T03:12:55 | 143,730,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | import argparse
from xplan.display_cursor import display_cursor

# CLI entry point: render the cached execution plan for one SQL cursor.
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-dsn', help="db name", required=True)
arg_parser.add_argument('-sql_id', help="inst", required=True)
arg_parser.add_argument('-child_number', help="data source", default=0)
arg_parser.add_argument('-print', action='store_true')

cli_args = arg_parser.parse_args()
display_cursor(cli_args.dsn, cli_args.sql_id, cli_args.child_number).print()
| [
"noreply@github.com"
] | shinhwagk.noreply@github.com |
b812cb1fd735b6b63f0512a927f216cb8cb0055e | 4abfecc0722111c00cb2b2046014811d9aa37bed | /app.51.intviewQuestion.py | c3b1bee2164dc88fb8ec50946aaefd4ecf996f3f | [] | no_license | yzcali/PycharmProjects_HelloWorld | f5439a5f936f21ee01f3dcf7f3aff33432dfec91 | 8177d86b7cdc34819327c71a84654c107e032e0f | refs/heads/main | 2023-01-24T21:57:40.075679 | 2020-12-06T22:20:23 | 2020-12-06T22:20:23 | 315,055,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | # question 1
# Write a function called fizz_buzz that takes a number.
# If the number is divisible by 3, it should return “Fizz”.
# If it is divisible by 5, it should return “Buzz”.
# If it is divisible by both 3 and 5, it should return “FizzBuzz”.
# Otherwise, it should return the same number.
def fizz_buzz(input):
    """Classic FizzBuzz: "Fizz" for multiples of 3, "Buzz" for multiples of 5,
    "FizzBuzz" for multiples of both, otherwise the number itself."""
    multiple_of_three = input % 3 == 0
    multiple_of_five = input % 5 == 0
    if multiple_of_three and multiple_of_five:
        return "FizzBuzz"
    if multiple_of_three:
        return "Fizz"
    if multiple_of_five:
        return "Buzz"
    return input
print(fizz_buzz(3)) # Fizz
print(fizz_buzz(5)) # Buzz
print(fizz_buzz(15)) # FizzBuzz
print(fizz_buzz(7)) # 7
| [
"yaziciali694@gmail.com"
] | yaziciali694@gmail.com |
d09afbd5633143ee0043841892906c2295c30e97 | 5edfdcc3f9fe97971a2472a3ebfe8023d5d0fefc | /api_recognition_code/base.py | 928237b2c58987601e6c4dc5ca5b3bcb4826ddcf | [] | no_license | developneed/developneed.github.io | 77613c29ba600cd25596e7362fdcb06f87c29d67 | 28301656af488bb8abb2e66feda7d22680f93f38 | refs/heads/master | 2022-12-07T07:23:07.264008 | 2020-08-31T13:20:45 | 2020-08-31T13:20:45 | 291,243,813 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | from abc import abstractmethod
from discussion.model.base import SOPost
class SOPostAPIRecognition:
    # Interface: subclasses detect API mentions in a Stack Overflow post.
    @abstractmethod
    def recognize(self, post: SOPost):
        # Abstract default: no APIs recognized.
        return set()
class SOPostAPILinker:
    # Base linker: maps a recognized API mention to a canonical API entry.
    # The default implementations are identity pass-throughs.
    def link_one(self, post: SOPost, api):
        return api
    def link_batch(self, post: SOPost, api):
        return api
| [
"noreply@github.com"
] | developneed.noreply@github.com |
3e457a379c73822f78cb65466214746ed00dbe11 | 7dd5e97066c15aafc3da0cf826c00df591e59342 | /adam_morrison/testFiles/genFakeData/portTest.py | 05d21eeea5f21a35ee2278d0174e464a60f578bc | [] | no_license | waggle-sensor/summer2017 | 47ac5ec21a44445bac848eb1405df2390f161482 | 4ee06ce6cc9fd9bcc3a3aba9540d37045132040b | refs/heads/master | 2021-01-01T20:37:40.583816 | 2018-03-08T19:45:27 | 2018-03-08T19:45:27 | 98,899,975 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | import socket;
# Probe a single TCP port on a fixed host (Python 2 script).
# connect_ex returns an error number instead of raising; 0 means connected.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex(('10.10.10.137',8000))
if result == 0:
    print "Port is open"
else:
    print "Port is not open"
| [
"adammorr@adammorr-desktop.lan"
] | adammorr@adammorr-desktop.lan |
c2006c7cd89aca0775e2f8862c0c7c80d2818081 | 6ac683881a26231638ae77261bc1c2e962ed81e6 | /message/models.py | 7155156b1bd11413e82722ed09d6d44072e0ac20 | [] | no_license | tenshiPure/chat | a3deea994d106b27bdcf7c8ac6bc21987b853601 | c10489b87814033ffbd4f50d0eebc3b9e1c364d4 | refs/heads/master | 2016-09-06T02:24:40.094709 | 2014-02-06T03:37:06 | 2014-02-06T03:37:06 | 16,363,786 | 0 | 0 | null | 2016-02-20T02:14:08 | 2014-01-30T00:12:25 | Python | UTF-8 | Python | false | false | 2,828 | py | #-*- coding: utf-8 -*-
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.forms import ModelForm
from django.db import models
from django.contrib.auth.models import User, Group
class UserForm(UserCreationForm):
    # Registration form: adds first/last name on top of the stock
    # username/password creation form.
    first_name = forms.CharField(max_length = 32)
    last_name = forms.CharField(max_length = 32)

    class Meta:
        model = User
        fields = ('first_name', 'last_name')
class Tag(models.Model):
    # A per-group label that can be attached to chat messages.
    body = models.CharField(max_length = 64)
    last_used = models.DateTimeField(auto_now = True)  # refreshed on every save
    group = models.ForeignKey(Group)

    def formatedDatetime(self):
        # Render last_used for display (minute precision).
        return self.last_used.strftime('%Y-%m-%d %H:%M')

    @staticmethod
    def tagging(tag, create, group):
        # Resolve the tag chosen on a form: `tag` is an existing primary key,
        # `create` is a free-text name to look up (or create) within `group`.
        # Returns the Tag instance, or None when neither input is supplied.
        if not tag and not create:
            return None
        if tag:
            result = Tag.objects.get(pk = tag)
        elif create:
            rows = Tag.objects.filter(body = create).filter(group = group)
            if rows:
                result = rows[0]
            else:
                # NOTE(review): not atomic — concurrent requests could create
                # duplicate tags; get_or_create would avoid that. TODO confirm.
                result = Tag(body = create, group = group)
                result.save()
        return result

    def __unicode__(self):
        return self.body
class TagForm(ModelForm):
    # Plain ModelForm over Tag (all model fields exposed).
    class Meta:
        model = Tag
class Message(models.Model):
    # A single chat message posted by a user into a group.
    body = models.TextField()
    datetime = models.DateTimeField(u'送信日時', auto_now = True)  # verbose_name: "sent at"
    ref = models.ForeignKey('self', null = True, blank = True)  # optional reply-to message
    tag = models.ForeignKey(Tag, null = True, blank = True)
    user = models.ForeignKey(User)
    group = models.ForeignKey(Group)

    def formatedDatetime(self):
        # Render the send time for display (minute precision).
        return self.datetime.strftime('%Y-%m-%d %H:%M')

    def __unicode__(self):
        # e.g. "alice - <first 40 characters of the body>"
        return '%s - %s' % (self.user.username, self.body[0:40])
class MessageForm(ModelForm):
    # Form for posting a message; user and group are assigned by the view.
    class Meta:
        model = Message
        exclude = ('user', 'group')

    def __init__(self, *args, **kwargs):
        # Accept an optional `group` kwarg; it must be removed before the
        # ModelForm constructor sees it.
        group = kwargs.get('group', False)
        if group:
            kwargs.pop('group')
        super(MessageForm, self).__init__(*args, **kwargs)
        self.fields['body'] = forms.CharField(
            label = '',
            widget = forms.Textarea(
                attrs = {
                    'class' : 'class_form_input',
                    'cols' : 80,
                    'rows' : 5
                }
            )
        )
        self.fields['tag_create'] = forms.CharField(
            label = '',
            required = False,
            widget = forms.TextInput(
                attrs = {
                    'class' : 'class_form_input'
                }
            )
        )
        # NOTE(review): the group-scoped querysets are commented out, so the
        # ref/tag dropdowns currently list items from every group despite the
        # `group` kwarg — confirm whether group filtering should be restored.
        self.fields['ref'] = forms.ModelChoiceField(
            # queryset = Message.objects.filter(group = group).order_by('-id'),
            queryset = Message.objects.all().order_by('-id'),
            label = '',
            required = False,
            widget = forms.Select(
                attrs = {
                    'class' : 'class_form_input'
                }
            )
        )
        self.fields['tag'] = forms.ModelChoiceField(
            # queryset = Tag.objects.filter(group = group).order_by('last_used'),
            queryset = Tag.objects.all().order_by('last_used'),
            label = '',
            required = False,
            widget = forms.Select(
                attrs = {
                    'class' : 'class_form_input'
                }
            )
        )
| [
"user.ryo@gmail.com"
] | user.ryo@gmail.com |
a52e9d7aed8270a9bfd6951842c62b2ae4453cac | 3892fdc152650c1abe32e4610e2936a364afa56f | /project2/auctions/models.py | 76eb1e8846608190c7bc4a2bf856e564ba1d5f36 | [] | no_license | JosephLimWeiJie/cs50w | 0af1fc4dd05a04790bcde94072f41c668319a161 | d004fa9cf72886f25959182c717f950e10583d9b | refs/heads/master | 2022-12-29T22:06:30.457421 | 2020-10-08T17:09:37 | 2020-10-08T17:09:37 | 281,036,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,069 | py | from django.contrib.auth.models import AbstractUser
from django.db import models
class User(AbstractUser):
    # Extends Django's stock user with auction bookkeeping.
    watchlist_counter = models.IntegerField(default=0)  # number of watchlisted listings
    has_won = models.BooleanField(default=False)  # True once the user has won an auction
    pass
class Listing(models.Model):
    # One auction listing. CATEGORY is a closed set of (stored value, label)
    # choices for the `category` field.
    CATEGORY = (
        ("Men's Wear", "Men's Wear"),
        ("Women's Apparel", "Women's Apparel"),
        ("Mobile & Gadgets", "Mobile & Gadgets"),
        ("Beauty & Personal Care", "Beauty & Personal Care"),
        ("Home Appliances", "Home Appliances"),
        ("Home & Living", "Home & Living"),
        ("Kids Fashion", "Kids Fashion"),
        ("Toys, Kids & Babies", "Toys, Kids & Babies"),
        ("Video Games", "Video Games"),
        ("Food & Beverages", "Food & Beverages"),
        ("Computers & Peripherals", "Computers & Peripherals"),
        ("Hobbies & Books", "Hobbies & Books"),
        ("Health & Wellness", "Health & Wellness"),
        ("Women's Bags", "Women's Bags"),
        ("Travel & Luggage", "Travel & Luggage"),
        ("Pet Food & Supplies", "Pet Food & Supplies"),
        ("Watches", "Watches"),
        ("Jewellery & Accessory", "Jewellery & Accessory"),
        ("Men's Shoes", "Men's Shoes"),
        ("Women's Shoes", "Women's Shoes"),
        ("Sports & Outdoors", "Sports & Outdoors"),
        ("Automotive", "Automotive"),
        ("Men's Bags", "Men's Bags"),
        ("Cameras & Drones", "Cameras & Drones"),
        ("Dining, Travel & Services", "Dining, Travel & Services"),
        ("Miscellaneous", "Miscellaneous"))
    title = models.CharField(max_length=64)
    # NOTE(review): "desrc" looks like a typo for "descr"/"description", but it
    # is the stored column name, so renaming would require a migration.
    desrc = models.TextField()
    image_url = models.URLField(blank=True)
    category = models.CharField(max_length=64, blank=True, choices=CATEGORY)
    date = models.DateField(auto_now_add=True)
    # NOTE(review): the numeric defaults (1, 2, 3) presumably reference fixture
    # user primary keys — verify they exist in every deployment.
    user = models.ForeignKey(
        User, on_delete=models.CASCADE, related_name="listing", default=1)
    bid_winner = models.ForeignKey(
        User, on_delete=models.CASCADE, related_name="winnerlisting",
        default=2, blank=True, null=True)
    is_active = models.BooleanField(default=True)
    is_on_watchlist = models.BooleanField(default=False)
    watchlist_listing = models.ForeignKey(
        User, on_delete=models.CASCADE, related_name="watchlist_listing",
        blank=True, null=True, default=3)

    def __str__(self):
        return f"{self.title}"
class Bid(models.Model):
    # A single bid a user has placed on a listing.
    listing = models.ForeignKey(
        Listing, on_delete=models.CASCADE, related_name="bid",
        default=1)
    amount = models.FloatField(null=True)  # bid amount; may be unset (null)
    user = models.ForeignKey(
        User, on_delete=models.CASCADE, related_name="bid", default=1
    )

    def __str__(self):
        # e.g. "$ 12.50"
        return "%s %0.2f" % ("$", self.amount)
class Comment(models.Model):
    # A user comment left on a listing.
    listing = models.ForeignKey(
        Listing, on_delete=models.CASCADE, related_name="comment",
        default=None)
    comment = models.TextField()
    user = models.ForeignKey(
        User, on_delete=models.CASCADE, related_name="comment", default=None)

    def __str__(self):
        return f"{self.user}: {self.comment}"
| [
"59989652+JosephLimWeiJie@users.noreply.github.com"
] | 59989652+JosephLimWeiJie@users.noreply.github.com |
79bef7120a9adbb9caf3a1a388176f20f58daf90 | e6572ae97327a7814a9ddd772aa2b8fc9ec0483f | /cogs/Makeemoji.py | 4a0ca81ab2254ef9adec22ddf86b6a5e79bf5a1b | [
"MIT"
] | permissive | izxxr/discord-emoji-stealer | be188010e2075a1ca82502bda97f75da786d196a | 5e34b7edac0f14cae21de2bd43c573cee0f7631f | refs/heads/main | 2023-05-06T16:18:44.278180 | 2021-05-10T09:10:41 | 2021-05-10T09:10:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,633 | py | from discord.ext import commands
import requests
import discord
class Makeemoji(commands.Cog):
    """Cog that creates a guild emoji from a URL or from a message attachment."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def makeemoji(self, ctx, name, url=None):
        """Create an emoji called *name* from *url*, or from the attached image/gif.

        When *url* is given, the attachment branch is skipped entirely.
        """
        if url:
            file_request = requests.get(url)
            try:
                emoji = await ctx.guild.create_custom_emoji(image=file_request.content, name=name)
                await ctx.send(f"Emoji <:{emoji.name}:{emoji.id}> was created!")
            except discord.InvalidArgument:
                await ctx.send("You must attach an **image** or a **gif** for the emoji, not a different type of the file.")
            # Bug fix: previously only the error path returned, so a successful
            # URL upload fell through to the attachment branch and wrongly
            # reported a missing attachment.
            return
        try:
            attachment_url = ctx.message.attachments[0].url
        except IndexError:
            # No attachment on the invoking message and no URL supplied.
            await ctx.send("You must attach an image or a gif for the emoji.")
            return
        file_request = requests.get(attachment_url)
        try:
            emoji = await ctx.guild.create_custom_emoji(image=file_request.content, name=name)
        except discord.InvalidArgument:
            await ctx.send("You must attach an **image** or a **gif** for the emoji, not a different type of the file.")
            return
        await ctx.send(f"Emoji <:{emoji.name}:{emoji.id}> was created!")

    @makeemoji.error
    async def makeemoji_error(self, ctx, error):
        """Report a friendly message when the required *name* argument is missing."""
        if isinstance(error, commands.MissingRequiredArgument):
            await ctx.send("Specify a name for the emoji. Example: `makeemoji emoji1`")
            return
        raise error
def setup(bot):
    # discord.py extension hook: register this cog when the extension loads.
    bot.add_cog(Makeemoji(bot))
| [
"noreply@github.com"
] | izxxr.noreply@github.com |
b00c8aa0a7d46dd26c6d293c5f9030b06975e0e7 | d532a3bf7a7c55fa6eeeaf9d959971f5cb601888 | /WebSocket_Client_Server/aes.py | 0462f78d90b199938598b2c27332dcedae703673 | [] | no_license | Kalgrand/BSI_LAB10 | 8f26b4188c973e214ec08dca5452a23fc11f396f | 74474b32db384a8b18fe7b6b407f3901ffe8592f | refs/heads/master | 2023-02-18T22:19:14.856030 | 2021-01-19T10:04:05 | 2021-01-19T10:04:05 | 330,397,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,071 | py | """
Szyfrowanie tekstu przy pomocy algorytmu AES.
Autor: Maciej Milewski
Źródło algorytmu szyfrującego: https://pypi.org/project/pycrypto/
"""
from Crypto.Cipher import AES
import base64
from utils import pad, unpad, fill_to_block
def aes_encode(message, key):
    """ Encrypting message using AES in CBC Mode """
    # NOTE(review): the IV is a hard-coded constant, so identical plaintexts
    # always produce identical ciphertexts — weak for real use, but it is
    # required for compatibility with aes_decode.
    key_bytes = bytes(key, encoding='utf8')
    iv_bytes = bytes("mm88!@#$%^dsmdms", encoding='utf8')
    cipher = AES.new(key_bytes, AES.MODE_CBC, iv_bytes)
    padded_plaintext = bytes(pad(message), encoding='utf8')
    ciphertext = cipher.encrypt(padded_plaintext)
    return base64.encodebytes(ciphertext).decode()
def aes_decode(message, key):
""" Decrypting message and removing padding from message """
key = bytes(key, encoding='utf8')
iv = "mm88!@#$%^dsmdms"
iv = bytes(iv, encoding='utf8')
cipher = AES.new(key, AES.MODE_CBC, iv)
message = base64.b64decode(message.encode())
decrypted_text = cipher.decrypt(message).decode()
decrypted_code = decrypted_text.rstrip('\0')
return unpad(decrypted_code) | [
"micdeg96@gmail.com"
] | micdeg96@gmail.com |
3bb2a6a4dfa49763c5e062343c951512a3089e45 | 02d82acce198e1a4eb69a4d0cb1586a4c8b264a5 | /motor_controller/motor.py | 0e0db988a5d984faf312b67ecac31b3d29f1efe2 | [] | no_license | asuar078/rpi_rover | 3b70b1e90e049bb82a0d6253e23014c9b893f93f | 89db9812dc38059bb83be1a8d248c70a374873a1 | refs/heads/master | 2021-04-15T10:05:18.252797 | 2018-06-19T16:27:42 | 2018-06-19T16:27:42 | 126,905,785 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py |
class Motor:
    """Pin assignments for one motor channel.

    Class-level defaults of 0 mean "not wired"; the constructor shadows them
    with per-instance values under the same names.
    """

    pwm_pin = 0
    direction_pin = 0
    current_pin = 0
    enc_a_pin = 0
    enc_b_pin = 0

    def __init__(self, pwm_pin, direction_pin, current_pin=0, enc_a_pin=0, enc_b_pin=0):
        # Copy every pin argument onto the instance under its own name.
        pins = (("pwm_pin", pwm_pin), ("direction_pin", direction_pin),
                ("current_pin", current_pin), ("enc_a_pin", enc_a_pin),
                ("enc_b_pin", enc_b_pin))
        for attr, value in pins:
            setattr(self, attr, value)
| [
"arian.suarez001@gmail.com"
] | arian.suarez001@gmail.com |
6ce1c62e5908770f961a6f42807d4ed6711f56ab | 3cdc345a9cf34f028ce34e2a5d01a86e77b88a90 | /gevent/greentest/test_threading_2.py | 11413b663a6a6f6076906c3017ec3ffecbebb117 | [
"MIT"
] | permissive | WeilerWebServices/Reddit | 459ace5af417d7bd8b4552a3068ff64b3986a579 | b300835f5c78f83a89931cf2a1c4e9150ddb9a9c | refs/heads/master | 2023-01-01T18:25:00.196266 | 2020-10-21T12:27:25 | 2020-10-21T12:27:25 | 305,972,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,520 | py | # testing gevent's Event, Lock, RLock, Semaphore, BoundedSemaphore with standard test_threading
from __future__ import print_function
from six import xrange
setup_ = '''from gevent import monkey; monkey.patch_all()
from gevent.event import Event
from gevent.lock import RLock, Semaphore, BoundedSemaphore
from gevent.thread import allocate_lock as Lock
import threading
threading.Event = Event
threading.Lock = Lock
threading.RLock = RLock
threading.Semaphore = Semaphore
threading.BoundedSemaphore = BoundedSemaphore
'''
exec(setup_)
setup_3 = '\n'.join(' %s' % line for line in setup_.split('\n'))
setup_4 = '\n'.join(' %s' % line for line in setup_.split('\n'))
try:
from test import support
from test.support import verbose
except ImportError:
from test import test_support as support
from test.test_support import verbose
import random
import re
import sys
import threading
try:
import thread
except ImportError:
import _thread as thread
import time
import unittest
import weakref
import lock_tests
# A trivial mutable counter.
class Counter(object):
    """A trivial mutable counter starting at zero."""

    def __init__(self):
        self.value = 0

    def inc(self):
        """Increase the count by one."""
        self.value = self.value + 1

    def dec(self):
        """Decrease the count by one."""
        self.value = self.value - 1

    def get(self):
        """Return the current count."""
        return self.value
class TestThread(threading.Thread):
    # Worker used by ThreadTests.test_various_ops: sleeps briefly while
    # holding a semaphore that caps concurrency, checking count invariants.
    def __init__(self, name, testcase, sema, mutex, nrunning):
        threading.Thread.__init__(self, name=name)
        self.testcase = testcase
        self.sema = sema          # bounds how many tasks run at once
        self.mutex = mutex        # guards updates to nrunning
        self.nrunning = nrunning  # shared Counter of currently-running tasks
    def run(self):
        delay = random.random() / 10000.0
        if verbose:
            print('task %s will run for %.1f usec' % (
                self.name, delay * 1e6))
        with self.sema:
            with self.mutex:
                self.nrunning.inc()
                if verbose:
                    print(self.nrunning.get(), 'tasks are running')
                # The semaphore is created with value=3, so no more than 3
                # tasks may be inside this block at any time.
                self.testcase.assert_(self.nrunning.get() <= 3)
            time.sleep(delay)
            if verbose:
                print('task', self.name, 'done')
            with self.mutex:
                self.nrunning.dec()
                self.testcase.assert_(self.nrunning.get() >= 0)
                if verbose:
                    print('%s is finished. %d tasks are running' % (
                        self.name, self.nrunning.get()))
class ThreadTests(unittest.TestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
    def test_various_ops(self):
        # This takes about n/3 seconds to run (about n/3 clumps of tasks,
        # times about 1 second per clump).
        NUMTASKS = 10
        # no more than 3 of the 10 can run at once
        sema = threading.BoundedSemaphore(value=3)
        mutex = threading.RLock()
        numrunning = Counter()
        threads = []
        for i in range(NUMTASKS):
            t = TestThread("<thread %d>" % i, self, sema, mutex, numrunning)
            threads.append(t)
            t.daemon = False # Under PYPY we get daemon by default?
            if hasattr(t, 'ident'):
                # ident is unset until the thread is started.
                self.failUnlessEqual(t.ident, None)
            self.assertFalse(t.daemon)
            self.assert_(re.match(r'<TestThread\(.*, initial\)>', repr(t)))
            t.start()
        if verbose:
            print('waiting for all tasks to complete')
        for t in threads:
            t.join(NUMTASKS)
            self.assert_(not t.is_alive())
            if hasattr(t, 'ident'):
                self.failIfEqual(t.ident, 0)
                self.assertFalse(t.ident is None)
            self.assert_(re.match(r'<TestThread\(.*, \w+ -?\d+\)>', repr(t)))
        if verbose:
            print('all tasks done')
        # Every task must have decremented the running counter back down.
        self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads,
# as must the repr and str.
t = threading.currentThread()
self.assertFalse(t.ident is None)
str(t)
repr(t)
def f():
t = threading.currentThread()
ident.append(t.ident)
str(t)
repr(t)
done.set()
done = threading.Event()
ident = []
thread.start_new_thread(f, ())
done.wait()
self.assertFalse(ident[0] is None)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256kB thread stack size...')
try:
threading.stack_size(262144)
except thread.error:
if verbose:
print('platform does not support changing thread stack size')
return
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1MB thread stack size...')
try:
threading.stack_size(0x100000)
except thread.error:
if verbose:
print('platform does not support changing thread stack size')
return
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assert_(tid in threading._active)
self.assert_(isinstance(threading._active[tid],
threading._DummyThread))
del threading._active[tid]
# in gevent, we actually clean up threading._active, but it's not happended there yet
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def SKIP_test_PyThreadState_SetAsyncExc(self):
try:
import ctypes
except ImportError:
if verbose:
print("test_PyThreadState_SetAsyncExc can't import ctypes")
return # can't do anything
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = thread.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
worker_started.wait()
if verbose:
print(" verifying worker hasn't exited")
self.assert_(not t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=10)
self.assert_(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise thread.error()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(thread.error, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_runnning_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
try:
import ctypes
getattr(ctypes, 'pythonapi') # not available on PyPy
except (ImportError,AttributeError):
if verbose:
print("test_finalize_with_runnning_thread can't import ctypes")
return # can't do anything
del ctypes # pyflakes fix
import subprocess
rc = subprocess.call([sys.executable, "-c", """if 1:
%s
import ctypes, sys, time
try:
import thread
except ImportError:
import _thread as thread # Py3
# This lock is used as a simple event variable.
ready = thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""" % setup_3])
self.assertEqual(rc, 42)
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
import subprocess
p = subprocess.Popen([sys.executable, "-c", """if 1:
%s
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is: %%r" %% sleep)
threading.Thread(target=child).start()
raise SystemExit
""" % setup_4],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
stdout = stdout.strip()
stdout = stdout.decode('utf-8')
stderr = stderr.decode('utf-8')
assert re.match('^Woke up, sleep function is: <.*?sleep.*?>$', stdout), repr(stdout)
stderr = re.sub(r"^\[\d+ refs\]", "", stderr, re.MULTILINE).strip()
self.assertEqual(stderr, "")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getcheckinterval()
try:
for i in xrange(1, 100):
# Try a couple times at each thread-switching interval
# to get more interleavings.
sys.setcheckinterval(i // 5)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertFalse(t in l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setcheckinterval(old_interval)
if not hasattr(sys, 'pypy_version_info'):
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another': self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertEquals(None, weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertEquals(None, weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
class ThreadJoinOnShutdown(unittest.TestCase):
    """Interpreter shutdown must join outstanding non-daemon threads."""
    def _run_and_join(self, script):
        # Run `script` (appended to a common prologue defining joiningfunc)
        # in a subprocess and verify both the main program and the joining
        # thread print their completion markers.
        script = """if 1:
            %s
            import sys, os, time, threading
            # a thread, which waits for the main program to terminate
            def joiningfunc(mainthread):
                mainthread.join()
                print('end of thread')
        \n""" % setup_3 + script
        import subprocess
        p = subprocess.Popen([sys.executable, "-c", script], stdout=subprocess.PIPE)
        rc = p.wait()
        # Normalize Windows line endings before comparing.
        data = p.stdout.read().replace(b'\r', b'')
        self.assertEqual(data, b"end of main\nend of thread\n")
        self.failIf(rc == 2, b"interpreter was blocked")
        self.failUnless(rc == 0, b"Unexpected error")
    def test_1_join_on_shutdown(self):
        # The usual case: on exit, wait for a non-daemon thread
        script = """if 1:
            import os
            t = threading.Thread(target=joiningfunc,
                                 args=(threading.current_thread(),))
            t.start()
            time.sleep(0.1)
            print('end of main')
            """
        self._run_and_join(script)
    def test_2_join_in_forked_process(self):
        # Like the test above, but from a forked interpreter
        import os
        if not hasattr(os, 'fork'):
            return
        script = """if 1:
            childpid = os.fork()
            if childpid != 0:
                os.waitpid(childpid, 0)
                sys.exit(0)
            t = threading.Thread(target=joiningfunc,
                                 args=(threading.current_thread(),))
            t.start()
            print('end of main')
            """
        self._run_and_join(script)
    def test_3_join_in_forked_from_thread(self):
        # Like the test above, but fork() was called from a worker thread
        # In the forked process, the main Thread object must be marked as stopped.
        import os
        if not hasattr(os, 'fork'):
            return
        # Skip platforms with known problems forking from a worker thread.
        # See http://bugs.python.org/issue3863.
        # skip disable because I think the bug shouldn't apply to gevent -- denis
        #if sys.platform in ('freebsd4', 'freebsd5', 'freebsd6', 'os2emx'):
        # print(('Skipping test_3_join_in_forked_from_thread'
        # ' due to known OS bugs on'), sys.platform, file=sys.stderr)
        # return
        script = """if 1:
            main_thread = threading.current_thread()
            def worker():
                childpid = os.fork()
                if childpid != 0:
                    os.waitpid(childpid, 0)
                    sys.exit(0)
                t = threading.Thread(target=joiningfunc,
                                     args=(main_thread,))
                print('end of main')
                t.start()
                t.join() # Should not block: main_thread is already stopped
            w = threading.Thread(target=worker)
            w.start()
            """
        self._run_and_join(script)
class ThreadingExceptionTests(unittest.TestCase):
    """Misusing the threading API must raise RuntimeError."""

    # A RuntimeError should be raised if Thread.start() is called
    # multiple times.
    def test_start_thread_again(self):
        worker = threading.Thread()
        worker.start()
        self.assertRaises(RuntimeError, worker.start)

    def test_joining_current_thread(self):
        # A thread may not join itself.
        self.assertRaises(RuntimeError, threading.current_thread().join)

    def test_joining_inactive_thread(self):
        # A thread that was never started cannot be joined.
        self.assertRaises(RuntimeError, threading.Thread().join)

    def test_daemonize_active_thread(self):
        # The daemon flag may only be set before start().
        worker = threading.Thread()
        worker.start()
        self.assertRaises(RuntimeError, setattr, worker, "daemon", True)
# Concrete instantiations of the generic lock_tests suites for the
# synchronization primitives exported by this threading module.
class LockTests(lock_tests.LockTests):
    locktype = staticmethod(threading.Lock)
class RLockTests(lock_tests.RLockTests):
    locktype = staticmethod(threading.RLock)
class EventTests(lock_tests.EventTests):
    eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
    # A Condition uses an RLock by default and exports its API.
    locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
    condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
    semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
    semtype = staticmethod(threading.BoundedSemaphore)
def main():
    """Run every test class in this module via test.support."""
    test_classes = (
        LockTests,
        RLockTests,
        EventTests,
        ConditionAsRLockTests,
        ConditionTests,
        SemaphoreTests,
        BoundedSemaphoreTests,
        ThreadTests,
        ThreadJoinOnShutdown,
        ThreadingExceptionTests,
    )
    support.run_unittest(*test_classes)
if __name__ == "__main__":
    main()
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
476c871aec7380602d4d54db0bccf951f511c1ea | f7098120eaff59448debbfe2191512c9a6a9c49d | /greenhouse/settings.py | 435c59dff90ac3051102bf804048ae08b4eeea5e | [] | no_license | case112/smart-greenhouse-web | 9cacb3e05d2ebc09a0ab99d04517002c0b2381be | cde35f4a59ffebc6bee4deaaf7ac4e11e1f88463 | refs/heads/master | 2023-05-28T15:56:44.618118 | 2022-04-24T10:12:26 | 2022-04-24T10:12:26 | 253,299,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,007 | py | import os
from decouple import config
import dj_database_url
import dotenv
import django_heroku
# Project root: two directories above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
dotenv_file = os.path.join(BASE_DIR, ".env")
# Load a local .env file into the environment when present (dev convenience).
if os.path.isfile(dotenv_file):
    dotenv.load_dotenv(dotenv_file)
# Secret key must be supplied via the environment / .env file.
SECRET_KEY = config('SECRET_KEY')
# NOTE(review): DEBUG is hard-coded to True, overriding the commented-out
# env-driven value -- this must be False in production.
DEBUG = True #config('DEBUG', default=False, cast=bool)
DEBUG_PROPAGATE_EXCEPTIONS = True
ALLOWED_HOSTS = ['localhost', '127.0.0.1', '.herokuapp.com']
# Database is configured from the DATABASE_URL environment variable
# (Heroku-style) via dj-database-url; connections persist up to 10 minutes.
DATABASES = {}
DATABASES['default'] = dj_database_url.config(conn_max_age=600)
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'data',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    # WhiteNoise serves static files directly from the app process.
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'greenhouse.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'greenhouse.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
# NOTE(review): timezone support is disabled, so datetimes are naive.
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static'),
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles') #Where collectstatic collects files
# Let django-heroku patch settings (database, static files, logging) for Heroku.
django_heroku.settings(locals())
# NOTE(review): presumably removes the 'sslmode' option that django_heroku
# adds because the local database rejects it; this raises KeyError when
# OPTIONS has no 'sslmode' key -- confirm.
del DATABASES['default']['OPTIONS']['sslmode']
| [
"case112@users.noreply.github.com"
] | case112@users.noreply.github.com |
a0a87ff1bb1928a162d29d5c0b92860320bbc4cb | 5da5473ff3026165a47f98744bac82903cf008e0 | /packages/google-cloud-dialogflow-cx/google/cloud/dialogflowcx_v3/services/environments/transports/base.py | 2954e4b53ef852369ec2fb2c2b6ac7f8f8469fda | [
"Apache-2.0"
] | permissive | googleapis/google-cloud-python | ed61a5f03a476ab6053870f4da7bc5534e25558b | 93c4e63408c65129422f65217325f4e7d41f7edf | refs/heads/main | 2023-09-04T09:09:07.852632 | 2023-08-31T22:49:26 | 2023-08-31T22:49:26 | 16,316,451 | 2,792 | 917 | Apache-2.0 | 2023-09-14T21:45:18 | 2014-01-28T15:51:47 | Python | UTF-8 | Python | false | false | 11,551 | py | # -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1, operations_v1
from google.api_core import retry as retries
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.cloud.location import locations_pb2 # type: ignore
from google.longrunning import operations_pb2 # type: ignore
from google.oauth2 import service_account # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from google.cloud.dialogflowcx_v3 import gapic_version as package_version
from google.cloud.dialogflowcx_v3.types import environment
from google.cloud.dialogflowcx_v3.types import environment as gcdc_environment
# Default client metadata reported with API requests, stamped with this
# package's version.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=package_version.__version__
)
class EnvironmentsTransport(abc.ABC):
    """Abstract transport class for Environments."""
    # OAuth scopes requested for the credentials used by this transport.
    AUTH_SCOPES = (
        "https://www.googleapis.com/auth/cloud-platform",
        "https://www.googleapis.com/auth/dialogflow",
    )
    DEFAULT_HOST: str = "dialogflow.googleapis.com"
    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        api_audience: Optional[str] = None,
        **kwargs,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """
        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
        # Save the scopes.
        self._scopes = scopes
        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )
        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )
            # Don't apply audience if the credentials file passed from user.
            if hasattr(credentials, "with_gdch_audience"):
                credentials = credentials.with_gdch_audience(
                    api_audience if api_audience else host
                )
        # If the credentials are service account credentials, then always try to use self signed JWT.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)
        # Save the credentials.
        self._credentials = credentials
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host
    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods: each RPC is wrapped with gapic
        # retry/timeout/metadata handling using the given client_info.
        self._wrapped_methods = {
            self.list_environments: gapic_v1.method.wrap_method(
                self.list_environments,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get_environment: gapic_v1.method.wrap_method(
                self.get_environment,
                default_timeout=None,
                client_info=client_info,
            ),
            self.create_environment: gapic_v1.method.wrap_method(
                self.create_environment,
                default_timeout=None,
                client_info=client_info,
            ),
            self.update_environment: gapic_v1.method.wrap_method(
                self.update_environment,
                default_timeout=None,
                client_info=client_info,
            ),
            self.delete_environment: gapic_v1.method.wrap_method(
                self.delete_environment,
                default_timeout=None,
                client_info=client_info,
            ),
            self.lookup_environment_history: gapic_v1.method.wrap_method(
                self.lookup_environment_history,
                default_timeout=None,
                client_info=client_info,
            ),
            self.run_continuous_test: gapic_v1.method.wrap_method(
                self.run_continuous_test,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list_continuous_test_results: gapic_v1.method.wrap_method(
                self.list_continuous_test_results,
                default_timeout=None,
                client_info=client_info,
            ),
            self.deploy_flow: gapic_v1.method.wrap_method(
                self.deploy_flow,
                default_timeout=None,
                client_info=client_info,
            ),
        }
    def close(self):
        """Closes resources associated with the transport.
        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()
    @property
    def operations_client(self):
        """Return the client designed to process long-running operations."""
        raise NotImplementedError()
    # The properties below are abstract handles for each RPC; concrete
    # subclasses return the transport-specific bound callables.
    @property
    def list_environments(
        self,
    ) -> Callable[
        [environment.ListEnvironmentsRequest],
        Union[
            environment.ListEnvironmentsResponse,
            Awaitable[environment.ListEnvironmentsResponse],
        ],
    ]:
        raise NotImplementedError()
    @property
    def get_environment(
        self,
    ) -> Callable[
        [environment.GetEnvironmentRequest],
        Union[environment.Environment, Awaitable[environment.Environment]],
    ]:
        raise NotImplementedError()
    @property
    def create_environment(
        self,
    ) -> Callable[
        [gcdc_environment.CreateEnvironmentRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()
    @property
    def update_environment(
        self,
    ) -> Callable[
        [gcdc_environment.UpdateEnvironmentRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()
    @property
    def delete_environment(
        self,
    ) -> Callable[
        [environment.DeleteEnvironmentRequest],
        Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
    ]:
        raise NotImplementedError()
    @property
    def lookup_environment_history(
        self,
    ) -> Callable[
        [environment.LookupEnvironmentHistoryRequest],
        Union[
            environment.LookupEnvironmentHistoryResponse,
            Awaitable[environment.LookupEnvironmentHistoryResponse],
        ],
    ]:
        raise NotImplementedError()
    @property
    def run_continuous_test(
        self,
    ) -> Callable[
        [environment.RunContinuousTestRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()
    @property
    def list_continuous_test_results(
        self,
    ) -> Callable[
        [environment.ListContinuousTestResultsRequest],
        Union[
            environment.ListContinuousTestResultsResponse,
            Awaitable[environment.ListContinuousTestResultsResponse],
        ],
    ]:
        raise NotImplementedError()
    @property
    def deploy_flow(
        self,
    ) -> Callable[
        [environment.DeployFlowRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()
    @property
    def list_operations(
        self,
    ) -> Callable[
        [operations_pb2.ListOperationsRequest],
        Union[
            operations_pb2.ListOperationsResponse,
            Awaitable[operations_pb2.ListOperationsResponse],
        ],
    ]:
        raise NotImplementedError()
    @property
    def get_operation(
        self,
    ) -> Callable[
        [operations_pb2.GetOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()
    @property
    def cancel_operation(
        self,
    ) -> Callable[[operations_pb2.CancelOperationRequest], None,]:
        raise NotImplementedError()
    @property
    def get_location(
        self,
    ) -> Callable[
        [locations_pb2.GetLocationRequest],
        Union[locations_pb2.Location, Awaitable[locations_pb2.Location]],
    ]:
        raise NotImplementedError()
    @property
    def list_locations(
        self,
    ) -> Callable[
        [locations_pb2.ListLocationsRequest],
        Union[
            locations_pb2.ListLocationsResponse,
            Awaitable[locations_pb2.ListLocationsResponse],
        ],
    ]:
        raise NotImplementedError()
    @property
    def kind(self) -> str:
        # Transport kind identifier; implemented by concrete subclasses.
        raise NotImplementedError()
| [
"noreply@github.com"
] | googleapis.noreply@github.com |
932070d45029b0cd3513177f15e0c146af1f52d9 | 8f3d104a86a41579a2b2622081ef96c78daccd13 | /app/app.py | 48a617fadf84f73765956996cd727bb0aacfdfee | [
"MIT"
] | permissive | pgs8/IS601_Individual-Project-Web-Application-Final | 8eddd66bc4f252415ae5e840d2d6a0e4723572c5 | fa3d110bb23d1c7d6c643ea9a805ef9b2faa75c0 | refs/heads/main | 2023-03-29T19:54:19.304266 | 2021-04-02T21:31:26 | 2021-04-02T21:31:26 | 354,129,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,745 | py | import simplejson as json
from flask import Flask, request, Response, redirect, render_template
from flaskext.mysql import MySQL
from pymysql.cursors import DictCursor
app = Flask(__name__)
# DictCursor makes fetchall() return rows as dicts keyed by column name.
mysql = MySQL(cursorclass=DictCursor)
# NOTE(review): database credentials are hard-coded; 'db' is presumably a
# docker-compose service name -- consider environment variables instead.
app.config['MYSQL_DATABASE_HOST'] = 'db'
app.config['MYSQL_DATABASE_USER'] = 'root'
app.config['MYSQL_DATABASE_PASSWORD'] = 'root'
app.config['MYSQL_DATABASE_PORT'] = 3306
app.config['MYSQL_DATABASE_DB'] = 'movieData'
mysql.init_app(app)
@app.route('/', methods=['GET'])
def index():
    """Home page: render every row of the deniroMovies table."""
    page_user = {'username': 'DeNiro Movies Project'}
    db_cursor = mysql.get_db().cursor()
    db_cursor.execute("SELECT * FROM deniroMovies")
    movies = db_cursor.fetchall()
    return render_template('index.html', title='Home', user=page_user,
                           movies=movies)
@app.route('/view/<int:movie_id>', methods=['GET'])
def record_view(movie_id):
    """Render the detail page for one movie.

    NOTE(review): if movie_id does not exist, fetchall() is empty and
    result[0] raises IndexError (HTTP 500); a 404 would be friendlier.
    """
    cursor = mysql.get_db().cursor()
    cursor.execute('SELECT * FROM deniroMovies WHERE id=%s', movie_id)
    result = cursor.fetchall()
    return render_template('view.html', title='View Form', movie=result[0])
@app.route('/edit/<int:movie_id>', methods=['GET'])
def form_edit_get(movie_id):
    """Render the pre-filled edit form for one movie.

    NOTE(review): like record_view, result[0] raises IndexError for an
    unknown movie_id.
    """
    cursor = mysql.get_db().cursor()
    cursor.execute('SELECT * FROM deniroMovies WHERE id=%s', movie_id)
    result = cursor.fetchall()
    return render_template('edit.html', title='Edit Form', movie=result[0])
@app.route('/edit/<int:movie_id>', methods=['POST'])
def form_update_post(movie_id):
    """Persist the submitted edit form (Title/Year/Score) for one movie."""
    db = mysql.get_db()
    values = (request.form.get('fldTitle'),
              request.form.get('fldYear'),
              request.form.get('fldScore'),
              movie_id)
    sql_update_query = """UPDATE deniroMovies t SET t.Title = %s, t.Year = %s, t.Score = %s WHERE t.id = %s"""
    db.cursor().execute(sql_update_query, values)
    db.commit()
    return redirect("/", code=302)
@app.route('/movies/new', methods=['GET'])
def form_insert_get():
    # Render the blank "add a movie" form.
    return render_template('new.html', title='New Movie Form')
@app.route('/movies/new', methods=['POST'])
def form_insert_post():
    """Insert a new movie row from the submitted form fields."""
    db = mysql.get_db()
    new_row = (request.form.get('fldTitle'),
               request.form.get('fldYear'),
               request.form.get('fldScore'))
    sql_insert_query = """INSERT INTO deniroMovies (Title, Year, Score) VALUES (%s, %s, %s)"""
    db.cursor().execute(sql_insert_query, new_row)
    db.commit()
    return redirect("/", code=302)
@app.route('/delete/<int:movie_id>', methods=['POST'])
def form_delete_post(movie_id):
    """Delete one movie row, then bounce back to the listing page."""
    db = mysql.get_db()
    sql_delete_query = """DELETE FROM deniroMovies WHERE id = %s """
    db.cursor().execute(sql_delete_query, movie_id)
    db.commit()
    return redirect("/", code=302)
@app.route('/api/v1/movies', methods=['GET'])
def api_browse() -> str:
    """GET /api/v1/movies -- every movie as a JSON array."""
    cur = mysql.get_db().cursor()
    cur.execute('SELECT * FROM deniroMovies')
    body = json.dumps(cur.fetchall())
    return Response(body, status=200, mimetype='application/json')
@app.route('/api/v1/movies/<int:movie_id>', methods=['GET'])
def api_retrieve(movie_id) -> str:
    """GET one movie by id as a JSON array (empty array if no such id)."""
    cur = mysql.get_db().cursor()
    cur.execute('SELECT * FROM deniroMovies WHERE id=%s', movie_id)
    body = json.dumps(cur.fetchall())
    return Response(body, status=200, mimetype='application/json')
@app.route('/api/v1/movies/<int:movie_id>', methods=['PUT'])
def api_edit(movie_id) -> str:
    """Update a movie from a JSON body {"Title", "Year", "Score"}.

    NOTE(review): missing keys in the JSON body raise KeyError (HTTP 500).
    """
    cursor = mysql.get_db().cursor()
    content = request.json
    input_data = (content['Title'], content['Year'], content['Score'], movie_id)
    # Renamed from sql_insert_query: this statement is an UPDATE, matching
    # the naming convention used in form_update_post().
    sql_update_query = """UPDATE deniroMovies t SET t.Title = %s, t.Year = %s, t.Score = %s WHERE t.id = %s"""
    cursor.execute(sql_update_query, input_data)
    mysql.get_db().commit()
    resp = Response(status=200, mimetype='application/json')
    return resp
@app.route('/api/v1/movies/', methods=['POST'])
def api_add() -> str:
    """POST a JSON movie {"Title", "Year", "Score"}; 201 on success."""
    payload = request.json
    db = mysql.get_db()
    new_row = (payload['Title'], payload['Year'], payload['Score'])
    sql_insert_query = """INSERT INTO deniroMovies (Title, Year, Score) VALUES (%s, %s, %s)"""
    db.cursor().execute(sql_insert_query, new_row)
    db.commit()
    return Response(status=201, mimetype='application/json')
@app.route('/api/v1/movies/<int:movie_id>', methods=['DELETE'])
def api_delete(movie_id) -> str:
    """Delete one movie by id via the JSON API.

    NOTE(review): 210 is not a standard HTTP status code; 204 (No
    Content) or 200 would be conventional for a successful DELETE.
    """
    cursor = mysql.get_db().cursor()
    sql_delete_query = """DELETE FROM deniroMovies WHERE id = %s """
    cursor.execute(sql_delete_query, movie_id)
    mysql.get_db().commit()
    resp = Response(status=210, mimetype='application/json')
    return resp
if __name__ == '__main__':
    # NOTE(review): debug=True enables the interactive Werkzeug debugger;
    # do not run with it in production.
    app.run(host='0.0.0.0', debug=True)
| [
"pgs8@njit.edu"
] | pgs8@njit.edu |
3620f9f21faf465bdcb55e4a4c4b67376ecf12d7 | bf6b8e95bad7cc9c527b87d405429d03cc336fa2 | /6/intro-proj1/DeHovitz-Bolton/intro-to-flask/app.py | 7d12b244908e8e27b2a4efc40e056c8753e95435 | [] | no_license | Enigmamemory/submissions | 90c75665f8d311a29c094c4e6a22a10a0893cf9f | 39d6b944ad61a78e693d564bb0ede661f9806964 | refs/heads/master | 2020-12-11T03:31:58.281197 | 2014-12-30T05:08:22 | 2014-12-30T05:08:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,857 | py | from flask import Flask,render_template, request
#<center>And how many words? <input type="text" name="topwords" size="5" value="50"><br>
app = Flask(__name__)
@app.route("/")
def start():
    # Landing page: form for choosing two constitutions to compare.
    return render_template("constitutionoptions.html")
@app.route("/results", methods = ["POST", "GET"])
def main():
if request.method == 'POST':
elements = request.form["Submit"]
#return elements;
s = "";
checked=0
Dict={'one':[],'two':[]}
#return "Succcess"
one=1
two=2
if 'Iran' in elements:
Dict['one'].append('Iranian constitution')
a=open('Iran.txt')
iran=a.read()
iran=iran.lower()
iran=iran.split()
Dict['one'].append(iran)
checked+=1
if 'USA' in elements:
b=open('unitedstates.txt')
usaw=b.read()
usaw=usaw.lower()
usaw=usaw.split()
checked+=1
if Dict['one']==[]:
Dict['one'].append("USA's constitution")
Dict['one'].append(usaw)
else:
Dict['two'].append("USA's constitution")
Dict['two'].append(usaw)
if '1791' in elements:
c=open('1791France.txt')
revolution=c.read()
revolution=revolution.lower()
revolution=revolution.split()
checked+=1
if Dict['one']==[]:
Dict['one'].append("French constitution of 1791")
Dict['one'].append(revolution)
else:
Dict['two'].append("French constitution of 1791")
Dict['two'].append(revolution)
if 'Russia' in elements:
d=open('russia.txt')
rus=d.read()
rus=rus.lower()
rus=rus.split()
checked+=1
if Dict['one']==[]:
Dict['one'].append("Russian Federation' constitution")
Dict['one'].append(rus)
else:
Dict['two'].append("Russian Federation' constitution")
Dict['two'].append(rus)
if 'USSR' in elements:
e=open('USSR.txt')
ussr=e.read()
ussr=ussr.lower()
ussr=ussr.split()
checked+=1
if Dict['one']==[]:
Dict['one'].append("Union of Soviet Socialist Republic's constitution")
Dict['one'].append(ussr)
else:
Dict['two'].append("Union of Soviet Socialist Republic's constitution")
Dict['two'].append(ussr)
if 'France' in elements:
f=open('France.txt')
france=f.read()
france=france.lower()
france=france.split()
checked+=1
if Dict['one']==[]:
Dict['one'].append("French constitution")
Dict['one'].append(france)
else:
Dict['two'].append("French constitution")
Dict['two'].append(france)
if 'Manifesto' in elements:
g=open('manifesto.txt')
manifesto=g.read()
manifesto=manifesto.lower()
manifesto=manifesto.split()
checked+=1
if Dict['one']==[]:
Dict['one'].append("Communist Manifesto")
Dict['one'].append(manifesto)
else:
Dict['two'].append("Communist Manifesto")
Dict['two'].append(manifesto)
if 'Magna' in elements:
h=open('Magnacarta.txt')
magna=h.read()
magna=magna.lower()
magna=magna.split()
checked+=1
if Dict['one']==[]:
Dict['one'].append("Magna Carta")
Dict['one'].append(magna)
else:
Dict['two'].append("Magna Carta")
Dict['two'].append(magna)
checker=0
if checked!=2:
s+= '<br><center><font size="10"> <b> I said two gosh darn it!<br> </b> <font size="7"> <a href="constitutionoptions.html">Go back</a>'
return s
#10
s+= '<br><center> You have chosen to compare the '+Dict['one'][0]+' with the '+Dict['two'][0]
if True: #elements.has_key('yestopwords'):
checker+=1
s+= '<br><br><b><font size="5">Top ' + '50' +' Words</font><br><br>' #elements['topwords'].value
s+= Dict['one'][0]+':</b><br>'
s+= top50(Dict['one'][1],50)
s+= '<br><br><b>'+Dict['two'][0]+':</b><br>'
s+= top50(Dict['two'][1],50)
if True: #elements.has_key('total'):
checker+=1
s+= '<br> <br><b><font size="5">Total Words</font></b><br><br>'
s+= Dict['one'][0]+': '+str(len(Dict['one'][1])) +'<br>'
s+= Dict['two'][0]+': '+str(len(Dict['two'][1]))
uniqueone=[]
uniquetwo=[]
for x in Dict['one'][1]:
if x not in uniqueone:
uniqueone.append(x)
for x in Dict['two'][1]:
if x not in uniquetwo:
uniquetwo.append(x)
if True: #elements.has_key('unique'):
checker+=1
s+= '<br> <br><b><font size="5">Total Unique Words</font></b><br><br>'
s+= Dict['one'][0]+': '+str(len(uniqueone)) +'<br>'
s+= Dict['two'][0]+': '+str(len(uniquetwo))
if True: #elements.has_key('percentage'):
checker+=1
s+= '<br> <br><b><font size="5">Percentage of Unique Words</font></b><br><br>'
s+= Dict['one'][0]+': '+str(float(len(uniqueone))/len(Dict['one'][1])*100) +'%<br>'
s+= Dict['two'][0]+': '+str(float(len(uniquetwo))/len(Dict['two'][1])*100) +'%'
if True: #elements.has_key('common'):
checker+=1
s+= '<br> <br><b><font size="5">All Common Words</font></b><br><br>'
new=[]
for x in uniqueone:
if x in uniquetwo:
new.append(x)
s+= 'There are '+ str(len(new))+" common words and they are:<br>"
r=0
new.sort()
for word in new:
s+= word+' '
r+=1
if r==6:
s+= '<br>'
r=0
if True: #elements.has_key('letters'):
checker+=1
s+= '<br> <br><b><font size="5">Total number of letters</font></b><br><br>'
s+= Dict['one'][0]+': '
s+= str(letters(Dict['one']))
s+= "<br>"+ Dict['two'][0]+": "
s+= str(letters(Dict['two']))
if True: #elements.has_key('wordlength'):
checker+=1
s+= '<br> <br><b><font size="5">Average word length</font></b><br><br>'
s+= Dict['one'][0]+': '
s+= str(letters(Dict['one']))
s+= "<br>"+ Dict['two'][0]+": "
s+= str(letters(Dict['two']))
if checker==0:
s+= '<br><br><br><font size="5"> <b> But you forgot to check off actions to take!</b>'
s+= '<br> <a href="constitutionoptions.html">Go back!</a>'
return s
s+= '<br><br> <br> <b> Resources</b><br> All constitutions taken from <a href="http://www.constitution.org/cons/natlcons.htm">here</a>'
s+= '<br> manifesto taken from <a href="http://www.gutenberg.org/ebooks/61"> here</a>'
s+= '<br> French constitution of 1791 taken from <a href="http://ic.ucsc.edu/~traugott/hist171/readings/1791-09ConstitutionOf1791"> here</a>'
s+= '<br> and Magna Carta taken from <a href="http://www.constitution.org/eng/magnacar.htm"> here</a>'
return s
#'''
def top50(filen,number):
    """Build an HTML string listing the `number` most frequent words in `filen`.

    filen  -- list of word strings.  NOTE: mutated in place -- each entry is
              stripped of surrounding punctuation before counting.
    number -- how many ranked entries to emit (coerced with int()).
    Returns an HTML fragment with a <br> inserted roughly every four entries.
    """
    s = ""
    cm=filen
    d={}
    x=0
    # Strip surrounding punctuation from every word (mutates the caller's list).
    while x<len(cm):
        cm[x]=cm[x].strip(".,?[]!();:-")
        x+=1
    # Tally occurrences of each distinct word.
    for x in cm:
        if x in d:
            d[x]+=1
        else:
            d[x]=1
    # Counts sorted descending; x[y] is the y-th highest count.
    x=sorted(d.values())
    x=x[::-1]
    y=0
    n=0
    dr=0
    # NOTE(review): if `number` exceeds the count of unique words this indexes
    # past the end of x (IndexError), and with tied counts the same rank can
    # match several words -- confirm intended behaviour before reuse.
    while y<int(number):
        for w in d:
            if y!=n:
                n+=1
                dr+=1
                # Insert a line break after every fourth emitted entry.
                if dr==4:
                    dr=0
                    s+= '<br>'
            if d[w]==x[y]:
                s+= str(y+1)+') '+w+': '+str(x[y])
                y+=1
    return s
def letters(x):
    """Return the total number of characters across all words in x[1].

    x is a (title, word_list) pair; only the word list is used.
    """
    words = x[1]
    # Summing per-word lengths is equivalent to the original approach of
    # collecting every character into a list and taking its length.
    return sum(len(word) for word in words)
if __name__=="__main__":
    # Run the Flask dev server with debugging enabled
    # (`app` is defined earlier in this file).
    app.debug=True
    app.run()
| [
"adam.dehovitz@gmail.com"
] | adam.dehovitz@gmail.com |
14ae89f0d76dde18e5498cbc8cfee175078b9fb1 | c2aa972a4155a9d14e5208682fa4d0f1102b99d0 | /PyMailer.py | a87975d7371c52c72ce5054ae1506bb2b63b6832 | [
"MIT"
] | permissive | FrozenGirl-spd/automailer | d3500c0c788b81a80d32d715a92b365fbc6561b3 | b46e62d5ef3c48b15c681423adaf3a36158a6cd8 | refs/heads/master | 2022-05-27T06:22:16.762658 | 2020-04-28T17:53:36 | 2020-04-28T17:53:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,496 | py | import os
import smtplib  # SMTP client session object used to send the mail
import imghdr  # detects the image type from file contents
from email.message import EmailMessage

# Credentials are read from environment variables so they never live in source.
# (`os` is imported at the top of this file.)
sender_address = os.environ.get('EMAIL_ID')
sender_password = os.environ.get('EMAIL_PASSWORD')
recipient_address = os.environ.get('TEST_EMAIL_PASSWORD')

# Build the message headers and body.
msg = EmailMessage()
msg['Subject'] = 'Invitation for a Chat'
msg['From'] = sender_address
msg['To'] = recipient_address
# To send to multiple receivers, read the addresses into a list and pass it.
msg.set_content('Hey! I wanted to ask you out for a chat over a bowl of sizzling brownies topped with chocolate ice-cream over this weekend')

# Attach each image (paths must be in the working directory or absolute).
for attachment_path in ['icecream.jpg', 'pastry.jpg']:
    with open(attachment_path, 'rb') as handle:
        payload = handle.read()
        # imghdr sniffs the image subtype; not needed for PDF attachments.
        image_subtype = imghdr.what(handle.name)
        msg.add_attachment(payload, maintype='image', subtype=image_subtype,
                           filename=handle.name)
        # For PDFs use maintype='application', subtype='octet_stream' instead.

# Send the message over an implicit-TLS (SSL) SMTP session.
with smtplib.SMTP_SSL('smtp.gmail.com', 465) as session:
    session.login(sender_address, sender_password)
    session.send_message(msg)

# If SSL times out, an explicit STARTTLS session on port 587 also works:
# with smtplib.SMTP('smtp.gmail.com', 587) as session:
#     session.ehlo()
#     session.starttls()
#     session.ehlo()
#     session.login(sender_address, sender_password)
#     session.send_message(msg)
| [
"noreply@github.com"
] | FrozenGirl-spd.noreply@github.com |
c5db0d0de514c401128e8959756cdc4966c3e792 | ff0b267e3bbecf9bc1c46bdb75a42d71545d45f8 | /mluicode/FlowerClassification/iris.py | 75560ed6a5408530273cd646b7c7344ed0e05421 | [] | no_license | rupeshsm/ML_Code | 650c0590585fd52901db421b407dab1f6c5790ec | 7380a99efc7c105ea7771a40226c34571628cc70 | refs/heads/main | 2023-06-19T01:47:01.063957 | 2021-07-01T18:29:55 | 2021-07-01T18:29:55 | 380,484,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py |
import numpy as np
import pickle
import pandas as pd

# Load the iris dataset: the first four columns are features, the last the label.
frame = pd.read_csv('iris.data')
features = np.array(frame.iloc[:, 0:4])
labels = np.array(frame.iloc[:, 4:])

# Encode the string class names as integers.
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder()
labels = encoder.fit_transform(labels.reshape(-1))

# Hold out 20% of the rows for evaluation.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.2)

# Fit a linear-kernel SVM on the training split and persist it for serving.
from sklearn.svm import SVC
sv = SVC(kernel='linear').fit(X_train, y_train)
pickle.dump(sv, open('iri.pkl', 'wb'))
| [
"rrupesh.mishra@gmail.com"
] | rrupesh.mishra@gmail.com |
96eb29f1e9ed92f18a7a489d10a30a6ab97eb62a | 78579b189f1afd790bce20017c273c71cfa96240 | /lev_pos_size.py | ddb12bdaf17f3f3f8842ebedc23d2cb08165d061 | [] | no_license | harrychurchley/pos_lev_calculator | 4dbc94c3b2d66cdf039cedfdbfd4d8fe41105456 | badeb54edee105e4482995ce89728e07535d627b | refs/heads/main | 2023-06-30T13:17:37.541714 | 2021-08-01T12:12:51 | 2021-08-01T12:12:51 | 390,522,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | # this is a leverage and position size calculator
# import dependencies
import pandas as pd
from binance import client  # was "from binance import client," -- the trailing comma is a SyntaxError

# Account configuration: risk 2% of a $1000 account per trade.
account_size = 1000
risk_pc = 2
risk_raw = account_size*(risk_pc/100)  # dollar amount at risk per trade

def place_trade(coin, buy_price, take_profit, stop_loss):
    """Print the leverage and position size implied by the stop-loss distance.

    buy_price / stop_loss drive the calculation; `coin` and `take_profit`
    are currently unused -- NOTE(review): confirm whether that is intended.
    Returns None (the values are printed, not returned).
    """
    # Invalidation: percentage move from entry down to the stop loss.
    invalidation = ((buy_price - stop_loss)/buy_price) * 100
    # Leverage so that hitting the stop loses exactly risk_pc% of the account.
    leverage = risk_pc/invalidation
    position_size = account_size * leverage
    return print(leverage, position_size)
place_trade('BTC', 4000, 4500, 3500) | [
"harry_churchley@live.co.uk"
] | harry_churchley@live.co.uk |
ec3367618a33e5ef24929f6b41aaae828cb02e81 | d3e51b340a22b5924025e30f846302f62839335b | /api/models.py | 053dcf0c2f39bf5d7edf938940969b540a09faef | [] | no_license | benbryson789/drf-auth-ex | 98fff4f1a80ec54e9e8a225328139ab5b6e9dcb7 | 6a5a2b82959b2ccd14fd6e8dbf5b420f5bce81a3 | refs/heads/master | 2023-07-15T07:27:22.250314 | 2021-08-21T13:52:42 | 2021-08-21T13:52:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | from django.db import models
class Advice(models.Model):
    """A piece of advice with a genre label and a free-text body."""
    # Category of the advice entry.
    genre = models.CharField(max_length=100)
    # Full advice text.
    body = models.TextField()
    def __str__(self):
        # Display the genre plus the first 20 characters of the body.
        return f"{self.genre}, {self.body[:20]}"
| [
"mowbray.chad@gmail.com"
] | mowbray.chad@gmail.com |
e25e3fb611bdf6fa99186813f21592c175ee2b99 | 53ee800e1cd6b4cd3e834e049a74c67c5e32eaca | /conftest.py | d7d40aca37ed6a8b6431be82ec5d473360206d71 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | sasobadovinac/ht | 482cd7e7c8ef351dd4bcb5bc9993ef3f74d8cab0 | de707506c00a3aefc2985008e98e9df0e7af9cb6 | refs/heads/master | 2023-02-09T04:42:11.961473 | 2023-01-23T02:21:06 | 2023-01-23T02:21:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,984 | py | import sys
import platform
# True when running under the PyPy interpreter.
is_pypy = 'PyPy' in sys.version

def pytest_ignore_collect(path):
    """Pytest hook: return True for paths that should not be collected."""
    p = str(path)
    # Helper/plotting/prerelease scripts are never collected.
    skip_markers = ('manual_runner', 'make_test_stubs', 'plot', 'prerelease')
    if any(marker in p for marker in skip_markers):
        return True
    if 'conf.py' in p:
        return True
    major, minor = (int(part) for part in platform.python_version_tuple()[0:2])
    if (major, minor) < (3, 7) or (major, minor) >= (3, 10) or is_pypy:
        # numba does not yet run under pypy
        if 'numba' in p:
            return True
        # skip .rst tests as different rendering from pint and no support for NUMBER flag
        if '.rst' in p:
            return True
    if sys.version[0] == '2':
        if 'numba' in p or 'typing_utils' in p:
            return True
    if 'test' not in p:
        return True
    if 'ipynb' in p and 'bench' in p:
        return True
    return False
#def pytest_addoption(parser, pluginmanager):
# if sys.version[0] == '323523':
# parser.addoption("--doctest-modules")
# parser.addini(name="doctest_optionflags", help="", default="NORMALIZE_WHITESPACE NUMBER")
#def pytest_configure(config):
# print(config)
#open('/home/caleb/testoutput', 'w').write(str(1))
#if sys.version[0] == '2':
# args = []
# #print(args)
def pytest_load_initial_conftests(args):
    """Pytest hook stub; intentionally does nothing with *args*."""
    # The original assigned two throwaway locals; the hook is a no-op either way.
    pass
def pytest_configure(config):
    """Enable doctest collection (with the NUMBER flag) on Python 3 / pytest >= 6."""
    if sys.version[0] == '3':
        import pytest
        # NOTE(review): lexicographic comparison of the major version string --
        # breaks if pytest's major version reaches double digits; confirm.
        if pytest.__version__.split('.')[0] >= '6':
            config.addinivalue_line("addopts", '--doctest-modules')
            config.option.doctestmodules = True
            # NUMBER lets doctests match floats with fewer digits.
            config.addinivalue_line("doctest_optionflags", "NUMBER")
#            config.addinivalue_line("addopts", config.inicfg['addopts'].replace('//', '') + ' --doctest-modules')
    #config.inicfg['addopts'] = config.inicfg['addopts'] + ' --doctest-modules'
    #
    # Whitespace normalisation applies on every interpreter/pytest combination.
    config.addinivalue_line("doctest_optionflags", "NORMALIZE_WHITESPACE")
| [
"Caleb.Andrew.Bell@gmail.com"
] | Caleb.Andrew.Bell@gmail.com |
daebbdca8eef6e5e32041ddcc0c62e19d77d24da | 27b9c6fd86bd1f27bb5abac66c85eaa005e928e1 | /directinfo.py | 09be6c1409c5cdda9fff2a9a5c9bcf463158e680 | [] | no_license | essefi-ahlem/web-scraping-with-BeautifulSoup | 02415e04165b4d3d7f3c241f098f966cffb243ca | c1f94b4c369312ca743dde8c7fa4165a79cfa57f | refs/heads/master | 2022-06-07T19:26:18.746611 | 2020-05-06T20:19:02 | 2020-05-06T20:19:02 | 258,362,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,984 | py | # -*- coding: utf-8 -*-
"""directinfo.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1-cJh0axprUdwijWJ0HCRz7uN56ECBfQM
"""
#importing libraries
import bs4
from urllib.request import urlopen as req
from bs4 import BeautifulSoup as soup
import requests
# Scrape article links/titles from two listing pages, then fetch each
# article body and collect everything for a DataFrame (built just below).
#list of links of every article in different pages
links=[]
#list of types of every article (filled with NaN later)
types=[]
#list of titles of every article in different pages
titles=[]
links_pages=[]
for i in range (2):
    links_pages.append("https://directinfo.webmanagercenter.com/lessentiel/page/"+str(i+1)+"/")
#for every page , we 'll extract the information that we need
for l in links_pages :
    #parses html into a soup data structure to traverse html
    myurl=req(l)
    page_s=soup(myurl.read(),"html.parser")
    myurl.close()
    #searching containers of articles
    # NOTE(review): these CSS class names are theme-specific and will break
    # silently (AttributeError on .find_all) if the site layout changes.
    containers=page_s.find("div",{"class":"td-transition-content-and-menu td-content-wrap"})
    c=containers.find_all("div",{"class":"td-module-thumb"})
    for container in c:
        #appending urls of every article to links
        links.append(container.find('a')['href'])
        #appending titles to titles
        titles.append(container.find('a')['title'])
#here we'll enter every link , searching for the text of the article
article=[]
for l in links:
    #parses html into a soup data structure to traverse html
    myurl=req(l)
    page_s=soup(myurl.read(),"html.parser")
    myurl.close()
    #searching for the article container
    article_container=page_s.find("div",{"class":"td-post-content"})
    #in our case we'll concatenate paragraphs to have the full text
    ch=""
    for i in article_container.find_all('p'):
        ch=ch+i.text
    article.append(ch)
# Import pandas to create our dataframe
import pandas as pd
import numpy as np
df=pd.DataFrame(list(zip(links,titles,article)),columns=['link','title','article'])
# Placeholder column; the article type is not extracted by this script.
df['type']=np.nan
# Create and download the csv file
df.to_csv('directinfos_scraping.csv', index = False) | [
"noreply@github.com"
] | essefi-ahlem.noreply@github.com |
bfbdcb02acc6bbaaf28aed62a3a02c0364e3390f | 1e5f6ac1590fe64e2d5a2d8b036c0948847f668d | /codes/Module_3/lecture_14/lecture_14_1.py | 8e31229472bbd0149536f6ac5d764794c79ff078 | [] | no_license | Gedanke/Reptile_study_notes | 54a4f48820586b1784c139716c719cc9d614c91b | a9705ebc3a6f95160ad9571d48675bc59876bd32 | refs/heads/master | 2022-07-12T23:43:24.452049 | 2021-08-09T12:54:18 | 2021-08-09T12:54:18 | 247,996,275 | 5 | 1 | null | 2022-06-26T00:21:48 | 2020-03-17T14:50:42 | HTML | UTF-8 | Python | false | false | 728 | py | # -*- coding: utf-8 -*-
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait

# Drive Chrome to search Baidu for "Python" and dump the resulting page.
url = "https://www.baidu.com"
browser = webdriver.Chrome()
try:
    browser.get(url)
    # Renamed from `input`: the original shadowed the builtin input().
    search_box = browser.find_element_by_id('kw')
    search_box.send_keys('Python')
    search_box.send_keys(Keys.ENTER)
    # Block until the results container appears (up to 10 seconds).
    wait = WebDriverWait(browser, 10)
    wait.until(EC.presence_of_element_located((By.ID, 'content_left')))
    time.sleep(5)
    print(browser.current_url)
    print(browser.get_cookies())
    print(browser.page_source)
finally:
    # Always release the browser, even if the wait times out.
    browser.close()
| [
"13767927306@163.com"
] | 13767927306@163.com |
673319d7217bb7d2670c787afae6d99c369506e2 | 4ff8c0b324c4447e6a777b6311c74ca21ad75cc0 | /tukutter.py | bc14a857b11e33cebd8262fac6dc471faa8a3494 | [] | no_license | Asatai95/tukutter | 58ba07e7dcde978f53b53c7e8ab71438fb335534 | e315ac17d2dc393ef1d9a3f045ec852224549b60 | refs/heads/master | 2021-04-25T12:18:47.205449 | 2018-08-17T10:20:06 | 2018-08-17T10:20:06 | 111,815,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,728 | py | import MySQLdb
import time
import os
from flask import Flask, request, render_template, redirect, make_response, send_from_directory, url_for, flash
from werkzeug import secure_filename
application = Flask(__name__)
# Directory where uploaded avatar images are stored.
UPLOAD_FOLDER = './static/img/'
# File extensions accepted for avatar uploads (note: 'jpg' is not listed).
ALLOWED_EXTENSIONS = set(['png', 'jpeg', 'gif'])
# NOTE(review): this glob-like string is never used as a real pattern -- confirm it is dead.
path = './static/img/*.ALLOWED_EXTENSIONS'
application.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# Random per-process secret key; anything signed with it resets on restart.
application.config['SECRET_KEY'] = os.urandom(24)
def allowed_file(filename):
    """Return True if *filename* has an extension listed in ALLOWED_EXTENSIONS.

    The comparison is now case-insensitive: the original rejected
    upper-case extensions such as "photo.PNG".
    """
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@application.route('/tukutter')
def top_db():
    """Timeline: every tweet except the logged-in user's own, plus their profile."""
    data = request.cookies.get('name', None)
    if data is None:
        # No session cookie: the original crashed concatenating None into SQL.
        return redirect('http://localhost:8080/')
    db = MySQLdb.connect(user='root', passwd='asatai95', host='localhost', db='tukutter', charset='utf8')
    con = db.cursor()
    # Parameterized queries: the cookie value is attacker-controlled and must
    # never be concatenated into SQL (the original was injectable).
    sql = ("select tweet.tw_id, created_at, tweet_comment, user_name, user_img, log_id "
           "from tweet inner join users on tweet.user_id = users.log_id "
           "where log_id != %s")
    con.execute(sql, [data])
    result = con.fetchall()
    con.execute("select user_name, user_img from users where log_id = %s", [data])
    top = con.fetchall()
    db.close()
    return render_template('top.html', rows=result, tops=top)
@application.route('/')
def login():
    """Render the login form (authentication handled by the POST route)."""
    return render_template('login.html')
@application.route('/', methods=['POST'])
def login_db():
    """Validate the login form and set the session cookie on success."""
    log = request.form['log_id']
    pas = request.form['passwd']
    if log == (''):
        return render_template('login.html', error_log='ログインIDを入力してください')
    if pas == (''):
        return render_template('login.html', error_pas='パスワードを入力してください')
    db = MySQLdb.connect(user='root', passwd='asatai95', host='localhost', db='tukutter', charset='utf8')
    con = db.cursor()
    # Parameterized query -- the original concatenated form input into the SQL,
    # allowing trivial authentication bypass via injection.
    # NOTE(security): passwords are stored and compared in plain text; hashing
    # (e.g. werkzeug.security) needs a schema migration.
    sql = "select log_id, passwd from users where log_id = %s and passwd = %s"
    con.execute(sql, [log, pas])
    result = con.fetchall()
    db.close()
    if result == ():
        return render_template('login.html', error_login='ログインID、またはパスワードが異なります。')
    resp = make_response(redirect('http://localhost:8080/tukutter'))
    resp.set_cookie('name', log)
    return resp
@application.route('/forgot')
def forgot():
    """Render the password-reset (step 1) form."""
    return render_template('forgot.html')
@application.route('/forgot', methods=['POST'])
def forgot_db():
    """Password reset step 1: verify the login id exists, then go to /remake."""
    log_id = request.form['log_id']
    db = MySQLdb.connect(user='root', passwd='asatai95', host='localhost', db='tukutter', charset='utf8')
    con = db.cursor()
    # Parameterized to stop SQL injection through the form field; execute()
    # returns the matched row count.
    matched = con.execute('select log_id from users where log_id = %s', [log_id])
    db.close()
    if matched == 0:
        return render_template('forgot.html', error='存在しないログインIDです。')
    resp = make_response(redirect('http://localhost:8080/remake'))
    resp.set_cookie('name', log_id)
    return resp
@application.route('/remake')
def remake():
    """Render the password-reset (step 2) form."""
    return render_template('remake.html')
@application.route('/remake', methods=['POST'])
def remake_db():
    """Set a new password for the user identified by the 'name' cookie."""
    data = request.cookies.get('name', None)
    passwd = request.form['passwd']
    passwd_again = request.form['passwd_again']
    # Validation with early returns (flattened from the original elif chain).
    if passwd == (''):
        return render_template('remake.html', error='パスワードを入力してください。')
    if passwd_again == (''):
        return render_template('remake.html', error='確認用のパスワードも入力してください。')
    if passwd != passwd_again:
        return render_template('remake.html', error='項目に同じパスワード値を入力してください。')
    if len(passwd) < 8:
        # was "<= 8", which wrongly rejected exactly-8-character passwords
        # despite the message promising "8文字以上".
        return render_template('remake.html', error='パスワードは8文字以上16文字以内で入力してください。')
    if len(passwd) > 16:
        return render_template('remake.html', error='パスワードは16文字以内で入力してください。')
    db = MySQLdb.connect(user='root', passwd='asatai95', host='localhost', db='tukutter', charset='utf8')
    con = db.cursor()
    # Parameterized queries -- the cookie value was previously concatenated in.
    con.execute('select passwd from users where log_id = %s', [data])
    result = con.fetchall()
    # Guard the empty result (bogus cookie) that previously raised IndexError.
    if result and result[0][0] == passwd:
        db.close()
        return render_template('remake.html', error='すでに使用されているパスワードです。')
    con.execute('update users set passwd = %s where log_id = %s', [passwd, data])
    db.commit()
    db.close()
    resp = make_response(redirect('http://localhost:8080/'))
    # The original called set_cookie('', ''), which left the session cookie
    # intact; delete it so the user re-authenticates with the new password.
    resp.delete_cookie('name')
    return resp
@application.route('/logout')
def logout():
    """Clear the session cookie and return to the login page."""
    resp = make_response(redirect('http://localhost:8080/'))
    # The original called set_cookie('', ''), which leaves the 'name' cookie
    # intact -- the user stayed logged in. Delete it explicitly instead.
    resp.delete_cookie('name')
    return resp
@application.route('/new')
def new():
    """Render the account-registration form."""
    return render_template('new.html')
@application.route('/new', methods=['POST'])
def new_db():
    """Register a new account, optionally with an uploaded avatar image."""
    log_id = request.form['log_id']
    passwd = request.form['passwd']
    user_name = request.form['user_name']
    img_file = request.files["img_file"]
    if log_id == ('') and passwd == ('') and user_name == (''):
        return render_template("new.html", error='全ての内容を入力してください。')
    if log_id == (''):
        return render_template("new.html", error='ログインIDを入力してください。')
    if passwd == (''):
        return render_template("new.html", error='パスワードを入力してください。')
    if user_name == (''):
        return render_template("new.html", error='名前を入力してください。')
    if len(passwd) < 8 or len(passwd) > 16:
        # was "passwd <= 8" / "passwd >= 16": comparing a str to an int raises
        # TypeError on Python 3, so registration crashed instead of validating.
        return render_template("new.html", error='パスワードは8文字以上16文字以下です。')
    # Resolve the avatar path once; both insert branches were otherwise identical.
    if img_file and allowed_file(img_file.filename):
        filename = secure_filename(img_file.filename)
        img_file.save(os.path.join(application.config['UPLOAD_FOLDER'], filename))
        path = UPLOAD_FOLDER + filename
    else:
        # Fallback default avatar.
        path = './static/img/profile.png'
    db = MySQLdb.connect(user='root', passwd='asatai95', host='localhost', db='tukutter', charset='utf8')
    con = db.cursor()
    sql = 'insert into users(log_id,passwd,user_name,user_img) values(%s,%s,%s,%s)'
    try:
        con.execute(sql, [log_id, passwd, user_name, path])
        db.commit()
    except MySQLdb.IntegrityError:
        # log_id is the primary key; a duplicate insert lands here.
        return render_template('new.html', log_error='すでに登録されているログインIDです。')
    finally:
        db.close()
    resp = make_response(redirect('http://localhost:8080/tweet'))
    resp.set_cookie("name", log_id)
    return resp
@application.route('/tweet')
def tweet():
    """Render the tweet-composer page with the current user's name and avatar."""
    data = request.cookies.get('name', None)
    db = MySQLdb.connect(user='root', passwd='asatai95', host='localhost', db='tukutter', charset='utf8')
    con = db.cursor()
    # Parameterized -- the cookie value was previously concatenated into SQL.
    con.execute('select user_img, user_name from users where log_id = %s', [data])
    result = con.fetchall()
    db.close()
    return render_template('tweet.html', tweets=result)
@application.route('/tweet', methods=['POST'])
def tweet_db():
    """Store a new tweet for the logged-in user, then show their profile."""
    data = request.cookies.get("name", None)
    time_stamp = time.strftime('%Y-%m-%d %H:%M:%S')
    test = request.form['tweet']
    if len(test) == 0:
        # was "len(test) is 0": identity comparison on an int is fragile.
        return render_template('tweet.html', test='文字を入力して下さい')
    db = MySQLdb.connect(user='root', passwd='asatai95', host='localhost', db='tukutter', charset='utf8')
    con = db.cursor()
    # Insert the author in the same statement. The original inserted the row
    # and then UPDATEd "the newest tweet" with the user id -- a race that could
    # tag someone else's tweet -- and concatenated the cookie into the SQL.
    sql = "insert into tweet(tweet_comment, created_at, user_id) values(%s, %s, %s)"
    con.execute(sql, [test, time_stamp, data])
    db.commit()
    db.close()
    return redirect('http://localhost:8080/pro')
@application.route('/search')
def search():
    """Search page: show everyone else's tweets plus the viewer's own profile."""
    data = request.cookies.get("name", None)
    db = MySQLdb.connect(user='root', passwd='asatai95', host='localhost', db='tukutter', charset='utf8')
    con = db.cursor()
    # Parameterized -- the cookie value was previously concatenated into SQL.
    sql = ("select tweet_comment, created_at, user_name, user_img, tw_id "
           "from tweet inner join users on tweet.user_id = users.log_id "
           "where log_id != %s")
    con.execute(sql, [data])
    result = con.fetchall()
    con.execute('select user_name, user_img from users where log_id = %s', [data])
    view = con.fetchall()
    db.close()
    return render_template('search.html', rows=result, views=view)
@application.route('/search', methods=['POST'])
def search_db():
    """Keyword search over tweet bodies using SQL LIKE."""
    data = request.cookies.get("name", None)
    search = request.form["search"]
    db = MySQLdb.connect(user='root', passwd='asatai95', host='localhost', db='tukutter', charset='utf8')
    con = db.cursor()
    # The viewer's name/avatar are rendered on every outcome of this view.
    con.execute('select user_name, user_img from users where log_id = %s', [data])
    view = con.fetchall()
    if search == (''):
        db.close()
        return render_template('search.html', test='キーワードを入力してください。', views=view)
    # Parameterized LIKE: the keyword is user input and was previously
    # concatenated straight into the SQL (injectable).
    sql = ("select tweet_comment, created_at, user_name, user_img, tw_id "
           "from tweet inner join users on tweet.user_id = users.log_id "
           "where tweet_comment like %s")
    con.execute(sql, ['%' + search + '%'])
    result = con.fetchall()
    db.close()
    if result == ():
        return render_template('search.html', test='該当なし', views=view)
    return render_template('search.html', rows=result, views=view)
@application.route('/pro')
def pro():
    """Profile page: the user's details plus all of their own tweets."""
    data = request.cookies.get('name', None)
    db = MySQLdb.connect(user='root', passwd='asatai95', host='localhost', db='tukutter', charset='utf8')
    con = db.cursor()
    # Parameterized -- the cookie value was previously concatenated into SQL.
    con.execute('select user_name, user_intro, user_img from users where log_id = %s', [data])
    test = con.fetchall()
    sql = ("select user_name, created_at, tweet_comment, user_img, tw_id "
           "from tweet inner join users on tweet.user_id = users.log_id "
           "where log_id = %s")
    con.execute(sql, [data])
    result = con.fetchall()
    db.close()
    return render_template('pro.html', rows=result, pros=test)
@application.route('/pro/edit/<pro_id>')
def pro_view(pro_id=None):
    """Edit form for one of the user's tweets (pro_id is the tweet id)."""
    data = request.cookies.get('name', None)
    db = MySQLdb.connect(user='root', passwd='asatai95', host='localhost', db='tukutter', charset='utf8')
    con = db.cursor()
    con.execute('select user_name, user_img from users where log_id = %s', [data])
    result = con.fetchall()
    # pro_id comes straight from the URL, so bind it as a parameter -- the
    # original concatenated it into the SQL (injectable).
    con.execute('select tweet_comment from tweet where tw_id = %s', [pro_id])
    test = con.fetchall()
    db.close()
    return render_template('pro_edit.html', tests=result, views=test, pro_id=pro_id)
@application.route('/pro/edit/<pro_id>', methods=['POST'])
def pro_id(pro_id=None):
    """Apply an edit to one of the user's tweets and refresh its timestamp."""
    write = request.form['write']
    time_stamp = time.strftime('%Y-%m-%d %H:%M:%S')
    db = MySQLdb.connect(user='root', passwd='asatai95', host='localhost', db='tukutter', charset='utf8')
    con = db.cursor()
    # The tweet body is free text typed by the user: the original concatenated
    # it straight into the UPDATE statement, which was trivially injectable.
    sql = 'update tweet set tweet_comment = %s, created_at = %s where tw_id = %s'
    con.execute(sql, [write, time_stamp, pro_id])
    db.commit()
    db.close()
    return redirect('http://localhost:8080/pro')
@application.route('/pro/delete/<pro_id>')
def pro_delete(pro_id=None):
    """Delete one of the user's tweets by id."""
    db = MySQLdb.connect(user='root', passwd='asatai95', host='localhost', db='tukutter', charset='utf8')
    con = db.cursor()
    # Parameterized: pro_id comes from the URL and was previously concatenated
    # into the DELETE statement (injectable -- could wipe arbitrary rows).
    con.execute('delete from tweet where tw_id = %s', [pro_id])
    db.commit()
    db.close()
    return redirect('http://localhost:8080/pro')
@application.route('/edit')
def edit():
    """Render the profile-edit form pre-filled with the user's current data."""
    data = request.cookies.get('name', None)
    db = MySQLdb.connect(user='root', passwd='asatai95', host='localhost', db='tukutter', charset='utf8')
    con = db.cursor()
    # Parameterized -- the cookie value was previously concatenated into SQL.
    con.execute('select user_img, id, user_name, user_intro from users where log_id = %s', [data])
    result = con.fetchall()
    db.close()
    return render_template('edit.html', pros=result)
def _current_profile(data):
    """Fetch (user_img, id, user_name, user_intro) rows for re-rendering the edit form."""
    db = MySQLdb.connect(user='root', passwd='asatai95', host='localhost', db='tukutter', charset='utf8')
    con = db.cursor()
    con.execute('select user_img, id, user_name, user_intro from users where log_id = %s', [data])
    result = con.fetchall()
    db.close()
    return result

@application.route('/edit', methods=['POST'])
def edit_db():
    """Update the user's password, name, intro and optional avatar image.

    The original repeated the profile re-query inside every validation branch;
    that is factored into _current_profile above. All SQL is parameterized --
    the cookie and every form field were previously concatenated into the
    statements (injectable), and the submitted password was printed to stdout.
    """
    data = request.cookies.get('name', None)
    passwd = request.form["passwd"]
    passwd_again = request.form["passwd_again"]
    user_name = request.form["user_name"]
    user_intro = request.form["user_intro"]
    img_file = request.files["img_file"]
    # Collect the first validation failure, if any.
    error = None
    if passwd == (''):
        error = 'passwordを入力してください!!'
    elif len(passwd) < 8 or len(passwd) > 16:
        # was "<= 8 or >= 16", which rejected exactly 8- and 16-character
        # passwords despite the message promising "8文字以上16文字以内".
        error = 'パスワードを8文字以上16文字以内で入力してください!!'
    elif passwd != passwd_again:
        error = 'パスワードは両方とも同様の内容を入力してください!!'
    elif user_name == (''):
        error = '名前を入力してください!!'
    if error is not None:
        return render_template('edit.html', pros=_current_profile(data), error=error)
    # Resolve the avatar path once; both update branches were otherwise identical.
    if img_file and allowed_file(img_file.filename):
        filename = secure_filename(img_file.filename)
        img_file.save(os.path.join(application.config['UPLOAD_FOLDER'], filename))
        path = UPLOAD_FOLDER + filename
    else:
        path = './static/img/profile.png'
    db = MySQLdb.connect(user='root', passwd='asatai95', host='localhost', db='tukutter', charset='utf8')
    con = db.cursor()
    sql = ("update users set user_img = %s, passwd = %s, user_name = %s, "
           "user_intro = %s where log_id = %s")
    con.execute(sql, [path, passwd, user_name, user_intro, data])
    db.commit()
    db.close()
    return redirect('http://localhost:8080/pro')
@application.route('/oki/<user_id>')
def oki(user_id=None):
    """Mark tweet `user_id` as a favourite for the logged-in user, then show the list.

    The favourite is upserted into `fab`; the user is identified by the
    'name' cookie (may be None if the cookie is missing).
    """
    time_stamp = time.strftime('%Y-%m-%d %H:%M:%S')
    data = request.cookies.get('name', None)
    db = MySQLdb.connect(user='root', passwd='asatai95', host='localhost', db='tukutter', charset='utf8')
    con = db.cursor()
    # Fully parameterized: the original interpolated user_id and the cookie value
    # straight into the ON DUPLICATE KEY clause (SQL injection).
    sql = ('insert into fab(tweet_id, log_id, fab_time) values(%s, %s, %s) '
           'on duplicate key update tweet_id = %s, log_id = %s, id=LAST_INSERT_ID(id)')
    con.execute(sql, [user_id, data, time_stamp, user_id, data])
    db.commit()
    con.close()
    db.close()
    return redirect('http://localhost:8080/oki')
@application.route('/oki')
def oki_db():
    """List the logged-in user's favourited tweets (or a hint when there are none)."""
    data = request.cookies.get('name', None)
    coun = 1
    db = MySQLdb.connect(user='root', passwd='asatai95', host='localhost', db='tukutter', charset='utf8')
    con = db.cursor()
    # Parameterized: the cookie value was concatenated into the SQL string before
    # (SQL injection).  One connection now serves both queries.
    sql = ("select tweet.tw_id, user_name, user_img, tweet_comment, tweet.created_at, users.log_id "
           "from tweet inner join users on tweet.user_id = users.log_id "
           "inner join fab on fab.tweet_id = tweet.tw_id where fab.log_id = %s")
    con.execute(sql, [data])
    result = con.fetchall()
    # Current user's name/avatar for the page header (needed in both branches).
    con.execute("select user_name, user_img from users where log_id = %s", [data])
    profile = con.fetchall()
    con.close()
    db.close()
    if result == ():
        # No favourites yet: count=3 switches the template to the hint view.
        return render_template('oki.html', com='誰かの投稿をお気に入りしてみよう👍', test=profile, count=3)
    return render_template('oki.html', okis=result, count=coun, test=profile)
@application.route('/delete/<delete_id>')
def delete(delete_id=None):
    """Remove tweet `delete_id` from the favourites table, then return to the list."""
    db = MySQLdb.connect(user='root', passwd='asatai95', host='localhost', db='tukutter', charset='utf8')
    con = db.cursor()
    # Parameterized delete: delete_id comes straight from the URL and was
    # previously concatenated into the SQL string (SQL injection).
    con.execute('delete from fab where tweet_id = %s', [delete_id])
    db.commit()
    con.close()
    db.close()
    return redirect('http://localhost:8080/oki')
@application.route('/pro/<user_pro>')
def user_pro(user_pro=None):
    """Show another user's public profile: their tweets plus name/intro/avatar."""
    db = MySQLdb.connect(user='root', passwd='asatai95', host='localhost', db='tukutter', charset='utf8')
    con = db.cursor()
    # Parameterized: user_pro comes from the URL path and was previously
    # concatenated into both queries (SQL injection).
    tweets_sql = ("select created_at, tweet_comment, user_name, user_img, tweet.tw_id "
                  "from tweet inner join users on tweet.user_id = users.log_id "
                  "where log_id = %s")
    con.execute(tweets_sql, [user_pro])
    tweets = con.fetchall()
    con.execute('select user_name, user_intro, user_img, log_id from users where log_id = %s', [user_pro])
    profile = con.fetchall()
    con.close()
    db.close()
    # (The original guarded this with `if sql is not False:`, which is always
    # true for a non-empty string, so the condition is dropped.)
    # Template keyword names kept as before: user_pro = tweet rows, pros = profile rows.
    return render_template('user_pro.html', user_pro=tweets, pros=profile)
@application.route('/follower')
def follower():
    """List accounts the logged-in user follows (or a hint when there are none)."""
    count = 1
    text = 'フォロー中'
    data = request.cookies.get('name', None)
    db = MySQLdb.connect(user='root', passwd='asatai95', host='localhost', db='tukutter', charset='utf8')
    con = db.cursor()
    # Parameterized: the cookie value was concatenated into the SQL string
    # before (SQL injection).
    follows_sql = ("select user_name, user_img, user_intro, user_id "
                   "from users inner join follow on users.log_id = follow.user_id "
                   "where follow_text = %s AND follow.log_id = %s")
    con.execute(follows_sql, [text, data])
    result = con.fetchall()
    # Current user's name/avatar for the page header (needed in both branches).
    con.execute('select user_name, user_img from users where log_id = %s', [data])
    me = con.fetchall()
    con.close()
    db.close()
    if result == ():
        # Not following anyone yet: count=3 switches the template to the hint view.
        return render_template('follower.html', com='誰かフォローしたら表示されるよ!', tests=me, count=3)
    # (The original's `if sql is not False:` was always true and is dropped.)
    return render_template('follower.html', pros=result, tests=me, count=count)
@application.route('/follow/<follow_id>')
def top(follow_id=None):
    """Follow account `follow_id` on behalf of the logged-in user (upsert into `follow`)."""
    data = request.cookies.get('name', None)
    time_stamp = time.strftime('%Y-%m-%d %H:%M:%S')
    text = 'フォロー中'
    db = MySQLdb.connect(user='root', passwd='asatai95', host='localhost', db='tukutter', charset='utf8')
    con = db.cursor()
    # Fully parameterized: follow_id and the cookie value were concatenated
    # into the ON DUPLICATE KEY clause before (SQL injection).
    sql = ('insert into follow(log_id, user_id, follow_time, follow_text) values(%s, %s, %s, %s) '
           'on duplicate key update user_id = %s, log_id = %s, id=LAST_INSERT_ID(id)')
    con.execute(sql, [data, follow_id, time_stamp, text, follow_id, data])
    db.commit()
    con.close()
    db.close()
    return redirect('http://localhost:8080/follower')
@application.route('/follower/delete/<follower>')
def follower_delete(follower=None):
    """Unfollow account `follower`, then return to the follow list.

    The default was `follower=follower`, which bound the follower() view
    function object as the default value; Flask always supplies the URL
    segment, so None is the correct (and harmless) default.
    """
    db = MySQLdb.connect(user='root', passwd='asatai95', host='localhost', db='tukutter', charset='utf8')
    con = db.cursor()
    # Parameterized: the URL value was concatenated into the SQL before (SQL injection).
    con.execute('delete from follow where user_id = %s', [follower])
    db.commit()
    con.close()
    db.close()
    return redirect('http://localhost:8080/follower')
@application.route('/pay')
def pay():
    """Render the card-payment form; submission is POSTed back to this same path."""
    return render_template('pay.html')
@application.route('/pay', methods=['POST'])
def pay_db():
    """Validate the posted card form, store the card, then hand off to /check."""
    number = request.form['number']
    cardname = request.form['cardname']
    expiry = request.form['expiry']
    cvc = request.form['cvc']
    time_stamp = time.strftime('%Y-%m-%d %H:%M:%S')
    # Field-by-field validation; the all-empty case gets its own message.
    if number == '' and cardname == '' and expiry == '' and cvc == '':
        return render_template('pay.html', error='すべての項目に適切の内容を入力してください!')
    elif number == '':
        return render_template('pay.html', error='正しくカード番号を入力してください!')
    elif cardname == '':
        return render_template('pay.html', error='カード保有者の名前を入力してください!')
    elif expiry == '':
        return render_template('pay.html', error='mm/yyを入力してください!')
    elif cvc == '':
        return render_template('pay.html', error='cvcを入力してください!')
    elif number.startswith(('4', '5', '35', '37', '2222')):
        # Accepted issuer prefixes (startswith takes a tuple; replaces the or-chain).
        db = MySQLdb.connect(user='root', passwd='asatai95', host='localhost', db='tukutter', charset='utf8')
        con = db.cursor()
        # Fully parameterized: number/cardname were concatenated into the
        # ON DUPLICATE KEY clause before (SQL injection).
        sql = ('insert into credit(cardnumber, card_name, mmyy, cvc, created_at) values(%s, %s, %s, %s, %s) '
               'on duplicate key update cardnumber = %s, card_name = %s, id=LAST_INSERT_ID(id)')
        con.execute(sql, [number, cardname, expiry, cvc, time_stamp, number, cardname])
        db.commit()
        con.close()
        db.close()
        return redirect('http://localhost:8080/check')
    else:
        return render_template('pay.html', error='正しいカード番号を入力してください!')
@application.route('/info')
def info():
    """Static information page."""
    return render_template('info.html')
@application.route('/yuryou')
def test():
    """Premium-plan landing page.  NOTE(review): the function name `test` is
    generic and easy to shadow; kept unchanged for compatibility."""
    return render_template('yuryou.html')
@application.route('/check')
def check():
    """Validate the most recently stored card against the `test` table of known cards."""
    db = MySQLdb.connect(user='root', passwd='asatai95', host='localhost', db='tukutter', charset='utf8')
    con = db.cursor()
    print('???')
    # Delete the just-inserted credit row unless its number appears in `test`.
    # cursor.execute() returns the affected row count, so test == 1 means the
    # card was unknown and got removed.
    # NOTE(review): this relies on LAST_INSERT_ID() still holding the id from
    # pay_db()'s insert, but this is a *new* connection, where LAST_INSERT_ID()
    # is 0 -- confirm this actually matches the intended row.
    sql = 'delete from credit where cardnumber not in (select card_number from test where credit.cardnumber = test.card_number and credit.id=LAST_INSERT_ID(credit.id))'
    test = con.execute(sql)
    db.commit()
    print(sql)
    print(test)
    if test == 1:
        return render_template('pay.html', error='存在しないカードアカウントです。')
    else:
        result = con.fetchall()
        print(result)
        return render_template('check.html')
| [
"defense433@gmail.com"
] | defense433@gmail.com |
150dd85450289d365d3b3802b6dc6442dc1c5738 | de9629b3afd37e9b9cf069a096129271f241d7e6 | /Code/HMM.py | 859d736321cccd60b64412b00edbd682f4c7516e | [] | no_license | GazeProject05/PythonCode | 6bc343b958acb7aa58c841b654e23fd9854f1e7e | c96f5d917308862a4a1cb695879ad0c1bac2eb8c | refs/heads/master | 2020-05-15T08:12:28.730234 | 2019-11-16T14:17:10 | 2019-11-16T14:17:10 | 182,155,622 | 0 | 0 | null | 2019-04-28T12:18:23 | 2019-04-18T20:49:23 | null | UTF-8 | Python | false | false | 12,528 | py | import pandas as pd
import numpy as np
import math as m
from scipy.stats import multivariate_normal as mn
import csv
# Reading the eye-tracker export (one row per gaze sample).
df = pd.read_excel('19Proband19.xlsx')
# Reading relevant columns of data.
gazeEventData = df['GazeEventType']
b = df['PupilLeft']
# Pupil sizes use a decimal comma in the export; convert to float.
leftPupilData = b.str.replace(',','.').astype(float)
b2 = df['PupilRight']
rightPupilData = b2.str.replace(',','.').astype(float)
gazeGradientData = df[['GazeGradientX','GazeGradientY']]
# Ground truth | annotations made by expert A and expert B.
gd1 = df['StudioEvent']
gd2 = df['StudioEvent_B']
# Modeling -- all probabilities below are natural-log values (log space),
# so impossible transitions are -inf and "certain" ones are near 0.
states = ('Scanning', 'Skimming', 'Reading', 'MediaView', 'Unknown')
start_probability = {'Scanning': -1.72276659775, 'Skimming': float("-inf"), 'Reading': -0.62415430908, 'MediaView': float("-inf"), 'Unknown': -1.25276296851}
# Dummy data for testing GazeEventType.
observations = ('Fixation','Saccade','Fixation','Fixation','Fixation','Fixation','Fixation') # First 7 states: fixation
gr1 = ['1_Scanning','1_Scanning','1_Unknown','1_Unknown','1_Unknown','1_Reading','1_Skimming']
gr2 = ['1_Skimming','1_Reading','1_Unknown','1_MediaView','1_Unknown','1_Reading','1_Scanning']
#-------------------- 1st order Transition Matrix (log probabilities) ----------#
transition_probability = {
   'Scanning' : {'Scanning': -0.00232095217675443, 'Skimming': -7.88855044050366, 'Reading': -7.53491040026009, 'MediaView': -9.24499183847387, 'Unknown': -6.6357874720083},
   'Skimming' : {'Scanning': -7.48995094652083, 'Skimming': -0.00236717233575057, 'Reading': -7.22484319610759, 'MediaView': -9.11740736445761, 'Unknown': -6.9405916587526},
   'Reading' : {'Scanning': -7.68382320280519, 'Skimming': -8.02791936453706, 'Reading': -0.00201180911939325, 'MediaView': -10.1073609062169, 'Unknown': -6.74006507623042},
   'MediaView': {'Scanning': -6.9409482463379, 'Skimming': -7.02795962332753, 'Reading': -7.8164169836918, 'MediaView': -0.00371507453616538, 'Unknown': -6.53548313822974},
   'Unknown' : {'Scanning': -7.50610673071849, 'Skimming': -8.39992460674059, 'Reading': -7.96406202144805, 'MediaView': -10.6841605610664, 'Unknown': -0.00114590030289818}
   }
##------------------------------- MODEL FOR GAZE EVENT TYPE ---------------------------------------##
# Log-space emission probabilities P(observation | state) for the discrete
# GazeEventType observation.
emission_probability = {
   'Scanning' : {'Fixation': -0.51042419, 'Saccade': -1.17873170, 'Unclassified': -2.38498473},
   'Skimming' : {'Fixation': -0.71985695, 'Saccade': -0.90357292, 'Unclassified': -2.22508255},
   'Reading' : {'Fixation': -0.29756367, 'Saccade': -1.60167980, 'Unclassified': -2.88567604},
   'MediaView' : {'Fixation': -0.36204432, 'Saccade': -1.40492718, 'Unclassified': -2.84106350},
   'Unknown' : {'Fixation': -1.01026657, 'Saccade': -1.33310886, 'Unclassified': -0.98826541}
   }
##-------------------- MODEL FOR PUPIL DATA -- AND -- FUNCTIONS TO CALCULATE PROBABILITIES ------------------##
# Gaussian-mixture parameters per state, one dict per mixture component.
# NOTE(review): the 'std_dev' values are consumed by logPdf(), which treats its
# third argument as a *variance* -- confirm which quantity was fitted.
leftPupilModel = {
    'Reading' : (  {'mean': 3.8, 'std_dev': 0.19, 'weight': 0.1},
                   {'mean': 2.4, 'std_dev': 0.077, 'weight': 0.15},
                   {'mean': 2.8, 'std_dev': 0.28, 'weight': 0.75}
                ),
    'Scanning' : ( {'mean': 2.9, 'std_dev': 0.2, 'weight': 0.61},
                   {'mean': 3.9, 'std_dev': 0.26, 'weight': 0.11},
                   {'mean': 2.4, 'std_dev': 0.21, 'weight': 0.28}
                 ),
    'Skimming' : ( {'mean': 2.8, 'std_dev': 0.34, 'weight': 0.92},
                   {'mean': 3.9, 'std_dev': 0.24, 'weight': 0.083}
                 ),
    'Unknown' : ({'mean': 2.7, 'std_dev': 0.47, 'weight': 1.0}, ),
    'MediaView' : ( {'mean': 3.6, 'std_dev': 0.4, 'weight': 0.1},
                    {'mean': 2.8, 'std_dev': 0.33, 'weight': 0.42},
                    {'mean': 2.7, 'std_dev': 0.12, 'weight': 0.48}
                  )
}
rightPupilModel = {
    'Reading' : (  {'mean': 2.9, 'std_dev': 0.27, 'weight': 0.71},
                   {'mean': 2.2, 'std_dev': 0.071, 'weight': 0.18},
                   {'mean': 3.9, 'std_dev': 0.19, 'weight': 0.11}
                ),
    'Scanning' : ( {'mean': 2.2, 'std_dev': 0.11, 'weight': 0.19},
                   {'mean': 4.1, 'std_dev': 0.25, 'weight': 0.1},
                   {'mean': 2.9, 'std_dev': 0.24, 'weight': 0.7}
                 ),
    'Skimming' : ( {'mean': 4.2, 'std_dev': 0.25, 'weight': 0.072},
                   {'mean': 2.9, 'std_dev': 0.37, 'weight': 0.93}
                 ),
    'Unknown' : ({'mean': 2.8, 'std_dev': 0.49, 'weight': 1.0}, ),
    'MediaView' : ( {'mean': 2.6, 'std_dev': 0.18, 'weight': 0.57},
                    {'mean': 3.7, 'std_dev': 0.29, 'weight': 0.15},
                    {'mean': 3.1, 'std_dev': 0.13, 'weight': 0.28}
                  )
}
# Dead code kept for reference: a plain (non-log) normal density.
#def normalProbability(x, mean, std_dev):
#    return ( (1/(std_dev*2.507)) * m.exp((-0.5)*m.pow( (x - mean)/std_dev , 2) ) )
def logPdf(datapoint, mean, deviation):
    """Log of a Gaussian density evaluated at `datapoint`.

    NOTE(review): `deviation` is used here as if it were the *variance*
    (it appears un-squared both inside the sqrt and in the denominator),
    while the model dicts pass values named 'std_dev' -- confirm which
    quantity is intended.
    """
    diff = datapoint - mean
    normalizer = m.log(m.sqrt(2 * m.pi * deviation))
    return -normalizer - diff * diff / (2 * deviation)
def gmmProbability(x, key, side):
    """Log-likelihood of pupil observation x under the GMM for state `key`.

    `side` selects the 'left' or 'right' pupil model; any other value
    yields 0 (no contribution to the Viterbi score).
    """
    if side == 'left':
        components = leftPupilModel[key]
    elif side == 'right':
        components = rightPupilModel[key]
    else:
        return 0
    # Per-component log p(x | component) + log(weight), combined stably.
    weighted = [
        logPdf(x, component['mean'], component['std_dev']) + m.log(component['weight'])
        for component in components
    ]
    return logExpSum(weighted)
##--------------------------------- GAZE GRADIENT'S MULTIVARIATE GAUSSIAN MODEL -----------------------------------##
# Per-state 2-D Gaussian over (GazeGradientX, GazeGradientY): mean vector and
# 2x2 covariance matrix, fitted offline.
ReadingMean = np.array([-0.018223117030612773, 0.04327775196599728])
ReadingCov = np.array( [ [1179.63428727, 171.67930601], [171.67930601, 836.91103656] ] )
ScanningMean = np.array([-0.14131410896028737, -0.08072241069646777])
ScanningCov = np.array( [ [1852.15462508, 330.37926778], [330.37926778, 1716.64905786] ] )
SkimmingMean = np.array([0.3212, -0.6845777777777777])
SkimmingCov = np.array( [ [1805.68290536, 216.39954858], [216.39954858, 4072.86681401] ] )
UnknownMean = np.array([-0.04316383904262544, 0.06364754324031643])
UnknownCov = np.array( [ [1786.47885221, 598.16561454], [598.16561454, 2461.48454459] ] )
MediaViewMean = np.array([-1.0968448729184925, 0.2287467134092901])
MediaViewCov = np.array( [ [1922.47066957, -383.06551818], [-383.06551818, 1367.8950435] ] )
# Lookup table keyed by state name ('Coovariance' spelling kept: it is a dict key).
MultiVariateModel = {
    'Reading' : { 'MeanArray' : ReadingMean , 'Coovariance' : ReadingCov },
    'Scanning': { 'MeanArray' : ScanningMean , 'Coovariance' : ScanningCov },
    'Skimming': { 'MeanArray' : SkimmingMean , 'Coovariance' : SkimmingCov },
    'Unknown' : { 'MeanArray' : UnknownMean , 'Coovariance' : UnknownCov },
    'MediaView': { 'MeanArray' : MediaViewMean , 'Coovariance' : MediaViewCov },
}
# TO CALCULATE PROBABILITY FOR A GIVEN POINT,
# WE USE THE pdf FUNCTION OF THE multivariate_normal CLASS FROM scipy.stats:
# mn.pdf(x, mean, cov)
def mulnor(x, key):
    """Log-density of gradient observation x under state `key`'s 2-D Gaussian."""
    return mn.logpdf(x, mean = MultiVariateModel[key]['MeanArray'], cov = MultiVariateModel[key]['Coovariance'])
##----------------------------------- VITERBI IMPLIMENTATION ------------------------------------------------------##
# Helps visualize the steps of Viterbi.
def print_dptable(V):
    """Print the Viterbi score table: one column per time step, one row per state.

    V is a list of {state: log-score} dicts, one dict per time step.
    """
    # Header row: the time-step indices.
    s = "  " + " ".join(("%7d" % i) for i in range(len(V))) + "\n"
    for y in V[0]:
        # One row per state: truncated state name, then its score at each step.
        s += "%.5s: " % y
        s += " ".join("%.7s" % ("%f" % v[y]) for v in V)
        s += "\n"
    print(s)
#Viterbi algo function
def viterbi(gazeEventData, leftPupilData, rightPupilData, gazeGradientData, states, start_p, trans_p, emit_p):
    """Most-likely state sequence for the observations under the HMM.

    All probabilities are natural logs, so scores are summed.  Missing
    observations (NaN cells / all-NaN gradient rows) are simply skipped and
    contribute nothing to the score.  Returns one state name per sample.
    """
    V = [{}]     # V[t][state] = best log-score of any path ending in `state` at time t
    path = []    # path[t][state] = predecessor chosen for `state` at time t (t >= 1)
    # Base case (t == 0): start probability plus whatever observations are present.
    dic = {}
    for p in states:
        terms = [start_p[p]]
        if pd.isnull(gazeEventData[0]) == False:
            terms.append(emit_p[p][gazeEventData[0]])
        if pd.isnull(leftPupilData[0]) == False:
            terms.append(gmmProbability(leftPupilData[0], p, 'left'))
        if pd.isnull(rightPupilData[0]) == False:
            terms.append(gmmProbability(rightPupilData[0], p, 'right'))
        if (gazeGradientData.iloc[0].dropna().empty) == False:
            terms.append(mulnor(gazeGradientData.iloc[0], p))
        V[0][p] = sum(terms)
        dic[p] = [p]
    path.append(dic)
    # Recursion (t >= 1).
    for t in range(1, len(gazeEventData)):
        V.append({})
        # BUG FIX: a fresh back-pointer dict per time step.  The original
        # reused one dict for every t, so all entries of `path` aliased the
        # same (final) table and the backtrack returned a wrong sequence.
        dic = {}
        for q in states:
            maximum = float("-inf")
            state = ''
            for p in states:
                terms = [V[t-1][p], trans_p[p][q]]
                # Emission terms depend only on (t, q); skipped when data is missing.
                if pd.isnull(gazeEventData[t]) == False:
                    terms.append(emit_p[q][gazeEventData[t]])
                if pd.isnull(leftPupilData[t]) == False:
                    terms.append(gmmProbability(leftPupilData[t], q, 'left'))
                if pd.isnull(rightPupilData[t]) == False:
                    terms.append(gmmProbability(rightPupilData[t], q, 'right'))
                if (gazeGradientData.iloc[t].dropna().empty) == False:
                    terms.append(mulnor(gazeGradientData.iloc[t], q))
                temp = sum(terms)
                if temp > maximum:
                    maximum = temp
                    state = p
            V[t][q] = maximum
            dic[q] = state
        path.append(dic)
    # print_dptable(V)
    # Termination: best final state.  V[-1] (instead of V[t]) also works when
    # there is only one sample, where `t` would be an unbound name.
    (prob, state) = max((V[-1][y], y) for y in states)
    # Backtrack through the stored predecessor tables (path[0] is the base case
    # and is deliberately not consulted).
    out = [state]
    for i in range(len(V) - 1, 0, -1):
        out.append(path[i][out[-1]])
    out.reverse()
    return out
##------------------------- Saving in a .csv file ----------##
def exportcsv(path, A, B):
    """Write each prediction next to both annotators' labels to output_1st.csv.

    Rows where either annotator's label is '0_unstated' (samples recorded
    before any annotation was made) are skipped.  Labels are expected in the
    form '<digit>_<Label>'; only the part after the underscore is written.
    """
    header = ['Prediction', 'A', 'B']
    with open('output_1st.csv', 'w') as handle:
        writer = csv.DictWriter(handle, fieldnames=header)
        writer.writeheader()
        for index in range(len(A)):
            if A[index].lower() == '0_unstated' or B[index].lower() == '0_unstated':
                continue
            label_a = A[index].split('_')[1]
            label_b = B[index].split('_')[1]
            writer.writerow({'Prediction': path[index], 'A': label_a, 'B': label_b})
#--------------- log Exp trick ----------#
def logExpSum(arr):
    """Numerically stable log(sum(exp(arr))) via the max-shift trick.

    `arr` is a non-empty sequence of log values.  Shifting by the maximum
    keeps the exponentials in range before summing.
    """
    # Built-in max() replaces the hand-rolled findMaxArray helper.
    maxVal = max(arr)
    total = sum(m.exp(value - maxVal) for value in arr)
    return m.log(total) + maxVal
def findMaxArray(arr):
    """Return the largest value in a non-empty sequence (built-in max)."""
    return max(arr)
##--------------------- MAIN() ------------------##
def main():
    """Decode the whole recording with Viterbi and export predictions to CSV."""
    path = viterbi(gazeEventData, leftPupilData, rightPupilData, gazeGradientData, states, start_probability, transition_probability, emission_probability)
    # path = viterbi(observations, states, start_probability, transition_probability, emission_probability)
    # print(path)
    exportcsv(path, gd1, gd2)
if __name__ == '__main__':
    main()
| [
"44416059+devesh1611singh@users.noreply.github.com"
] | 44416059+devesh1611singh@users.noreply.github.com |
4cac7bbd91f2ee70771624bc6cc8a2c4bfff9f5f | 3ea45d6acd362a646e906eac31ab6d3ea019d727 | /qaeval/tests/scoring/scorers/lerc_test.py | cadff9977a9b00b52775e5e6b44447cb724e1300 | [
"Apache-2.0"
] | permissive | rajhans/qaeval | 9747dea5dd0a234cc3df7837d6cbc0406b5d1b03 | dd7273183dd1b2c9995115310ef041daa953ca81 | refs/heads/master | 2023-07-10T04:15:05.399369 | 2021-08-03T02:22:15 | 2021-08-03T02:22:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,155 | py | import os
import pytest
from qaeval.scoring.scorers import LERCScorer
from qaeval.tests.scoring.scorers.scorer_test import TestScorer
@pytest.mark.skipif('LERC_MODEL' not in os.environ or 'LERC_PRETRAINED' not in os.environ, reason='LERC environment variables not set')
class TestLERCScorer(TestScorer):
    """Regression tests for LERCScorer; skipped unless the model paths are configured."""
    @classmethod
    def setUpClass(cls) -> None:
        # Build one scorer for the whole class: loading the model is expensive.
        cls.scorer = LERCScorer(
            model_path=os.environ['LERC_MODEL'],
            pretrained_path=os.environ['LERC_PRETRAINED'],
            cuda_device=0
        )
    def test_keys(self):
        assert self.scorer.keys() == {'lerc'}
    def test_default_scores(self):
        assert self.scorer.default_scores() == {'lerc': 0.0}
    def test_is_answered(self):
        self.assert_expected_output(
            # This is a regression test. It does not ensure these numbers are correct
            self.scorer,
            {'lerc': (2.5152266025543213 + 4.940724849700928) / 2},
            [{'lerc': 2.5152266025543213}, {'lerc': 4.940724849700928}],
            [[{'lerc': 2.5210483074188232}, {'lerc': 5.024631500244141}, {'lerc': 0.0}], [{'lerc': 4.940724849700928}]]
        )
| [
"danfdeutsch@gmail.com"
] | danfdeutsch@gmail.com |
02bfe54d10807aad87934191d827ad29badd0bb4 | debacbb9a5c833334b36f1d89ddd3e3bce2cf054 | /ciphers/fractions/trifid.py | 81d7e3afa2ed392f533990872c57a776ff14c54d | [] | no_license | branislavblazek/projects | 35198a58afecb30988a65a84c72ebfc7d1cf871c | f028b37df5351e69abda974ea1f5ab1d0c4691f2 | refs/heads/master | 2021-12-22T01:07:13.664219 | 2021-12-14T07:15:05 | 2021-12-14T07:15:05 | 234,887,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,414 | py | from pprint import pprint
class Trifid:
    """Trifid cipher over A-Z plus '.' using a keyed 3x3x3 cube."""
    def __init__(self):
        # 27-symbol alphabet: A-Z plus '.' fills the cube exactly (3*3*3).
        self.alphabet = [chr(i+65) for i in range(26)]
        self.alphabet.append('.')
        self.len = len(self.alphabet)
    def intersection(self, lst1, lst2):
        """Return elements of lst2 not in lst1 (a set difference, despite the name)."""
        lst3 = [value for value in lst2 if value not in lst1]
        return lst3
    def repeating_string(self, given_string):
        """Drop repeated and out-of-alphabet characters, keeping first occurrences."""
        seen = set()
        ret = []
        for c in given_string:
            if c not in seen and c in self.alphabet:
                seen.add(c)
                ret.append(c)
        return ''.join(ret)
    def create_square(self, key):
        """Build the 3x3x3 key cube: key letters first, then the rest of the alphabet."""
        key = self.repeating_string(key)
        key = list(key)
        table = []
        self.can_use = key + self.intersection(key, self.alphabet)
        # Slice the 27 usable symbols into three 3x3 layers.
        for zero_point in range(self.len)[::9]:
            usable = self.can_use[zero_point:zero_point+9]
            row = len(usable) // 3
            mini_table = []
            for index in range(row):
                mini_table.append(usable[index*3:index*3+3])
            table.append(mini_table)
        return table
    def encode(self, input_text, password, more_secure=False):
        """Encode input_text with the cube keyed by password.

        With more_secure=True the text is regrouped into 5-letter blocks first.
        """
        input_text = input_text.upper().lstrip().rstrip()
        password = password.upper().lstrip().rstrip()
        text = input_text
        if more_secure:
            text = text.replace(' ', '')
            text = [text[i:i+5] for i in range(0,len(text), 5)]
            text = ' '.join(text)
        table = self.create_square(password)
        first_line = []
        second_line = []
        third_line = []
        encoded_text = ''
        # For each word, record the (layer, row, column) coordinates of each letter.
        for word in text.split(' '):
            first = []
            second = []
            third = []
            for letter in word:
                can_break = False
                for index_table, mini_table in enumerate(table):
                    for index_row, row in enumerate(mini_table):
                        if letter in row:
                            first.append(index_table)
                            second.append(index_row)
                            third.append(row.index(letter))
                            break
                        # NOTE(review): unlike decode(), can_break is set on a
                        # NON-matching row here, which aborts the layer search
                        # early -- confirm against decode()'s pattern.
                        can_break = True
                    if can_break:
                        break
            first_line.append(first)
            second_line.append(second)
            third_line.append(third)
        # Fractionation: read the coordinate rows in sequence and map each new
        # triple back to a cube letter.
        for index in range(len(first_line)):
            read = first_line[index] + second_line[index] + third_line[index]
            for index in range(len(read)//3):
                pair = read[index*3:index*3+3]
                encoded_text += table[pair[0]][pair[1]][pair[2]]
            encoded_text += ' '
        return encoded_text
    def decode(self, input_text, password, more_secure=False):
        """Decode input_text with the cube keyed by password (inverse of encode)."""
        input_text = input_text.upper().lstrip().rstrip()
        password = password.upper().lstrip().rstrip()
        text = input_text
        if more_secure:
            text = text.replace(' ', '')
            text = [text[i:i+5] for i in range(0,len(text), 5)]
            text = ' '.join(text)
        table = self.create_square(password)
        decoded_text = ''
        for word in text.split(' '):
            word_indexes = []
            # Coordinates of each ciphertext letter, flattened as (layer,row,col) triples.
            for index, letter in enumerate(word):
                for index_table, mini_table in enumerate(table):
                    can_break = False
                    for index_row, row in enumerate(mini_table):
                        if letter in row:
                            word_indexes.append(index_table)
                            word_indexes.append(index_row)
                            word_indexes.append(row.index(letter))
                            can_break = True
                            break
                    if can_break:
                        break
            # Invert the fractionation: stride through the coordinate stream to
            # rebuild each plaintext letter's (layer, row, column).
            pocet = len(word_indexes) // 3
            for i in range(pocet):
                pair = word_indexes[i::pocet]
                decoded_text += table[pair[0]][pair[1]][pair[2]]
            decoded_text += ' '
        return decoded_text
cipher = Trifid()
# Demo: encrypt with a full-alphabet key, then decrypt.
message = cipher.encode('novyt ypsif ry', 'qyfbmrxiwsalkveuj.dotzgchpn')
print(message)
# NOTE(review): decode() is called with more_secure=True although encode() used
# the default (False), so the round trip regroups the text into 5-letter blocks.
text = cipher.decode(message, 'qyfbmrxiwsalkveuj.dotzgchpn', True)
print(text)
"branislav.blazek.bb@gmail.com"
] | branislav.blazek.bb@gmail.com |
2eda323e1df29dba8b357199e32a196401cea08e | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_2_1_neat/16_2_1_latsyrc11235_1.py | da55e94217b12acb619e4ed1d23e38ecc1f4df14 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 2,143 | py | f = [line.rstrip() for line in open('/Users/roshil/Desktop/A-small-attempt0 (2).in')]
# Python 2 script (note the `print ans` statement below).
# Google Code Jam: each input line is an anagram of spelled-out digit names;
# recover the digits by eliminating them via letters unique at each stage:
#   z->zero, w->two, u->four, r->three, x->six, g->eight, o->one, f->five,
#   v->seven, else nine.
out = open('/Users/roshil/Desktop/out.txt','w')
out.truncate()
line = 0
testcases = int(f[line])
line += 1
for i in range(1, testcases+1):
    r1 = f[line]
    line += 1
    r1 = r1.lower()
    word = [k for k in r1]
    s = []
    while len(word) > 0:
        #print word
        if 'z' in word:
            # 'z' only occurs in "zero".
            word.remove('z')
            word.remove('e')
            word.remove('r')
            word.remove('o')
            s.append(0)
        elif 'w' in word:
            # 'w' only occurs in "two".
            word.remove('t')
            word.remove('w')
            word.remove('o')
            s.append(2)
        elif 'u' in word:
            # 'u' only occurs in "four".
            word.remove('f')
            word.remove('o')
            word.remove('u')
            word.remove('r')
            s.append(4)
        elif 'r' in word:
            # After zero/four are gone, 'r' only occurs in "three".
            word.remove('t')
            word.remove('h')
            word.remove('r')
            word.remove('e')
            word.remove('e')
            s.append(3)
        elif 'x' in word:
            # 'x' only occurs in "six".
            word.remove('s')
            word.remove('i')
            word.remove('x')
            s.append(6)
        elif 'g' in word:
            # 'g' only occurs in "eight".
            word.remove('e')
            word.remove('i')
            word.remove('g')
            word.remove('h')
            word.remove('t')
            s.append(8)
        elif 'o' in word:
            # After zero/two/four are gone, 'o' only occurs in "one".
            word.remove('o')
            word.remove('n')
            word.remove('e')
            s.append(1)
        elif 'f' in word:
            # After four is gone, 'f' only occurs in "five".
            word.remove('f')
            word.remove('i')
            word.remove('v')
            word.remove('e')
            s.append(5)
        elif 'v' in word:
            # After five is gone, 'v' only occurs in "seven".
            word.remove('s')
            word.remove('e')
            word.remove('v')
            word.remove('e')
            word.remove('n')
            s.append(7)
        else:
            # Only "nine" remains.
            word.remove('n')
            word.remove('i')
            word.remove('n')
            word.remove('e')
            s.append(9)
    # Digits must be emitted in ascending order.
    s.sort()
    ans = "".join([str(l) for l in s])
    print ans
    out.write("Case #"+str(i)+": "+str(ans) + "\n")
out.close()
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
25facb350837f954b32639f52229de7957d57c7f | 19b857585fa6509b61b28852546db96237a9aa20 | /spider_project/baike_spider/html_parser.py | 04fd841dc1a138da6ff8ec9d78d6d8ab775e05d8 | [] | no_license | hanxueda/python | 296fbd9aba290f538d304877ade36a365e041f75 | c741f413b4e6909924bf0243e1dafc6ccc5fe2f7 | refs/heads/master | 2021-07-17T04:42:10.722626 | 2017-10-23T08:17:57 | 2017-10-23T08:17:57 | 107,843,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,254 | py | import re, urllib.parse
from bs4 import BeautifulSoup
class HtmlParser():
    """Extracts follow-up links and page data from a Baidu Baike article page."""
    def parse(self, page_url, html_cont):
        """Parse html_cont fetched from page_url.

        Returns a (new_urls, new_data) pair, or None when either input is missing.
        """
        if page_url is None or html_cont is None:
            return
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        return self._get_new_urls(page_url, soup), self._get_new_data(page_url, soup)
    def _get_new_urls(self, page_url, soup):
        """Collect the absolute URLs of all /item/ article links on the page."""
        anchors = soup.find_all('a', href=re.compile(r'/item/'))
        return {urllib.parse.urljoin(page_url, anchor['href']) for anchor in anchors}
    def _get_new_data(self, page_url, soup):
        """Pull the page URL, article title and summary text into a dict."""
        # <dd class="lemmaWgt-lemmaTitle-title"><h1>...</h1>
        title_node = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1')
        # <div class="lemma-summary" label-module="lemmaSummary">
        summary_node = soup.find('div', class_='lemma-summary')
        return {
            'url': page_url,
            'title': title_node.get_text(),
            'summary': summary_node.get_text(),
        }
| [
"hanxueda@qq.com"
] | hanxueda@qq.com |
229358e72b33f7974234b83b16ad03b969cd8c66 | e31c3a6c866fc8537cd2ad25638bfe1b1102f53b | /danbo-backend/codes/danbo_backend/wsgi.py | b15a0aad0e7ebebc66efe13cb0ecd2ef2ac024f5 | [
"MIT"
] | permissive | StarDxxx/Software-Engineering-Project---Danbo | 8f781ce18fc63fa665bd588d30426dbe42d93115 | 2225cfe627548ba5af32b842a0bfa64d714fe777 | refs/heads/master | 2023-02-05T21:11:35.811793 | 2020-12-30T09:13:54 | 2020-12-30T09:13:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | """
WSGI config for danbo_backend project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the WSGI callable.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'danbo_backend.settings')
# Module-level WSGI entry point picked up by application servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"17307130121@fudan.edu.cn"
] | 17307130121@fudan.edu.cn |
2d29c7de460a6f7702e0e41e00c5019dfd8918a8 | 0ceb04ce763cf8b73f9b71a211990070abc1f5bb | /src/models/dataset.py | df71f2d8f3edb21d4ece1c2736dba94359979ca5 | [
"MIT"
] | permissive | fangzhimeng/MachineLearningRegressionBenchmark | e04412581ca21d82f4bbff64ba29ec9795337cac | 42a83a1261dbf6b30624e9950db5b2d297622d76 | refs/heads/main | 2023-03-21T04:57:51.633945 | 2021-01-10T20:20:24 | 2021-01-10T20:20:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,391 | py | import copy
from pandas import DataFrame, concat
from numpy import array, ndarray
class Dataset:
    """Wraps a pandas DataFrame together with the name of its label column."""
    _dataFrame: DataFrame
    _label: str
    def __init__(self, label: str, dataFrame: DataFrame):
        self._dataFrame = dataFrame
        self._label = label
        self.prepareDataset()
    def getDataset(self):
        """Gets a reference of the dataset object"""
        return self._dataFrame
    def getDatasetCopy(self):
        """Gets a copy of the dataset object"""
        return copy.copy(self._dataFrame)
    def copy(self):
        """Return a shallow copy of this Dataset wrapper."""
        return copy.copy(self)
    def prepareDataset(self):
        """Modifies the dataset object with a user defined logic"""
        # Imported lazily to avoid a hard dependency at module import time.
        from src.helpers.datasetHelper import DatasetHelper
        self._dataFrame = DatasetHelper.prepareDataset(self.getDatasetCopy())
    def getFeaturesData(self):
        """Gets the data held in the feature columns"""
        # Keyword form: the positional `axis` argument to drop() was deprecated
        # and removed in pandas 2.0.
        return array(self._dataFrame.drop(columns=[self._label]))
    def getFeatureData(self, feature: str):
        """Gets the data held in a single feature column"""
        return array(self._dataFrame[feature])
    def getLabelData(self):
        """Gets the data held in the label column."""
        return array(self._dataFrame[self._label])
    def updateLabelData(self, values: ndarray):
        """Overwrite the label column with `values`."""
        self._dataFrame[self._label] = values
    def addRow(self, row: DataFrame):
        """Prepend `row` to the wrapped DataFrame."""
        self._dataFrame = concat([row, self._dataFrame.iloc[:]])
| [
"iulian.octavian.preda@gmail.com"
] | iulian.octavian.preda@gmail.com |
023d9f5a2081647f38c2abb19c67c5d07e7f1bac | fb3f2c3f83fbfe894f01ea514c760371ef05d54f | /Algorithm/chapter5/flatten.py | 0b99312d0778169c809ff206410031189ac979eb | [] | no_license | jonXue92/PythonGit | 8160220a3d51fb6a317702a2b50e8ca3306a8f0e | a9358ac79a47b3d1fd072a4af603bf07a89b1a2c | refs/heads/master | 2020-04-02T05:25:51.032912 | 2019-04-12T04:18:15 | 2019-04-12T04:18:15 | 154,076,228 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,088 | py | # -*- coding: utf-8 -*-
class TreeNode:
    """Binary-tree node holding a value and two child links."""
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None
class Flatten:
    """Flatten a binary tree into a right-leaning pre-order chain, in place."""
    # Most recently visited node during a flatten() traversal.
    last_node = None
    def flatten(self, root):
        """Flatten the tree rooted at `root` (pre-order). Modifies nodes in place.

        BUG FIX: traversal state is reset first, so one Flatten instance can
        flatten several trees -- previously the stale last_node from an earlier
        call would be chained onto the new tree.
        """
        self.last_node = None
        self._flatten_preorder(root)
    def _flatten_preorder(self, node):
        """Visit `node` pre-order, appending it to the chain after last_node."""
        if node is None:
            return
        if self.last_node is not None:
            self.last_node.left = None
            self.last_node.right = node
        self.last_node = node
        # Save the right child: the link is overwritten when the left subtree attaches.
        right = node.right
        self._flatten_preorder(node.left)
        self._flatten_preorder(right)
    def flatten1(self, root):
        """Alternative, stateless implementation based on subtree tail nodes."""
        self.helper(root)
    # restructure and return last node in preorder
    def helper(self, root):
        """Restructure the subtree and return the last node of its pre-order chain."""
        if root is None:
            return None
        left_last = self.helper(root.left)
        right_last = self.helper(root.right)
        # Splice the flattened left subtree between root and its right subtree.
        if left_last is not None:
            left_last.right = root.right
            root.right = root.left
            root.left = None
        if right_last is not None:
            return right_last
        if left_last is not None:
            return left_last
        return root
"xzywrz@gmail.com"
] | xzywrz@gmail.com |
edfb24502e388ee7e252a957ea60815238e99f0f | 29f8b7f92eb22cc3134a16c439d3180e254df4bb | /chp04_database_programming/04_65_sql.py | d81a91878a6b51f9b1bfd0ac8be6453d5ed66e59 | [] | no_license | Hemie143/realpython2 | 7df80dd5f61ce7cd8c31b8bf78111b8507cbdb36 | b8535ffe97594e1b18233bcd9aa0de664257cb09 | refs/heads/master | 2022-12-12T04:51:53.120131 | 2021-01-03T19:52:32 | 2021-01-03T19:52:32 | 208,735,855 | 0 | 0 | null | 2023-08-17T05:45:32 | 2019-09-16T07:22:16 | Python | UTF-8 | Python | false | false | 925 | py | import sqlite3
# Create a city/region table, bulk-insert sample rows, then list them
# ordered by region. The `with` block commits the transaction on success.
with sqlite3.connect("new.db") as connection:
    cursor = connection.cursor()
    cursor.execute("CREATE TABLE regions (city TEXT, region TEXT)")
    city_regions = [
        ('New York City', 'Northeast'),
        ('San Francisco', 'West'),
        ('Chicago', 'Midwest'),
        ('Houston', 'South'),
        ('Phoenix', 'West'),
        ('Boston', 'Northeast'),
        ('Los Angeles', 'West'),
        ('Houston', 'South'),
        ('Philadelphia', 'Northeast'),
        ('San Antonio', 'South'),
        ('San Diego', 'West'),
        ('Dallas', 'South'),
        ('San Jose', 'West'),
        ('Jacksonville', 'South'),
        ('Indianapolis', 'Midwest'),
        ('Austin', 'South'),
        ('Detroit', 'Midwest')
    ]
    cursor.executemany("INSERT INTO regions VALUES(?, ?)", city_regions)
    cursor.execute("SELECT * FROM regions ORDER BY region ASC")
    for city, region in cursor.fetchall():
        print(city, region)
| [
"hemie143@gmail.com"
] | hemie143@gmail.com |
1b11eb854f9ea3d9f3db117ffd7404cfc3d9f2d2 | 34d82d94f49e9bb228d827a4bb2aa29de1a7ffb3 | /cafe_project/content/migrations/0002_alter_meal_photo.py | 2042c69b1fbaf839856d27a84b8ff851aaaf8269 | [] | no_license | Zinko17/CafeProject | eb8504a72f8a11ba4d9d0eea2a102ddca7ac8004 | 5243aa84f138cf6f740e1e8ae4b359c058409026 | refs/heads/master | 2023-05-27T06:31:22.478258 | 2021-06-13T08:52:05 | 2021-06-13T08:52:05 | 374,962,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | # Generated by Django 3.2.4 on 2021-06-09 10:21
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.2.4 (see file header): gives Meal.photo a
    # default image ('soup.jpeg') and stores uploads at the media root
    # (upload_to='').

    dependencies = [
        ('content', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='meal',
            name='photo',
            field=models.ImageField(default='soup.jpeg', upload_to=''),
        ),
    ]
| [
"vovazin2@gmail.com"
] | vovazin2@gmail.com |
537d39ea66e7cc44ae00acb9282f590cf9ffb326 | ae8074a50ee666e46484e33bed7eb1cc16dfd0b8 | /notebooks/CaseStudies/executor_1.py | 04316c053eba23dd3c842d1844e318ff17f821f8 | [] | no_license | ayyogg0628/AnomalyDetection_MEAD | 72edb3c5f222c1d8c1f4fc7fc6d2ae17a757e254 | 0df68f91568726c40f5ff7309cf8f74bcc2af74e | refs/heads/master | 2023-03-18T22:22:17.045809 | 2020-07-07T23:44:59 | 2020-07-07T23:44:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,089 | py | import operator
import pickle
import numpy as np
import os
import sys
import time
import pprint
import inspect
from collections import OrderedDict
import matplotlib.pyplot as plt
import yaml
from sklearn.metrics import auc
import logging
import logging.handlers
import tensorflow as tf
import pandas as pd
tf.logging.set_verbosity(tf.logging.ERROR)
# matplotlib.use('Agg')
sys.path.append('./..')
sys.path.append('./../../.')
try:
import src.m2_test_1layer.tf_model_3_withNorm as tf_model
except:
from .src.m2_test_1layer import tf_model_3_withNorm as tf_model
try:
from src.Eval import eval_v1 as eval
except:
from .src.Eval import eval_v1 as eval
# ------------------------------------ #
# Directory containing this file, derived from the call stack; appended to
# sys.path so sibling project modules resolve regardless of the CWD.
cur_path = '/'.join(
    os.path.abspath(
        inspect.stack()[0][1]
    ).split('/')[:-1]
)
sys.path.append(cur_path)
_author__ = "Debanjan Datta"  # NOTE(review): single leading underscore looks like a typo for __author__
__email__ = "ddatta@vt.edu"
__version__ = "5.0"
__processor__ = 'embedding'
_SAVE_DIR = 'save_dir'
# Module-level state, filled in later by setup_general_config() / main().
MODEL_NAME = None
_DIR = None
DATA_DIR = None
MODEL_OP_FILE_PATH = None
CONFIG_FILE = 'config_caseStudy_1.yaml'
CONFIG = None
# ----------------------------------------- #
def get_domain_dims():
    """Read DATA_DIR/domain_dims.pkl and return its values as a list."""
    global DATA_DIR
    pkl_path = os.path.join(DATA_DIR, 'domain_dims.pkl')
    with open(pkl_path, 'rb') as handle:
        domain_dims = pickle.load(handle)
    return list(domain_dims.values())
# ----------------------------------------- #
# --------- Model Config --------- #
# ----------------------------------------- #
# embedding_dims = None
DOMAIN_DIMS = None  # per-domain cardinalities; populated lazily
logger = None  # module logger; bound in the script footer before main() runs
def setup_general_config():
    """Derive SAVE_DIR / OP_DIR from CONFIG and _DIR, and create the save tree.

    Reads module globals CONFIG and _DIR; rebinds module globals SAVE_DIR and
    OP_DIR. NOTE(review): OP_DIR is computed but not created here — the script
    footer creates it before main() runs; verify if this function is ever
    called standalone.
    """
    global SAVE_DIR
    global OP_DIR
    SAVE_DIR = os.path.join(CONFIG['SAVE_DIR'], _DIR)
    OP_DIR = os.path.join(CONFIG['OP_DIR'], _DIR)
    # One race-free call replaces the original pair of exists()+mkdir checks
    # (which could fail if another process created the directory in between).
    os.makedirs(SAVE_DIR, exist_ok=True)
    return
# --------------------------------------------- #
def set_up_model(config, _dir):
    """Build and return a tf_model.model configured from config[_dir].

    Side effects: rebinds module globals MODEL_NAME and embedding_dims.
    Relies on SAVE_DIR / OP_DIR having been set by setup_general_config().
    """
    global embedding_dims
    global SAVE_DIR
    global OP_DIR
    global MODEL_NAME
    MODEL_NAME = config['MODEL_NAME']
    # op_dims may be a comma-separated string ("16,8") or a single number.
    if type(config[_dir]['op_dims']) == str:
        embedding_dims = config[_dir]['op_dims']
        embedding_dims = embedding_dims.split(',')
        embedding_dims = [int(e) for e in embedding_dims]
    else:
        embedding_dims = [config[_dir]['op_dims']]
    model_obj = tf_model.model(MODEL_NAME, SAVE_DIR, OP_DIR)
    model_obj.set_model_options(
        show_loss_figure=config[_dir]['show_loss_figure'],
        save_loss_figure=config[_dir]['save_loss_figure']
    )
    domain_dims = get_domain_dims()
    LR = config[_dir]['learning_rate']
    model_obj.set_model_hyperparams(
        domain_dims=domain_dims,
        emb_dims=embedding_dims,
        batch_size=config[_dir]['batchsize'],
        num_epochs=config[_dir]['num_epochs'],
        learning_rate=LR,
        num_neg_samples=config[_dir]['num_neg_samples']
    )
    model_obj.set_l2_loss_flag(True)
    # Training mode: inference is toggled elsewhere — TODO confirm.
    model_obj.inference = False
    model_obj.build_model()
    return model_obj
def _load_pickle(file_path):
    # Small helper: unpickle one file, closing the handle deterministically.
    with open(file_path, 'rb') as fh:
        return pickle.load(fh)


def get_data():
    """Load train/test matrices, test record ids and domain dims for _DIR.

    Returns:
        (train_x_pos, train_x_neg, test_x, test_id_list, domain_dims)
    """
    global CONFIG
    global DATA_DIR
    global _DIR
    DIR = _DIR
    base_dir = os.path.join(CONFIG['DATA_DIR'], DIR)
    domain_dims = _load_pickle(os.path.join(base_dir, 'domain_dims.pkl'))
    train_x_pos = _load_pickle(os.path.join(base_dir, 'matrix_train_positive_v1.pkl'))
    # (The original had a dead `train_x_neg = train_x_neg` self-assignment here.)
    train_x_neg = _load_pickle(os.path.join(base_dir, 'negative_samples_v1.pkl'))
    test_x = _load_pickle(os.path.join(base_dir, 'matrix_test_positive.pkl'))
    # Test ids come from the csv in file order, matching rows of test_x.
    _df = pd.read_csv(os.path.join(base_dir, 'test_data.csv'), header=0)
    test_id_list = list(_df['PanjivaRecordID'])
    return train_x_pos, train_x_neg, test_x, test_id_list, domain_dims
def process(
        CONFIG,
        _DIR,
        train_x_pos,
        train_x_neg,
        test_data_x,
        test_id_list
):
    """Train (or load) the model, score the test records, and write results.

    Writes OP_DIR/result_1.csv (records sorted by ascending score, i.e. most
    anomalous first) and OP_DIR/train_embeddings.pkl.
    """
    global logger
    # The model needs to know how many negatives accompany each positive.
    num_neg_samples = train_x_neg.shape[1]
    CONFIG[_DIR]['num_neg_samples'] = num_neg_samples
    model_obj = set_up_model(CONFIG, _DIR)
    _use_pretrained = CONFIG[_DIR]['use_pretrained']
    if _use_pretrained is True:
        # NOTE(review): saved_file_path is unconditionally reassigned below,
        # so the None-initialisation and the else-branch after it are dead.
        saved_file_path = None
        pretrained_file = CONFIG[_DIR]['saved_model_file']
        print('Pretrained File :', pretrained_file)
        saved_file_path = os.path.join(
            SAVE_DIR,
            'checkpoints',
            pretrained_file
        )
        if saved_file_path is not None:
            model_obj.set_pretrained_model_file(saved_file_path)
        else:
            model_obj.train_model(
                train_x_pos,
                train_x_neg
            )
    elif _use_pretrained is False:
        model_obj.train_model(
            train_x_pos,
            train_x_neg
        )
    print(' Len of test_ids ', len(test_id_list))
    print('Length of test data', test_data_x.shape)
    res = model_obj.get_event_score(test_data_x)
    print('Length of results ', len(res))
    res = list(res)
    # Pair each record id with its score, assuming identical ordering.
    _id_score_dict = {
        id: _res for id, _res in zip(
            test_id_list,
            res
        )
    }
    '''
    sort by ascending
    since lower likelihood means anomalous
    '''
    tmp = sorted(
        _id_score_dict.items(),
        key=operator.itemgetter(1)
    )
    sorted_id_score_dict = OrderedDict()
    for e in tmp:
        # e[1][0]: scores are presumably 1-element arrays — TODO confirm
        # against tf_model.get_event_score's return shape.
        sorted_id_score_dict[e[0]] = e[1][0]
    _ID = []
    _SCORE = []
    for k,v in sorted_id_score_dict.items():
        _ID.append(k)
        _SCORE.append(v)
    _df = pd.DataFrame(columns=['PanjivaRecordID','score'])
    _df['PanjivaRecordID'] = _ID
    _df['score'] = _SCORE
    _df.to_csv(os.path.join(OP_DIR,'result_1.csv'))
    # get embeddings
    emb_res = model_obj.get_record_embeddings(train_x_pos)
    with open(os.path.join(OP_DIR,'train_embeddings.pkl'),'wb') as fh:
        pickle.dump(emb_res,fh,pickle.HIGHEST_PROTOCOL)
    return
def main():
    """Entry point: load config and data, then train/score via process()."""
    global embedding_dims
    global SAVE_DIR
    global _DIR
    global DATA_DIR
    global CONFIG
    global CONFIG_FILE
    global MODEL_NAME
    global DOMAIN_DIMS
    global logger
    with open(CONFIG_FILE) as f:
        CONFIG = yaml.safe_load(f)
    DATA_DIR = os.path.join(CONFIG['DATA_DIR'], _DIR)
    setup_general_config()
    # The original created SAVE_DIR/checkpoints twice in identical
    # back-to-back blocks; one race-safe makedirs call is equivalent.
    os.makedirs(os.path.join(SAVE_DIR, 'checkpoints'), exist_ok=True)
    logger.info('-------------------')
    train_x_pos, train_x_neg, test_x, test_id_list, domain_dims = get_data()
    process(
        CONFIG,
        _DIR,
        train_x_pos,
        train_x_neg,
        test_x,
        test_id_list
    )
    logger.info('-------------------')
# ----------------------------------------------------------------- #
# find out which model works best
# ----------------------------------------------------------------- #
# Script footer: load the config, set up logging, then run main().
with open(CONFIG_FILE) as f:
    CONFIG = yaml.safe_load(f)
# The original wrapped this constant assignment in a bare try/except with an
# 'm2.log' fallback, but a literal assignment cannot raise — dead code removed.
log_file = 'case_studies_1.log'
_DIR = 'us_import'
logger = logging.getLogger('main')
logger.setLevel(logging.INFO)
OP_DIR = os.path.join(CONFIG['OP_DIR'], _DIR)
if not os.path.exists(CONFIG['OP_DIR']):
    os.mkdir(CONFIG['OP_DIR'])
if not os.path.exists(OP_DIR):
    os.mkdir(OP_DIR)
handler = logging.FileHandler(os.path.join(OP_DIR, log_file))
handler.setLevel(logging.INFO)
logger.addHandler(handler)
logger.info(' Info start ')
logger.info(' -----> ' + _DIR)
main()
| [
"ddatta@vt.edu"
] | ddatta@vt.edu |
70772f2adcd137ef04c0dd0f83df8264fa9192f8 | 72db8db1a513dfa01ce81bf88b39c10c662bfae2 | /annoying/tests/models.py | 099d3338d0b17db45ce4bfc6d5fbc2b27c37d152 | [
"MIT"
] | permissive | colorstheforce/insta-clonewars | ec6053853505db26e9e931c531e531b5e6754740 | 2e8e6fc2e5ef7d2401d7902679e64d8859918d3a | refs/heads/master | 2022-03-30T13:28:39.755391 | 2019-01-17T12:55:11 | 2019-01-17T12:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | from django.db import models
from annoying.fields import AutoOneToOneField
from annoying.fields import JSONField
class SuperVillain(models.Model):
    # Test-fixture model exercising the custom JSONField.
    name = models.CharField(max_length=20, default="Dr Horrible")
    # default=None with null=True keeps the column nullable; the JSON payload
    # schema is whatever the test writes.
    stats = JSONField(default=None, blank=True, null=True)
class SuperHero(models.Model):
    # Test-fixture model exercising AutoOneToOneField — presumably the related
    # object is auto-created on first access (see django-annoying docs; TODO
    # confirm against the field's implementation).
    name = models.CharField(max_length=20, default="Captain Hammer")
    mortal_enemy = AutoOneToOneField(SuperVillain, related_name='mortal_enemy')
| [
"jackogina60@gmail.com"
] | jackogina60@gmail.com |
e9b5cf2445399642b2b7c925cbf7645c8e7e2f58 | 5864e86954a221d52d4fa83a607c71bacf201c5a | /trinity/renderjobs.py | 394ddcb28608b85b76dfb5fc0412e2471051f7de | [] | no_license | connoryang/1v1dec | e9a2303a01e5a26bf14159112b112be81a6560fd | 404f2cebf13b311e754d45206008918881496370 | refs/heads/master | 2021-05-04T02:34:59.627529 | 2016-10-19T08:56:26 | 2016-10-19T08:56:26 | 71,334,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,474 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\trinity\renderjobs.py
import decometaclass
from . import _trinity as trinity
class RenderJobs(object):
    """Python-side helpers over the Tr2RenderJobs blue object.

    NOTE(review): this file appears decompiled (see the embedded-file header);
    comments below describe the visible control flow, not original intent.
    Relies on an externally provided ``self.recurring`` job collection.
    """
    __cid__ = 'trinity.Tr2RenderJobs'
    __metaclass__ = decometaclass.BlueWrappedMetaclass
    def __init__(self):
        pass
    def UnscheduleByName(self, name):
        # Remove the first recurring job with a matching name; True if removed.
        for rj in self.recurring:
            if rj.name == name:
                self.recurring.remove(rj)
                return True
        return False
    def FindByName(self, name):
        # First recurring job with a matching name, or None (implicit).
        for rj in self.recurring:
            if rj.name == name:
                return rj
    def FindStepByName(self, name):
        # First step named `name` across all recurring jobs, or None (implicit).
        def FindInJob(rj):
            for step in rj.steps:
                if step.name == name:
                    return step
        for rj in self.recurring:
            ret = FindInJob(rj)
            if ret is not None:
                return ret
    def FindScenes(self, sceneType, filter = lambda x: True):
        # Collect step.object instances of exactly `sceneType` (no subclasses)
        # accepted by `filter`, recursing into nested TriStepRunJob jobs.
        results = set({})
        def RecursiveSearch(job):
            for step in job.steps:
                if hasattr(step, 'object') and type(step.object) is sceneType and filter(step.object):
                    results.add(step.object)
                    # NOTE(review): this early return stops scanning the rest
                    # of the job after the first match — possibly a decompiler
                    # artifact; left untouched to preserve behavior.
                    return
                if type(step) is trinity.TriStepRunJob:
                    RecursiveSearch(step.job)
        for job in self.recurring:
            RecursiveSearch(job)
        return results
| [
"le02005@163.com"
] | le02005@163.com |
9db0edab58fd19dee622b29b06fd8afe7a16804b | 81243f1ec5eeb089b8f93dffe843d811f6918a68 | /matlab_starter.py | 5d1baa2768dd9c46da7987d36f16fbef65ec42ff | [] | no_license | crossz/simu-betting-strategy | ac67932a942f98e6ebdb9b9f2592e5a68968ca91 | 2fc116fe3eec5e82cfdb3115e341741917e29595 | refs/heads/master | 2021-05-04T01:52:37.635680 | 2016-10-23T17:24:42 | 2016-10-23T17:24:42 | 71,226,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,087 | py | import pickle
import LoadData
import WhoScoreInvestor
GameData = LoadData.GameData
# Mode toggle: 'w' refreshes the cache from MySQL, 'r' reads game_data.p.
# NOTE(review): both lines always execute, so the first assignment is dead and
# the script effectively runs in 'r' mode — comment one out to switch.
mode = 'w'
mode = 'r'
def getGameData():
    """Return the list of game ids, from MySQL ('w') or the local cache ('r').

    In 'w' mode the freshly queried data is also cached to game_data.p.
    Any other mode value yields an empty list.
    """
    if mode == 'w':
        game_data = GameData.get_data_from_mysql(sql='SELECT CAST(WIN310_EUROPE_ID as UNSIGNED) FROM t_crawler_win310 where START_DATE_TIME > "2016-09-17 00:00:00" order by START_DATE_TIME')
        # `with` guarantees the cache file is closed even if pickling fails
        # (the original left an open handle to be collected lazily).
        with open("game_data.p", "wb") as fh:
            pickle.dump(game_data, fh)
    elif mode == 'r':
        game_data = matlab_get_data()
    else:
        game_data = []
    return game_data
def matlab_get_data():
    """Load and return the cached game data from game_data.p."""
    # Context manager fixes the original's leaked file handle
    # (`pickle.load(open(...))` never closed the file explicitly).
    with open('game_data.p', 'rb') as fh:
        return pickle.load(fh)
def process_one_game(game):
    """Run a WhoScoreInvestor simulation for one game id and return the investor."""
    # strong_team=False / co_action=True: strategy presets — presumably the
    # betting-strategy knobs; TODO confirm against WhoScoreInvestor.
    i = WhoScoreInvestor.WhoScoreInvestor(game, strong_team=False, co_action=True)
    i.game_processing()
    return i
# # demo for MATLAB calling py.XXX.search(...)
def helloworld():
    """Smoke-test hook so MATLAB can verify the bridge (py.matlab_starter.helloworld)."""
    greeting = "Matlab says: 'Hello Python.'"
    return greeting
if __name__ == "__main__":
game_data = getGameData()
print(game_data)
for i in range(200):
it = process_one_game(game_data[i])
print(it.operation_list)
print(it.result_dict)
| [
"drcross@outlook.com"
] | drcross@outlook.com |
934ee088516ad8e93e8cde4bb96817b3c2bccdaa | fa8f9df0c87647a7ed01ff441c75f9a887bb7283 | /attentive_np/cartpole_attentive_np.py | bdfb8eed95066104a0ec789f68630c66b5d5b9f4 | [] | no_license | aGiant/attentive-neural-algo | cd11c050641534f1086669fc65d77f80149dcab8 | 71979832a09e3ee4a95261bfceaf53f4923ac27e | refs/heads/master | 2020-09-14T16:51:33.158898 | 2019-06-09T20:51:32 | 2019-06-09T20:51:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,736 | py | import tensorflow as tf
def batch_mlp(input, output_sizes, variable_scope):
  """Apply MLP to the final axis of a 3D tensor (reusing already defined MLPs).

  Args:
    input: input tensor of shape [B,n,d_in].
    output_sizes: An iterable containing the output sizes of the MLP as defined
        in `basic.Linear`.
    variable_scope: String giving the name of the variable scope. If this is set
        to be the same as a previously defined MLP, then the weights are reused.

  Returns:
    tensor of shape [B,n,d_out] where d_out=output_sizes[-1]
  """
  # Get the shapes of the input and reshape to parallelise across observations
  batch_size, _, filter_size = input.shape.as_list()
  output = tf.reshape(input, (-1, filter_size))
  output.set_shape((None, filter_size))
  # Pass through MLP
  with tf.variable_scope(variable_scope, reuse=tf.AUTO_REUSE):
    for i, size in enumerate(output_sizes[:-1]):
      output = tf.nn.relu(
          tf.layers.dense(output, size, name="layer_{}".format(i)))
    # Last layer without a ReLu. Its index is computed explicitly: the
    # original used `i + 1`, which raised NameError for a single-layer MLP
    # (empty loop leaves `i` unbound). For multi-layer MLPs the name is
    # unchanged, so variable reuse still works.
    output = tf.layers.dense(
        output, output_sizes[-1], name="layer_{}".format(len(output_sizes) - 1))
  # Bring back into original shape
  output = tf.reshape(output, (batch_size, -1, output_sizes[-1]))
  return output
class DeterministicEncoder(object):
  """The Deterministic Encoder."""
  def __init__(self, output_sizes, attention):
    """(A)NP deterministic encoder.

    Args:
      output_sizes: An iterable containing the output sizes of the encoding MLP.
      attention: The attention module.
    """
    self._output_sizes = output_sizes
    self._attention = attention
  def __call__(self, context_x, context_y, target_x):
    """Encodes the inputs into one representation.

    Args:
      context_x: Tensor of shape [B,observations,d_x]. For this 1D regression
          task this corresponds to the x-values.
      context_y: Tensor of shape [B,observations,d_y]. For this 1D regression
          task this corresponds to the y-values.
      target_x: Tensor of shape [B,target_observations,d_x].
          For this 1D regression task this corresponds to the x-values.

    Returns:
      The encoded representation. Tensor of shape [B,target_observations,d]
    """
    # Concatenate x and y along the filter axes
    encoder_input = tf.concat([context_x, context_y], axis=-1)
    # Pass final axis through MLP
    hidden = batch_mlp(encoder_input, self._output_sizes,
                       "deterministic_encoder")
    # Apply attention: context x as keys, target x as queries, the per-context
    # MLP outputs as values (see Attention.__call__(x1, x2, r)).
    with tf.variable_scope("deterministic_encoder", reuse=tf.AUTO_REUSE):
      hidden = self._attention(context_x, target_x, hidden)
    return hidden
class LatentEncoder(object):
  """The Latent Encoder."""
  def __init__(self, output_sizes, num_latents):
    """(A)NP latent encoder.

    Args:
      output_sizes: An iterable containing the output sizes of the encoding MLP.
      num_latents: The latent dimensionality.
    """
    self._output_sizes = output_sizes
    self._num_latents = num_latents
  def __call__(self, x, y):
    """Encodes the inputs into one representation.

    Args:
      x: Tensor of shape [B,observations,d_x]. For this 1D regression
          task this corresponds to the x-values.
      y: Tensor of shape [B,observations,d_y]. For this 1D regression
          task this corresponds to the y-values.

    Returns:
      A normal distribution over tensors of shape [B, num_latents]
    """
    # Concatenate x and y along the filter axes
    encoder_input = tf.concat([x, y], axis=-1)
    # Pass final axis through MLP
    hidden = batch_mlp(encoder_input, self._output_sizes, "latent_encoder")
    # Aggregator: take the mean over all points
    hidden = tf.reduce_mean(hidden, axis=1)
    # Have further MLP layers that map to the parameters of the Gaussian latent
    with tf.variable_scope("latent_encoder", reuse=tf.AUTO_REUSE):
      # First apply intermediate relu layer. Floor division (`//`): the unit
      # count fed to tf.layers.dense must be an int — Python 3's true
      # division produced a float here and failed whenever
      # output_sizes[-1] + num_latents was odd.
      hidden = tf.nn.relu(
          tf.layers.dense(hidden,
                          (self._output_sizes[-1] + self._num_latents) // 2,
                          name="penultimate_layer"))
      # Then apply further linear layers to output latent mu and log sigma
      mu = tf.layers.dense(hidden, self._num_latents, name="mean_layer")
      log_sigma = tf.layers.dense(hidden, self._num_latents, name="std_layer")
    # Compute sigma, bounded to (0.1, 1.0) via a scaled sigmoid.
    sigma = 0.1 + 0.9 * tf.sigmoid(log_sigma)
    return tf.contrib.distributions.Normal(loc=mu, scale=sigma)
class Decoder(object):
  """The Decoder."""
  def __init__(self, output_sizes):
    """(A)NP decoder.

    Args:
      output_sizes: An iterable containing the output sizes of the decoder MLP
          as defined in `basic.Linear`. The last element should correspond to
          d_y * 2 (mean and log-sigma concatenated).
    """
    self._output_sizes = output_sizes
  def __call__(self, representation, target_x):
    """Decodes the individual targets.

    Args:
      representation: The representation of the context for target predictions.
          Tensor of shape [B,target_observations,?].
      target_x: The x locations for the target query.
          Tensor of shape [B,target_observations,d_x].

    Returns:
      dist: A multivariate Gaussian over the target points. A distribution over
          tensors of shape [B,target_observations,d_y].
      mu: The mean of the multivariate Gaussian.
          Tensor of shape [B,target_observations,d_y].
      (Note: unlike an earlier doc revision, sigma is NOT returned.)
    """
    # concatenate target_x and representation
    hidden = tf.concat([representation, target_x], axis=-1)
    # Pass final axis through MLP
    hidden = batch_mlp(hidden, self._output_sizes, "decoder")
    # Get the mean an the variance
    mu, log_sigma = tf.split(hidden, 2, axis=-1)
    # Bound the variance below by 0.1 via a scaled softplus.
    sigma = 0.1 + 0.9 * tf.nn.softplus(log_sigma)
    # Get the distribution
    dist = tf.contrib.distributions.MultivariateNormalDiag(
        loc=mu, scale_diag=sigma)
    return dist, mu
class LatentModel(object):
  """The (A)NP model."""
  def __init__(self, latent_encoder_output_sizes, num_latents,
               decoder_output_sizes, use_deterministic_path=True,
               deterministic_encoder_output_sizes=None, attention=None):
    """Initialises the model.

    Args:
      latent_encoder_output_sizes: An iterable containing the sizes of hidden
          layers of the latent encoder.
      num_latents: The latent dimensionality.
      decoder_output_sizes: An iterable containing the sizes of hidden layers of
          the decoder. The last element should correspond to d_y * 2
          (it encodes both mean and variance concatenated)
      use_deterministic_path: a boolean that indicates whether the deterministic
          encoder is used or not.
      deterministic_encoder_output_sizes: An iterable containing the sizes of
          hidden layers of the deterministic encoder. The last one is the size
          of the deterministic representation r.
      attention: The attention module used in the deterministic encoder.
          Only relevant when use_deterministic_path=True.
    """
    self._latent_encoder = LatentEncoder(latent_encoder_output_sizes,
                                         num_latents)
    self._decoder = Decoder(decoder_output_sizes)
    self._use_deterministic_path = use_deterministic_path
    if use_deterministic_path:
      self._deterministic_encoder = DeterministicEncoder(
          deterministic_encoder_output_sizes, attention)
  def __call__(self, query, num_targets, target_y=None):
    """Returns the predicted mean at the target points, plus training stats.

    Args:
      query: Array containing ((context_x, context_y), target_x) where:
          context_x: Tensor of shape [B,num_contexts,d_x].
          context_y: Tensor of shape [B,num_contexts,d_y].
          target_x: Tensor of shape [B,num_targets,d_x].
      num_targets: Number of target points.
      target_y: The ground truth y values of the target y.
          Tensor of shape [B,num_targets,d_y], or None at test time.

    Returns:
      mu: The mean of the predicted distribution,
          tensor of shape [B,num_targets,d_y].
      log_p: log-probability of target_y under the prediction
          (None when target_y is None).
      kl: KL(posterior || prior), tiled to [B,num_targets]
          (None when target_y is None).
      loss: scalar training loss (None when target_y is None).
    """
    (context_x, context_y), target_x = query
    # Pass query through the encoder and the decoder
    prior = self._latent_encoder(context_x, context_y)
    # At test time (target_y unavailable) sample the latent from the prior,
    # which was conditioned on the contexts only.
    if target_y is None:
      latent_rep = prior.sample()
    # During training (target_y available) sample from the posterior
    # conditioned on the targets; targets contain contexts by design.
    else:
      posterior = self._latent_encoder(target_x, target_y)
      latent_rep = posterior.sample()
    # Broadcast the single latent sample to every target point.
    latent_rep = tf.tile(tf.expand_dims(latent_rep, axis=1),
                         [1, num_targets, 1])
    if self._use_deterministic_path:
      deterministic_rep = self._deterministic_encoder(context_x, context_y,
                                                      target_x)
      representation = tf.concat([deterministic_rep, latent_rep], axis=-1)
    else:
      representation = latent_rep
    dist, mu = self._decoder(representation, target_x)
    # If we want to calculate the log_prob for training we will make use of the
    # target_y. At test time the target_y is not available so we return None.
    if target_y is not None:
      log_p = dist.log_prob(target_y)
      posterior = self._latent_encoder(target_x, target_y)
      kl = tf.reduce_sum(
          tf.contrib.distributions.kl_divergence(posterior, prior),
          axis=-1, keepdims=True)
      kl = tf.tile(kl, [1, num_targets])
      # ELBO-style loss: negative (log-likelihood minus per-target KL share).
      loss = - tf.reduce_mean(log_p - kl / tf.cast(num_targets, tf.float32))
    else:
      log_p = None
      kl = None
      loss = None
    return mu, log_p, kl, loss
def uniform_attention(q, v):
  """Uniform attention. Equivalent to np.

  Mean-pools the values and broadcasts the single pooled vector to every
  query position (every target gets the same representation).

  Args:
    q: queries. tensor of shape [B,m,d_k].
    v: values. tensor of shape [B,n,d_v].

  Returns:
    tensor of shape [B,m,d_v].
  """
  total_points = tf.shape(q)[1]
  rep = tf.reduce_mean(v, axis=1, keepdims=True)  # [B,1,d_v]
  rep = tf.tile(rep, [1, total_points, 1])
  return rep
def laplace_attention(q, k, v, scale, normalise):
  """Computes laplace exponential attention.

  Args:
    q: queries. tensor of shape [B,m,d_k].
    k: keys. tensor of shape [B,n,d_k].
    v: values. tensor of shape [B,n,d_v].
    scale: float that scales the L1 distance.
    normalise: Boolean that determines whether weights sum to 1.

  Returns:
    tensor of shape [B,m,d_v].
  """
  # Expand so keys/queries broadcast against each other pairwise.
  k = tf.expand_dims(k, axis=1)  # [B,1,n,d_k]
  q = tf.expand_dims(q, axis=2)  # [B,m,1,d_k]
  # Negative scaled L1 distance: closer pairs get larger (less negative) scores.
  unnorm_weights = - tf.abs((k - q) / scale)  # [B,m,n,d_k]
  unnorm_weights = tf.reduce_sum(unnorm_weights, axis=-1)  # [B,m,n]
  if normalise:
    weight_fn = tf.nn.softmax
  else:
    # Unnormalised variant keeps each weight in (0, 2) via 1 + tanh.
    weight_fn = lambda x: 1 + tf.tanh(x)
  weights = weight_fn(unnorm_weights)  # [B,m,n]
  # Weighted sum of values per query.
  rep = tf.einsum('bik,bkj->bij', weights, v)  # [B,m,d_v]
  return rep
def dot_product_attention(q, k, v, normalise):
  """Computes dot product attention.

  Args:
    q: queries. tensor of shape [B,m,d_k].
    k: keys. tensor of shape [B,n,d_k].
    v: values. tensor of shape [B,n,d_v].
    normalise: Boolean that determines whether weights sum to 1.

  Returns:
    tensor of shape [B,m,d_v].
  """
  d_k = tf.shape(q)[-1]
  # Standard scaled dot-product: divide scores by sqrt(d_k).
  scale = tf.sqrt(tf.cast(d_k, tf.float32))
  unnorm_weights = tf.einsum('bjk,bik->bij', k, q) / scale  # [B,m,n]
  if normalise:
    weight_fn = tf.nn.softmax
  else:
    weight_fn = tf.sigmoid
  weights = weight_fn(unnorm_weights)  # [B,m,n]
  rep = tf.einsum('bik,bkj->bij', weights, v)  # [B,m,d_v]
  return rep
def multihead_attention(q, k, v, num_heads=8):
  """Computes multi-head attention.

  Each head projects q/k/v to head_size with 1x1 convolutions, runs
  normalised dot-product attention, projects back to d_v, and the head
  outputs are summed.

  Args:
    q: queries. tensor of shape [B,m,d_k].
    k: keys. tensor of shape [B,n,d_k].
    v: values. tensor of shape [B,n,d_v].
    num_heads: number of heads. Should divide d_v.

  Returns:
    tensor of shape [B,m,d_v].
  """
  d_k = q.get_shape().as_list()[-1]
  d_v = v.get_shape().as_list()[-1]
  # Floor division: Conv1D's filter count must be an int — Python 3's true
  # division produced a float here. num_heads is documented to divide d_v,
  # so the division is exact in the supported case.
  head_size = d_v // num_heads
  key_initializer = tf.random_normal_initializer(stddev=d_k**-0.5)
  value_initializer = tf.random_normal_initializer(stddev=d_v**-0.5)
  rep = tf.constant(0.0)
  for h in range(num_heads):
    o = dot_product_attention(
        tf.layers.Conv1D(head_size, 1, kernel_initializer=key_initializer,
                         name='wq%d' % h, use_bias=False, padding='VALID')(q),
        tf.layers.Conv1D(head_size, 1, kernel_initializer=key_initializer,
                         name='wk%d' % h, use_bias=False, padding='VALID')(k),
        tf.layers.Conv1D(head_size, 1, kernel_initializer=key_initializer,
                         name='wv%d' % h, use_bias=False, padding='VALID')(v),
        normalise=True)
    rep += tf.layers.Conv1D(d_v, 1, kernel_initializer=value_initializer,
                            name='wo%d' % h, use_bias=False, padding='VALID')(o)
  return rep
class Attention(object):
  """The Attention module."""
  def __init__(self, rep, output_sizes, att_type, scale=1., normalise=True,
               num_heads=8):
    """Create attention module.

    Takes in context inputs, target inputs and
    representations of each context input/output pair
    to output an aggregated representation of the context data.

    Args:
      rep: transformation to apply to contexts before computing attention.
          One of: ['identity','mlp'].
      output_sizes: list of number of hidden units per layer of mlp.
          Used only if rep == 'mlp'.
      att_type: type of attention. One of the following:
          ['uniform','laplace','dot_product','multihead']
      scale: scale of attention (laplace only).
      normalise: Boolean determining whether to:
          1. apply softmax to weights so that they sum to 1 across context pts or
          2. apply custom transformation to have weights in [0,1].
      num_heads: number of heads for multihead.
    """
    self._rep = rep
    self._output_sizes = output_sizes
    self._type = att_type
    self._scale = scale
    self._normalise = normalise
    # _num_heads is only defined for the multihead type; other types never
    # read it.
    if self._type == 'multihead':
      self._num_heads = num_heads
  def __call__(self, x1, x2, r):
    """Apply attention to create aggregated representation of r.

    Args:
      x1: tensor of shape [B,n1,d_x] (keys source).
      x2: tensor of shape [B,n2,d_x] (queries source).
      r: tensor of shape [B,n1,d] (values).

    Returns:
      tensor of shape [B,n2,d]

    Raises:
      NameError: The argument for rep/type was invalid.
    """
    if self._rep == 'identity':
      k, q = (x1, x2)
    elif self._rep == 'mlp':
      # Pass through MLP; the shared "attention" scope means keys and queries
      # use the same MLP weights (batch_mlp reuses by scope name).
      k = batch_mlp(x1, self._output_sizes, "attention")
      q = batch_mlp(x2, self._output_sizes, "attention")
    else:
      raise NameError("'rep' not among ['identity','mlp']")
    if self._type == 'uniform':
      rep = uniform_attention(q, r)
    elif self._type == 'laplace':
      rep = laplace_attention(q, k, r, self._scale, self._normalise)
    elif self._type == 'dot_product':
      rep = dot_product_attention(q, k, r, self._normalise)
    elif self._type == 'multihead':
      rep = multihead_attention(q, k, r, self._num_heads)
    else:
      raise NameError(("'att_type' not among ['uniform','laplace','dot_product'"
                       ",'multihead']"))
    return rep
"ubuntu@ip-172-31-36-212.us-east-2.compute.internal"
] | ubuntu@ip-172-31-36-212.us-east-2.compute.internal |
e2fbbf0ef19bf424f658040ab456eacfcf5a351f | d8ff5977d69876cb21295892e80d4881e1172446 | /secure_info_lab12/venv/bin/easy_install-3.6 | 1f14356d80b53756fe026bf4ab2db7d3472b8838 | [] | no_license | OlehFliurkevych/secure_labs | f1ba571707aa7b263304ffad4b76507c3eabb280 | a2fc1d8172aff77ae7b41d3e6ab0aafba5868f56 | refs/heads/master | 2020-04-02T18:25:07.950881 | 2018-10-25T15:53:17 | 2018-10-25T15:53:17 | 154,697,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | 6 | #!/Users/admin/PycharmProjects/secure_infe_lab12/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.0.1','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==39.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.0.1', 'console_scripts', 'easy_install-3.6')()
)
| [
"39386806+Korb1t@users.noreply.github.com"
] | 39386806+Korb1t@users.noreply.github.com |
d4a4f7cad1ae98a307e8097d46ba07924f6a4adb | 1f85142263a08d2e20080f18756059f581d524df | /chromium_extension/branches/timeline/src/build/common.gypi | 764680d3d99285d67d4b0c6a767afa03d918f377 | [] | no_license | songlibo/page-speed | 60edce572136a4b35f4d939fd11cc4d3cfd04567 | 8776e0441abd3f061da969644a9db6655fe01855 | refs/heads/master | 2021-01-22T08:27:40.145133 | 2016-02-03T15:34:40 | 2016-02-03T15:34:40 | 43,261,473 | 0 | 0 | null | 2015-09-27T19:32:17 | 2015-09-27T19:32:17 | null | UTF-8 | Python | false | false | 1,260 | gypi | # Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{
'variables': {
# Make sure we link statically so everything gets linked into a
# single shared object.
'library': 'static_library',
# The nacl toolchain fails to build valid nexes when we enable gc
# sections, at least on 64 bit builds. TODO: revisit this to see
# if a newer nacl toolchain supports it.
'no_gc_sections': 1,
# We're building a shared library, so everything needs to be built
# with Position-Independent Code.
'linux_fpic': 1,
},
'includes': [
'../third_party/libpagespeed/src/build/common.gypi',
],
# 'target_defaults': {
# 'include_dirs': [
# '<(DEPTH)/build/nacl_header_stubs',
# ],
# },
}
| [
"bmcquade@google.com"
] | bmcquade@google.com |
9cf2b167df432cc84eb98e9b71712b4ded02036b | 9c38f88e52afd3651b52ecb8bc9d36e4563a4454 | /pandaserver/taskbuffer/Initializer.py | bd985ae8b921a05bbcc8266112edca6ce20f6993 | [
"MIT"
] | permissive | eschanet/QMonit | 9f84feb1fcdf28ea1f21804f332d8ce80aafcd5a | 83f3323fa465b3ae41f9a49f28332bdb5e748685 | refs/heads/master | 2023-07-15T09:38:19.570964 | 2021-08-23T15:53:51 | 2021-08-23T15:53:51 | 318,832,692 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,904 | py | import sys
from threading import Lock
from pandaserver.config import panda_config
# logger
from pandacommon.pandalogger.PandaLogger import PandaLogger
_logger = PandaLogger().getLogger('Initializer')
# initialize cx_Oracle using dummy connection to avoid "Unable to acquire Oracle environment handle"
class Initializer:
    """One-shot DB environment initializer.

    Opens and immediately closes a dummy connection on the first init() call
    so the DB client library (cx_Oracle / MySQLdb) sets up its environment
    (avoids "Unable to acquire Oracle environment handle" — see the module
    header). Subsequent calls are no-ops.
    """

    def __init__(self):
        self.lock = Lock()   # guards the one-time connect
        self.first = True    # True until the first init() attempt

    def init(self):
        """Perform the one-time dummy connect. Returns True on success/no-op,
        False if the connection attempt raised."""
        _logger.debug("init new=%s" % self.first)
        # do nothing when nDBConnection is 0
        if panda_config.nDBConnection == 0:
            return True
        # `with` releases the lock on every exit path — the original released
        # it by hand and would have leaked it on an unexpected exception.
        with self.lock:
            if not self.first:
                return True
            # Kept from the original: the flag flips even if the connect
            # below fails, so a failed init is not retried.
            self.first = False
            try:
                _logger.debug("connect")
                # connect
                if panda_config.backend == 'oracle':
                    import cx_Oracle
                    conn = cx_Oracle.connect(dsn=panda_config.dbhost, user=panda_config.dbuser,
                                             password=panda_config.dbpasswd, threaded=True)
                else:
                    import MySQLdb
                    conn = MySQLdb.connect(host=panda_config.dbhost, db=panda_config.dbname,
                                           port=panda_config.dbport, connect_timeout=panda_config.dbtimeout,
                                           user=panda_config.dbuser, passwd=panda_config.dbpasswd)
                # close
                conn.close()
                _logger.debug("done")
            except Exception as exc:
                # Modern handler: no sys.exc_info() and no shadowing of the
                # builtin `type`; the logged text is equivalent.
                _logger.error("connect : %s %s" % (type(exc), exc))
                return False
        return True


# singleton
initializer = Initializer()
del Initializer
| [
"eric.schanet@cern.ch"
] | eric.schanet@cern.ch |
832ae6f79a74d95dff09d7d3c29ba88a946d521f | 28d8c101e01c0f0cc1eb9b227efdbe61650d7c35 | /ejemplos_en_clase/2._Administracion_de_procesos/intro_hilos.py | 208e4574e08e32dea4e2981f27f0dcdc947739ee | [
"LicenseRef-scancode-unknown-license-reference",
"CC-BY-4.0"
] | permissive | Emiliano312/sistop-2020-1 | b65831c0aee1982084a38ab5bfc6b874c4b602df | 603b2539afa720387508b855a3740e2f260bef4b | refs/heads/master | 2020-07-10T20:06:52.122098 | 2019-09-03T17:53:12 | 2019-09-03T17:53:12 | 204,358,183 | 0 | 0 | NOASSERTION | 2019-08-25T22:45:45 | 2019-08-25T22:45:44 | null | UTF-8 | Python | false | false | 442 | py | #!/usr/bin/python3
from threading import Thread, enumerate
from time import sleep
from random import randint
ultimo = 0
def un_hilo(yo):
    """Worker body for thread number *yo*.

    Forever: print (indented by its own id) the id of the last thread that
    ran, record itself as the last one, then sleep 0-5 seconds.  `ultimo`
    is shared by all threads with no lock, so the printed value is a
    deliberately racy snapshot — this is a teaching demo.
    """
    global ultimo
    while True:
        # indent by thread id so each thread prints in its own column
        print(' ' * yo, ultimo, yo)
        ultimo = yo
        sleep(randint(0,5))
# spawn ten workers, each running un_hilo with its own id
hilos = []
for i in range(10):
    hilo = Thread(target=un_hilo, args=[i], name="Hilo-%s"%i)
    hilo.start()
    hilos.append(hilo)
# main thread: every 5 s list the live threads.  NOTE: this `enumerate`
# is threading.enumerate (imported above) — it shadows the builtin.
while True:
    print(enumerate())
    sleep(5)
| [
"gwolf@gwolf.org"
] | gwolf@gwolf.org |
49cd88876798c50a994c5caa747a95ebd98b165b | 41251f95aaa04bec71a2c087c3a3d86af98dc86f | /toolbox/reaction.py | c2f963f5acfd32da42a04219cfdb624747d65a90 | [
"MIT"
] | permissive | xiaoruiDong/RMG-tools | f73072ad3cbbb15e58ddb1be49c45d782e26cd1e | 95305caef3baeda8d86164e602e5775e3c64d5d7 | refs/heads/master | 2020-09-13T09:35:01.236416 | 2019-11-27T20:14:29 | 2019-11-27T20:14:29 | 222,728,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,960 | py | #!/usr/bin/env python3
"""
The toolbox for reaction related tasks
"""
import logging
import re
from copy import deepcopy
from rmgpy.molecule.molecule import Molecule
from rmgpy.species import Species
from rmgpy.kinetics.arrhenius import Arrhenius, MultiArrhenius, PDepArrhenius
from toolbox.species import add_spc_to_spc_dict
##################################################################
def parse_rxn_label(label, spc_dict, interactive=False, resonance=True):
    """
    Convert an RMG-style reaction label into reactant and product Species.

    Args:
        label (str): reaction label in RMG style, e.g. "A + B <=> C"
        spc_dict (OrderedDict): maps species labels to Species objects
        interactive (bool): if True, prompt the user to add a missing
                            species or correct a bad label on the console
        resonance (bool): if True, generate resonance structures for the
                          parsed reactants and products
    Returns:
        (reactants, products) lists of Species, or None on any parse error.
        On success, newly added species are merged back into spc_dict.
    """
    # Make a copy of species dictionary to avoid overwriting on failure
    spc_dict_backup = deepcopy(spc_dict)
    # Check if reaction arrow ("=>"/"<=>") is contained
    # NOTE(review): re.search(...) returns None when no arrow is present,
    # so .group() raises AttributeError before the `if not arrow` guard
    # below can fire — confirm intended behavior for malformed labels.
    arrow = re.search(r'[<]?[=][>]', label).group()
    if not arrow:
        logging.error('The reaction label %s is not legal' %(label))
        return
    # Parse the reaction label into species labels
    [left, right] = label.split(arrow)
    if left and right:
        reactant_labels = [label.strip() for label in left.split("+")]
        product_labels = [label.strip() for label in right.split("+")]
    else:
        # Check if any side is empty
        logging.error('The reaction label %s is not legal' % (label))
        return
    # Check if species is not parsable by species dictionary
    for label in reactant_labels + product_labels:
        # Modify the label or extend species dictionary if interactive mode
        if label not in spc_dict_backup and interactive:
            add_or_correct = 'wrong'
            logging.info('Species label "%s" is not recoganizable.' %(label))
            # NOTE(review): these membership tests check the *input* as a
            # substring of 'add'/'correct' (so 'a', '' etc. also match) —
            # presumably intended as a lenient prefix match; verify.
            while add_or_correct.lower() not in 'add' \
                    and add_or_correct.lower() not in 'correct':
                add_or_correct = input('Add a species (type "add") or correct the label (type "correct"):')
            if add_or_correct.lower() in 'add':
                # If successful return new label, nonetype otherwise
                new_label = add_spc_to_spc_dict('', '', spc_dict_backup, interactive=True)
            elif add_or_correct.lower() in 'correct':
                new_label = input('Enter the correct species label:')
            else:
                new_label = None
            # Swap the bad label for the corrected/added one in place
            if new_label in spc_dict_backup and label in reactant_labels:
                reactant_labels[reactant_labels.index(label)] = new_label
            elif new_label in spc_dict_backup and label in product_labels:
                product_labels[product_labels.index(label)] = new_label
            else:
                # NOTE(review): this does not re-prompt — the loop moves on
                # and the later dict lookup will raise KeyError; confirm.
                add_or_correct = 'wrong'
                logging.error('Invalid addition or correction.')
        elif label not in spc_dict_backup:
            logging.error("label %s is not in the species dict." % (label))
            return
    reactants = [spc_dict_backup[label] for label in reactant_labels]
    products = [spc_dict_backup[label] for label in product_labels]
    # Merge interactively added species back into the caller's dictionary
    spc_dict.update(spc_dict_backup)
    if resonance:
        for reactant in reactants:
            reactant.generate_resonance_structures()
        for product in products:
            product.generate_resonance_structures()
    return reactants, products
def get_arrhenius_from_param(params, settings, arrh_type='Arrhenius'):
    """
    Build an RMG Arrhenius-family object from parameter and settings dicts.

    Args:
        params (dict): kinetic parameters (A, n, Ea, T0, uncertainty for a
                       plain Arrhenius; 'arrhenius' list and, for the
                       pressure-dependent form, 'pressures'/'highPlimit')
        settings (dict): units for each variable, optional T/P range
                         (Tmin/Tmax/Pmin/Pmax) and a free-text comment
        arrh_type (str): 'Arrhenius', 'MultiArrhenius' or 'PdepArrhenius'
    Returns:
        The corresponding RMG kinetics object (None for an unknown type).
    """
    kwargs = {}
    # Optional validity range: attach each bound with its unit when present.
    for bound, unit_key in (('Tmin', 'T_unit'), ('Tmax', 'T_unit'),
                            ('Pmin', 'P_unit'), ('Pmax', 'P_unit')):
        if settings.get(bound):
            kwargs[bound] = (settings[bound], settings[unit_key])
    if settings.get('comment'):
        kwargs['comment'] = settings['comment']
    if arrh_type == 'Arrhenius':
        kwargs['A'] = (params['A'], settings['A_unit'])
        kwargs['n'] = params['n']
        kwargs['Ea'] = (params['Ea'], settings['E_unit'])
        if params.get('T0'):
            kwargs['T0'] = (params['T0'], settings['T_unit'])
        if params.get('uncertainty'):
            kwargs['uncertainty'] = params['uncertainty']
        return Arrhenius(**kwargs)
    if arrh_type == 'MultiArrhenius':
        kwargs['arrhenius'] = params['arrhenius']
        return MultiArrhenius(**kwargs)
    if arrh_type == 'PdepArrhenius':
        kwargs['arrhenius'] = params['arrhenius']
        kwargs['pressures'] = (params['pressures'], settings['P_unit'])
        if params['highPlimit']:
            kwargs['highPlimit'] = params['highPlimit']
        return PDepArrhenius(**kwargs)
def get_kinetic_data(k_data, settings):
    """
    Build an RMG kinetics object from the first *active* entry of k_data.

    Args:
        k_data (dict): maps a kinetics type name ('Arrhenius',
                       'MultiArrhenius', 'PdepArrhenius') to its parameter
                       dict; an entry is used only when params['active']
        settings (dict): units, T/P range and description shared by all
                         kinetics types
    Returns:
        data (RMG Kinetics): RMG kinetics data, or None when there is no
        active entry, the parameter lists are inconsistent, or the type is
        not supported
    """
    for k_type, params in k_data.items():
        if not params['active']:
            continue
        if k_type == 'Arrhenius':
            data = get_arrhenius_from_param(params, settings)
        elif k_type == 'MultiArrhenius':
            if len(params['A']) == len(params['n']) == len(params['Ea']):
                mult_params = {'arrhenius': []}
                # one Arrhenius term per (A, n, Ea) triple
                for index in range(len(params['A'])):
                    arrh_params = {
                        'A': params['A'][index],
                        'n': params['n'][index],
                        'Ea': params['Ea'][index],
                    }
                    if 'T0' in params:
                        arrh_params['T0'] = params['T0']
                    mult_params['arrhenius'].append(
                        get_arrhenius_from_param(arrh_params, settings))
                data = get_arrhenius_from_param(mult_params,
                                                settings, arrh_type=k_type)
            else:
                logging.error('A, n and Ea does not have same length.')
                return
        elif k_type == 'PdepArrhenius':
            if len(params['A']) == len(params['n']) == len(params['Ea']) \
                    and len(params['A']) == len(params['P']):
                # copy the pressure list so removing the 'inf' marker below
                # does not mutate the caller's data
                pdep_params = {'arrhenius': [],
                               'pressures': list(params['P']),
                               'highPlimit': None}
                for index in range(len(params['A'])):
                    arrh_params = {
                        'A': params['A'][index],
                        'n': params['n'][index],
                        'Ea': params['Ea'][index],
                    }
                    if 'T0' in params:
                        arrh_params['T0'] = params['T0']
                    # a per-entry pressure of the string 'inf' marks the
                    # high-pressure-limit expression
                    if isinstance(params['P'][index], str) and \
                            params['P'][index] == 'inf':
                        pdep_params['highPlimit'] = get_arrhenius_from_param(
                            arrh_params, settings)
                        pdep_params['pressures'].remove('inf')
                    else:
                        pdep_params['arrhenius'].append(
                            get_arrhenius_from_param(arrh_params, settings))
                data = get_arrhenius_from_param(pdep_params,
                                                settings, arrh_type=k_type)
            else:
                logging.error('A, n, Ea and P does not have same length.')
                return
        else:
            logging.error('Not support the kinetic type.')
            return
        if params.get('multiplier'):
            data.change_rate(params['multiplier'])
        break
    else:
        logging.error('No active kinetic parameter data.')
        return
    return data
| [
"xiaorui@mit.edu"
] | xiaorui@mit.edu |
499899b07cb558bc9dd599794ace8b8746cee9ba | 06c9edb02884ced68c62b5527d2be0e1a2e65bf1 | /9012.py | 3ce6c980e1d0a4c7ae29f246559b2957d47c7fc6 | [] | no_license | 0x232/BOJ | 3c5d3973b62036bfe9b761c88c822cf7fe909bce | 5f135ac51b1c304eff4630798fb5c516b666a5c6 | refs/heads/master | 2021-07-03T02:56:00.132987 | 2020-10-31T02:18:19 | 2020-10-31T02:18:19 | 191,161,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | n = int(input())
for _ in range(n):
    # Balanced-parentheses scan: depth rises on '(' and falls on ')'.
    # The string is valid iff depth never dips below zero and ends at zero.
    line = input()
    depth = 0
    balanced = True
    for ch in line:
        if depth < 0:
            # more closers than openers at some earlier point
            balanced = False
            break
        if ch == '(':
            depth += 1
        elif ch == ')':
            depth -= 1
    if depth != 0:
        balanced = False
    print('YES' if balanced else 'NO')
| [
"51640066+0x232@users.noreply.github.com"
] | 51640066+0x232@users.noreply.github.com |
6f0ed4a9c23a128f716f922b6b99cba603ce1e7f | 42d0a78861a904cbb4397a495855de40d7f069ad | /client/src/characterControl/Camera.py | a6f1b9fff05b0567f92c489eee03e39e4f11af27 | [] | no_license | sythaeryn/lostfrontier | 35e1555dc61dc444fadb4dc24448dc9b566d21a7 | a10b52a015fcfe68018dadb2e3cab31d684da89d | refs/heads/master | 2016-09-10T11:07:34.366957 | 2009-08-27T15:38:11 | 2009-08-27T15:38:11 | 37,406,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | from direct.showbase.DirectObject import DirectObject
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
class Camera(DirectObject):
    """Panda3D chase camera parented to a followed object (objSeguir)."""
    # node the camera is attached to (set in __init__)
    objSeguir = None
    def __init__(self,obj):
        """Disable mouse control, narrow the FOV and parent the camera to *obj*."""
        base.disableMouse()
        lente = base.cam.node().getLens()
        lente.setFov(40)
        base.cam.node().setLens(lente)
        self.objSeguir = obj
        base.camera.reparentTo(self.objSeguir)
    def look(self,obj):
        """Aim the camera at *obj*."""
        base.camera.lookAt(obj)
    def set_cam_pos(self,x,y,z,vel = 0.1):
        """Glide the camera to (x, y, z) over *vel* seconds via an interval."""
        cam = base.camera
        cammov = base.camera.posInterval(vel,Point3(x,y,z), startPos=Point3(cam.getX(),cam.getY(),cam.getZ()))
        camInt = Sequence(cammov, name = "camInt")
        camInt.start()
    def cam_follow(self,dist,hei,sid = 0.0):
        """Snap the camera to an offset (side, distance, height) from its parent."""
        base.camera.setPos( sid, dist, hei)
    def alt_cam_h(self):
        """Rotate the camera heading one degree."""
        base.camera.setH(base.camera.getH() + 1)
| [
"joao@setequatro.com.br"
] | joao@setequatro.com.br |
3c80081c260d7cc613091294c57a8e8f35a4dd0c | bbbd25309c327ae774ddd10a92c043a88be27665 | /game/main.py | 9e75d05ee2dd4521b8c08cf2076dc96f93f9f76c | [] | no_license | Max-Joshua/Herramientas_Computacionales | b2298039f585b6d0f55ff9dbecd18e0846ab5803 | c275b056cf7cc25a3fbb9743ada7b3bc3ef39d5e | refs/heads/main | 2023-01-07T06:18:32.760318 | 2020-10-30T17:14:28 | 2020-10-30T17:14:28 | 308,102,773 | 0 | 1 | null | 2020-10-30T15:42:39 | 2020-10-28T18:12:00 | Python | UTF-8 | Python | false | false | 21,016 | py | """
thieves' hole redeem is a plataformer game, where magic and bad guys collide!
Have fun going through different dungeos to save your loved one.
thieves' hole redeem by Alejandro Fernandez del Valle Herrera, Joshua Rubén Amaya Camilo and José Emilio Derbez Safie
is licensed under CC BY 4.0. To view a copy of this license, visit https://creativecommons.org/licenses/by/4.0
"""
import pyglet, os, json, random
# /$$ /$$ /$$ /$$ /$$
# | $$ | $$ |__/ | $$ | $$
# | $$ | $$ /$$$$$$ /$$$$$$ /$$ /$$$$$$ | $$$$$$$ | $$ /$$$$$$ /$$$$$$$
# | $$ / $$/|____ $$ /$$__ $$| $$ |____ $$| $$__ $$| $$ /$$__ $$ /$$_____/
# \ $$ $$/ /$$$$$$$| $$ \__/| $$ /$$$$$$$| $$ \ $$| $$| $$$$$$$$| $$$$$$
# \ $$$/ /$$__ $$| $$ | $$ /$$__ $$| $$ | $$| $$| $$_____/ \____ $$
# \ $/ | $$$$$$$| $$ | $$| $$$$$$$| $$$$$$$/| $$| $$$$$$$ /$$$$$$$/
# \_/ \_______/|__/ |__/ \_______/|_______/ |__/ \_______/|_______/
# World/window size in pixels.
# NOTE(review): the Window(...) call below repeats 1500/800 instead of
# reusing width/height — keep the four values in sync.
width = 1500
height = 800
# current player mana (refilled to 5 when an enemy dies)
mana = 5
window = pyglet.window.Window(1500, 800, resizable=False, vsync=True)
# ordered groups: 0 drawn first (back), 2 drawn last (front)
drawGroup0 = pyglet.graphics.OrderedGroup(0)
drawGroup1 = pyglet.graphics.OrderedGroup(1)
drawGroup2 = pyglet.graphics.OrderedGroup(2)
# batches: characters, platform tops, platform fill, and projectiles
mainBatch = pyglet.graphics.Batch()
bkgBatch = pyglet.graphics.Batch()
bkgBatch1 = pyglet.graphics.Batch()
projectileBatch = pyglet.graphics.Batch()
# live game state: objects[0] is the player, the rest are enemies
objects = []
projectiles = []
plataforms = []
def gameStarter():
    '''
    (Re)create everything for the current level.

    Resets the module-level batches, draw groups, object lists and mana,
    then reads game/information.json to place the player, the enemies and
    the platforms for the player's current level, and finally kicks off
    each enemy's movement AI.  Called both at startup and on player death.
    '''
    print("starting game!")
    global objects, plataforms, drawGroup0,drawGroup1,drawGroup2, mainBatch, bkgBatch, bkgBatch1, mana
    # reset pyglet batches for optimazation
    drawGroup0 = pyglet.graphics.OrderedGroup(0)
    drawGroup1 = pyglet.graphics.OrderedGroup(1)
    drawGroup2 = pyglet.graphics.OrderedGroup(2)
    mainBatch = pyglet.graphics.Batch()
    bkgBatch = pyglet.graphics.Batch()
    bkgBatch1 = pyglet.graphics.Batch()
    # reset game objects
    objects = []
    plataforms = []
    mana = 5
    # load JSON file
    # (assumes keys: player{x,y,currentlvl}, levels[i]{enemies,objects})
    with open('game/information.json') as file:
        info = json.load(file)
    objects.append(object(images.imgDict['Protagonist'], mainBatch, info['player']['x'], info['player']['y'])) # create player
    # create enemies — 'type' selects the sprite sheet
    for enemy in info['levels'][info['player']['currentlvl']]['enemies']:
        if enemy['type'] == 0: # nice
            objects.append(object(images.imgDict['Enemie0'], mainBatch, enemy['x'], enemy['y'], enemy["HP"]))
        elif enemy['type'] == 1:
            objects.append(object(images.imgDict['Enemie1'], mainBatch, enemy['x'], enemy['y'], enemy["HP"]))
    # create plataforms
    for plataform in info['levels'][info['player']['currentlvl']]['objects']:
        plataforms.append(staticObject(plataform['x'], plataform['y'], plataform['width'], plataform['height'], images.imgDict['Platform'], images.imgDict['Platform_rocks']))
    # start monster AI for everything except the player (objects[0])
    for monster in objects[1:]:
        monster.moveMonster()
# /$$$$$$
# |_ $$_/
# | $$ /$$$$$$/$$$$ /$$$$$$ /$$$$$$ /$$$$$$
# | $$ | $$_ $$_ $$ |____ $$ /$$__ $$ /$$__ $$
# | $$ | $$ \ $$ \ $$ /$$$$$$$| $$ \ $$| $$$$$$$$
# | $$ | $$ | $$ | $$ /$$__ $$| $$ | $$| $$_____/
# /$$$$$$| $$ | $$ | $$| $$$$$$$| $$$$$$$| $$$$$$$
# |______/|__/ |__/ |__/ \_______/ \____ $$ \_______/
# /$$ \ $$
# | $$$$$$/
# \______/
class imagesLoader:
    def __init__(self, img_dir='game/img'):
        """Load every .png under *img_dir* (recursively) into self.imgDict.

        Keys are the file names without the .png extension, values are
        pyglet images.  ONLY .png FILES ARE SUPPORTED.

        Args:
            img_dir (str): root directory to scan (default 'game/img',
                           preserving the original behavior)
        """
        print('loading images')
        self.imgDict = {}
        for root, dirs, found_files in os.walk(img_dir, topdown=False):
            for name in found_files:
                if name.endswith('.png'):
                    # join with the walked root so files in subdirectories
                    # load correctly (a flat path would fail for them)
                    self.imgDict[name[:-4]] = pyglet.image.load(os.path.join(root, name))
        print('images loaded')
pyglet.options['audio'] = ('openal', 'pulse', 'directsound', 'silent')
class soundLoader:
    def __init__(self, snd_dir='game/snd', stream_dir='game/stream'):
        """Load .wav sound effects and streamed music tracks.

        Effects under *snd_dir* are fully preloaded (streaming=False);
        tracks under *stream_dir* are streamed (streaming=True).  Keys are
        the file names without the .wav extension.  Both dictionaries are
        per-instance (the original class-level dicts were shared state).
        ONLY .wav FILES ARE SUPPORTED.
        """
        print('loading sound')
        self.soundDict = {}
        self.streamDict = {}
        for root, dirs, found_files in os.walk(snd_dir, topdown=False):
            for name in found_files:
                if name.endswith('.wav'):
                    # join with root so nested directories also load
                    self.soundDict[name[:-4]] = pyglet.media.load(os.path.join(root, name), streaming=False)
        for root, dirs, found_files in os.walk(stream_dir, topdown=False):
            for name in found_files:
                if name.endswith('.wav'):
                    self.streamDict[name[:-4]] = pyglet.media.load(os.path.join(root, name), streaming=True)
        print('sound loaded')
    def playLoopSound(self, sndToPlay):
        """Queue *sndToPlay* on a fresh player and loop it forever."""
        self.player = pyglet.media.Player()
        self.player.queue(sndToPlay)
        self.player.loop = True
        self.player.play()
# /$$$$$$ /$$ /$$
# /$$__ $$| $$ | $$
# | $$ \__/| $$$$$$$ /$$$$$$ /$$$$$$ /$$$$$$ /$$$$$$$ /$$$$$$ /$$$$$$ /$$$$$$ /$$$$$$$
# | $$ | $$__ $$ |____ $$ /$$__ $$|____ $$ /$$_____/|_ $$_/ /$$__ $$ /$$__ $$ /$$_____/
# | $$ | $$ \ $$ /$$$$$$$| $$ \__/ /$$$$$$$| $$ | $$ | $$$$$$$$| $$ \__/| $$$$$$
# | $$ $$| $$ | $$ /$$__ $$| $$ /$$__ $$| $$ | $$ /$$| $$_____/| $$ \____ $$
# | $$$$$$/| $$ | $$| $$$$$$$| $$ | $$$$$$$| $$$$$$$ | $$$$/| $$$$$$$| $$ /$$$$$$$/
# \______/ |__/ |__/ \_______/|__/ \_______/ \_______/ \___/ \_______/|__/ |_______/
class object:
    """A moving game character (player or enemy) with simple physics.

    NOTE(review): the class name shadows the Python builtin `object`;
    renaming would break callers elsewhere in the file, so it is kept.
    Positions are in window pixels; velocities/accelerations in px/s and
    px/s^2, integrated every physiscDeltaTime seconds.
    """
    # world position (bottom-left of the collision box)
    x = 0
    y = 0
    # current velocity
    xVel = 0
    yVel = 0
    # current acceleration (yAxcel is reduced by gravity each tick)
    xAxcel = 0
    yAxcel = 0
    # velocity damping divisor: each tick loses vel/drag
    drag = 10
    gravityActive = True
    canJump = False
    # collision-box size, taken from the sprite in __init__
    width = 0
    height = 0
    HP = 100
    def __init__( self, spriteImg, batch, x : float = 0, y : float = 0, HP : int = 4):
        """
        creates and mantains a desired character, with the spriteImg as the sprite, it creates the sprite inside.
        x and y coordinates are used for placement in world, and HP is for hitpoints
        """
        self.x = x
        self.y = y
        self.sprite = pyglet.sprite.Sprite(img = spriteImg, batch = batch)
        self.HP = HP
        self.width = self.sprite.width
        self.height = self.sprite.height
    def calculateNextPoint(self):
        """
        Integrate one physics step: acceleration -> velocity -> position,
        then move and flip the sprite to match.
        """
        # calculate velocity (semi-implicit Euler plus a drag term)
        self.xVel += self.xAxcel * physiscDeltaTime - self.xVel / self.drag
        self.yVel += self.yAxcel * physiscDeltaTime - self.yVel / self.drag
        # calculate position based on previous pos and current vel
        self.x += self.xVel * physiscDeltaTime
        self.y += self.yVel * physiscDeltaTime
        # face the direction of travel; when mirrored (scale_x = -1) the
        # sprite is offset by its width so it stays inside the collider
        if self.xVel > 0:
            look = 1
            offset = 0
        else:
            look = -1
            offset = self.sprite.width
        # make the player be inside colliders
        self.sprite.x = self.x + offset
        self.sprite.y = self.y
        self.sprite.update(scale_x = look)
    def moveMonster(self, dx = None):
        """
        Simple monster AI: pick a random impulse, then reschedule itself
        after 2-10 s.  Repeats until the monster is destroyed.
        (dx is the unused delay argument pyglet's clock passes in.)
        """
        self.xVel = random.randint(-500,500)
        self.yVel = random.randint(100,800)
        pyglet.clock.schedule_once(self.moveMonster, random.randint(2,10))
    def checkCollisions(self):
        """
        Clamp the character inside the window and resolve platform
        collisions.  Sets gravityActive/canJump depending on whether the
        character ended up standing on something.
        """
        # "activateGravity" actually means "standing on ground this tick";
        # see the inversion at the bottom of the method.
        activateGravity = False # deactivates gravity if is within boundaries
        # check if inside world
        # on X
        if self.x < 0:
            self.x = 0
            self.xVel = 0
            self.xAxcel = 0
        elif self.x > width - self.sprite.width:
            self.x = width - self.sprite.width
            self.xVel = 0
            self.xAxcel = 0
        # on Y
        if self.y < 0:
            self.y = 0
            self.yVel = 0
            self.yAxcel = 0
            activateGravity = True
        elif self.y <= 0:
            activateGravity = True
        elif self.y > height - self.sprite.height:
            self.y = height - self.sprite.height
            self.yVel = 0
            self.yAxcel = 0
        # check if colliding with plataform, first on X coordinates, then on Y
        for plataform in plataforms:
            if plataform.x - self.sprite.width < self.x < plataform.x + plataform.width:
                if plataform.y - self.sprite.height < self.y < plataform.y + plataform.height:
                    # pick the best way to push the character back out
                    offsetRenderer = 5
                    # push the character up or down when overlapping mostly vertically
                    if plataform.x - self.sprite.width + offsetRenderer <= self.x <= plataform.x + plataform.width - 20:
                        if plataform.y + plataform.height - 2 > self.y > plataform.y:
                            self.y = plataform.y + plataform.height - 1
                            self.yAxcel = 0
                            self.yVel = 0
                            activateGravity = True
                        elif self.y < plataform.y:
                            self.y = plataform.y - self.sprite.height
                            self.yAxcel = 0
                            self.yVel = 0
                        if plataform.y + plataform.height >= self.y > plataform.y:
                            activateGravity = True
                    # push the character left or right when overlapping mostly sideways
                    if plataform.y - self.sprite.height + offsetRenderer <= self.y <= plataform.y + plataform.height - offsetRenderer:
                        activateGravity = True
                        if self.x + self.sprite.width / 2 < plataform.x + plataform.width / 2:
                            self.x -= offsetRenderer - 1
                            self.xVel = 0
                            self.xAxcel = 0
                        else:
                            self.x += offsetRenderer - 1
                            self.xVel = 0
                            self.xAxcel = 0
        if activateGravity: # standing on something: can jump, no gravity pull
            self.canJump = True
            self.gravityActive = False
        else:
            self.gravityActive = True
    def checkCollisionWithList(self, list, action):
        """
        Call *action* (no args) once per object in *list* whose bounding
        box overlaps this character.  NOTE: the parameter name shadows the
        builtin `list`.
        """
        for object in list:
            if object.x - self.sprite.width < self.x < object.x + object.width:
                if object.y - self.sprite.height < self.y < object.y + object.height:
                    action()
    def removeHitpoints(self, amount):
        """
        Remove *amount* HP and play the hit sound; destroy self at 0 HP.
        NOTE(review): the check is `== 0`, so an overshoot past zero would
        never trigger destruction — in practice bullets remove exactly 1.
        """
        self.HP -= amount
        audio.soundDict['hit'].play()
        if self.HP == 0:
            self.destroy()
    def destroy(self):
        """
        Remove this character: delete the sprite, drop it from the global
        objects list, refill the player's mana and play the death sound.
        """
        global mana
        self.sprite.delete()
        objects.pop(objects.index(self))
        mana = 5
        audio.soundDict['die'].play()
# /$$$$$$$ /$$ /$$ /$$
# | $$__ $$ | $$ |__/| $$
# | $$ \ $$ /$$$$$$ /$$$$$$ /$$ /$$$$$$ /$$$$$$$ /$$$$$$ /$$| $$ /$$$$$$
# | $$$$$$$//$$__ $$ /$$__ $$|__/ /$$__ $$ /$$_____/|_ $$_/ | $$| $$ /$$__ $$
# | $$____/| $$ \__/| $$ \ $$ /$$| $$$$$$$$| $$ | $$ | $$| $$| $$$$$$$$
# | $$ | $$ | $$ | $$| $$| $$_____/| $$ | $$ /$$| $$| $$| $$_____/
# | $$ | $$ | $$$$$$/| $$| $$$$$$$| $$$$$$$ | $$$$/| $$| $$| $$$$$$$
# |__/ |__/ \______/ | $$ \_______/ \_______/ \___/ |__/|__/ \_______/
# /$$ | $$
# | $$$$$$/
# \______/
class bullet:
    """A fireball projectile travelling horizontally at 500 px/s."""
    x = 0
    y = 0
    # horizontal speed; sign is applied from `looking` in __init__
    xVel = 500
    yVel = 0
    sprite = None
    def __init__(self, img, player : object, batch, looking: int):
        """
        Start the projectile at the player's centre.
        *looking* must be 1 (right) or -1 (left) to give direction.
        """
        self.sprite = pyglet.sprite.Sprite(img = img, batch = batch)
        self.width = self.sprite.width
        self.height = self.sprite.height
        self.x = player.x + player.width / 2
        self.y = player.y + player.height / 2
        self.xVel *= looking
        self.sprite.x = self.x
        self.sprite.y = self.y
    def calculateCollisionsandNext(self, listToDestroy, otherList):
        """
        Advance one physics step, then resolve collisions in priority
        order using for-else cascades: hit something in *listToDestroy*
        (damage it and vanish); else hit *otherList* (just vanish); else
        vanish when off-screen.  Each `else` runs only when the preceding
        loop finished without `break`.
        """
        global physiscDeltaTime
        self.x += self.xVel * physiscDeltaTime
        self.sprite.x = self.x
        for object in listToDestroy:
            if object.x - self.sprite.width < self.x < object.x + object.width:
                if object.y - self.sprite.height < self.y < object.y + object.height:
                    object.removeHitpoints(1)
                    self.destroy()
                    break
        else:
            # no enemy hit — check solid obstacles
            for object in otherList:
                if object.x - self.sprite.width < self.x < object.x + object.width:
                    if object.y - self.sprite.height < self.y < object.y + object.height:
                        self.destroy()
                        break
            else:
                # nothing hit — cull when leaving the window
                if not (0 < self.x < width):
                    self.destroy()
    def destroy(self):
        """Delete the sprite and drop self from the global projectile list."""
        self.sprite.delete()
        projectiles.pop(projectiles.index(self))
def killPlayer():
    """Apply one hit to the player: lose a heart, knock back, maybe restart."""
    player = objects[0]
    player.HP -= 1
    # reverse and amplify the motion for a knock-back effect
    player.xVel *= -5
    player.yVel *= -5
    audio.soundDict['hit'].play()
    if player.HP < 0:
        # out of hearts: play the death sound and restart the level
        audio.soundDict['die'].play()
        gameStarter()
# /$$$$$$$ /$$ /$$ /$$$$$$
# | $$__ $$| $$ | $$ /$$__ $$
# | $$ \ $$| $$ /$$$$$$ /$$$$$$ /$$$$$$ | $$ \__//$$$$$$ /$$$$$$ /$$$$$$/$$$$
# | $$$$$$$/| $$ |____ $$|_ $$_/ |____ $$| $$$$ /$$__ $$ /$$__ $$| $$_ $$_ $$
# | $$____/ | $$ /$$$$$$$ | $$ /$$$$$$$| $$_/ | $$ \ $$| $$ \__/| $$ \ $$ \ $$
# | $$ | $$ /$$__ $$ | $$ /$$ /$$__ $$| $$ | $$ | $$| $$ | $$ | $$ | $$
# | $$ | $$| $$$$$$$ | $$$$/| $$$$$$$| $$ | $$$$$$/| $$ | $$ | $$ | $$
# |__/ |__/ \_______/ \___/ \_______/|__/ \______/ |__/ |__/ |__/ |__/
class staticObject:
    """A solid platform: a collision box tiled with sprites.

    The top row uses the platform image (bkgBatch); rows below use the
    rock-fill image (bkgBatch1, drawGroup2).  Sprites are scaled on X so
    an integer number of tiles exactly spans the platform width.
    """
    x = 0
    y = 0
    width = 0
    height = 0
    def __init__(self, x, y, width, height, spriteImg, secondarySpriteImg):
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.sprites = [pyglet.sprite.Sprite(img = spriteImg, batch = bkgBatch)]
        self.bkgSprites = []
        # horizontal scale so (tile_count) scaled tiles == platform width
        scale = self.width / (self.width // self.sprites[0].width + 1) / self.sprites[0].width
        self.sprites[0].update(x=self.x, y=self.y + self.height - self.sprites[0].height,scale_x=scale)
        # remaining top-row tiles, laid left to right
        for i in range(1, self.width // self.sprites[0].width + 1):
            self.sprites.append(pyglet.sprite.Sprite(img = spriteImg, batch = bkgBatch))
            self.sprites[i].update(x=self.x + scale * self.sprites[0].width * i, y=self.y + self.height - self.sprites[0].height, scale_x = scale)
        # fill rows below the top with the secondary (rock) texture
        if self.height // self.sprites[0].height > 0:
            for Y in range(0, self.height // self.sprites[0].height):
                for i in range(0, self.width // self.sprites[0].width + 1):
                    self.bkgSprites.append(pyglet.sprite.Sprite(img = secondarySpriteImg, batch = bkgBatch1, group = drawGroup2))
                    # NOTE(review): the row stride multiplies Y by the row
                    # count (height // tile height) rather than the column
                    # count (width // tile width + 1), and y is fixed at
                    # self.y - 5 for every row — looks like an indexing/
                    # placement bug for multi-row platforms; confirm with
                    # level data before changing.
                    self.bkgSprites[i + Y * (self.height // self.sprites[0].height) ].update(x = self.x + scale * self.sprites[0].width * i, y = self.y - 5 , scale_x = scale)
# /$$$$$$$$ /$$
# | $$_____/ | $$
# | $$ /$$ /$$ /$$$$$$ /$$$$$$$ /$$$$$$ /$$$$$$$
# | $$$$$| $$ /$$//$$__ $$| $$__ $$|_ $$_/ /$$_____/
# | $$__/ \ $$/$$/| $$$$$$$$| $$ \ $$ | $$ | $$$$$$
# | $$ \ $$$/ | $$_____/| $$ | $$ | $$ /$$\____ $$
# | $$$$$$$$\ $/ | $$$$$$$| $$ | $$ | $$$$//$$$$$$$/
# |________/ \_/ \_______/|__/ |__/ \___/ |_______/
@window.event
def on_draw():
    """Redraw the frame: clear, HUD (hearts then mana), then the world.

    Draw order matters — later draws appear on top of earlier ones.
    """
    global bkgBatch, mainBatch
    window.clear()
    # one heart icon per remaining hit point (HP counts from 0, hence +1)
    for heart_index in range(objects[0].HP + 1):
        hearts[heart_index].draw()
    # one icon per remaining mana point
    for mana_index in range(mana):
        manaSprites[mana_index].draw()
    # background fill, platform tops, characters, then projectiles on top
    bkgBatch1.draw()
    bkgBatch.draw()
    mainBatch.draw()
    projectileBatch.draw()
moveForce = 1000
@window.event
def on_key_press(symbol, modifiers):
    """Keyboard handler: A/D walk, W jump, S casts a fireball (1 mana)."""
    global mana
    # named key constants instead of raw keycodes (key.A == 97, W == 119,
    # D == 100, S == 115 — identical values, readable names)
    key = pyglet.window.key
    if symbol == key.A:
        # accelerate left
        objects[0].xAxcel -= moveForce
    if symbol == key.W:
        # jump only when standing on something
        if objects[0].canJump:
            objects[0].yAxcel = 2000
            objects[0].yVel = 500
            objects[0].canJump = False
            audio.soundDict['jump'].play()
    if symbol == key.D:
        # accelerate right
        objects[0].xAxcel += moveForce
    if symbol == key.S:
        if mana > 0:
            # fire in the direction the player is accelerating/facing
            projectiles.append(bullet(images.imgDict['Fire_Ball'], objects[0], projectileBatch,
                                      1 if objects[0].xAxcel >= 0 else -1))
            audio.soundDict['fire'].play()
            mana -= 1
@window.event
def on_key_release(symbol, modifiers):
    """Stop accelerating when a movement key is released."""
    # named key constants instead of raw keycodes (same values as before)
    key = pyglet.window.key
    if symbol == key.W:
        # cancel upward thrust only — never a downward acceleration
        if objects[0].yAxcel > 0:
            objects[0].yAxcel = 0
    if symbol == key.A:
        objects[0].xAxcel = 0
    if symbol == key.D:
        objects[0].xAxcel = 0
gravity = 100
maxGravity = -10000
physiscDeltaTime = 0.02
def runPhysics(dx):
    """
    Fixed-rate physics tick (scheduled every physiscDeltaTime seconds).

    Order per tick: player-vs-enemy contact (damages the player), then
    projectile movement/collisions, then gravity + world/platform
    collision + integration for every character.  *dx* is the elapsed
    time pyglet's clock passes in (unused — the step is fixed).
    NOTE: the loop variable `object` shadows both the builtin and the
    character class of the same name; safe here, but fragile.
    """
    objects[0].checkCollisionWithList(objects[1:], killPlayer)
    for object in projectiles:
        object.calculateCollisionsandNext(objects[1:], plataforms)
    for object in objects:
        if object.gravityActive:
            # pull down until terminal acceleration is reached
            if object.yAxcel > maxGravity:
                object.yAxcel -= gravity
        object.checkCollisions()
        object.calculateNextPoint()
# /$$$$$$ /$$ /$$
# /$$__ $$ | $$ | $$
# | $$ \__//$$$$$$ /$$$$$$ /$$$$$$ /$$$$$$
# | $$$$$$|_ $$_/ |____ $$ /$$__ $$|_ $$_/
# \____ $$ | $$ /$$$$$$$| $$ \__/ | $$
# /$$ \ $$ | $$ /$$ /$$__ $$| $$ | $$ /$$
# | $$$$$$/ | $$$$/| $$$$$$$| $$ | $$$$/
# \______/ \___/ \_______/|__/ \___/
# procedures to start the game itself
images = imagesLoader()
audio = soundLoader()
audio.playLoopSound(audio.streamDict['song'])
# HUD: five heart icons (health) across the top-left
hearts = [pyglet.sprite.Sprite(img=images.imgDict['Heart']) for _ in range(5)]
for i, heart in enumerate(hearts):
    heart.update(x=20 * i + 25, y=height - 50, scale=3)
# HUD: five mana icons in a row below the hearts
manaSprites = [pyglet.sprite.Sprite(img=images.imgDict['ManaPoints']) for _ in range(5)]
for i, mana_sprite in enumerate(manaSprites):
    mana_sprite.update(x=20 * i + 25, y=height - 100, scale=1)
gameStarter()
pyglet.clock.schedule_interval(runPhysics, physiscDeltaTime)
pyglet.app.run()
"alexplays003@gmail.com"
] | alexplays003@gmail.com |
a2d6e90b036d44adf506a3ed0f934486170ee325 | d47da5253b853fbe15f06f5f23e43e4786d7deb2 | /code/ACER/main.py | 4a6cf2e170b19f2d98a7f0bc5b5291cae50b77b2 | [] | no_license | htpauleta/RL-Algorithms | 1189167e5dd2cadc649c53df6470f626e7c2f7d3 | 483c99cea2c4d9c0d053ce5cf9c2279a0d7c4a4e | refs/heads/master | 2020-09-13T16:32:21.564621 | 2019-11-14T11:58:37 | 2019-11-14T11:58:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,140 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2018/5/23 下午1:19
# @Author : Yang Yuchi
import argparse
import threading
import multiprocessing as mp
import tensorflow as tf
import gym
from network import ActorCriticNet, Agent
from utils import Counter
from test import test
# Hyper parameters
parser = argparse.ArgumentParser(description='Actor Critic with Experience Replay')
parser.add_argument('--seed', type=int, default=11, help='Random seed')
parser.add_argument('--num-agents', type=int, default=mp.cpu_count(), metavar='N', help='Number of training agents')
parser.add_argument('--max-training-steps', type=int, default=500000, metavar='STEPS', help='Maximum training steps')
parser.add_argument('--t-max', type=int, default=100, metavar='STEPS',
help='Max number of forward steps for on-policy learning before update')
parser.add_argument('--max-episode-length', type=int, default=500, metavar='LENGTH', help='Maximum episode length')
parser.add_argument('--replay-start', type=int, default=100, metavar='EPISODES',
help='Number of transitions to save before starting off-policy training')
parser.add_argument('--batch-size', type=int, default=100, metavar='SIZE', help='Off-policy batch size')
parser.add_argument('--num-hidden', type=int, default=40, metavar='SIZE', help='Number of hidden neurons')
parser.add_argument('--memory-capacity', type=int, default=20000, metavar='CAPACITY',
help='Experience replay memory capacity')
parser.add_argument('--max-stored-episode-length', type=int, default=10000, metavar='LENGTH',
help='Maximum length of a stored episode')
parser.add_argument('--replay-ratio', type=int, default=4, metavar='r', help='Ratio of off-policy to on-policy updates')
parser.add_argument('--gamma', type=float, default=0.99, metavar='γ', help='RL discount factor')
parser.add_argument('-c', type=float, default=10, metavar='c', help='Importance weight truncation value')
parser.add_argument('--trust-region', action='store_true', help='Use trust region')
parser.add_argument('--alpha', type=float, default=0.99, metavar='α', help='Average policy decay rate')
parser.add_argument('--delta', type=float, default=1, metavar='δ', help='Trust region threshold value')
parser.add_argument('--learning-rate', type=float, default=0.001, metavar='η', help='Learning rate')
parser.add_argument('--env', type=str, default='CartPole-v1', help='environment name')
parser.add_argument('--evaluation-interval', type=int, default=500, metavar='STEPS',
help='Number of training steps between evaluations (roughly)')
parser.add_argument('--evaluation-episodes', type=int, default=10, metavar='N',
help='Number of evaluation episodes to average over')
parser.add_argument('--render', action='store_true', help='Render evaluation agent')
parser.add_argument('--output-graph', action='store_true', help='Output Tensor board graph')
args = parser.parse_args()
gym.logger.set_level(gym.logger.ERROR) # Disable Gym warnings
# shared step counter across all training threads
counter = Counter()
tf.set_random_seed(args.seed)
sess = tf.Session()
# build the shared networks on CPU: the global learner, the slowly-moving
# average policy (for trust-region updates) and a separate evaluation net
with tf.device('/cpu:0'):
    global_net = ActorCriticNet(args, 'global_net', sess)
    average_net = ActorCriticNet(args, 'average_net', sess)
    test_net = ActorCriticNet(args, 'test', sess)
# one worker agent per thread, all sharing the global/average nets
agents = []
for i in range(args.num_agents):
    agent_name = 'Agent_%i' % i
    agents.append(Agent(args, i, agent_name, global_net, average_net, sess))
if args.output_graph:
    tf.summary.FileWriter("logs/", sess.graph)
coord = tf.train.Coordinator()
sess.run(tf.global_variables_initializer())
# initialize average network weights to global network weights
sess.run([tf.assign(a_p, g_p) for a_p, g_p in zip(average_net.a_params, global_net.a_params)])
def job(agent):
    """Entry point for one training thread: run this agent's ACER loop."""
    agent.acer_main(counter, coord, average_net)

processes = []
# evaluation thread
p = threading.Thread(target=test, args=(args, counter, test_net, global_net, sess))
p.start()
processes.append(p)
# one training thread per agent; the agent is passed explicitly via args
# because reading the loop variable from module scope inside the thread
# races with the loop advancing it (some agents could run twice or never)
for agent in agents:
    task = threading.Thread(target=job, args=(agent,))
    task.start()
    processes.append(task)
coord.join(processes)
| [
"noreply@github.com"
] | htpauleta.noreply@github.com |
63c822733375cb75b5e2b4c7c27706ad5ff89be8 | 2d244496960c8bb2bfff001ea70df4a47e532241 | /products/migrations/0011_auto_20151123_1231.py | 274a1b134441eccde0bb5b21fc448db27d861942 | [] | no_license | brajeshvit/iCharity | 73f1fb0dd6743d34aa481411e20c62019dd87cef | f7de759da19d8902020efff46fb4f5a115f09130 | refs/heads/master | 2016-08-11T20:04:27.969963 | 2015-11-24T08:14:26 | 2015-11-24T08:14:26 | 46,608,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: replaces the old `Variation` model with
    `Variation1` (same fields: title/active/inventory plus a Product FK).

    Operation order matters: the new table is created first, the FK field is
    removed from the old model, and only then is the old model dropped.
    """

    dependencies = [
        ('products', '0010_auto_20151123_1226'),
    ]

    operations = [
        migrations.CreateModel(
            name='Variation1',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=120)),
                ('active', models.BooleanField(default=True)),
                ('inventory', models.IntegerField(null=True, blank=True)),
                ('product', models.ForeignKey(to='products.Product')),
            ],
        ),
        # Drop the FK before deleting the model that owns it.
        migrations.RemoveField(
            model_name='variation',
            name='product',
        ),
        migrations.DeleteModel(
            name='Variation',
        ),
    ]
| [
"richavit1995@gmail.com"
] | richavit1995@gmail.com |
4ff7442917eee0fc06eb777314963d47e465335b | 8e1e8e65720b525c843fe11a45bbacff1df1ec4f | /translation_client/management/__init__.py | 659f7ba41b6a1db78e7dc214e769006ac1ed1eb6 | [
"MIT"
] | permissive | gdelnegro/django-translation-client | 43a3b9d87d6e61d1dbd5e1285651d788cad03e0a | be8ad3f68cf43f77cae3d99254876b394e5ecd55 | refs/heads/master | 2021-01-12T15:16:46.600464 | 2017-10-13T07:53:15 | 2017-10-13T07:53:15 | 71,740,257 | 0 | 1 | null | 2017-10-13T07:53:16 | 2016-10-24T01:12:52 | Python | UTF-8 | Python | false | false | 96 | py | # -*- coding: utf-8 -*-
# Created by Gustavo Del Negro <gustavodelnegro@gmail.com> on 10/23/16.
| [
"gustavodelnegro@gmail.com"
] | gustavodelnegro@gmail.com |
84e9d0b869c9b47ae05f7b7b29b416b67681bec8 | 2c2c8d82d3a5ee78a4e4bd4ed501fc31203204ab | /check_equal.py | 4c45d0c4357c536989e2abb5f53e0da703d920a1 | [] | no_license | KumarSanskar/Python-Programs | 3341e47f43f7c96ac7ef9cab5e9baedb4d40a684 | a33cde03b19b36b1c447f78aa2f30f2593d38708 | refs/heads/master | 2021-01-04T13:31:39.509503 | 2020-11-18T04:25:39 | 2020-11-18T04:25:39 | 240,574,672 | 3 | 2 | null | 2020-10-18T10:10:07 | 2020-02-14T18:33:32 | Python | UTF-8 | Python | false | false | 378 | py | # Programs to check two no. are equal or not
# Read two integers from stdin and report (twice, in two styles) whether
# they are equal: once with a plain if/else, once with a conditional
# expression.
n = int(input("Enter first no "))
p = int(input("Enter second no "))
if n == p: # using equality operator and if else
    print("They are equal")
else:
    print("They are unequal")
print()
# Using ternary if else structure
# This will be printed at second time
print(" They are equal" if n == p else "They are unequal")
| [
"noreply@github.com"
] | KumarSanskar.noreply@github.com |
7f11c1f7db6501aaf310c7744ab87a66e8414e63 | a08daa76d8fb7368ba4abc10dab7d6afcf7b61dd | /App/migrations/0001_initial.py | 965626ff3cb0594bae62d4cfc58d96ba3be38106 | [] | no_license | farshadkalathingal/beardeo-django | 74d7601f5067f302a4e34079c7b0a4fb19c112df | 2f3e4742072c4b6b3773b4e984859f3604e1f009 | refs/heads/main | 2023-01-03T09:23:08.935404 | 2020-10-30T05:19:36 | 2020-10-30T05:19:36 | 308,530,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 897 | py | # Generated by Django 3.1.2 on 2020-10-29 07:52
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the `ContactDetail` table
    (contact-form submissions: name/email/subject/message, creation time,
    and a non-editable slug)."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='ContactDetail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=150)),
                ('email', models.EmailField(max_length=254)),
                ('subject', models.CharField(max_length=150)),
                ('message', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('slug', models.SlugField(blank=True, editable=False, null=True)),
            ],
        ),
    ]
| [
"farshadkalathingal@gmail.com"
] | farshadkalathingal@gmail.com |
184e3bd29a8ed96678a85c84d7c640909df9152d | 1a3413c32d8384fb781bd0945a7d4e68be796407 | /Netconf-Agents/EnodebSimSetup.py | cfa5bf97f7c93250c0cc9138d9327a333f530df2 | [
"Apache-2.0"
] | permissive | Laymer/ran-sim | 795e62cf1bc40b60321a54bb38c0800e44512be7 | 68b77cf98c81986221fa11e18529e1d448a00c1e | refs/heads/master | 2020-06-19T08:47:14.693615 | 2019-07-08T11:18:31 | 2019-07-08T11:18:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,616 | py | #!/bin/python
import sys
import shutil
import os
import signal
import stat
import time
import subprocess
import fileinput
import re
# ---- Command-line validation (Python 2 script) -------------------------------
# Usage: EnodebSimSetup.py NoOfInstances [start|stop]
# NOTE(review): the typos in the user-facing messages ("Staring", "arguement")
# are runtime strings and are left untouched here.
number_of_arguments = len(sys.argv)
if number_of_arguments == 3:
    print "Staring..."
else:
    print 'Missing arguement NoOfInstances'
    print 'Usage:', sys.argv[0] ,' NoOfInstances [start/stop]'
    sys.exit(1)
NoOfInstances = int(sys.argv[1])
actionStr = sys.argv[2]
# Allowed instance count is 5..2000 inclusive.
if NoOfInstances > 2000 or NoOfInstances < 5:
    print 'Invalid value for NoOfInstances ', NoOfInstances
    print 'Usage: ',sys.argv[0] ,' NoOfInstances'
    sys.exit(1)
if actionStr == "start" or actionStr == "stop" :
    print '...'
else:
    print 'Invalid value for 2nd arguement [start/stop]'
    print 'Usage:', sys.argv[0] ,' NoOfInstances [start/stop]'
    sys.exit(1)
# ---- Main loop: one simulator instance per (netconf port, tcp port) pair -----
# Instances live in per-port copies of the template directory hc_50000.
INSTANCE_INDEX=50001
TCP_INSTANCE_INDEX=60001
COUNTER=0
while (COUNTER < NoOfInstances):
    COUNTER = COUNTER + 1
    INSTANCE = str(INSTANCE_INDEX)
    TCP_INSTANCE = str(TCP_INSTANCE_INDEX)
    if actionStr == "start" :
        print 'Preparing instance ', INSTANCE
        try:
            # Clone the template dir; ignore "already exists" failures.
            shutil.copytree('hc_50000', 'hc_'+INSTANCE ,symlinks=False, ignore=None )
        except Exception:
            sys.exc_clear()
        os.chdir('hc_'+INSTANCE)
        os.getcwd()
        print "Current working dir : %s" % os.getcwd()
        # Rewrite the template port numbers in-place for this instance.
        for line in fileinput.FileInput("config/netconf.json", inplace=1):
            line = line.replace("50000",INSTANCE)
            line = line.replace("60000", TCP_INSTANCE)
            print line
        for line in fileinput.FileInput("ransim.properties", inplace=1):
            line = line.replace("50000",INSTANCE)
            print line
        print 'Starting instance ', INSTANCE
        # Launch the simulator jar detached; the Popen handle is dropped.
        subprocess.Popen(["nohup", "java", "-Xms128m", "-Xmx1024m", "-jar", "./enodebsim-distribution-1.18.10.jar", INSTANCE]);
        os.chdir("../")
    if actionStr == "stop" :
        print 'Stopping and removing instance ', INSTANCE
        # Shell pipeline that prints the pid of this instance's java process.
        pidFindCmd="ps -eaf | grep java | grep enodebsim-distribution-1.18.10.jar | grep -v grep | grep " + INSTANCE + " | awk -F' ' '{ print $2 }'";
        print 'Finding process ', pidFindCmd
        #try:
        pidToKill=0
        pidToKill=subprocess.check_output([pidFindCmd], shell=True)
        print 'pidFindCmd ', pidFindCmd, ' pidToKill ', pidToKill, type(pidToKill)
        # check_output returns a str here, so only this branch can fire;
        # the list branch below is effectively dead code.
        if isinstance(pidToKill, str):
            print 'Killing instance ', INSTANCE, ' pid is ', str(pidToKill)
            os.kill(int(pidToKill), signal.SIGKILL)
        if isinstance(pidToKill, list):
            print 'Killing multiple processes'
            for pid in pidToKill:
                print 'Killing instance with pid is ', str(pid)
                os.kill(int(pid), signal.SIGKILL)
        #except Exception:
        #    print 'Error in finding pid'
        #    sys.exc_clear()
        try:
            shutil.rmtree('./hc_'+INSTANCE)
        except Exception:
            print 'Error in removing folder'
            sys.exc_clear()
    INSTANCE_INDEX = INSTANCE_INDEX + 1
    TCP_INSTANCE_INDEX = TCP_INSTANCE_INDEX + 1
# Final sweep on "stop": re-run the *last* loop iteration's pidFindCmd.
# NOTE(review): this call omits shell=True, so the whole pipeline string is
# treated as an executable name and will raise (then be swallowed); also
# check_output never returns int/list, so both inner branches are dead.
if actionStr == "stop" :
    try:
        pidToKill=subprocess.check_output([pidFindCmd])
        if isinstance(pidToKill, int) and pidToKill > 100 :
            print 'Killing an instance with pid is ', str(pidToKill)
            os.kill(int(pidToKill), signal.SIGKILL)
        if isinstance(pidToKill, list):
            for pid in pidToKill:
                print 'Killing instance with pid is ', str(pid)
                os.kill(int(pid), signal.SIGKILL)
    except Exception:
        sys.exc_clear()
| [
"ramesh.kumar50@wipro.com"
] | ramesh.kumar50@wipro.com |
64c8d5a9d3265d54252822fc85c11d1822807503 | f5dee44afaa7ec04061899feeb2d4b0717abc089 | /app/adapters/queues.py | abe870801cec42105ea71c269089a9ae6bfb3439 | [] | no_license | jazmanzana/event-driven | 2cfe53254f05cc8f36e8ee3bbd7b432f8b8e5a2e | 22f8c56645fec267b2cea82971e7d0e3c12042bd | refs/heads/main | 2023-06-21T12:16:50.318073 | 2021-07-27T06:14:57 | 2021-07-27T06:14:57 | 388,902,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,377 | py | from collections.abc import Callable
import pika
import os
class Publisher:
    """Publishes job payloads to the 'processing' RabbitMQ queue over a
    blocking pika connection."""

    EXCHANGE = ""            # default (nameless) exchange: routing key == queue name
    QUEUE = "processing"

    def __init__(self, url="queues"):
        # this type of connection is affecting my performance
        self.connection = pika.BlockingConnection(pika.ConnectionParameters(host=url))
        self.channel = self.connection.channel()
        # Declare the queue so publishing works even if no consumer created it yet.
        self.channel.queue_declare(queue=Publisher.QUEUE)
        # Per-message TTL in milliseconds, configurable via JOB_EXPIRATION.
        self.properties = pika.BasicProperties(expiration=os.getenv("JOB_EXPIRATION", default="60000"))

    def publish(self, body: bytes):
        """Publish one message to the processing queue.

        NOTE(review): str(body) on a bytes object produces the repr
        ("b'...'"), not the decoded payload — confirm whether
        body.decode() was intended.
        """
        self.channel.basic_publish(
            exchange="", routing_key=Publisher.QUEUE, body=str(body), properties=self.properties
        )
class Consumer:
    """Consumes messages from the 'done' RabbitMQ queue with manual acks."""

    EXCHANGE = ""        # unused here; kept for symmetry with Publisher
    QUEUE = "done"

    def __init__(self, url="queues"):
        # this type of connection is affecting my performance
        self.connection = pika.BlockingConnection(pika.ConnectionParameters(host=url))
        self.channel = self.connection.channel()
        self.channel.queue_declare(queue=Consumer.QUEUE)

    def set_callback(self, callback: Callable):
        """Register `callback` for deliveries; auto_ack is off, so the
        callback path is responsible for acknowledging messages."""
        self.channel.basic_consume(
            queue=Consumer.QUEUE, auto_ack=False, on_message_callback=callback
        )

    def start_consuming(self):
        """Block and dispatch deliveries to the registered callback."""
        self.channel.start_consuming()

    def acknowledge_message(self):
        # NOTE(review): pika's basic_ack expects a numeric delivery tag
        # (method.delivery_tag from the callback), not a queue-name string.
        # Passing Consumer.QUEUE here looks wrong — confirm against pika docs
        # and the consumer callback.
        self.channel.basic_ack(Consumer.QUEUE)
| [
"noreply@github.com"
] | jazmanzana.noreply@github.com |
efa692d6ea6388de8348b3713667942e6b5c86e3 | 2193dafd0739cf6eedd19936eb25758a10d041b8 | /Dijkistras.py | becc0b37dd97b45da9d896b6559537d1a61b9315 | [] | no_license | JavierPalomares90/Algorithms | 953d51ffc3a1d1df3f9a933ed40e8abe64947b6a | 1fb2082dda6de95b1e21a3b8cca27a0f8952fbc8 | refs/heads/master | 2020-07-01T00:46:38.334973 | 2014-06-10T03:40:41 | 2014-06-10T03:40:41 | 20,671,543 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,608 | py | # -*- coding: utf-8 -*-
# <nbformat>2</nbformat>
# <codecell>
class edge:
    """A weighted edge between two vertex labels, filled in while parsing."""

    def __init__(self):
        # Endpoint labels and weight all start zeroed; the loader sets them.
        self.v1 = self.v2 = self.length = 0
class vertex:
    """A graph vertex plus the bookkeeping fields the search code uses."""

    def __init__(self):
        self.label = 0          # 1-based id assigned while parsing the input
        self.leader = 0
        self.explored = False   # visitation flag
        # will keep track of edges going out of vertices
        self.edges = []
# <codecell>
import math
import re
from pylab import *
from numpy import *
numVertices = 200
edges = []
vertices = []
for i in xrange(numVertices):
v = vertex()
vertices.append(v)
f = open('dijkstraData.txt')
re = re.compile('\s*,\s*')
for line in f:
l = line.split()
v1Index = int(l[0])
vertices[v1Index-1].label = v1Index
for j in xrange(1,len(l),1):
n = re.split(l[j])
v2Index = int(n[0])
length = int(n[1])
vertices[v2Index-1].label = v2Index
e = edge()
e.v1 = v1Index
e.v2 = v2Index
e.length = length
vertices[v1Index-1].edges.append(e)
vertices[v2Index-1].edges.append(e)
edges.append(e)
X,A = Dijkstra(vertices,edges,vertices[0])
# <codecell>
def Dijkstra(vertices, edges, source):
    """Naive Dijkstra shortest-path search from `source`.

    vertices: list of vertex objects carrying 1-based `label` attributes.
    edges:    list of edge objects with `v1`, `v2` (1-based labels) and `length`.
    source:   the start vertex.

    Returns (X, A): X is the list of vertex labels in settlement order and
    A[i] the shortest distance to label i+1.  Unreachable vertices keep the
    1000000 sentinel used by the original assignment data.

    Fixes vs. the original: `sys.maxint` (Python-2-only, and `sys` was never
    imported in this file) is replaced by float('inf'), `xrange` by `range`,
    and the loop now terminates on disconnected graphs instead of spinning
    forever.  O(V * E) — fine for the ~200-vertex homework graph.
    """
    X = [source.label]                 # labels of settled vertices
    A = [1000000] * len(vertices)      # tentative distances, indexed by label-1
    A[source.label - 1] = 0
    while len(X) != len(vertices):
        # Scan every edge crossing the frontier (one endpoint settled, the
        # other not) and keep the one minimizing Dijkstra's greedy score.
        minLength = float('inf')
        index = 0
        vIn = 0                        # settled endpoint of the best edge
        vOut = 0                       # new vertex that edge reaches (0 = none found)
        for i in range(len(edges)):
            e = edges[i]
            if (e.v1 in X) and (e.v2 not in X):
                l = A[e.v1 - 1] + e.length
                if l < minLength:
                    index, minLength, vIn, vOut = i, l, e.v1, e.v2
            elif (e.v2 in X) and (e.v1 not in X):
                l = A[e.v2 - 1] + e.length
                if l < minLength:
                    index, minLength, vIn, vOut = i, l, e.v2, e.v1
        if vOut == 0:
            # No edge crosses the frontier: the remaining vertices are
            # unreachable.  (The original code looped forever here.)
            break
        # Settle the chosen vertex.
        X.append(vOut)
        A[vOut - 1] = A[vIn - 1] + edges[index].length
    return X, A
# <codecell>
for i in xrange(25):
print A[i]
# <codecell>
vertices[3].label
# <codecell>
a = [3,2,1]
# <codecell>
not 4 in a
# <codecell>
j[4]
# <codecell>
1 != 0
# <codecell>
p2 = re.compile('\s*,\s*')
# <codecell>
k = p2.split('24,509909')
# <codecell>
k[1]
# <codecell>
sys.maxint
# <codecell>
| [
"Javier@Javiers-MacBook-Pro.local"
] | Javier@Javiers-MacBook-Pro.local |
f5c46fec69927e9f21e7cc8f04c7a75aa8aeab41 | dfa1bb1bd7ef84f9e225d51761c4fc2470b51112 | /Tries/DesignAdd&SearchWordswithdot.py | 0243d05ded237c8fe86bde0e7f248feacfa2b58b | [] | no_license | gowriaddepalli/Leetcode_solutions | f31d9716a3690dcdbc0e7e4eaf9773b8bd8f7453 | 1f33cbeb52f9ec6332d05bbd3da69935ba553cf8 | refs/heads/master | 2023-04-05T11:34:42.736638 | 2021-04-16T07:33:02 | 2021-04-16T07:33:02 | 158,172,755 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,755 | py | class WordDictionary(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.trie = {}
def addWord(self, word):
"""
Adds a word into the data structure.
:type word: str
:rtype: None
"""
t = self.trie
for char in word:
if char not in t:
t[char] = {}
t = t[char]
t["*"] = True
def search(self, word):
"""
Returns if the word is in the data structure. A word could contain the dot character '.' to represent any one letter.
:type word: str
:rtype: bool
"""
## Creating a recursive function
def searchUtil(word, node):
# Trying to get indices
for i,char in enumerate(word):
# checking for case when character is not present
if not char in node:
# checking for case when character is not present but is replaced by '.'
if char == '.':
# Recursively travelling through all possible nodes in star
for x in node:
if x != "*" and searchUtil(word[i+1:], node[x]):
return True
return False
# checking for case when character is present
else:
node = node[char]
return '*' in node
return searchUtil(word, self.trie)
# Your WordDictionary object will be instantiated and called as such:
# obj = WordDictionary()
# obj.addWord(word)
# param_2 = obj.search(word)
| [
"noreply@github.com"
] | gowriaddepalli.noreply@github.com |
158391a0ca82c0639608e6f98dede3195bd12b40 | 9d862dd68f8b4ea4e7de9397fef8592824c77449 | /app/top/api/rest/FenxiaoDiscountsGetRequest.py | 2989652b695920224f032670cc2c84c122f36215 | [] | no_license | hi-noikiy/tmall-sku-outer_id | ffaca630dfb288ca33d962b8a050932d1047b9c8 | 1bcf29386a513bcb210bf5d91016e0dcb1ebc1ad | refs/heads/master | 2021-05-09T18:20:27.150316 | 2017-03-08T06:43:57 | 2017-03-08T06:43:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | '''
Created by auto_sdk on 2016.03.05
'''
from app.top.api.base import RestApi
class FenxiaoDiscountsGetRequest(RestApi):
    """TOP request wrapper for the taobao.fenxiao.discounts.get API call."""

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        # Request parameters, filled in by the caller before sending.
        self.discount_id = None
        self.ext_fields = None

    def getapiname(self):
        """API method name sent to the TOP gateway."""
        return 'taobao.fenxiao.discounts.get'
| [
"1037096435@qq.com"
] | 1037096435@qq.com |
e925cae9746d4510a5277d88ffa5e8a07c3c90e6 | 4eaab9327d25f851f9e9b2cf4e9687d5e16833f7 | /problems/critical_connections_in_a_network/solution.py | 7ddf628a29bc4208c9823e84011f61a218c0010c | [] | no_license | kadhirash/leetcode | 42e372d5e77d7b3281e287189dcc1cd7ba820bc0 | 72aea7d43471e529ee757ff912b0267ca0ce015d | refs/heads/master | 2023-01-21T19:05:15.123012 | 2020-11-28T13:53:11 | 2020-11-28T13:53:11 | 250,115,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 949 | py | class Solution:
    def criticalConnections(self, n: int, connections: List[List[int]]) -> List[List[int]]:
        """Return the bridges of the undirected graph with `n` nodes.

        Tarjan-style bridge finding: DFS assigns each node a depth, computes
        the lowest depth reachable from its subtree without re-using the edge
        to its parent, and reports edge (u, v) when v's subtree cannot reach
        u or above.  depths[x] doubles as the visited marker (0 = unvisited,
        valid because the root gets depth 1).

        NOTE(review): the annotation promises List[List[int]] but the
        elements appended below are tuples — confirm callers accept tuples.
        """
        def dfs(previous = -1, current = 0, depth = 1):
            nonlocal depths, output
            # temp_depth accumulates the low-link value for `current`.
            temp_depth = depth
            depths[current] = depth
            for neighbor in graph[current]:
                if neighbor == previous:
                    # Don't go straight back along the tree edge to the parent.
                    continue
                # Short-circuit trick: a non-zero stored depth means "already
                # visited, use it"; otherwise recurse to get the low-link.
                neighbor_depth = depths[neighbor] or dfs(current, neighbor, depth + 1)
                if depth < neighbor_depth:
                    # neighbor's subtree cannot climb above `current`:
                    # this edge is a bridge.
                    output.append((current, neighbor))
                elif neighbor_depth < temp_depth:
                    temp_depth = neighbor_depth
            depths[current] = temp_depth
            return temp_depth
        # Build the adjacency list for an undirected graph.
        graph = [[] for _ in range(n)]
        depths = [0] * n
        output = []
        for u, v in connections:
            graph[u].append(v)
            graph[v].append(u)
        dfs()
        return output
"kadhirash@gmail.com"
] | kadhirash@gmail.com |
1abbca8e926a23e8caa6e5e4d9ac72d760b5ffd7 | 5825e9bc074f7bffd72a71707fd55e3015795c19 | /app/rtanalysis/RTDataDAO.py | e3b1b656df06e8973ef4959c66d8a3f87238885e | [] | no_license | stash4/VisualizeRTNetwork | 709cefb95b964e7209c86be600501511696656fd | 680f17efbe7bd1c121963a14c73e0f7f2a898b93 | refs/heads/master | 2022-12-10T14:49:40.993918 | 2018-08-04T14:25:38 | 2018-08-04T14:25:38 | 118,082,599 | 1 | 0 | null | 2022-12-08T00:50:24 | 2018-01-19T05:44:15 | Python | UTF-8 | Python | false | false | 1,503 | py | from ..models import db, Tweet, User, Link
def register_tweet(tweet_id, text):
    """Get-or-create the Tweet row for `tweet_id`.

    Only inserts (and commits) when no row with that id exists yet; an
    existing row is returned unchanged, i.e. `text` is NOT updated.
    """
    tweet = db.session.query(Tweet).filter_by(id=tweet_id).first()
    if tweet is None:
        tweet = Tweet(tweet_id, text)
        db.session.add(tweet)
        db.session.commit()
    return tweet
def init_user(user_id, tweet_id, name, group):
    """Return the existing User row for (user_id, tweet_id), or a fresh
    unsaved User instance.

    New instances are intentionally not added to the session here; they are
    persisted later via the tweet.users relationship in register().
    An existing row is returned as-is (name/group are not refreshed).
    """
    user = db.session.query(User)\
        .filter_by(id=user_id, tweet_id=tweet_id).first()
    if user is None:
        user = User(user_id, tweet_id, name, group)
    return user
def init_link(tweet_id, source_id, target_id, distance):
    """Return the existing Link row for (tweet_id, source, target), or a
    fresh unsaved Link instance.

    Like init_user(), new instances are persisted later through the
    tweet.links relationship assignment in register(); an existing row is
    returned unchanged (distance is not refreshed).
    """
    link = db.session.query(Link)\
        .filter_by(tweet_id=tweet_id, source_id=source_id,
                   target_id=target_id).first()
    if link is None:
        link = Link(tweet_id, source_id, target_id, distance)
    return link
def register(rt_tree_dict):
    """Persist one retweet tree.

    Expects rt_tree_dict with keys 'tweetid', 'text', 'users' (each item
    with 'userid'/'name'/'group') and 'links' (each item with
    'source'/'target'/'distance') — shapes inferred from the accesses below;
    confirm against the producer of this dict.

    The tweet row is created (or fetched) first; users and links are then
    attached through the relationship collections and committed in one go.
    """
    tw_id = str(rt_tree_dict['tweetid'])
    tweet = register_tweet(tw_id, rt_tree_dict['text'])
    users = []
    for item in rt_tree_dict['users']:
        user = init_user(str(item['userid']), tw_id,
                         item['name'], item['group'])
        users.append(user)
    links = []
    for item in rt_tree_dict['links']:
        source = str(item['source'])
        target = str(item['target'])
        if source == '' and target == '':
            # Skip placeholder links with no endpoints at all.
            continue
        link = init_link(tw_id, source, target, item['distance'])
        links.append(link)
    # Assigning the collections cascades the new rows; one commit saves all.
    tweet.users = users
    tweet.links = links
    db.session.commit()
| [
"high.mackey@gmail.com"
] | high.mackey@gmail.com |
1c70223238adb6b00f186ce6e565db01b8f927a6 | 5f8b1beafc1a293f21d45a30247348cfc6f99299 | /run-length-encoding/run_length_encoding.py | 7f1c6c3e044494048fc98721f36fa0d7fe6a5c6e | [] | no_license | vineel96/PythonTrack-Exercism | aca3d16b0ca352060a7ab16c2627f44695f6b855 | c756cf38e39aa6347199cf3143f8b401345ad832 | refs/heads/master | 2020-06-20T17:43:48.972591 | 2019-07-30T06:54:51 | 2019-07-30T06:54:51 | 197,196,771 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | from collections import defaultdict
from itertools import groupby
import re
def decode(string):
    """Expand a run-length encoded string, e.g. "2AB3CD" -> "AABCCCD".

    A character without a leading count stands for a single occurrence.
    Multi-digit counts are supported ("12B" -> "B" * 12).
    """
    # Raw pattern strings: bare '\D'/'\d' are invalid escape sequences and
    # raise SyntaxWarning on modern Python 3.
    counts = [int(n) if n else 1 for n in re.split(r'\D', string)]
    letters = "".join(c for c in re.split(r'\d', string) if c)
    return "".join(count * letter for count, letter in zip(counts, letters))
def encode(string):
    """Run-length encode `string`, e.g. "AABCCCD" -> "2AB3CD".

    Runs of length 1 are emitted as the bare character, longer runs as the
    count followed by the character.
    """
    pieces = []
    for ch, run in groupby(string):
        size = sum(1 for _ in run)
        pieces.append(ch if size == 1 else str(size) + ch)
    return "".join(pieces)
"vineel_abhi96@yahoo.com"
] | vineel_abhi96@yahoo.com |
25c89d257159450711d3d594fcb6228fc6b74f46 | 480ec8b93da517e13b302d41d0bca63837d9427e | /Programming Distributed Applications/app.py | bd524c75fd215fa14ffadcda04f2459e9047005f | [] | no_license | TUM-FAF/FAF-121-Paul-Sanduleac | 0adba7aed2c298cdbfc8a545cb16bf8b92133bf8 | c68775ca7a4e7b699104c720b7bbc588a0b0ddbf | refs/heads/master | 2021-01-10T15:10:30.870633 | 2016-02-25T11:45:09 | 2016-02-25T11:45:09 | 52,519,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,987 | py | from cassandra.cluster import Cluster
from flask import Flask, request, jsonify, make_response
from flask_restful import Resource, Api
import socket, requests, os, atexit
import settings
app = Flask(__name__)
api = Api(app)
cluster = Cluster()
dbsession = cluster.connect('dev')
@app.errorhandler(404)
def not_found(error): # handles any 404 requests to the instance
    """Return a JSON body instead of Flask's default HTML 404 page."""
    return make_response(jsonify({'Error': '404 Not Found'}), 404)
class Article(Resource): # REST resource
    """Flask-RESTful resource for a single article row in Cassandra."""

    def get(self, aid):
        """Return {'aid', 'title'} for the article with primary key `aid`.

        NOTE(review): indexing single[0] raises if no row matches — a 404
        response would be friendlier.
        """
        single = dbsession.execute("SELECT aid, title FROM articles WHERE aid=%s", [aid])
        return {'aid': single[0][0],
                'title': single[0][1]}

    def post(self, aid):
        """Insert (or overwrite) article `aid` with the posted 'title' form
        field.  Parameters are bound, not string-formatted, so the CQL is
        injection-safe.  The `single` result is unused."""
        atitle=request.form['title']
        single = dbsession.execute("INSERT INTO articles(aid, title) VALUES (%s,%s)", [aid,atitle])
        return {'status': 'OK'}
api.add_resource(Article, '/article/<int:aid>') # add resource and create REST endpoint
def findport():
    """Return a TCP port number that was free on localhost at call time.

    Binds an ephemeral port (port 0), reads the number the OS assigned, and
    releases it.  Note the inherent TOCTOU race: another process may grab
    the port between this probe closing and the caller binding it.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        probe.bind(('localhost', 0))
        return probe.getsockname()[1]
    finally:
        probe.close()
def open_instance(host, port): # notify proxy of the new instance
    """POST this instance's host/port to the proxy's /openinstance endpoint.

    Returns the proxy's HTTP status code, or the sentinel 1 when the request
    never completed (bad URL or connection failure).  Python 2 print
    statements — this file is a Python 2 script.
    """
    try:
        response = 1
        url = 'http://' + settings.proxyhost + ':' + str(settings.proxyport) + '/openinstance'
        payload = {'host': host,'port': port}
        r=requests.post(url, data=payload)
        response = r.status_code
    except requests.exceptions.MissingSchema:
        # Malformed proxy URL in settings.py.
        print 'The proxy was not notified of the new instance. Please re-check the URL in settings.py.'
    except requests.exceptions.ConnectionError as error:
        print 'Could not connect to proxy: ' + str(error)
    else:
        print 'Instance open - proxy notification sent, response: %d' % response
    return response
def close_instance(host, port): # notify proxy of instance closing
    """POST this instance's host/port to the proxy's /closeinstance endpoint.

    Mirror image of open_instance(): returns the HTTP status code, or the
    sentinel 1 when the request never completed.
    """
    try:
        response = 1
        url = 'http://' + settings.proxyhost + ':' + str(settings.proxyport) + '/closeinstance'
        payload = {'host': host,'port': port}
        r=requests.post(url, data=payload)
        response = r.status_code
    except requests.exceptions.MissingSchema:
        print 'The proxy was not notified of instance closing. Please re-check the URL in settings.py.'
    except requests.exceptions.ConnectionError as error:
        print 'Could not connect to proxy: ' + str(error)
    else:
        print 'Instance closing, proxy notification sent, response: %d' % response
    return response
if __name__ == '__main__':
settings.instanceport=findport() #comment this line to use the default port from settings.py
print(open_instance(settings.instancehost, settings.instanceport))
#app.debug = True
app.run(host=settings.instancehost, port=settings.instanceport)
atexit.register(close_instance, settings.instancehost, settings.instanceport)
| [
"paulsanduleac@gmail.com"
] | paulsanduleac@gmail.com |
69961552cf5a9df9d897614b9be2d73de2c9afc7 | 3c81cfc8681ab11e54bf78f6286d48fce09d6d65 | /routes/comment.py | b5c129c1a4c25507dfc1bbe3dcfae83fcebdb5e0 | [] | no_license | xb21/bbs | 4d014231a7bb50d123a6c387555910a8d1dfc8ac | 07ca9882e0448c5ee541c5f03f2a7f37470d6da0 | refs/heads/master | 2021-08-07T22:06:59.469685 | 2017-11-09T03:07:17 | 2017-11-09T03:07:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,273 | py | from models.comment import Comment
from routes import *
main = Blueprint('comment', __name__)
Model = Comment
@main.route('/add', methods=['POST'])
@login_required
def add():
    """Create a comment from the posted form and redirect to its topic."""
    form = request.form
    m = Model(form)
    m.topic_id = int(form.get('topic_id'))
    # Ugh — username and user_id both have to be filled in by hand,
    # the form only carries the comment content and topic id.
    u = current_user()
    m.username = u.username
    m.user_id = u.id
    m.save()
    return redirect(url_for('topic.show', id=m.topic_id))
@main.route('/delete/<int:id>')
def delete(id):
    """Delete comment `id` and redirect back to its topic.

    NOTE(review): unlike add() above, this route has no @login_required and
    no ownership check — anyone can delete any comment.  Confirm intended.
    """
    c = Model.query.get(id)
    c.delete()
    return redirect(url_for('topic.show', id=c.topic_id))
# @main.route('/')
# def index():
# ms = Model.query.all()
# return render_template('topic_index.html', node_list=ms)
# @main.route('/new')
# def new():
# return render_template('topic_new.html')
# @main.route('/<int:id>')
# def show(id):
# m = Model.query.get(id)
# return render_template('topic.html', topic=m)
# @main.route('/edit/<id>')
# def edit(id):
# t = Model.query.get(id)
# return render_template('topic_edit.html', todo=t)
# @main.route('/update/<int:id>', methods=['POST'])
# def update(id):
# form = request.form
# t = Model.query.get(id)
# t.update(form)
# return redirect(url_for('.index'))
| [
"autu1995@outlook.com"
] | autu1995@outlook.com |
3f049a829e4f528690569a3ddb76c01f8186474e | aae95f673759e71d8859b05973eb563d15200114 | /M3HW1_AgeClassifier_NOVAK.py | b22943545cce9fc94576bf8756876bc736c2a747 | [] | no_license | tjnovak58/cti110 | 74ae3cb355c1bbde8a51ba9bdc943c172f109885 | 1edec8b428f5aa5858662219ddd2a4dd93a91a9c | refs/heads/master | 2021-05-15T00:47:08.440056 | 2017-11-13T20:18:03 | 2017-11-13T20:18:03 | 103,067,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | # CTI-110
# M3HW1 - Age Classifier
# Timothy Novak
# 09/24/17
#
# This program takes an age input and outputs a classification.
#
# Read an age and print exactly one classification.  The four if-tests use
# mutually exclusive ranges: <=1 infant, (1,13) child, [13,20) teenager,
# >=20 adult — together they cover every float input.
age = float(input('Enter age: '))
def main():
    # `age` is read from module scope (set above, before main() is called).
    if age >= 20:
        print('The person is an Adult')
    if age >= 13 and age < 20:
        print('The person is a Teenager')
    if age > 1 and age < 13:
        print('The person is a Child')
    if age <= 1:
        print('The person is an Infant')
main()
| [
"noreply@github.com"
] | tjnovak58.noreply@github.com |
ce6c323a26426f3a80db831e190eaa0a90371b93 | e865dfd9f629a63a731694aa4f066e1ecc625898 | /bot_instagram.py | 8bc8fea411b17cbd1736fa028d14b24c26834419 | [] | no_license | ayslanleal/mining_instagram | a08c0f79c090ce2bfb6050b63e763fb8ca2f4c32 | f753708be0b7368d6d7d95064bb12e5ba84fc04e | refs/heads/master | 2020-07-13T08:03:26.017882 | 2019-10-08T23:46:45 | 2019-10-08T23:46:45 | 205,040,553 | 2 | 2 | null | 2019-10-07T00:30:04 | 2019-08-28T23:25:52 | Python | UTF-8 | Python | false | false | 1,440 | py | from bs4 import BeautifulSoup as bs
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep
import requests
def remove_repetidos(lista):
    """Return a new list with duplicates removed, keeping first-seen order.

    Tracks already-seen items in a set, so the scan is O(n) instead of the
    original quadratic list-membership version.  Items must be hashable —
    the caller below passes image URL strings.
    """
    vistos = set()
    unicos = []
    for item in lista:
        if item not in vistos:
            vistos.add(item)
            unicos.append(item)
    return unicos
tag = 'nome da tag'  # hashtag to scrape ('nome da tag' is Portuguese for "tag name" — placeholder)
ff = webdriver.Firefox()
ff.get('https://www.instagram.com/explore/tags/'+tag)
sleep(3)  # let the tag page load before scraping starts
motion_len=250  # pixels scrolled per loop iteration
current_scroll_position, new_height = 0, 100
aux,cont=0,0  # aux: bottom-of-page retries used (max 4); cont: iteration counter
bs_obj, content, img_urls = None, None, None
img_content = []
# Scroll through the infinite-feed page, harvesting <img> src URLs from the
# post containers every 5th iteration.
while current_scroll_position <= new_height:
    if cont%5==0:
        bs_obj = bs(ff.page_source, 'html.parser')
        content = bs_obj.findAll('div', {'class':'KL4Bh'})
        # NOTE(review): the comprehension variable `cont` shadows the outer
        # counter name (scoped to the comprehension on Python 3).
        img_urls = [cont.find('img').get('src') for cont in content]
        img_content.extend(img_urls)
    current_scroll_position += motion_len
    for i in range(0,5):
        ff.execute_script("window.scrollTo(0, %d);"%(current_scroll_position))
        sleep(0.5)
    new_height = ff.execute_script("return document.body.scrollHeight")
    # At the apparent bottom: back off slightly and wait for lazy-loaded
    # content, at most 4 times before letting the loop terminate.
    if current_scroll_position>=new_height and aux!=4:
        current_scroll_position-=250
        sleep(8)
        aux+=1
    cont+=1
img_links = remove_repetidos(img_content)  # dedupe, keeping first-seen order
contador = 1
# Download each unique image into ./Imagens/<tag><n>.jpg
for img in img_links:
    imag_bc = requests.get(img)
    # NOTE(review): the file handle is never closed — `with open(...)` would
    # guarantee the write is flushed.
    save = open('./Imagens/%s%d.jpg'%(tag,contador), 'wb')
    save.write(imag_bc.content)
    contador+=1
| [
"ayslanleal15@gmail.com"
] | ayslanleal15@gmail.com |
f64962701f0c5d867ac114de49fc3c7153391009 | cf9fca3e968353b74eaffe2b52919a56e7bfe385 | /y.py | 1be9caa6e1e4f27e1509a9b86c1ecf696f5460f1 | [] | no_license | cmailhot4/morse | 7f80b4feb568a7c9eeb856fed14e0c248b918231 | 5398b15f937bb32e43aedfbaeae25e82c43dd20e | refs/heads/master | 2020-05-02T15:30:00.054584 | 2019-03-28T21:12:09 | 2019-03-28T21:12:09 | 178,042,735 | 0 | 0 | null | 2019-03-28T20:26:47 | 2019-03-27T17:23:58 | Python | UTF-8 | Python | false | false | 240 | py | def run():
from gpiozero import LED
from time import sleep
led = LED(17)
led.on()
sleep(3)
led.off()
sleep(1)
led.on()
sleep(1)
led.off()
sleep(1)
led.on()
sleep(3)
led.off()
sleep(1)
led.on()
sleep(3)
led.off()
sleep(3)
| [
"mailhot.christophe@carrefour.cegepvicto.ca"
] | mailhot.christophe@carrefour.cegepvicto.ca |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.