Dataset schema (one row per source file; 29 columns):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 2-616)
- content_id: string (length 40)
- detected_licenses: list (0-69 items)
- license_type: string (2 classes)
- repo_name: string (length 5-118)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (length 4-63)
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64 (2.91k-686M, nullable)
- star_events_count: int64 (0-209k)
- fork_events_count: int64 (0-110k)
- gha_license_id: string (23 classes)
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string (213 classes)
- src_encoding: string (30 classes)
- language: string (1 class)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (2-10.3M)
- extension: string (246 classes)
- content: string (length 2-10.3M)
- authors: list (1-1 items)
- author_id: string (length 0-212)

Records follow, grouped metadata first, then the file under `content:`.

blob_id: 7a976abc157ca011f31dc259caf012a24e0288c1 | directory_id: 00118547c207df7d0e20c4faa70651f57ea88f93 | content_id: 4347a609bface106fbfd69338a29e27effc51270
path: /TestApi/urls.py | repo_name: maheshch123/Django_Rest_Api | branch_name: refs/heads/master
snapshot_id: 311da66cbad9b864bf391fca8e02a850751d7d25 | revision_id: 290679c970f9a2bd6e12f256e14a0e5880fbb26e
detected_licenses: [] | license_type: no_license | github_id: 277,303,988 | star_events_count: 0 | fork_events_count: 0
visit_date: 2022-11-08T20:05:02.993311 | revision_date: 2020-07-06T06:51:15 | committer_date: 2020-07-06T06:51:15
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 916 | extension: py
content:
"""TestApi URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from rest_framework.authtoken.views import obtain_auth_token

urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('API.urls')),
    path('api/token/', obtain_auth_token, name='obtain-token'),
]
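
A minimal sketch of how a client could hit the `api/token/` route wired above; the host, port, and credentials are illustrative assumptions, not values from this repository:

```python
# Hypothetical client call against a local development server.
import requests

resp = requests.post(
    "http://127.0.0.1:8000/api/token/",
    data={"username": "demo", "password": "demo-pass"},  # placeholder credentials
)
print(resp.json())  # obtain_auth_token responds with JSON like {"token": "..."}
```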
authors: ["a1nusuz7"] | author_id: a1nusuz7

blob_id: 06322667cef027cf634ddc8793a130834965eff0 | directory_id: 63685682acb59882362fd9e4294db6e7a8ad374d | content_id: 1a885ff694bce9813f2e9093ab2836aa34ab430b
path: /COMpiler DEsign/cd_IC/cd_ic.py | repo_name: GTron-1729/VirtualBox | branch_name: refs/heads/main
snapshot_id: 7c6b17f1793fcad70031700d84bd408a1307229e | revision_id: 4abbae6615ca61e7a80d1f02e4e3e183f5918dd0
detected_licenses: [] | license_type: no_license | github_id: 340,276,534 | star_events_count: 0 | fork_events_count: 0
visit_date: 2023-03-05T22:09:18.520930 | revision_date: 2021-02-19T06:19:28 | committer_date: 2021-02-19T06:19:28
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,394 | extension: py
content:
def IsInt(s):
    try:
        int(s)
        return True
    except ValueError:
        return False

# Mnemonic tables: imperative statements (IS), assembler directives (AD),
# declaratives (DL), and registers (RG).
IS = {"STOP": "00", "ADD": "01", "SUB": "02", "MULTI": "03", "MOVER": "04", "MOVEM": "05", "COMB": "06", "BC": "07", "DIV": "08", "READ": "09", "PRINT": "10"}
AD = {"END": "02", "START": "01", "ORIGIN": "03", "EQU": "04", "LTORG": "05"}
DL = {"DS": "01", "DC": "02"}
RG = {"AREG": "01", "BREG": "02", "CREG": "03", "DREG": "04"}
literals = []
symbols = dict()
f = open("test.txt", "r")
w = open("result.txt", "w+")
symbol_count = 0
k = 0
p = 0
o = 0
for l in f:
    l = l.replace(",", " ")
    ins = l.split()
    for i in ins:
        if i in IS:
            w.write("(IS, " + str(IS[i]) + ")")
        elif i in AD:
            if i == "LTORG":
                w.write("(AD,05)(DL,02)(C," + str(literals[p][2:4]) + ")")
                p += 1
            if i == "END":
                while p != len(literals):
                    w.write("(AD,02)(DL,02)(C," + str(literals[p][2:3]) + ")")
                    w.write("\n")
                    p += 1
            else:
                w.write("(AD, " + str(AD[i]) + ")")
        elif i in DL:
            w.write("(DL, " + str(DL[i]) + ")")
        elif i in RG:
            w.write("(RG, " + str(RG[i]) + ")")
        elif i[0] == "=":
            w.write("(L,0" + str(k) + ")")
            literals.append(i)
            k += 1
        elif IsInt(i):
            w.write("(C, " + i + ")")
        elif not IsInt(i):
            if i not in symbols:
                symbols[i] = o
                o += 1
            w.write("(S,0" + str(symbols[i]) + ")")
    w.write('\n')
print(literals)
print(list(set(symbols)))
w.seek(0)  # rewind: the file was just written, so reading it back requires seeking to the start
for line in w:
    print(line)
w.close()
f.close()
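
As a worked illustration of the table lookups above (the input is hypothetical; no test.txt ships with this snippet):

```python
# If test.txt held the single line:
#     MOVER AREG A
# the pass above would write to result.txt:
#     (IS, 04)(RG, 01)(S,00)
# "MOVER" hits the IS table, "AREG" the RG table, and "A", being neither a
# number nor a known mnemonic, becomes symbol 0 in the symbol table.
```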
authors: ["noreply@github.com"] | author_id: GTron-1729.noreply@github.com

blob_id: b81efd9509dfca6b45daae583390c07532597f9f | directory_id: 99a951aba3eaec191a720794505602c04daeafbf | content_id: 58c42733ec854937dbb232a982a839afb2389616
path: /src/settings.py | repo_name: jian9ang-git/Cutter2 | branch_name: refs/heads/master
snapshot_id: 6cdf1fec8e44f62c617064e09ca5677e5f812052 | revision_id: 19f8793e6f8731e72dbc05e72f21d5a0046b1188
detected_licenses: [] | license_type: no_license | github_id: 358,627,111 | star_events_count: 0 | fork_events_count: 0
visit_date: 2023-04-04T00:13:01.054320 | revision_date: 2021-04-16T14:43:04 | committer_date: 2021-04-16T14:43:04
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,573 | extension: py
content:
"""
Django settings for src project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
import django_heroku
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-bt848evdkv!4x8n@l9+(07qj5-ab^09y2gb@ph=_@mrw)$-*rz'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'todo',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'src.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': ['templates', 'todo/templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'src.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static'),  # was os.path.join('BASE_DIR', 'static'): the quotes made it a literal string, not the BASE_DIR path variable
]
django_heroku.settings(locals())
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
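
Given the "keep the secret key used in production secret!" warning above, a common hardening sketch is to read the key from the environment; the variable name DJANGO_SECRET_KEY is an assumption for illustration:

```python
import os

# Fall back to a throwaway development key only when the variable is unset.
SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY", "django-insecure-dev-only")
```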
authors: ["jian9ang1@ya.ru"] | author_id: jian9ang1@ya.ru

blob_id: fc7c34dbe438dfb35ae275fe547efd9189d342c7 | directory_id: c5ea14333ad364e3b1a698c54e84cd8980ef0497 | content_id: b65b2b6b31758534a2fef0a0cbdc02429c0e0204
path: /OpenCV-Face-detection/liveCheck2.py | repo_name: lingdantiancai/face-FD-FR | branch_name: refs/heads/master
snapshot_id: 2d0d5a48b9bf9d8dc5393723604a04ab85a91c9c | revision_id: 48f1acafb4a4fc767c8d389a28e4b4e73246a7ea
detected_licenses: ["MIT"] | license_type: permissive | github_id: 132,433,583 | star_events_count: 0 | fork_events_count: 0
visit_date: 2020-03-16T01:08:21.910897 | revision_date: 2018-05-29T08:41:25 | committer_date: 2018-05-29T08:41:25
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 481 | extension: py
content:
import urllib, sys
import ssl
import urllib.request

# client_id is the API key (AK) from the console; client_secret is the
# secret key (SK) from the console.
host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=4a9WXWn9VtuX0bTAGDuo0iAF&client_secret=p8O7KCal5A2qmEmBgEjU6Sw6EC32kMXj'
request = urllib.request.Request(host)  # was urllib.request(host): the module itself is not callable
request.add_header('Content-Type', 'application/json; charset=UTF-8')
response = urllib.request.urlopen(request)  # was urllib2.urlopen: urllib2 does not exist in Python 3
content = response.read()
if content:
    print(content)
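
The token endpoint answers with a JSON body; a sketch of extracting the token, treating the exact response schema (including the access_token field name) as an assumption:

```python
import json

token = json.loads(content).get("access_token")  # field name assumed
print(token)
```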
authors: ["lingdantiancai@163.com"] | author_id: lingdantiancai@163.com

blob_id: 0b8d320c7555f12567117dea800b9a9d89583b92 | directory_id: dbfd10d75b6e2f8daca971053c8e38717520f5a1 | content_id: 0fb2189663869dc9fe014b877ec39b20bb006bad
path: /problem_util_yr/batchinfer_sh/nfyy_zhusu_chunk/test_batchinfer_ProblemDecoder_chunk_xbs.py | repo_name: 2877992943/src-201705-201907 | branch_name: refs/heads/master
snapshot_id: b0117bbdf4aadc9612f8c1bf0fc811094ac90cb6 | revision_id: ff8302047d1d3a28f329c1b1008a350bb51d627e
detected_licenses: [] | license_type: no_license | github_id: 240,273,051 | star_events_count: 0 | fork_events_count: 0
visit_date: 2021-01-03T23:03:01.314303 | revision_date: 2020-03-25T08:19:38 | committer_date: 2020-03-25T08:19:38
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 9,027 | extension: py
content:
# coding:utf-8
import sys
reload(sys)
sys.setdefaultencoding("utf8")
""" test model with individual samples ,before batch run"""
import sys
print sys.path
from itertools import groupby
from operator import itemgetter
from problem_util_yr.infer.get_chunk_symp_sign import get_abstract
from problem_util_yr.loadDict.label_and_CN import get_LABEL_NEEDED_this_problem
from problem_util_yr.infer.get_chunk_symp_sign import limit_ceiling_floor
#from problem_util_yr.infer.ProblemDecoder_singleTest_forChunk import singleTest_predict_chunk_writeout
import problem_util_yr.t2t162.ProblemDecoder_predict as pd
import tensorflow as tf
import json,copy,time
import numpy as np
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("model_dir", None,"tmp")
flags.DEFINE_string("usr_dir", None,"tmp")
flags.DEFINE_string("inputFile", None,"tmp")
flags.DEFINE_string("outputFile", None,"tmp")
#FLAGS.schedule=""
# FLAGS.worker_gpu=0
# FLAGS.decode_extra_length=50
# #FLAGS.decode_return_beams=2
# FLAGS.decode_beam_size=6
# FLAGS.decode_alpha=0.6
# FLAGS.decode_batch_size=1
tf.logging.set_verbosity(tf.logging.INFO)
#tf.logging.set_verbosity(tf.logging.DEBUG)
import argparse,os
parser = argparse.ArgumentParser(description='Problem decoder test(all in memory)')
parser.add_argument('--problem',dest='problem',help='registered problem')
parser.add_argument('--hparams',dest='hparams', help='hparams set')
parser.add_argument('--model_dir',dest='model_dir', help='model directory')
parser.add_argument('--model_name',dest='model_name', help='model name')
parser.add_argument('--usr_dir',dest='usr_dir', help='user problem directory')
#parser.add_argument('--port',dest='port', help='Listening port')
#parser.add_argument('--isGpu',dest='isGpu',type=int, help='if using GPU')
#parser.add_argument('--dict_dir',dest='dict_dir', help='dict port')
parser.add_argument('--data_dir',dest='data_dir', help='dict port')
#parser.add_argument('--log_dir',dest='log_dir', help='dict port')
#parser.add_argument('--timeout',dest='timeout', help='dict port')
#parser.add_argument('--beam_size',dest='beam_size',type=int,help='decode beam size',default=1)
#parser.add_argument('--decode_extra_length',dest='decode_extra_length',type=int,help='decode_extra_length',default=0)
#parser.add_argument('--worker_gpu_memory_fraction',dest='worker_gpu_memory_fraction',type=float,default=0.95,help='memory fraction')
#parser.add_argument('--decode_alpha',dest='decode_alpha',type=float,default=0.6,help='decode alpha')
#parser.add_argument('--return_beams',dest='return_beams',type=bool,default=False,help='return beams')
#parser.add_argument('--use_last_position_only',dest='use_last_position_only',type=bool,default=True,help='use_last_position_only')
#parser.add_argument('--is_short_sympton',dest='is_short_sympton',type=int,default=1,help='is_short_sympton')
#parser.add_argument('--alternate',dest='alternate',type=str,default=None,help='alternate server')
parser.add_argument('--inputFile',dest='inputFile',type=str,default=None,help='Input text file')
parser.add_argument('--outputFile',dest='outputFile',type=str,default=None,help='Output text file')
args = parser.parse_args()
if __name__ == '__main__':
    ##########
    ## parameter
    ##########
    currentpath = os.path.abspath('./')
    debug_or_batchRunInDocker = 1  # local debugging, or batch prediction inside Docker
    data_dir = [args.data_dir, '../data'][debug_or_batchRunInDocker]
    # load model and predict param
    problem = "xbschunk_problem"
    model_dir = [args.model_dir, "../model"][debug_or_batchRunInDocker]
    model_name = "transformer"
    hparams_set = "transformer_base_single_gpu"
    usr_dir = [args.usr_dir, "../src"][debug_or_batchRunInDocker]
    inputFile = [args.inputFile, './tmp/'][debug_or_batchRunInDocker]
    outputFile = [args.outputFile, './tmp/'][debug_or_batchRunInDocker]
    inp_fname = 'tmp_xbs.json'
    corpus = 'xbs'  # name of the key in the JSON file
    cut_sentenct_flag = True  # zhusu not cut, xbs do cut
    enlabel = """symptom
signal
test
inspect
diagnosis
treatment
other
pad""".split()
    enlabel_need = ['symptom', 'signal']
    OOM_LIMIT = 40000
    extralen = 0
    paramlist = [data_dir, problem, model_dir, model_name, hparams_set, usr_dir,
                 inputFile, outputFile, inp_fname,
                 corpus, cut_sentenct_flag, enlabel, enlabel_need, OOM_LIMIT, extralen]
    from problem_util_yr.t2t162.test_batchinfer_ProblemDecoder_chunk import main
    main(paramlist, start_file_ind=0)
# ######## data generator -> cut sentences -> sort by length from long to short; chief complaints (zhusu) need no paragraph cutting
# ##### sort -> group by length: mixed lengths in one batch force PADDING, and this CHUNK problem needs equal input/output lengths, so PADDING is not acceptable
# if cut_sentenct_flag==False:
# steps_zhusuParagraph_sort_writeInDisk(inputFile, inp_fname, outputFile)
# elif cut_sentenct_flag==True:
# steps_xbsParagraph_cut_sort_writeInDisk(inputFile, inp_fname, outputFile)
# #
# # study_distribution()
#
#
#
#
#
#
#
#
#
# time_start = time.time()
# ############
# # start predict
#
# fll=get_filename_list(os.path.join(outputFile,'sort/'))
# for fname in fll[:]:
# print 'start ... file #%d (if OOM killed the run, restart from here) ... filename: %s'%(fll.index(fname),fname)
# length_this_file=int(fname.split('/')[-1].strip('.json'))
# # cache and batch setting
# batch_size_this_lengthRange=OOM_LIMIT/length_this_file
# MAX_MEMORY_COULD_HOLD=3*batch_size_this_lengthRange
# # data generator
# data_gene_single = single_generator(fname,'text')
# data_gene_cache=data_generator_cache(data_gene_single,MAX_MEMORY_COULD_HOLD);
#
# # ### debug
# # for d in data_gene_cache:
# # print ''
#
#
#
#
# tf.reset_default_graph()
# decoder = pd.ProblemDecoder_predict(problem=problem,
# model_dir=model_dir,
# model_name=model_name,
# hparams_set=hparams_set,
# usr_dir=usr_dir,
# data_dir=data_dir,
# isGpu=True,
# timeout=15000,
# fraction=0.95,
# beam_size=1,
# alpha=0.6,
# return_beams=False,
# extra_length=111,
# use_last_position_only=False,
# batch_size_specify=batch_size_this_lengthRange,
# write_beam_scores=False,
# eos_required=False,
# hparams_key_value=None)
#
#
#
#
# ######
#
#
#
# ### Output paths: chunks of different lengths go to separate files, so an interrupted run can resume partway
# labNeed_writer_dict = dict(zip(enlabel_need, [''] * len(enlabel_need)))
# for labFileName in enlabel_need:
#
# writer_this_lab=open(os.path.join(outputFile,'result',
# '%s_chunk_batchsize%d_length%d.json'%(labFileName,batch_size_this_lengthRange,length_this_file)),'w')
# labNeed_writer_dict[labFileName]=writer_this_lab
# writer_tmp=open(os.path.join(outputFile,'result',
# 'allchunk_tmp_result_batchsize%d_length%d.json'%(batch_size_this_lengthRange,length_this_file)),'w')
#
#
#
#
#
#
# # as large as memory can hold : dic_cache
# for dic_cache in data_gene_cache: #dic_cache [d,d] d={'vid':xxx,'xbs':xxx # such as 10000 obs
# #
# start_time_i=time.time()
# # predict
#
# input_string_ll=[dic['text'].decode('utf-8') for dic in dic_cache]
# if len(input_string_ll)==0:continue
# inputsList, retll=decoder.infer_batch_seq2seq(input_string_ll)#list
# print 'done', len(dic_cache),time.time()-start_time_i #100piece/1second
#
# steps_xbschunk_afterInferProcess_writeInDisk(inputsList,retll,dic_cache,enlabel,writer_tmp,labNeed_writer_dict)
# #
#
#
#
#
#
#
#
#
# #####
# print 'it take how long',time.time()-time_start
# print inp_fname
## batch size 600 time ? 135
## batch size 300 time ?120
## batch size 200 time ?112
## batch size 100 time ?116
## batch size 50 time ?148
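
The commented-out scheduling above sizes each batch so that roughly OOM_LIMIT tokens are in flight at once; the arithmetic for one length bucket (numbers illustrative):

```python
OOM_LIMIT = 40000
length_this_file = 200  # every sequence in this bucket is ~200 tokens long
batch_size_this_lengthRange = OOM_LIMIT // length_this_file  # -> 200 sequences per batch
```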
authors: ["2877992943@qq.com"] | author_id: 2877992943@qq.com

blob_id: 8a24ec7494a8f9ae7c22898ee2fd432538cdbbaa | directory_id: 97df98c69b1742111a2d48a0a31aeecd959ad68f | content_id: 87a53861f7e8b3058e066bb71387526e89a0b7e7
path: /모듈/내장함수.py | repo_name: 5d5ng/Python_Study | branch_name: refs/heads/master
snapshot_id: c3eb2a87b498b51c53240f89aff1a4903c5709cf | revision_id: fdd14188deec38968040031910a029f8e7a338ef
detected_licenses: [] | license_type: no_license | github_id: 270,594,283 | star_events_count: 0 | fork_events_count: 0
visit_date: 2022-10-17T08:09:43.163745 | revision_date: 2020-06-15T04:43:19 | committer_date: 2020-06-15T04:43:19
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 433 | extension: py
content:
# input: a function that reads input from the user
# language = input("Which language?")
# print(language)
# dir: given an object, lists which variables and functions that object carries
print(dir())
import random  # standard-library module
print(dir())
import pickle
print(dir())
print(dir(random))
lst = [1, 2, 3]
print(dir(lst))
# Searching for "list of python builtins" turns up the documentation on the built-in functions.
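
A small follow-on to the dir() experiments above: filtering out the dunder names makes the interesting members easier to see (illustrative):

```python
import random

print([name for name in dir(random) if not name.startswith("_")])
```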
authors: ["deo1915@gmail.com"] | author_id: deo1915@gmail.com

blob_id: a3ec060fccd09341fcdefccc903e4ab2ef8e03c5 | directory_id: 973d81b2edaea791b7f941c191507af830a839ac | content_id: 94f3998135d36a9a94950edfb9cd5cba2ef35ada
path: /api/admin.py | repo_name: pymq/abrakabra | branch_name: refs/heads/master
snapshot_id: 3dee5cbd33829fa49e8dfbf111dc354bda876501 | revision_id: 849e68460cbf7f46390b6d28b25142cac0319025
detected_licenses: [] | license_type: no_license | github_id: 125,816,000 | star_events_count: 0 | fork_events_count: 0
visit_date: 2021-04-12T03:39:02.263625 | revision_date: 2018-04-23T14:57:21 | committer_date: 2018-04-23T14:57:21
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 128 | extension: py
content:
from django.contrib import admin
from .models import Article, Ticket
admin.site.register(Article)
admin.site.register(Ticket)
authors: ["maksimmerzh@gmail.com"] | author_id: maksimmerzh@gmail.com

blob_id: 862d5c6502ec9a266ac632f8360b731fe3f5108f | directory_id: 54ddb3f38cd09ac25213a7eb8743376fe778fee8 | content_id: 6c4acda8450921793b5fae72e4601329a3c2f802
path: /topic_14_virtual_environments/examples/where_libs.py | repo_name: ryndovaira/leveluppythonlevel1_300321 | branch_name: refs/heads/master
snapshot_id: dbfd4ee41485870097ee490f652751776ccbd7ab | revision_id: 0877226e6fdb8945531775c42193a90ddb9c8a8b
detected_licenses: [] | license_type: no_license | github_id: 376,595,962 | star_events_count: 0 | fork_events_count: 0
visit_date: 2023-06-06T07:44:15.157913 | revision_date: 2021-06-18T11:53:35 | committer_date: 2021-06-18T11:53:35
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 482 | extension: py
content:
print('\n--------------------------------------- System packages ----------------------------------------------------')
import sys
print(sys.prefix)  # /usr
print('\n--------------------------------------- Third-party packages ----------------------------------------------------')
import site
data = site.getsitepackages()
print(data)
# ['/usr/local/lib/python3.7/dist-packages', '/usr/lib/python3/dist-packages', '/usr/lib/python3.7/dist-packages']
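
Related to where packages live: since Python 3.3, comparing sys.prefix with sys.base_prefix shows whether the interpreter is running inside a virtual environment (illustrative sketch):

```python
import sys

# Inside a venv, sys.prefix points at the venv, while sys.base_prefix
# still points at the base interpreter the venv was created from.
print(sys.prefix != sys.base_prefix)  # True inside a virtual environment
```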
authors: ["ryndovaira@gmail.com"] | author_id: ryndovaira@gmail.com

blob_id: d3d4b587937017073a905905674b5d5469e5aa24 | directory_id: 61b9fc6e9acac58fdf9cab6d8310137ed9a4f83b | content_id: 8f93679c72fd6f465604345f82c5530efac76273
path: /blog/views.py | repo_name: khushbua1/my-first-blog | branch_name: refs/heads/master
snapshot_id: 370000ce127fbe47012c57df3ec0486d43d91208 | revision_id: 7a63387a26e872892388a2a0f8797294e516b0b8
detected_licenses: [] | license_type: no_license | github_id: 297,565,233 | star_events_count: 0 | fork_events_count: 0
visit_date: 2022-12-16T02:30:30.882818 | revision_date: 2020-09-22T12:33:25 | committer_date: 2020-09-22T12:33:25
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,413 | extension: py
content:
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from .models import Post
from .forms import PostForm


def post_list(request):
    posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')
    return render(request, 'blog/post_list.html', {'posts': posts})


def post_detail(request, pk):
    post = get_object_or_404(Post, pk=pk)
    return render(request, 'blog/post_detail.html', {'post': post})


def post_new(request):
    if request.method == "POST":
        form = PostForm(request.POST)
        if form.is_valid():
            post = form.save(commit=False)
            post.author = request.user
            post.published_date = timezone.now()
            post.save()
            return redirect('post_detail', pk=post.pk)
    else:
        form = PostForm()
    return render(request, 'blog/post_edit.html', {'form': form})


def post_edit(request, pk):
    post = get_object_or_404(Post, pk=pk)
    if request.method == "POST":
        form = PostForm(request.POST, instance=post)
        if form.is_valid():
            post = form.save(commit=False)
            post.author = request.user
            post.published_date = timezone.now()
            post.save()
            return redirect('post_detail', pk=post.pk)
    else:
        form = PostForm(instance=post)
    return render(request, 'blog/post_edit.html', {'form': form})
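
The redirect('post_detail', pk=post.pk) calls above resolve named URL patterns; a sketch of a matching blog/urls.py (the route strings are assumptions, only the pattern names are implied by the views):

```python
from django.urls import path
from . import views

urlpatterns = [
    path('', views.post_list, name='post_list'),
    path('post/<int:pk>/', views.post_detail, name='post_detail'),
    path('post/new/', views.post_new, name='post_new'),
    path('post/<int:pk>/edit/', views.post_edit, name='post_edit'),
]
```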
authors: ["you@example.com"] | author_id: you@example.com

blob_id: fb16758ec5a473a6718d937f43be4d6b51a2f5e8 | directory_id: 3f6e04a01af3a3863c371bedf33c0bb1d33aae28 | content_id: 2a6d5a04cd17ae4a402b9e99d7bc1ad637208b8d
path: /Day_7/data_checker/validation_checker.py | repo_name: aakashv129/ADF_Assignment_1 | branch_name: refs/heads/master
snapshot_id: 004ec17848c676a762f13d1fe62998942ff2cc62 | revision_id: ffdb8f83adae8a8fd7079dc0dd82e06612b33a0c
detected_licenses: [] | license_type: no_license | github_id: 378,084,910 | star_events_count: 0 | fork_events_count: 0
visit_date: 2023-06-06T03:08:48.910263 | revision_date: 2021-07-01T09:28:33 | committer_date: 2021-07-01T09:28:33
gha_license_id: null | gha_event_created_at: 2021-06-28T11:33:33 | gha_created_at: 2021-06-18T08:29:34 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 7,353 | extension: py
content:
"""Validation Checking file"""
import datetime
import re
from datetime import date
from datetime import datetime
#pylint: disable=anomalous-backslash-in-string
#pylint: disable=too-many-arguments
def calculate_age(birthdate_date):
"""Age Calculation"""
today = date.today()
age = today.year - birthdate_date.year - \
((today.month, today.day) < (birthdate_date.month, birthdate_date.day))
return age
def f_name_checker(name):
"""First Name Validation"""
if name.isalpha():
flag_2 = 0
reason_2 = "Success"
#logger.info("First Name is Valid")
else:
reason_2 = "Invalid name it should be ascii value"
flag_2 = 1
#logger.warning("First Name is Invalid")
return flag_2, reason_2
def m_name_checker(name):
"""Middle Name Validation"""
flag_3 = 0
if name.isalpha() or name.isspace():
reason_3 = "Success"
#logger.info("Middle name is Valid")
else:
reason_3 = "Invalid name it should be ascii value"
flag_3 = 1
#logger.warning("Middle Name is Invalid")
return flag_3, reason_3
def l_name_checker(name):
"""Last Name Validation"""
if name.isalpha():
flag_4 = 0
reason_4 = "Success"
#logger.info("Last name is Valid")
else:
reason_4 = "Invalid name it should be ascii value"
flag_4 = 1
#logger.warning("Last Name is Invalid")
return flag_4, reason_4
def dob_checker(date_of_birth):
"""Date Of Birth Validation"""
temp=[]
print(date_of_birth)
try:
birth_date = datetime.strptime(date_of_birth, "%Y-%m-%d")
birth_date = birth_date.date()
birth_date = str(birth_date)
temp = birth_date.split('-')
reason_5 = "Success"
flag_5 = 0
print(temp)
#logger.info("Date-Of-Birth is valid")
except ValueError:
flag_5 = 1
reason_5 = "Incorrect data format, should be DD-MM-YYYY"
#logger.error("Date-Of-Birth in Incorrect Format")
return flag_5, reason_5, temp
def gender_checker(gender_1):
"""Gender Validation"""
if gender_1.lower() == 'male' or gender_1.lower() == 'female':
reason_6 = "Success"
flag_6 = 0
#logger.info("Gender is Valid")
else:
flag_6 = 1
reason_6 = "Gender Should be M/F"
#logger.warning("Gender Should be M/F")
return flag_6, reason_6
def age_validity_checker(gender_gen, bind):
"""Age Validation"""
years = calculate_age(date(int(bind[0]), int(bind[1]), int(bind[2])))
print("Age:", years)
gender_gen = gender_gen.lower()
if int(years) < 21 and gender_gen == 'male':
reason_7 = "Age is less than expected"
flag_7 = 2
#logger.warning("Age Validation Failed")
elif int(years) < 18 and gender_gen == 'female':
reason_7 = "Age is less than expected"
flag_7 = 2
#logger.warning("Age Validation Failed")
else:
reason_7 = "Success"
flag_7 = 0
#logger.info("Age Validation is Succeeded")
return flag_7, reason_7
def nation_checker(nation):
"""Nationality Validation"""
if nation.lower() == 'indian' or nation.lower() == 'american':
reason_8 = "Success"
flag_8 = 0
#logger.info("Nation is Validated")
else:
reason_8 = "Should be an Indian/American"
flag_8 = 2
#logger.warning("Nationality should be Indian/American")
return flag_8, reason_8
def city_checker(cit):
"""City Validation"""
if cit.isalpha():
reason_9 = "Success"
flag_9 = 0
#logger.info("City is Validated")
else:
reason_9 = "City validation Error"
flag_9 = 1
#logger.warning("City Validation Failed")
return flag_9, reason_9
def state_checker(state_1):
"""State Validation"""
state_list = ["andhra pradesh", "arunachal pradesh", "assam", "bihar", "chhattisgarh",
"karnataka", "madhya pradesh", "odisha", "tamil nadu",
"telangana", "west bengal"]
if state_1.lower() not in state_list:
reason_10 = "State not in the list"
flag_10 = 2
#logger.warning("State Validation Failed")
else:
reason_10 = "Success"
flag_10 = 0
#logger.info("State Validation Success")
return flag_10, reason_10
def pin_code_checker(pin):
"""Pin_Code Validation"""
if len(str(pin)) == 6 and str(pin).isdigit():
reason_11 = "Success"
flag_11 = 0
#logger.info("Pin Code is in correct Format")
elif len(str(pin)) != 6:
reason_11 = "Invalid Pin-Code it should have six digits"
flag_11 = 1
#logger.warning("Pin-Code Validation Failed")
else:
reason_11 = "Invalid Pin-Code it should be digits"
flag_11 = 1
#logger.warning("Pin-Code Validation Failed")
return flag_11, reason_11
def qualification_checker(qual):
"""Qualification Validation"""
result = re.match('[a-zA-Z\s]+$', qual)
if bool(result):
reason_12 = "Success"
flag_12 = 0
#logger.info("Qualification is Validated")
else:
reason_12 = "Invalid Educational qualification"
flag_12 = 1
#logger.warning("Invalid Educational Qualification")
return flag_12, reason_12
def salary_checker(sal):
"""Salary Validation"""
if int(sal) < 10000:
reason_13 = "Salary is less than expected"
flag_13 = 2
#logger.warning("Salary is less than Expected")
elif int(sal) > 90000:
reason_13 = "Salary is more than expected"
flag_13 = 2
#logger.warning("Salary is more than expected")
else:
reason_13 = "Success"
flag_13 = 0
#logger.info("Salary is validated")
return flag_13, reason_13
def pan_checker(pan):
"""Pan_card Validation"""
if len(pan) == 10 and re.match('[A-Z]+$', pan[0:3]) and re.match('[0-9]+$', pan[3:10]):
reason_14= "success"
flag_14 = 0
#logger.info("Pan_Card is Validated")
else:
reason_14 = "Invalid Pan Credential"
flag_14 = 1
#logger.warning("Invalid Pan_details")
return flag_14, reason_14
def valid_check(f_name, m_name, l_name, dob, gender, nationality,
city, state, pin_code, qualification, salary, pan_number):
"""Validation"""
flag_1 = 0
flag_1, reason = f_name_checker(f_name)
if flag_1 == 0:
flag_1, reason = m_name_checker(m_name)
if flag_1 == 0:
flag_1, reason = l_name_checker(l_name)
if flag_1 == 0:
flag_1, reason, bind = dob_checker(dob)
if flag_1 == 0:
flag_1, reason = gender_checker(gender)
if flag_1 == 0:
flag_1, reason = age_validity_checker(gender, bind)
if flag_1 == 0:
flag_1, reason = nation_checker(nationality)
if flag_1 == 0:
flag_1, reason = city_checker(city)
if flag_1 == 0:
flag_1, reason = state_checker(state)
if flag_1 == 0:
flag_1, reason = pin_code_checker(pin_code)
if flag_1 == 0:
flag_1, reason = qualification_checker(qualification)
if flag_1 == 0:
flag_1, reason = salary_checker(salary)
if flag_1 == 0:
flag_1, reason = pan_checker(pan_number)
return flag_1, reason
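
An illustrative call of the checker chain above with made-up applicant data (none of it comes from the repository); every checker passes, so the lowercase "success" from pan_checker is what comes back:

```python
flag, reason = valid_check(
    "Asha", "K", "Verma", "1995-04-12", "female", "indian",
    "Chennai", "Tamil Nadu", "600001", "BSc Physics", 45000, "ABC1234567",
)
print(flag, reason)  # -> 0 success
```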
authors: ["58223250+aakashv129@users.noreply.github.com"] | author_id: 58223250+aakashv129@users.noreply.github.com

blob_id: bd179fb68a4c4afd1e1d63b938f8901b2a5849e9 | directory_id: 8808f1cbd3a658e52d972f0762fd3c885ae03eca | content_id: 776f74c766b522189f02dad494921d64d2042c8d
path: /order_test/run.py | repo_name: baihei123456/adidashk_order | branch_name: refs/heads/master
snapshot_id: 2ba5f59ebd3b5d1443d70bb3aa6f87d79a471440 | revision_id: 25053b27e762316151eb5080befaf821fd12d7f6
detected_licenses: [] | license_type: no_license | github_id: 334,356,165 | star_events_count: 0 | fork_events_count: 0
visit_date: 2023-02-23T05:54:01.457884 | revision_date: 2021-01-30T09:11:21 | committer_date: 2021-01-30T09:11:21
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 136 | extension: py
content:
# -*- coding: UTF-8 -*-
from threading import Timer
import time
import order


def run():
    # NOTE: order123 is not defined in this file; it presumably refers to
    # something in the imported `order` module.
    order123()
    # Re-schedule this function to run again in 600 seconds.
    Timer(600, run).start()


run()
authors: ["21846582@qq.com"] | author_id: 21846582@qq.com

blob_id: a01c4d4d4bb27303a8935fd832804b35f6e302e2 | directory_id: f7cc8d3f04d34b7d7e64e1b54ba458e4b39bce49 | content_id: 56f03b01f6f2725c97f2752ad5eaaac801b285c4
path: /DevelopTools/git/2.20.1/package.py | repo_name: cashmerepipeline/CashmereRez | branch_name: refs/heads/master
snapshot_id: 80a53af61ddb8506bb111cd16450538c3b405689 | revision_id: 13a73931d715ffac27c337abcd6df97b5c47534b
detected_licenses: ["MIT"] | license_type: permissive | github_id: 181,132,180 | star_events_count: 0 | fork_events_count: 0
visit_date: 2020-05-09T12:59:28.106229 | revision_date: 2019-04-17T16:39:46 | committer_date: 2019-04-17T16:39:46
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 369 | extension: py
content:
# -*- coding: utf-8 -*-
name = 'git'
version = '2.20.1'
author = ['git']
variants = []


def commands():
    import os
    develop_path = os.environ["DEVELOP_TOOLS_PATH"]
    # `env` is injected by the rez runtime when commands() is executed.
    env.PATH.prepend(os.path.join(develop_path, "git", "%s" % version).replace("/", os.sep))
    env.PATH.prepend(os.path.join(develop_path, "git", "%s" % version, "cmd").replace("/", os.sep))
authors: ["yes7rose@sina.com"] | author_id: yes7rose@sina.com

blob_id: 7c673a8469e80b5cf611f4fa8123db2d2a9bf00c | directory_id: 88dda5e76cef286c7db3ae7e5d1a32d28f7815a3 | content_id: db92375b2bb3014fa8de5b575a90eab562d327a0
path: /reviewboard/cmdline/tests/test_console.py | repo_name: reviewboard/reviewboard | branch_name: refs/heads/master
snapshot_id: f4d3bada08ba9d6ef53add2d1fdb82bd6cc63a1e | revision_id: c3a991f1e9d7682239a1ab0e8661cee6da01d537
detected_licenses: ["MIT"] | license_type: permissive | github_id: 285,304 | star_events_count: 1,141 | fork_events_count: 353
visit_date: 2023-08-31T09:03:14.170335 | revision_date: 2023-08-30T08:22:43 | committer_date: 2023-08-30T08:22:43
gha_license_id: MIT | gha_event_created_at: 2023-06-07T16:51:02 | gha_created_at: 2009-08-22T21:39:49 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,242 | extension: py
content:
"""Unit tests for reviewboard.cmdline.utils.console."""
import io
import math
import shutil
import kgb
from django.core.management.color import supports_color
from reviewboard.cmdline.utils.console import Console
from reviewboard.testing.testcase import TestCase
class ConsoleTests(kgb.SpyAgency, TestCase):
"""Unit tests for reviewboard.cmdline.utils.console.Console."""
def test_with_non_utf8_streams(self):
"""Testing Console with non-utf-8 stdout/stderr streams"""
stdout_buffer = io.BytesIO()
stdout = io.TextIOWrapper(stdout_buffer, encoding='latin1')
stderr_buffer = io.BytesIO()
stderr = io.TextIOWrapper(stderr_buffer, encoding='latin1')
try:
console = Console(stdout=stdout,
stderr=stderr)
# This will output Unicode content to stdout, and should fail
# if there's an encoding issue.
console.print('\U0001f9f8')
# There's no wrapper for stderr, so write to it directly.
console.stderr.write('\U0001f534')
# Make sure we got the results we expected.
self.assertEqual(stdout_buffer.getvalue(),
b'\xf0\x9f\xa7\xb8\n')
self.assertEqual(stderr_buffer.getvalue(),
b'\xf0\x9f\x94\xb4')
finally:
stdout.close()
stderr.close()
def test_print_with_styled_prefix(self):
"""Testing Console.print with styled indent prefix"""
self.spy_on(supports_color, op=kgb.SpyOpReturn(True))
self.spy_on(shutil.get_terminal_size, op=kgb.SpyOpReturn((50, 40)))
stdout_buffer = io.BytesIO()
stdout = io.TextIOWrapper(stdout_buffer)
try:
console = Console(stdout=stdout,
allow_color=True)
prefix_len = len('Warning: ')
avail_len = console.term_width - prefix_len - 1
console.warning('x ' * math.ceil(3 * (avail_len / 2)))
self.assertEqual(
stdout_buffer.getvalue(),
b'\n'
b'\x1b[33;1mWarning: \x1b[0m'
b'x x x x x x x x x x x x x x x x x x x x x\n'
b' x x x x x x x x x x x x x x x x x x x x x\n'
b' x x x x x x x x x x x x x x x x x x\n'
b'\n')
finally:
stdout.close()
def test_print_with_prefix_and_multiple_paragraphs(self):
"""Testing Console.print with indent prefix and multiple paragraphs"""
self.spy_on(shutil.get_terminal_size, op=kgb.SpyOpReturn((50, 40)))
stdout_buffer = io.BytesIO()
stdout = io.TextIOWrapper(stdout_buffer)
try:
console = Console(stdout=stdout)
console.warning('line 1\n'
'line 2\n'
'line 3\n')
self.assertEqual(
stdout_buffer.getvalue(),
b'\n'
b'Warning: line 1\n'
b'\n'
b' line 2\n'
b'\n'
b' line 3\n'
b'\n')
finally:
stdout.close()
authors: ["christian@beanbaginc.com"] | author_id: christian@beanbaginc.com

blob_id: fb489992b890262faed58a161cf7c86e66686337 | directory_id: da46ce60735b9579f95a805344ccd296cb994c6f | content_id: 09c144ee64c5de7722a4aa989f26557c9c5fa2da
path: /RectXor/retxor.py | repo_name: fletchto99/ieeextreme-11.0 | branch_name: refs/heads/master
snapshot_id: 47f82a108cd747638721f78711737f74d1ca0b45 | revision_id: 85feabb3ccb88bff865a17ac04dd707e1088055f
detected_licenses: [] | license_type: no_license | github_id: 107,136,306 | star_events_count: 1 | fork_events_count: 0
visit_date: 2021-07-13T06:55:44.483309 | revision_date: 2017-10-16T14:10:35 | committer_date: 2017-10-16T14:10:35
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,537 | extension: py
content:
import fileinput
import math


def f(a):
    # XOR of all integers from 0 through a, via the period-4 pattern.
    res = [a, 1, a + 1, 0]
    return res[a % 4]


def getXor(a, b):
    # XOR of all integers in the inclusive range [a, b].
    return f(b) ^ f(a - 1)


stdin = fileinput.input()
next(stdin)
for l, h, n, d1, d2 in ((int(x) for x in line.split()) for line in stdin):
    # start and end X values must be computed after since we don't know where either is in the row
    mb_start_x = (d1 - n) % l
    mb_end_x = (d2 - n) % l
    # Find the start and end x's
    start_x = min(mb_start_x, mb_end_x)
    end_x = max(mb_start_x, mb_end_x)
    # find where the inner rectangle rows start
    start_y = (d1 - n) // l
    end_y = (d2 - n) // l
    # compute the length of the grid
    size = l * h
    # compute all rows before & after the middle rectangle
    a = n
    b = n + start_y * l - 1
    c = n + ((end_y + 1) * l)
    d = n + size - 1
    xor = 0
    # only xor the top partition if inner rectangle isn't in the first row
    if start_y != 0:
        xor = xor ^ getXor(a, b)
    for i in range(start_y, end_y + 1):
        # some maths to compute start & end of xor
        start_left = n + (i * l)
        end_left = start_left + start_x - 1
        start_right = start_left + end_x + 1
        end_right = start_left + l - 1
        # xor left partition, row by row
        if start_x != 0:
            xor = xor ^ getXor(start_left, end_left)
        # xor right partition, row by row
        if end_x != l:
            xor = xor ^ getXor(start_right, end_right)
    # xor bottom partition
    if end_y != size / l:
        xor = xor ^ getXor(c, d)
    print(xor)
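
Everything above leans on the closed form inside f: the XOR of 0..a repeats with period 4, so the XOR of any range [a, b] reduces to two table lookups. A brute-force sanity check of that identity (illustrative):

```python
from functools import reduce

def xor_upto(a):
    return [a, 1, a + 1, 0][a % 4]  # same lookup table as f above

for a in range(1, 60):
    for b in range(a, 60):
        assert xor_upto(b) ^ xor_upto(a - 1) == reduce(lambda s, v: s ^ v, range(a, b + 1))
```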
authors: ["fletchto99@gmail.com"] | author_id: fletchto99@gmail.com

blob_id: 81a224de45a9bc182b263b6145f5718450487f80 | directory_id: cec342e42b25c28f86cf84d3ecc346b40dcff0ea | content_id: baf9166f7f596e6cc4f74f5e0e39f76b647ac72a
path: /0x03-python-data_structures/4-new_in_list.py | repo_name: kimha1030/holbertonschool-higher_level_programming | branch_name: refs/heads/master
snapshot_id: 91ed5e9d059697b493095d50abc1b7c52ef3208f | revision_id: d338b38b6bca42ffa6edec9c60d8039bebe53452
detected_licenses: [] | license_type: no_license | github_id: 259,394,784 | star_events_count: 0 | fork_events_count: 0
visit_date: 2022-12-21T03:13:03.738021 | revision_date: 2020-09-25T05:16:17 | committer_date: 2020-09-25T05:16:17
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 253 | extension: py
content:
#!/usr/bin/python3
def new_in_list(my_list, idx, element):
    new_list = my_list.copy()
    if idx < 0:
        return (new_list)
    elif idx >= len(my_list):
        return (new_list)
    else:
        new_list[idx] = element
        return new_list
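
A quick illustrative call, showing that the original list is left untouched:

```python
my_list = [1, 2, 3]
print(new_in_list(my_list, 1, 9))  # [1, 9, 3]
print(my_list)                     # [1, 2, 3] -- unchanged
```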
authors: ["1478@holbertonschool.com"] | author_id: 1478@holbertonschool.com

blob_id: da9e10acb00557b0fc415527f75a9fe206f6a20c | directory_id: 40ddd841c9d14107aa48f7bd8326e9228910d5ab | content_id: 5e29e2d1127ee244b2371233689db08cb6373e4c
path: /main.py | repo_name: Raiserfier/2d-Face-Morphing | branch_name: refs/heads/master
snapshot_id: 956a5876d8717ca4be9b54764c7b80fb8b8bc864 | revision_id: c2220d763eeecd13aaa127de854edfa97c43233f
detected_licenses: [] | license_type: no_license | github_id: 230,226,910 | star_events_count: 0 | fork_events_count: 0
visit_date: 2020-11-29T22:07:43.553997 | revision_date: 2019-12-26T09:03:06 | committer_date: 2019-12-26T09:03:06
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,482 | extension: py
content:
from face_detection import face_detection
from Delaunay import delaunay, delaunay2
import cv2
import numpy as np


# Build an index into the landmark array from a returned triangle vertex
def index_find(point, landmarks):
    for index in range(len(landmarks)):
        if point == landmarks[index]:
            return index


# Compute the warped pixels from the source triangle, the destination
# triangle, and the patch size
def applyAffineTransform(src, srcTri, dstTri, size):
    # Get the affine transform matrix
    warpMat = cv2.getAffineTransform(np.float32(srcTri), np.float32(dstTri))
    # Apply it to get the warped image. INTER_LINEAR: bilinear interpolation.
    # BORDER_REFLECT_101: the outer-most pixels (a or h) are not repeated,
    # which makes the later blending look better.
    dst = cv2.warpAffine(src, warpMat, (size[0], size[1]), None, flags=cv2.INTER_LINEAR,
                         borderMode=cv2.BORDER_REFLECT_101)
    return dst


def morphTriangle(img1, img2, img, t1, t2, t, alpha):
    # Get the minimal bounding rectangle of each triangle.
    # boundingRect returns four values x, y, w, h: (x, y) is the top-left
    # corner, w and h are the width and height.
    r1 = cv2.boundingRect(np.float32([t1]))
    r2 = cv2.boundingRect(np.float32([t2]))
    r = cv2.boundingRect(np.float32([t]))
    # Re-express the coordinates relative to each rectangle's top-left corner
    t1Rect = []
    t2Rect = []
    tRect = []
    for i in range(0, 3):
        t1Rect.append(((t1[i][0] - r1[0]), (t1[i][1] - r1[1])))
        t2Rect.append(((t2[i][0] - r2[0]), (t2[i][1] - r2[1])))
        tRect.append(((t[i][0] - r[0]), (t[i][1] - r[1])))
    # Create a mask covering the interpolated triangle
    mask = np.zeros((r[3], r[2], 3), dtype='float32')
    cv2.fillConvexPoly(mask, np.int32(tRect), (1.0, 1.0, 1.0), 16, 0)
    # Grab the pixels of each source triangle's bounding rectangle
    img1Rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
    img2Rect = img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]]
    size = (r[2], r[3])
    warpImage1 = applyAffineTransform(img1Rect, t1Rect, tRect, size)
    warpImage2 = applyAffineTransform(img2Rect, t2Rect, tRect, size)
    # Weighted blend of the two warped patches
    imgRect = (1.0 - alpha) * warpImage1 + alpha * warpImage2
    # Use the mask to cut the rectangle down to the triangle and write the
    # result into the output image
    img[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] = img[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] * (1 - mask) + imgRect * mask


if __name__ == '__main__':
    # ted_cruz/hillary_clinton/Arnie/Bush/donald_trump
    # paths = "paths.txt"
    # fp = open(paths, 'r')
    # images = []
    # for each in fp.readlines():
    #     images.append(each.rstrip('\r\n'))
    ori_img_path = "images/ted_cruz.jpg"
    fin_img_path = "images/hillary_clinton.jpg"
    show_Tri = 0  # whether to draw the triangulation
    frames = 40  # number of frames
    ori_img = cv2.imread(ori_img_path)
    fin_img = cv2.imread(fin_img_path)
    if ori_img is None or fin_img is None:
        print("Read img fail!")
    # Detect faces in the source and target images to get the landmark arrays
    ori_landmarks = face_detection(ori_img)
    fin_landmarks = face_detection(fin_img)
    # Get the triangle vertices of the source image's Delaunay triangulation
    ori_delaunay = delaunay(ori_img, ori_landmarks, 1)
    # Convert the triangle vertices into landmark indices;
    # the indices stay fixed through all later frames
    tri_index = []
    for t in ori_delaunay:
        pt1 = (t[0], t[1])
        pt2 = (t[2], t[3])
        pt3 = (t[4], t[5])
        add = [(index_find(pt1, ori_landmarks), index_find(pt2, ori_landmarks), index_find(pt3, ori_landmarks))]
        tri_index.extend(add)
    fourcc = cv2.VideoWriter_fourcc(*"DIVX")
    fps = 20
    videoWriter = cv2.VideoWriter("result/video.avi", fourcc, fps, (600, 800))
    # Compute frame by frame
    for k in range(0, frames + 1):
        alpha = k / frames
        landmarks_Middle = []
        # Interpolate the facial landmarks
        for i in range(len(ori_landmarks)):
            x = int((1 - alpha) * ori_landmarks[i][0] + alpha * fin_landmarks[i][0])
            y = int((1 - alpha) * ori_landmarks[i][1] + alpha * fin_landmarks[i][1])
            landmarks_Middle.append((x, y))
        # Holds the intermediate morph result
        imgMorph = np.zeros(ori_img.shape, dtype=ori_img.dtype)
        # Warp triangle by triangle
        for j in range(len(tri_index)):
            # Look up the vertex indices
            x = tri_index[j][0]
            y = tri_index[j][1]
            z = tri_index[j][2]
            # Get the point coordinates from the indices
            t1 = [ori_landmarks[x], ori_landmarks[y], ori_landmarks[z]]
            t2 = [fin_landmarks[x], fin_landmarks[y], fin_landmarks[z]]
            t = [landmarks_Middle[x], landmarks_Middle[y], landmarks_Middle[z]]
            # Morph a single triangle
            morphTriangle(ori_img, fin_img, imgMorph, t1, t2, t, alpha)
        # Output
        if show_Tri == 1:
            imgMorph_delaunay = delaunay2(imgMorph, tri_index, landmarks_Middle, (255, 255, 255))
            cv2.imshow("Morphed Face", np.uint8(imgMorph_delaunay))
            cv2.imwrite("result/frames/" + str(k) + ".jpg", imgMorph_delaunay)
            videoWriter.write(imgMorph_delaunay)
            cv2.waitKey(50)
        else:
            cv2.imshow("Morphed Face", np.uint8(imgMorph))
            cv2.imwrite("result/frames/" + str(k) + ".jpg", imgMorph)
            videoWriter.write(imgMorph)
            cv2.waitKey(50)
    videoWriter.release()
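
The per-triangle blend above is the classic cross-dissolve M = (1 - alpha) * warp(I1) + alpha * warp(I2); for two whole images of equal size the same blend can be written directly with OpenCV (illustrative sketch):

```python
import cv2
import numpy as np

img1 = np.zeros((10, 10, 3), np.uint8)
img2 = np.full((10, 10, 3), 255, np.uint8)
alpha = 0.25
blended = cv2.addWeighted(img1, 1.0 - alpha, img2, alpha, 0.0)
```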
authors: ["noreply@github.com"] | author_id: Raiserfier.noreply@github.com

blob_id: 3de507910aa9123b0c70cf6f165d6c1fb0cfd95e | directory_id: 35b1f561b6214400d3ed1482a52040283018834c | content_id: 37ad9c2538a081d63107503e662d1cdd90e3415e
path: /actionscript.py | repo_name: ih8ih8sn0w/lmdump-py | branch_name: refs/heads/master
snapshot_id: d5315b8084e38a3f1edfe6d62fb418e264f9121d | revision_id: 3852c3b807c7acb87809125c8c8363db5745f066
detected_licenses: [] | license_type: no_license | github_id: 131,685,885 | star_events_count: 1 | fork_events_count: 0
visit_date: 2021-06-06T08:20:02.145422 | revision_date: 2020-02-15T03:17:30 | committer_date: 2020-02-15T03:17:30
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 16,876 | extension: py
content:
import msvcrt as m # temp
opcodesToName = {0x00:"ActionEnd", 0x04:"NextFrame", 0x05:"PreviousFrame", 0x06:"Play", 0x07:"Stop", 0x08:"ToggleQuality", 0x09:"StopSounds", 0x0A:"Add", 0x0B:"Subtract", 0x0C:"Multiply", 0x0D:"Divide", 0x0E:"Equals", 0x0F:"Less", 0x10:"And", 0x11:"Or", 0x12:"Not", 0x13:"StrEquals", 0x14:"StrLength", 0x15:"StrExtract", 0x17:"Pop", 0x18:"ToInt", 0x1C:"GetVar", 0x1D:"SetVar", 0x20:"SetTarget2", 0x21:"StrAdd", 0x22:"GetProperty", 0x24:"CloneSprite", 0x25:"RemoveSprite", 0x26:"Trace", 0x27:"StartDrag", 0x28:"EndDrag", 0x29:"StringLess", 0x2A:"Throw", 0x2B:"CastOp", 0x2C:"Implements", 0x30:"RandNum", 0x31:"MbStrLen", 0x32:"CharToAscii", 0x33:"AsciiToChar", 0x34:"GetTime", 0x35:"MBStringExtract", 0x36:"MBCharToAscii", 0x37:"MBAsciiToChar", 0x3A:"Delete", 0x3B:"Delete2", 0x3C:"DefineLocal", 0x3D:"CallFunc", 0x3E:"Return", 0x3F:"Mod", 0x40:"NewObject", 0x41:"DefineLocal2", 0x42:"InitArray", 0x43:"InitObject", 0x44:"TypeOf", 0x45:"TargetPath", 0x46:"Enumerate", 0x47:"Add2", 0x48:"Less2", 0x49:"Equals2", 0x4A:"ToNum", 0x4B:"ToStr", 0x4C:"PushDuplicate", 0x4D:"StackSwap", 0x4E:"GetMember", 0x4F:"SetMember", 0x50:"Increment", 0x51:"Decrement", 0x52:"CallMethod", 0x53:"NewMethod", 0x54:"InstanceOf", 0x55:"Enumerate2", 0x60:"BitwiseAnd", 0x61:"BitwiseOr", 0x62:"BitwiseXor", 0x63:"BitwiseLeftShift", 0x64:"SignedBitwiseRightShift", 0x65:"UnsignedBitwiseRightShift", 0x66:"StrictEquals", 0x67:"TypedGreaterThan", 0x68:"StringGreaterThan", 0x69:"Extends", 0x81:"GoToFrame", 0x83:"GetURL", 0x87:"StoreRegister", 0x88:"ConstantPool", 0x8A:"WaitForFrame", 0x8B:"SetTarget", 0x8C:"GoToLabel", 0x8D:"WaitForFrame2", 0x8E:"DefineFunction2(Block)", 0x94:"With", 0x96:"Push", 0x99:"Branch", 0x9A:"GetURL2", 0x9B:"DefineFunction(Block)", 0x9D:"If", 0x9E:"Call", 0x9F:"GoToFrame2"}
opcodesWithoutExtData = {"ActionEnd":0x00, "Play":0x06, "Stop":0x07, "Add":0x0A, "Subtract":0x0B, "Not":0x12, "Pop":0x17, "GetVar":0x1C, "SetVar":0x1D, "Delete":0x3A, "DefineLocal":0x3C,"CallFunc":0x3D, "Return":0x3E, "NewObject":0x40, "DefineLocal2":0x41, "InitArray":0x42, "Add2":0x47, "Less2":0x48, "Equals2":0x49, "GetMember":0x4E, "SetMember":0x4F, "Increment":0x50, "CallMethod":0x52, "NewMethod":0x53, "TypedGreaterThan":0x67} # List severely incomplete. Things will definitely not decompile correctly in its current state
opcodesWithOneItem = {"GoToFrame":"Constant", "StoreRegister":"Register", "GoToFrame2":"Constant", "GoToLabel":"Str", "If":"unk_2", "Branch":"sint"} # these bastards I guess only have the frame number
opcodesWithAssumedDatatypes = {"DefineFunction(Block)":["Str", "param", "length"], "DefineFunction2(Block)":["Str", "unk_2", "length"]}
nameToOpcodes = {v: k for k, v in opcodesToName.items()}
datatypes = {0x00:"Str", 0x01:"Float", 0x02:"Null", 0x03:"Undefined", 0x04:"Register", 0x05:"Boolean", 0x06:"Double", 0x07:"Int", 0x08:"Constant", 0x09:"Large Constant", "unk_1":"unk_1", "unk_2":"unk_2", "param":"param", "length":"length"} # Note: datatypes that don't start with a hex value are just for usage in defining unflagged values
datatypeLengths = {"Str":2, "Float":4, "Null":0, "Undefined":0, "Register":1, "Boolean":1, "Double":8, "Int":4, "Constant":1, "Large Constant":2, "unk_1":1, "unk_2":2, "length":2, "param":2, "sint":2}
def test():
    print("This is the first dictionary")
    for k, v in opcodesToName.items():
        print("0x", format(k, "0>2X"), " ", v, sep='')
    print("\nThis is the second dictionary")
    for k, v in nameToOpcodes.items():
        print(k, " 0x", format(v, "0>2X"), sep='')


def parseFunc(numActions, data, symbols, endianess):
    if endianess == "big":
        dumb_endianess = "little"
    else:
        dumb_endianess = "big"
    for x in range(numActions):
        counter = 4
        actionLength = int.from_bytes(data[:4], byteorder=endianess)
        print("\nAction 0x", format(x, "0>2X"), ":", "# Length: ", actionLength, sep='')
        print("----------------------------------------------------------")
        disassembly = []
        while counter <= actionLength + 3:
            try:
                opcode = check_opcode(data[counter])
            except:
                print(data[counter])
                # data[counter] is already an int in Python 3, so format it
                # directly (int.from_bytes on an int would raise a TypeError)
                opcode = ("Unknown Opcode: 0x" + format(data[counter], "0>4X"))
            print("\n\nOpcode:", opcode, end='')
            counter += 1
            if opcode not in opcodesWithoutExtData:
                if opcode in opcodesWithOneItem:
                    length = check_length(data[counter:counter + 2], dumb_endianess)
                    print(", Length: ", length, end='', sep='')
                    datatype = opcodesWithOneItem[opcode]
                    datatypeLength = datatypeLengths[datatype]
                    print(", Datatype: ", datatype, end='', sep='')
                    counter += 2
                    value = data[counter:counter + length]
                    print(", Value: ", value, end='', sep='')
                    counter += datatypeLength
                elif opcode in opcodesWithAssumedDatatypes:
                    if opcode == "DefineFunction(Block)":
                        length = check_length(data[counter:counter + 2], dumb_endianess)
                        counter += 2
                        symbol = int.from_bytes(data[counter:counter + datatypeLengths["Str"]], byteorder=dumb_endianess)
                        counter += datatypeLengths["Str"]
                        params = int.from_bytes(data[counter:counter + datatypeLengths["param"]], byteorder=dumb_endianess)
                        counter += datatypeLengths["param"]
                        funcLength = int.from_bytes(data[counter:counter + datatypeLengths["length"]], byteorder=dumb_endianess)
                        counter += datatypeLengths["length"]
                        paramList = []
                        for x in range(params):
                            paramList.append("Param " + str(x) + ": " + str(data[counter:counter + datatypeLengths["Str"]]) + "\n")
                            counter += datatypeLengths["Str"]
                        # m.getch()
                        print(", NumParams: ", format(params, "0>2X"), ",Param Names: (", paramList, "), Function Length: ", funcLength, sep='')
                    elif opcode == "DefineFunction2(Block)":
                        length = check_length(data[counter:counter + 2], dumb_endianess)
                        counter += 2
                        symbol = int.from_bytes(data[counter:counter + datatypeLengths["Str"]], byteorder=dumb_endianess)
                        counter += datatypeLengths["Str"]
                        params = int.from_bytes(data[counter:counter + 2], byteorder=dumb_endianess)
                        counter += 2
                        unk_1 = int.from_bytes(data[counter:counter + 1], byteorder=dumb_endianess)
                        counter += 1
                        flags = int.from_bytes(data[counter:counter + datatypeLengths["Large Constant"]], byteorder=dumb_endianess)
                        counter += datatypeLengths["Large Constant"]
                        print(", Symbol: 0x", format(symbol, "0>4X"), ", NumParams: 0x", format(params, "0>4X"), ", Unk_1: 0x", format(unk_1, "0>2X"), ", Flags: 0x", format(flags, "0>4X"), sep='', end='')
                        if params > 0:
                            print(", Params: (", end='')
                            for x in range(params):
                                register = int.from_bytes(data[counter:counter + 1], byteorder=dumb_endianess)
                                counter += 1
                                reg_sym = int.from_bytes(data[counter:counter + 2], byteorder=dumb_endianess)
                                counter += 2
                                print("r", register, ":", format(reg_sym, "0>4X"), sep='', end='')
                                if x != params - 1:
                                    print(', ', end='')
                                else:
                                    print(')', end='')
                        func_len = int.from_bytes(data[counter:counter + 2], byteorder=dumb_endianess)
                        counter += 2
                        print(", func_len: 0x", format(func_len, "0>4X"), sep='', end='')
                    else:
                        datatypes = opcodesWithAssumedDatatypes[opcode]
                        length = check_length(data[counter:counter + 2], dumb_endianess)
                        counter += 2
                        stuff = []
                        for x in datatypes:
                            stuff.append(int.from_bytes(data[counter:counter + datatypeLengths[x]], byteorder=dumb_endianess))
                            counter += datatypeLengths[x]
                        print("AAAHHHHHH", stuff)
                else:
                    length = check_length(data[counter:counter + 2], dumb_endianess)
                    print(", Length: ", length, end='', sep='')
                    counter += 2
                    while length != 0:
                        datatype, datatypeLength = check_datatype(data[counter])
                        print(", Datatype: ", datatype, end='', sep='')
                        counter += 1
                        length -= 1
                        value = int.from_bytes(data[counter:counter + datatypeLength], byteorder=dumb_endianess)
                        if datatype == "Str":
                            try:
                                value = symbols[value]
                            except:
                                pass
                        print(", Value: ", format(value, "0>4X"), end='', sep='')
                        counter += datatypeLength
                        length -= datatypeLength
        # Actions are 4-byte aligned: round the cursor up before slicing
        # off the next action.
        if counter % 4 != 0:
            counter += (4 - counter % 4)
        data = data[counter:]


def check_opcode(data):
    global opcodesToName
    return opcodesToName[data]


def check_length(data, endianess):
    return int.from_bytes(data, byteorder=endianess)


def check_datatype(data):
    global datatypes
    global datatypeLengths
    return datatypes[data], datatypeLengths[datatypes[data]]
something_dumb = bytes.fromhex('0000000307000000000000249603000060001C960300000C004E96080007010000000061001C96030000620052170000000000249603000060001C960300001C004E96080007010000000061001C96030000620052170000000000088C02002C00060000000000078C020014000000000000000306000000000000088C02003400060000000000249603000060001C960300001C004E96080007010000000061001C960300006300521700000000001D960300001C001C96080007010000000061001C9603000062005217000000000000000029960D00070200000007010000000060001C9603000030004E9603000035004E9603000064005217000000000000000019960B0000140007010000000060001C96030000650052170000000000000009EA9B0600B8000000070096020008663E009B0A00B9000200BA00BB000100008E0900BC000000022A003E0096050007000000008701000117960400040108671C48129D0200220096020008681C960700040107000000004F9602000401508701000117990200D0FF008E0C00BD000100022A0001BE00500096020008681C96020004014E960700070100000008691C960200086A52129D02001B00960200086B1C9604000401086B1C96020004014E504F9902001200960200086B1C960700040107000000004F008E0C00BF000100022A0001BE001700960200086B1C96020004014E9605000701000000493E008E0C00C0000100022A0001BE001700960200086B1C96020004014E9605000700000000673E008E0900B4000000036A012F009606000402086C05004F9606000402086D05004F9606000402086E05004F960B00085B0701000000040108645217008E0C00B6000100032A0102C10015009606000401086F05014F9606000401087004024F009B0600C200000021009609000871070100000008721C96020008734E96020008744E96020008755217009B0600C300000021009609000876070100000008721C96020008734E96020008744E96020008755217009B0600C400000021009609000877070100000008721C96020008734E96020008744E96020008755217009B0600C500000021009609000878070100000008721C96020008734E96020008744E96020008755217009B0600C600000021009609000879070100000008721C96020008734E96020008744E96020008755217009B0600C70000002100960900087A070100000008721C96020008734E96020008744E96020008755217008E0C00C8000100022A0001C9002300960B000401087B070200000008721C96020008734E96020008744E96020008755217008E0C00CA000100022A0001C9002300960B000401087C070200000008721C96020008734E96020008744E96020008755217008E0C00CB000100022A0001C9002300960B000401087D070200000008721C96020008734E96020008744E96020008755217008E0C00CC000100022A0001C9002300960B000401087E070200000008721C96020008734E96020008744E96020008755217008E0F00CD00020004290103C90000820055009606000402087F05014F9606000402088004034F960400040108818E0900000000000329012C009604000402086F4E129D02001E00960400040208704E960700070100000008823D17960400040108813A17004F008E1800CE000500072A0001CF0005D00003D10004D20006D300BC00960200040187010002179607000401070000000067129D02003B00960400040104056712129D02000F009602000403870100021799020018009604000401040467129D02000A00960200040687010002179902005F009607000401070000000048129D02004E009609000401070000000004050B4812129D02001500960700070000000004030B870100021799020024009609000401070000000004040B48129D02001000960700070000000004060B870100021796020004023E008E0F00D4000200032A0002D50001D60030009607000401070A0000004812129D02000E0096040004020401473E990200100096040004020883479602000401473E008E0F00D7000200032A0002D50001D6005600960700040107640000004812129D02000E0096040004020401473E99020036009607000401070A0000004812129D0200140096040004020883479602000401473E990200100096040004020884479602000401473E008E0C0062000100022A0001D8005700960400040108854E963D000886070000000007FF00000007FF00000007FF000000070100000006273108AC1C5AECBF06273108AC1C5AECBF06273108AC1C5AECBF070800000008721C96020008874E9602000888534F008E0C0063000100022A0001D8005700960400040108854E963D0008860700000000071D000000071D000000071D000000070100000006273108A
C1C5AEC3F06273108AC1C5AEC3F06273108AC1C5AEC3F070800000008721C96020008874E9602000888534F008E0C00D9000100022A0001D8005700960400040108854E963D0008860700000000071D000000071D000000071D000000070100000006273108AC1C5AEC3F06273108AC1C5AEC3F06273108AC1C5AEC3F070800000008721C96020008874E9602000888534F008E0C00DA000100022A0001D8005700960400040108854E963D000886070000000007FF00000007FF00000007FF000000070100000006273108AC1C5AECBF06273108AC1C5AECBF06273108AC1C5AECBF070800000008721C96020008874E9602000888534F0096020008891C960400088A05004F960700088B07000000003C960700088C07010000003C960700088D07020000003C960700088E07030000003C960700088F07040000003C960700089007050000003C960700089107060000003C960700089207070000003C960700089307080000003C960700089407090000003C9607000895070A0000003C9607000896070B0000003C9607000897070C0000003C960700089807530000003C9607000899075A0000003C960700089A07570000003C960700089B07410000003C960700089C07510000003C960700089D07450000003C960700089E07250000003C960700089F07270000003C96070008A007260000003C96070008A107280000003C960400086808A11C96020008A01C960200089F1C960200089E1C960200089D1C960200089C1C960200089B1C960200089A1C96020008991C96020008981C960500070A000000423C960400086B08971C960700070100000008A2403C96020008891C960400086C05004F96020008891C96040008A305004F96020008891C960400086E05004F96020008891C960400087F05004F96020008891C960700088007000000004F96020008891C960400086F05004F96020008891C960700087007000000004F96020008891C96070008A407000000004F96020008891C96070008A507000000004F96020008891C96070008A607000000004F96020008891C96070008A707000000004F96020008891C96070008A807000000004F96020008891C96070008A907000000004F96020008891C96070008AA07000000004F96020008891C96070008AB07000000004F96020008891C96070008AC07000000004F96020008891C96070008AD07000000004F96020008891C96070008AE07000000004F96020008891C96070008AF07000000004F96020008891C96040008B005004F96020008891C96040008B105004F96020008891C96040008B205004F96020008891C96040008B305004F96020008B41C960A000208B4070300000008721C96020008734E96020008744E96020008B5521796020008B61C960A000208B6070300000008721C96020008734E96020008744E96020008B5521796020008891C96070008B707080000004F000000000000008D96070008DB07000000003C96020008DB1C96020008891C96020008B74E48129D02003F0096040008DC08611C96040008DD08DB1C960500070100000047474E3C9609000803070100000008DC1C9602000864521796040008DB08DB1C501D990200A8FF96090008030701000000084A1C960200086452179609000803070100000008491C960200086452170000000000000000170796080007000000000061001C96030000C50052170000000000008D96070008DB07000000003C96020008DB1C96020008891C96020008B74E48129D02003F0096040008DC08611C96040008DD08DB1C960500070100000047474E3C9609000804070100000008DC1C9602000864521796040008DB08DB1C501D990200A8FF96090008040701000000084A1C960200086452179609000804070100000008491C960200086452170000000000000000170796080007000000000061001C96030000C7005217000000')
parseFunc(15, something_dumb, "a", "big")
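
Since parseFunc keeps flipping between endianess and dumb_endianess, a two-byte sanity check makes the difference concrete (illustrative):

```python
pair = bytes([0x03, 0x00])
print(int.from_bytes(pair, byteorder="little"))  # 3
print(int.from_bytes(pair, byteorder="big"))     # 768
```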
authors: ["noreply@github.com"] | author_id: ih8ih8sn0w.noreply@github.com

blob_id: c8fc584a12109f24ddf81fc102c0def132481d3e | directory_id: a248e50edb7fb61192e2c341008585e0b37e4f01 | content_id: d4e887885e1fe1adf3a04e7c8bf0999518a94d2d
path: /math/root/root.py | repo_name: akipta/hobbyutil | branch_name: refs/heads/master
snapshot_id: 48508351a86a8f3cebcac97ede8b17097bf408e3 | revision_id: 1f9bffe7f3c2b3655177e5f8e1916c476344b9c8
detected_licenses: [] | license_type: no_license | github_id: 39,736,637 | star_events_count: 1 | fork_events_count: 0
visit_date: 2021-01-18T17:19:05.074492 | revision_date: 2014-08-28T03:37:28 | committer_date: 2014-08-28T03:37:28
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 30,361 | extension: py
content:
'''
Root Finding Routines (should work with python 2 or 3)
The following functions find real roots of functions. Note you can
call them using e.g. mpmath numbers and find roots to arbitrary
precision. The functions QuadraticEquation, CubicEquation, and
QuarticEquation use functions from the math and cmath library, so they
can't be used with other floating point implementations.
Note that the fp keyword arguments let you perform these root-finding
calculations with any floating point type that can convert strings
like "1.5" to a floating point number. Python's float type is the
default.
The following calls may be abbreviated; see the actual function
definitions for details.
Bisection(f, x1, x2, tol=1.0e-9, switch=False)
Finds a root by bisection. Slow but reliable if the root is
bracketed in [x1, x2].
Brent(f, x1, x2, tol=1e-6, maxit=100)
Brent's method. The root must be in [x1, x2].
FindRoots(f, n, x1, x2, eps=epsilon, itmax=100)
This is a general-purpose root finding routine. It uses
SearchIntervalForRoots to divide the interval [x1, x2] into n
intervals and look for roots in each subinterval. If a subinterval has
a root, the RootFinder routine is used to find the root to precision
eps. If more than itmax iterations are done in any interval, an
exception is raised. Returns a tuple of the roots found.
Pound(x, adjust, eps=float(epsilon))
Utility function to reduce complex numbers with small real or
imaginary components to pure imaginary or pure real numbers,
respectively.
Ridders(f, a, b, tol=1.0e-9, itmax=100)
Finds a root via Ridder's method if the root is bracketed.
Converges quadratically with two function evaluations per
iteration.
RootFinder(x0, x2, f, eps=epsilon, itmax=100)
Finds a root with quadratic convergence that lies between x0 and
x2. x0 and x2 must bracket the root. The function whose root is
being found is f(x). The root is found when successive estimates
differ by less than eps. The routine will throw an exception if
the number of iterations exceeds itmax. The returned value is the
root. Based on a C algorithm by Jack Crenshaw.
NewtonRaphson(f, fd, x, tolerance=1e-9, maxit=200, show=False)
Quadratically-converging root-finding method; you need to supply the
function f, its derivative fd, and an initial guess x.
SearchIntervalForRoots(f, n, x1, x2)
Given a function f of one variable, divide the interval [x1, x2]
into n subintervals and determine if the function crosses the x
axis in each subinterval. Return a tuple of the intervals where
there is a zero crossing (i.e., there's at least one root in each
    interval in the tuple).
The following functions find real and complex roots and use the math
library, so are only for calculations with floats. If adjust is True,
any root where Im/Re < epsilon is converted to a real root. epsilon is a
global variable. Set adjust to False to have the roots returned as
complex numbers.
QuadraticEquation(a, b, c, adjust=True)
Returns the two roots of a*x^2 + b*x + c = 0. If adjust is true,
any root where Im/Re < eps is converted to a real root. Set
    adjust to False to have all roots returned as complex numbers.
CubicEquation(a, b, c, d, adjust=True)
Returns the three roots of a*x^3 + b*x^2 + c*x + d = 0. If adjust
is true, any root where Im/Re < eps is converted to a real root.
    Set adjust to False to have all roots returned as complex numbers.
QuarticEquation(a, b, c, d, e, adjust=True)
Returns the four roots of a*x^4 + b*x^3 + c*x^2 + d*x + e = 0.
If adjust is true, any root where Im/Re < eps is converted to a
    real root. Set adjust to False to have all roots returned as
complex numbers.
'''
# Copyright (c) 2006, 2010 Don Peterson
# Contact: gmail.com@someonesdad1
#
#
from __future__ import division, print_function
import sys, math, cmath, unittest, numbers
class TooManyIterations(Exception): pass
# Ratio of imag/real to decide when something is a real root (or
# real/imag to decide when something is pure imaginary). Also used as
# the default tolerance for root finding. Note it's a string because
# it will be converted to a floating point type inside the function
# it's used in (some of the functions can allow other numerical types
# besides floating point).
epsilon = "2.5e-15"
def FindRoots(f, n, x1, x2, eps=epsilon, itmax=100, fp=float, args=[], kw={}):
'''This is a general-purpose root finding routine that returns a
tuple of the roots found of the function f on the interval
[x1, x2].
It uses SearchIntervalForRoots to divide the interval into n
intervals and look for roots in each subinterval. If a
subinterval has a root, the RootFinder routine is used to find the
root to precision eps. If more than itmax iterations are used in
any interval, an exception is raised.
Parameters
f Function to search for roots
n Number of subintervals
x1 Start of overall interval to search
x2 End of overall interval to search
eps Precision to find roots
itmax Maximum number of iterations
fp Floating point type to use for calculations
args Extra parameters for f()
kw Extra keyword arguments for f()
Example: Find the roots of sin(x)/x = 0 on the interval [1, 10]:
import math
for i in FindRoots(lambda x: math.sin(x)/x, 1000, 1, 10):
print(i)
which prints
3.14159265359
6.28318530718
9.42477796077
Note these are integer multiples of pi.
'''
if not f:
raise ValueError("f must be defined")
if not isinstance(n, numbers.Integral):
raise TypeError("n must be integer")
if x1 >= x2:
raise ValueError("Must have x1 < x2")
intervals = SearchIntervalForRoots(f, n, x1, x2, fp=fp, args=args, kw=kw)
if not intervals:
return tuple()
roots = []
for x1, x2 in intervals:
try:
x, numits = RootFinder(x1, x2, f, eps=eps, itmax=itmax,
args=args, kw=kw)
except TooManyIterations:
pass
else:
roots.append(x)
return tuple(roots)
def RootFinder(x0, x2, f, eps=epsilon, itmax=100, fp=float, args=[], kw={}):
'''Root lies between x0 and x2. f is the function to evaluate; it
takes one parameter and returns a number. eps is the precision to find
the root to and itmax is the maximum number of iterations allowed.
fp is the number type to use in the calculation. args is a
sequence of any extra arguments that need to be passed to f; kw is
a dictionary of keywords that will be passed to f.
Returns a tuple (x, numits) where
x is the root.
numits is the number of iterations taken.
The routine will throw an exception if it receives bad input
data or it doesn't converge.
----------------------------------------------------------------
A root finding routine. See "All Problems Are Simple" by Jack
Crenshaw, Embedded Systems Programming, May, 2002, pg 7-14,
jcrens@earthlink.com. Can be downloaded from
www.embedded.com/code.htm.
Originally translated from Crenshaw's C code and modified by Don
Peterson 20 May 2003.
The method is called "inverse parabolic interpolation" and will
converge rapidly as it's a 4th order algorithm. The routine works
by starting with x0, x2, and finding a third x1 by bisection. The
ordinates are gotten, then a horizontally-opening parabola is
    fitted to the points. The abscissa of the parabola's root is
gotten, and the iteration is repeated.
The root value is returned.
'''
zero, half, one, two, eps = fp("0"), fp("0.5"), fp("1"), fp("2"), fp(eps)
assert x0 < x2 and eps > zero and itmax > 0
x1 = y0 = y1 = y2 = b = c = temp = y10 = y20 = y21 = xm = ym = zero
xmlast = x0
if args:
y0, y2 = ((f(x0, *args, **kw), f(x2, *args, **kw)) if kw else
(f(x0, *args), f(x2, *args)))
else:
y0, y2 = (f(x0, **kw), f(x2, **kw)) if kw else (f(x0), f(x2))
if y0 == zero:
return x0, 0
if y2 == zero:
return x2, 0
if y2 * y0 > zero:
raise ValueError("Root not bracketed: y0 = %f, y2 = %f\n"% (y0, y2))
for i in range(itmax):
x1 = half*(x2 + x0)
if args:
y1 = f(x1, *args, **kw) if kw else f(x1, *args)
else:
y1 = f(x1, **kw) if kw else f(x1)
if (y1 == zero) or (abs(x1 - x0) < eps):
return x1, i + 1
if y1*y0 > zero:
x0, x2, y0, y2 = x2, x0, y2, y0
y10, y21, y20 = y1 - y0, y2 - y1, y2 - y0
if y2*y20 < two*y1*y10:
x2, y2 = x1, y1
else:
b, c = (x1 - x0)/y10, (y10 - y21)/(y21 * y20)
xm = x0 - b*y0*(one - c*y1)
if args:
ym = f(xm, *args, **kw) if kw else f(xm, *args)
else:
ym = f(xm, **kw) if kw else f(xm)
if ((ym == zero) or (abs(xm - xmlast) < eps)):
return xm, i + 1
xmlast = xm
if ym*y0 < zero:
x2, y2 = xm, ym
else:
x0, y0, x2, y2 = xm, ym, x1, y1
raise TooManyIterations("No convergence in RootFinder()")
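# Hedged usage sketch (illustrative addition, not part of the original
# module; the _demo_* helper name is ours): find sqrt(2) as the root of
# f(x) = x**2 - 2, which is bracketed by [1, 2].
def _demo_RootFinder():
    root, numits = RootFinder(1.0, 2.0, lambda x: x*x - 2)
    print("root = %.15f after %d iterations" % (root, numits))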
def NewtonRaphson(f, fd, x, tolerance=1e-9, maxit=200, show=False,
fp=float, args=[], kw={}):
'''Newton-Raphson algorithm for solving f(x) = 0.
f = the function (must be a function object)
fd = the function's derivative (must be a function object)
x = initial guess of the root's location
tolerance = number used to determine when to quit
maxit = the maximum number of iterations.
fp = type of numbers to calculate with
args = extra arguments for f
kw = keyword arguments for f
show = print intermediate values
The iteration is
xnew = x - f(x)/f'(x)
until
|dx|/(1+|x|) < tolerance
is achieved. Here, dx = f(x)/fd(x). This termination condition
is a compromise between |dx| < tolerance, if x is small and
|dx|/|x| < tolerance, if x is large.
Newton-Raphson converges quadratically near the root; however, its
downfalls are well-known: i) near zero derivatives can send it into
the next county; ii) ogive-shaped curves can make it oscillate and not
converge; iii) you need to have an expression for both the function and
its derivative.
Adapted from
http://www.phys.uu.nl/~haque/computing/WPark_recipes_in_python.html
'''
count, one = 0, fp("1.")
while True:
if args:
dx = f(x, *args, **kw)/fd(x) if kw else f(x, *args)/fd(x)
else:
dx = f(x, **kw)/fd(x) if kw else f(x)/fd(x)
if abs(dx) < tolerance * (one + abs(x)):
return x - dx
x = x - dx
count += 1
if count > maxit:
raise TooManyIterations("Too many iterations in NewtonRaphson()")
if show:
print("NewtonRaphson[%d]: x = %s" % (count, x))
def BracketRoots(f, x1, x2, maxit=100, fp=float, args=[], kw={}):
'''Given a function f and an initial interval [x1, x2], expand the
interval geometrically until a root is bracketed or the number of
iterations exceeds maxit. Return (x3, x4), where the interval
definitely brackets a root. If the maximum number of iterations
is exceeded, an exception is raised.
fp Floating point type to use
args Sequence of extra arguments to be passed to f
kw Dictionary of keywords that will be passed to f
Adapted from zbrac in chapter 9 of Numerical Recipes in C, page 352.
'''
assert f and x1 != x2
zero = fp("0")
if x1 > x2:
x1, x2 = x2, x1
if args:
f1, f2 = ((f(x1, *args, **kw), f(x2, *args, **kw)) if kw else
(f(x1, *args), f(x2, *args)))
else:
f1, f2 = (f(x1, **kw), f(x2, **kw)) if kw else (f(x1), f(x2))
factor, count = fp("1.6"), 0
while True:
if f1*f2 < zero:
return (x1, x2)
if abs(f1) < abs(f2):
x1 += factor*(x1 - x2)
if args:
f1 = f(x1, *args, **kw) if kw else f(x1, *args)
else:
f1 = f(x1, **kw) if kw else f(x1)
else:
x2 += factor*(x2 - x1)
if args:
f2 = f(x2, *args, **kw) if kw else f(x2, *args)
else:
f2 = f(x2, **kw) if kw else f(x2)
count += 1
if count > maxit:
raise TooManyIterations("No convergence in BracketRoots()")
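# Hedged usage sketch (illustrative addition): [3, 4] does not bracket a
# root of x**2 - 2, so the interval is expanded geometrically until one is
# bracketed.
def _demo_BracketRoots():
    x1, x2 = BracketRoots(lambda x: x*x - 2, 3.0, 4.0)
    print("root of x**2 - 2 bracketed in [%g, %g]" % (x1, x2))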
def SearchIntervalForRoots(f, n, x1, x2, fp=float, args=[], kw={}):
'''Given a function f of one variable, divide the interval [x1,
x2] into n subintervals and determine if the function crosses the
x axis in each subinterval. Return a tuple of the intervals where
there is a zero crossing. fp is the floating point type to use.
    args is a sequence of any extra parameters needed by f; kw is a
dictionary of any keyword parameters needed by f.
Idea from Numerical Recipes in C, zbrak, chapter 9, page 352.
'''
assert f and n > 0 and x1 < x2
if args:
y0 = f(x1, *args, **kw) if kw else f(x1, *args)
else:
y0 = f(x1, **kw) if kw else f(x1)
    # Use n equal subintervals that reach x2 (dividing by n + 1 would leave
    # the last stretch [x1 + n*delta, x2] unchecked).
    x0, delta, intervals = x1, (x2 - x1)/fp(n), []
for i in range(1, n + 1):
x = x1 + i*delta
if args:
y = f(x, *args, **kw) if kw else f(x, *args)
else:
y = f(x, **kw) if kw else f(x)
if y0*y < 0:
intervals.append((x0, x))
x0, y0 = x, y
return tuple(intervals)
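# Hedged usage sketch (illustrative addition): locate the subintervals of
# [1, 10] where sin(x) changes sign (near pi, 2*pi, and 3*pi).
def _demo_SearchIntervalForRoots():
    for interval in SearchIntervalForRoots(math.sin, 100, 1.0, 10.0):
        print("sign change in [%g, %g]" % interval)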
# For the following Bisection and Ridders methods, note these
# algorithms are in scipy DLLs, so they are probably implemented in
# C/C++ and will be faster.
def Bisection(f, x1, x2, tol=1.0e-9, switch=False):
'''Returns (root, num_it) (the root and number of iterations) by
finding a root of f(x) = 0 by bisection. The root must be
bracketed in [x1,x2]. The iteration is done when the root is
found to less than the indicated relative tolerance tol.
If switch is true, an exception will be raised if the function
appears to be increasing during bisection. Be careful with this,
as the polynomial test case converges just fine with bisection,
but will cause an exception if switch is True.
If the root is bracketed, bisection is guaranteed to converge,
either on some root in the interval or a singularity within the
interval. It's also conceptually simple to understand: draw a
line between the two bracketing points and look at the midpoint.
Choose the new interval containing the midpoint and the other
point that evaluates to the opposite sign. Repeat until you find
the root to the required accuracy. Each iteration adds a
significant digit to the answer.
The number of iterations and function evaluations will be
log2(abs(x2 - x1)/tol).
Adapted slightly from the book "Numerical Methods in Engineering
with Python" by Jaan Kiusalaas, 2nd ed. You can get the book's
algorithms from http://www.cambridge.org/us/download_file/202203/.
'''
f1, f2 = f(x1), f(x2)
if not f1:
return x1, 0
if not f2:
return x2, 0
if f1*f2 > 0.0:
raise ValueError("Root is not bracketed")
# Get the number of iterations we'll need
num_iterations = int(math.ceil(math.log(abs(x2 - x1)/tol)/math.log(2)))
for i in range(num_iterations):
x3 = 0.5*(x1 + x2) # Abscissa of interval midpoint
f3 = f(x3) # Ordinate of interval midpoint
if f3 == 0.0:
return x3, i + 1
if switch and abs(f3) > abs(f1) and abs(f3) > abs(f2):
msg = "f(x) increasing on interval bisection (i.e., a singularity)"
raise ValueError(msg)
# Choose which half-interval to use based on which one continues to
# bracket the root.
if f2*f3 < 0.0:
x1, f1 = x3, f3 # Right half-interval contains the root
else:
x2, f2 = x3, f3 # Left half-interval contains the root
return (x1 + x2)/2.0, num_iterations
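# Hedged usage sketch (illustrative addition): bisection on x**2 - 2 over
# [1, 2]; with the default tol of 1e-9 the formula above gives
# ceil(log2(1/1e-9)) = 30 iterations.
def _demo_Bisection():
    root, numits = Bisection(lambda x: x*x - 2, 1.0, 2.0)
    print("root = %.10f in %d iterations" % (root, numits))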
def Ridders(f, a, b, tol=1.0e-9, itmax=100):
'''Returns (root, num_it) (root and the number of iterations)
using Ridders' method to find a root of f(x) = 0 to the specified
relative tolerance tol. The root must be bracketed in [a,b]. If
the number of iterations exceeds itmax, an exception will be
raised.
Wikipedia states: Ridders' method is a root-finding algorithm
based on the false position method and the use of an exponential
function to successively approximate a root of a function f.
Ridders' method is simpler than Brent's method but Press et al.
(1988) claim that it usually performs about as well. It converges
quadratically, which implies that the number of additional
significant digits doubles at each step; but the function has to
be evaluated twice for each step so the order of the method is
2**(1/2). The method is due to Ridders (1979).
Adapted slightly from the book "Numerical Methods in Engineering
with Python" by Jaan Kiusalaas, 2nd ed. You can get the book's
algorithms from http://www.cambridge.org/us/download_file/202203/.
'''
fa, fb = f(a), f(b)
if fa == 0.0:
return a, 0
if fb == 0.0:
return b, 0
if fa*fb > 0.0:
raise ValueError("Root is not bracketed")
for i in range(itmax):
# Compute the improved root x from Ridder's formula
c = 0.5*(a + b)
fc = f(c)
s = math.sqrt(fc**2 - fa*fb)
if s == 0.0:
if not fc:
return c, i + 1
raise ValueError("No root")
dx = (c - a)*fc/s
if (fa - fb) < 0.0:
dx = -dx
x = c + dx
fx = f(x)
# Test for convergence
if i > 0 and abs(x - x_old) < tol*max(abs(x), 1.0):
return x, i + 1
x_old = x
# Re-bracket the root as tightly as possible
if fc*fx > 0.0:
if fa*fx < 0.0:
b, fb = x, fx
else:
a, fa = x, fx
else:
a, b, fa, fb = c, x, fc, fx
raise TooManyIterations("Too many iterations in Ridders()")
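# Hedged usage sketch (illustrative addition): Ridders' method needs only a
# bracketing interval; here it finds the root of x**2 - 2 in [1, 2].
def _demo_Ridders():
    root, numits = Ridders(lambda x: x*x - 2, 1.0, 2.0)
    print("root = %.12f in %d iterations" % (root, numits))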
def Pound(x, adjust, eps=float(epsilon)):
'''Turn x into a real if the imaginary part is small enough
relative to the real part and adjust is True. The analogous thing
is done for a nearly pure imaginary number.
    This function's name was originally IfReal, but I enlarged its
abilities to handle complex numbers whose direction was nearly
parallel to either the real or imaginary axis. The name comes
    from imagining the complex number as a nail that a light tap from
    a hammer makes lie parallel to the axis.
'''
# Handle the "pure" cases first.
if not x.real and not x.imag:
return 0
elif x.real and not x.imag:
return x.real
elif not x.real and x.imag:
return x.imag*1j
if adjust and x.real and abs(x.imag/x.real) < eps:
return x.real
elif adjust and x.imag and abs(x.real/x.imag) < eps:
return x.imag*1j
return x
def QuadraticEquation(a, b, c, adjust=True, force_real=False):
'''Return the two roots of a quadratic equation. Equation is
a*x^2 + b*x + c = 0. Note this works with float types only.
Set force_real to True to force the returned values to be real.
Here's a derivation of the method used. Multiply by 4*a and
complete the square to get
(2*a*x + b)**2 = (b**2 - 4*a*c)
x = (-b +/- sqrt(b**2 - 4*a*c))/(2*a) (1)
Next, multiply the equation by 1/x**2 to get
a + b*(1/x) + c*(1/x**2) = 0
Complete the square to find
1/x = (-b -/+ sqrt(b**2 - 4*a*c))/(2*c)
or
x = 2*c/(-b -/+ sqrt(b**2 - 4*a*c)) (2)
Equations 1 or 2 may provide more accuracy for a particular root.
Note there can be loss of precision in the discriminant when a*c
is small compared to b**2. This happens when the roots vary
greatly in absolute magnitude. Suppose they are x1 and x2; then
(x - x1)*(x - x2) = x**2 - (x1 + x2)*x + x1*x2 = 0. Here,
a = 1
b = -(x1 + x2)
c = x1*x2
Suppose x1 = 1000 and x2 = 0.001. Then b = -1000.001 and c = 1.
The square root of the discriminant is 999.999 and the subtraction
b - sqrt(D) results in 0.0001, with a loss of around 6 significant
figures.
The algorithm is to use two equations depending on the sign of b
(D = b**2 - 4*a*c):
b >= 0:
x1 = -b - sqrt(D))/(2*a) and x2 = 2*c/(-b - sqrt(D))
b < 0:
x1 = 2*c/(-b + sqrt(D)) and x2 = -b + sqrt(D))/(2*a)
'''
if not a:
raise ValueError("a cannot be zero")
    # Note: floats also satisfy numbers.Complex, which made the stable
    # method below unreachable; test for a genuinely non-real b instead.
    if not isinstance(b, numbers.Real):
p = b/a
q = c/a
d = cmath.sqrt(p*p/4 - q)
if force_real:
return tuple([i.real for i in (-p/2 + d, -p/2 - d)])
return Pound(-p/2 + d, adjust), Pound(-p/2 - d, adjust)
else:
# More stable numerical method
D = cmath.sqrt(b*b - 4*a*c)
if b >= 0:
x1, x2 = (-b - D)/(2*a), 2*c/(-b - D)
else:
x1, x2 = 2*c/(-b + D), (-b + D)/(2*a)
if force_real:
return tuple([i.real for i in (x1, x2)])
return Pound(x1, adjust), Pound(x2, adjust)
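# Hedged usage sketch (illustrative addition): the ill-conditioned example
# from the docstring, with roots 1000 and 0.001 (a=1, b=-1000.001, c=1);
# the two-branch formula keeps the small root accurate.
def _demo_QuadraticEquation():
    for root in QuadraticEquation(1.0, -1000.001, 1.0):
        print(root)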
# The following Mathematica commands were used to generate the code for
# the cubic and quartic routines.
#
# (* Cubic *)
# f = a*x^3 + b*x^2 + c*x + d;
# g = Solve[f == 0, x];
# FortranForm[g]
#
# (* Quartic *)
# f = a*x^4 + b*x^3 + c*x^2 + d*x + e;
# g = Solve[f == 0, x];
# FortranForm[g]
#
# The output was edited with the following changes:
#
# 1. Change (0,1) to 1j
# 2. Remove extra parentheses and comma at end of expression
# 3. Substitute (1/3.) for 0.3333333333333333
# 4. Substitute (2/3.) for 0.6666666666666666
# 5. Put backslashes on lines as appropriate
#
# After this manipulation, common terms were looked for and set up as
# single variables to avoid recalculation. This removed a lot of
# duplication.
#
# The special case where we're finding the third or fourth root of a
# real or complex number, we use De Moivre's theorem: Let z be a
# complex number written in polar form z = r*(cos(x) + i*sin(x)).
# Then
#
# z^(1/n) = r^(1/n)*(cos((x + 2*k*pi)/n) + i*sin((x + 2*k*pi)/n))
#
# where k varies from 0 to n-1 to give the n roots of z.
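# Hedged sketch of the De Moivre computation described above (illustrative
# addition): the three cube roots of 8 are 2 and 2*exp(+/-2j*pi/3).
def _demo_DeMoivre():
    z, n = 8.0, 3
    r, x = abs(z), (0 if z > 0 else math.pi)    # polar form of a real z
    for k in range(n):
        print(r**(1./n)*complex(math.cos((x + 2*k*math.pi)/n),
                                math.sin((x + 2*k*math.pi)/n)))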
def CubicEquation(a, b, c, d, adjust=True, force_real=False):
'''Returns the roots of a cubic with complex coefficients: a*z**3
+ b*z**2 + c*z + d.
You can set force_real to True to make all the returned roots be
real (this causes the real part of the calculated roots to be
returned). This may be of use e.g. when solving cubic equations
of state like the Peng-Robinson or Redlich-Kwong equations. You
must exercise caution, as you might be throwing a true complex
root away.
If adjust is True and the roots have imaginary parts small enough
relative to the real part, they are converted to real numbers.
Example:
for i in CubicEquation(1, 1, 1, 1):
print(i)
prints
-1.0
(-6.93889390391e-17+1j)
(-6.93889390391e-17-1j)
However,
for i in CubicEquation(1, 1, 1, 1, adjust=True):
print(i)
prints
-1.0
1j
-1j
Note
for i in CubicEquation(1, 1, 1, 1, force_real=True):
print(i)
prints
-1.0
-6.93889390391e-17
-6.93889390391e-17
which is probably *not* what you want.
'''
if not a:
raise ValueError("a must not be zero")
if b == 0 and c == 0 and d == 0:
return 0, 0, 0
if b == 0 and c == 0:
# Find the three cube roots of (-d) using De Moivre's theorem.
r = abs(-d) # Magnitude
# Get the argument
if isinstance(-d, numbers.Complex):
x = math.atan2((-d).imag, (-d).real)
else:
x = 0
if (-d) < 0:
x = math.pi
n = 3
rn = r**(1./n)
def f(x, k):
return (rn*(math.cos((x + 2*k*math.pi)/n) +
1j*math.sin((x + 2*k*math.pi)/n)))
roots = f(x, 0), f(x, 1), f(x, 2)
if force_real:
return tuple(i.real for i in roots)
return tuple(Pound(i, adjust) for i in roots)
u = -2*b**3 + 9*a*b*c - 27*a**2*d
D = -b**2 + 3*a*c
v = cmath.sqrt(4*D**3 + u**2)
w = 2**(1./3)
y = (u + v)**(1./3)
st = 1j*math.sqrt(3)
z = -b/(3.*a)
t = 3*2**(2./3)*a*y
x = 6*w*a
x1 = z - w*D/(3.*a*y) + y/(3.*w*a)
x2 = z + ((1 + st)*D)/t - ((1 - st)*y)/x
x3 = z + ((1 - st)*D)/t - ((1 + st)*y)/x
if force_real:
return tuple(i.real for i in (x1, x2, x3))
return tuple(Pound(i, adjust) for i in (x1, x2, x3))
def QuarticEquation(a, b, c, d, e, adjust=True, force_real=False):
'''Returns the roots of a quartic with complex coefficients:
a*x**4 + b*x**3 + c*x**2 + d*x + e. Note this works with float
types only. Set force_real to make all the returned roots be
real.
You can set force_real to True to make all the returned roots be
real (this causes the real part of the calculated roots to be
returned). You must exercise caution, as you might be throwing a
true complex root away.
If adjust is True and a root has an imaginary part small enough
relative to the real part, it is converted to a real number.
Analogously, if the real parts are small enough relative to the
imaginary parts, the root is converted to a pure imaginary.
Example 1:
for i in QuarticEquation(1, 1, 1, 1, 1):
print(i)
prints
(-0.809016994375-0.587785252292j)
(-0.809016994375+0.587785252292j)
(0.309016994375-0.951056516295j)
(0.309016994375+0.951056516295j)
Example 2: (x-1)*(x-2)*(x-3)*(x-4) is a quartic polynomial with
a = 1, b = -10, c = 35, d = -50, and e = 24. Then
for i in QuarticEquation(1, -10, 35, -50, 24):
print(i)
    prints the four roots 1.0, 2.0, 3.0 and 4.0 (possibly in a different
    order and with tiny rounding errors).
    '''
if not a:
raise ValueError("a must not be zero")
if b == 0 and c == 0 and d == 0 and e == 0:
return 0, 0, 0, 0
if b == 0 and c == 0 and d == 0:
# Find the four fourth roots of (-e) using De Moivre's theorem.
r = abs(-e) # Magnitude
# Get the argument
if isinstance(-e, numbers.Complex):
x = math.atan2((-e).imag, (-e).real)
else:
x = 0
if (-e) < 0:
x = math.pi
n = 4
rn = r**(1./n)
def f(x, k):
return (rn*(math.cos((x + 2*k*math.pi)/n) +
1j*math.sin((x + 2*k*math.pi)/n)))
roots = f(x, 0), f(x, 1), f(x, 2), f(x, 3)
if force_real:
return tuple([i.real for i in roots])
return tuple([Pound(i, adjust) for i in roots])
cr3 = 2**(1./3)
p = -b/(4.*a)
q = (c**2 - 3*b*d + 12*a*e)
r = 2*c**3 - 9*b*c*d + 27*a*d**2 + 27*b**2*e - 72*a*c*e
s = cmath.sqrt(-4*q**3 + r**2)
t = (3*a*(r + s)**(1./3))
u = (r + s)**(1./3)/(3.*cr3*a)
v = (-(b**3./a**3) + (4.*b*c)/a**2 - (8.*d)/a)
w = cmath.sqrt(b**2/(4.*a**2) - (2*c)/(3.*a) + (cr3*q)/t + u)
x = b**2/(2.*a**2) - (4*c)/(3.*a) - (cr3*q)/t - u
y = cmath.sqrt(x - v/(4.*w))/2
z = cmath.sqrt(x + v/(4.*w))/2
roots = p - w/2. - y, p - w/2. + y, p + w/2. - z, p + w/2. + z
if force_real:
return tuple([i.real for i in roots])
return tuple(Pound(i, adjust) for i in roots)
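# Hedged usage sketch (illustrative addition): Example 2 from the docstring;
# (x-1)*(x-2)*(x-3)*(x-4) has roots 1, 2, 3, and 4.
def _demo_QuarticEquation():
    for root in QuarticEquation(1, -10, 35, -50, 24):
        print(root)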
def Brent(f, x1, x2, args=[], kw={}, tol=1e-6, maxit=100):
'''Return (r, numits) where r is the root of the function f that
is known to lie in the interval [x1, x2]. The root will be found
within the absolute tolerance tol. numits is the number of
iterations it took.
args is a sequence of extra arguments for f(); kw is a dictionary
of keyword arguments for f().
    This is essentially the zbrent routine from "Numerical Recipes in
C", translated into python.
'''
    def F(x):
        # Pass kw as keyword arguments (**kw), as the rest of this module
        # does; kw=kw would hand the dictionary to f as a single parameter.
        if args:
            return f(x, *args, **kw) if kw else f(x, *args)
        else:
            return f(x, **kw) if kw else f(x)
a, b, c = x1, x2, x2
i, EPS = 0, 3.0e-8
fa, fb = F(a), F(b)
fc = fb
if (fa > 0.0 and fb > 0.0) or (fa < 0.0 and fb < 0.0):
raise ValueError("Root must be bracketed")
while i < maxit:
i += 1
if (fb > 0.0 and fc > 0.0) or (fb < 0.0 and fc < 0.0):
c, fc = a, fa
e = d = b - a
if abs(fc) < abs(fb):
a = b
b = c
c = a
fa = fb
fb = fc
fc = fa
tol1 = 2.0*EPS*abs(b) + 0.5*tol
xm = 0.5*(c - b)
if abs(xm) <= tol1 or fb == 0.0:
return (b, i) # *** Found the root ***
if abs(e) >= tol1 and abs(fa) > abs(fb):
s = fb/fa
if a == c:
p, q = 2.0*xm*s, 1.0 - s
else:
q, r = fa/fc, fb/fc
p = s*(2.0*xm*q*(q - r) - (b - a)*(r - 1.0))
q = (q - 1.0)*(r - 1.0)*(s - 1.0)
if p > 0.0:
q = -q
p = abs(p)
min1 = 3.0*xm*q - abs(tol1*q)
min2 = abs(e*q)
if 2.0*p < (min1 if min1 < min2 else min2):
e = d
d = p/q
else:
d = xm
e = d
else:
d = xm # Bounds decreasing too slowly, use bisection.
e = d
a, fa = b, fb
if abs(d) > tol1: # Evaluate new trial root.
b += d
else:
b += tol1 if xm >= 0 else -tol1
fb = F(b)
raise ValueError("Maximum number of iterations exceeded")
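# Hedged usage sketch (illustrative addition): Brent's method on the same
# bracketed test function used in the other examples.
def _demo_Brent():
    root, numits = Brent(lambda x: x*x - 2, 1.0, 2.0)
    print("root = %.12f in %d iterations" % (root, numits))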
|
[
"donp@localhost"
] |
donp@localhost
|
1398b9712eb558c5758dca5169e457768641001c
|
47d75780751f820ccf6a350caa94a5605be5ac80
|
/pySDC/tutorial/step_6/playground_parallelization.py
|
f3406ee56a8f6c2e8a73d839edc63677eaea0cc3
|
[
"BSD-2-Clause"
] |
permissive
|
ruthschoebel/pySDC
|
1e8484efc89eaaf9bb71f350790a4bc08b0f2127
|
de2cd523411276083355389d7e7993106cedf93d
|
refs/heads/master
| 2022-06-07T19:50:57.181584
| 2022-05-11T12:50:29
| 2022-05-11T12:50:29
| 146,288,756
| 0
| 1
|
BSD-2-Clause
| 2021-08-11T12:54:48
| 2018-08-27T11:34:15
|
Python
|
UTF-8
|
Python
| false
| false
| 2,467
|
py
|
import sys
from mpi4py import MPI
from pySDC.helpers.stats_helper import filter_stats, sort_stats
from pySDC.implementations.controller_classes.controller_MPI import controller_MPI
from pySDC.tutorial.step_6.A_run_non_MPI_controller import set_parameters_ml
if __name__ == "__main__":
"""
A simple test program to do MPI-parallel PFASST runs
"""
# set MPI communicator
comm = MPI.COMM_WORLD
# get parameters from Part A
description, controller_params, t0, Tend = set_parameters_ml()
# instantiate controllers
controller = controller_MPI(controller_params=controller_params, description=description, comm=comm)
# get initial values on finest level
P = controller.S.levels[0].prob
uinit = P.u_exact(t0)
# call main functions to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# filter statistics by type (number of iterations)
filtered_stats = filter_stats(stats, type='niter')
# convert filtered statistics to list of iterations count, sorted by process
iter_counts = sort_stats(filtered_stats, sortby='time')
# combine statistics into list of statistics
iter_counts_list = comm.gather(iter_counts, root=0)
rank = comm.Get_rank()
size = comm.Get_size()
if rank == 0:
# we'd need to deal with variable file names here (for testing purpose only)
if len(sys.argv) == 2:
fname = sys.argv[1]
else:
fname = 'step_6_B_out.txt'
f = open(fname, 'a')
out = 'Working with %2i processes...' % size
f.write(out + '\n')
print(out)
# compute exact solutions and compare with both results
uex = P.u_exact(Tend)
err = abs(uex - uend)
out = 'Error vs. exact solution: %12.8e' % err
f.write(out + '\n')
print(out)
        # build one list of statistics instead of a list of lists, then sort by time
iter_counts_gather = [item for sublist in iter_counts_list for item in sublist]
iter_counts = sorted(iter_counts_gather, key=lambda tup: tup[0])
# compute and print statistics
for item in iter_counts:
out = 'Number of iterations for time %4.2f: %1i ' % (item[0], item[1])
f.write(out + '\n')
print(out)
f.write('\n')
print()
assert all([item[1] <= 8 for item in iter_counts]), "ERROR: weird iteration counts, got %s" % iter_counts
|
[
"r.speck@fz-juelich.de"
] |
r.speck@fz-juelich.de
|
67a3f3a04613459464a4ed8759f9cb6ff4a3791c
|
eed51baed10ce8f80e7925d94199f40ee70c8e9e
|
/users/helpers.py
|
7cbf139f333740f31aea320c80909f7021cccffc
|
[] |
no_license
|
fortniteclipz-com/ts_test
|
1b0a4e1d1ede6713a8c13c4a75bebec82270f9ae
|
f213b1ea20bbffc6b1fdbe6f4b87fa46be3c266d
|
refs/heads/master
| 2022-02-04T12:25:03.218671
| 2019-06-21T18:37:55
| 2019-06-21T18:37:55
| 145,085,282
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,627
|
py
|
import csv
import json
import requests
COLUMNS = [
'twitch_user_id',
'twitch_user_name',
'twitch_followers',
'twitch_videos_percentage_fortnite',
'instagram',
'email',
'twitter',
'twitch_views',
'twitch_videos_count_total',
'twitch_videos_count_fortnite',
'latest_stream_id',
'latest_stream_view_count',
'latest_stream_duration',
'latest_stream_date',
'contact_instagram',
'contact_email',
'contact_twitter',
]
HEADERS = {
'Content-Type': "application/json",
'Client-ID': "xrept5ig71a868gn7hgte55ky8nbsa",
}
def get_videos():
url = "https://api.twitch.tv/helix/videos"
videos = []
pagination = None
while len(videos) < 1000:
print("helpers | get_videos | loop")
params = {
'game_id': '33214',
'period': 'day',
'sort': 'views',
'language': 'en',
'type': 'archive',
'first': '100',
'after': pagination,
}
r = requests.get(
url,
headers=HEADERS,
params=params,
)
if r.status_code != 200:
break
body = r.json()
videos += body['data']
pagination = body['pagination']['cursor']
return videos
def get_channel(user):
url = f"https://api.twitch.tv/kraken/channels/{user['twitch_user_name']}"
params = {}
r = requests.get(
url,
headers=HEADERS,
params=params,
)
body = r.json()
channel = body
return channel
def get_channel_videos(user):
url = f"https://api.twitch.tv/kraken/channels/{user['twitch_user_name']}/videos"
params = {
'broadcast_type': 'archive',
'limit': '100',
}
r = requests.get(
url,
headers=HEADERS,
params=params,
)
body = r.json()
videos = body['videos']
return videos
def get_users():
with open('./users.csv', 'a+') as f:
f.seek(0, 0)
csv_reader = csv.DictReader(f)
users = list(csv_reader)
return users
def save_users(users):
users.sort(key=lambda u: (
-__make_int(u.get('twitch_followers', 0)),
-__make_int(u.get('twitch_videos_percentage_fortnite', 0)),
-__make_int(u.get('twitch_views', 0)),
-__make_int(u.get('latest_stream_view_count', 0)),
))
with open('./users.csv', 'w') as f:
csv_writer = csv.DictWriter(f, fieldnames=COLUMNS)
csv_writer.writeheader()
for u in users:
csv_writer.writerow(u)
def __make_int(s):
s = str(s).strip()
return int(s) if s else 0
|
[
"sachinahj@gmail.com"
] |
sachinahj@gmail.com
|
c7410b3413ce3080ad0184a2b5e94d3dab6b53f5
|
c489dd902955d805b6753f847c4fee38281c4c2f
|
/VB_Classes/mouse_and_match.py
|
a02720bf2e9eac663595a426c04ddefa2719ab52
|
[
"MIT"
] |
permissive
|
germal/OpenCVB
|
f4f866abdc1910ccac5804ff8581702f38d2e93b
|
312c452f3e981aa8238aa5f2d3d9928100bbeab6
|
refs/heads/master
| 2023-03-06T06:09:15.749020
| 2021-02-19T00:11:52
| 2021-02-19T00:11:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,092
|
py
|
'''
mouse_and_match.py [-i path | --input path: default ../data/]
Demonstrate using a mouse to interact with an image:
Read in the images in a directory one by one
Allow the user to select parts of an image with a mouse
When they let go of the mouse, it correlates (using matchTemplate) that patch with the image.
SPACE for next image
ESC to exit
'''
import numpy as np
import cv2 as cv
# built-in modules
import os
import sys
import glob
import argparse
from math import *
title_window = 'Mouse_and_match.py'
class App():
drag_start = None
sel = (0,0,0,0)
def onmouse(self, event, x, y, flags, param):
if event == cv.EVENT_LBUTTONDOWN:
self.drag_start = x, y
self.sel = (0,0,0,0)
elif event == cv.EVENT_LBUTTONUP:
if self.sel[2] > self.sel[0] and self.sel[3] > self.sel[1]:
patch = self.gray[self.sel[1]:self.sel[3], self.sel[0]:self.sel[2]]
result = cv.matchTemplate(self.gray, patch, cv.TM_CCOEFF_NORMED)
result = np.abs(result)**3
_val, result = cv.threshold(result, 0.01, 0, cv.THRESH_TOZERO)
result8 = cv.normalize(result, None, 0, 255, cv.NORM_MINMAX, cv.CV_8U)
cv.imshow("result", result8)
self.drag_start = None
elif self.drag_start:
#print flags
if flags & cv.EVENT_FLAG_LBUTTON:
minpos = min(self.drag_start[0], x), min(self.drag_start[1], y)
maxpos = max(self.drag_start[0], x), max(self.drag_start[1], y)
self.sel = (minpos[0], minpos[1], maxpos[0], maxpos[1])
img = cv.cvtColor(self.gray, cv.COLOR_GRAY2BGR)
cv.rectangle(img, (self.sel[0], self.sel[1]), (self.sel[2], self.sel[3]), (0,255,255), 1)
cv.imshow("gray", img)
else:
print("selection is complete")
self.drag_start = None
def run(self):
parser = argparse.ArgumentParser(description='Demonstrate mouse interaction with images')
parser.add_argument("-i","--input", default='../Data/', help="Input directory.")
args = parser.parse_args()
path = args.input
cv.namedWindow("gray",1)
cv.setMouseCallback("gray", self.onmouse)
'''Loop through all the images in the directory'''
for infile in glob.glob( os.path.join(path, '*.*') ):
ext = os.path.splitext(infile)[1][1:] #get the filename extension
if ext == "png" or ext == "jpg" or ext == "bmp" or ext == "tiff" or ext == "pbm":
print(infile)
img = cv.imread(infile,1)
if img is None:
continue
self.sel = (0,0,0,0)
self.drag_start = None
self.gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow("gray", self.gray)
if cv.waitKey() == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
App().run()
cv.destroyAllWindows()
|
[
"bobdavies2000@gmail.com"
] |
bobdavies2000@gmail.com
|
e29f2e9ff22f4f8425a6cbfccab6305ed6546f73
|
3febaca6c3983d7accc2696d134423090d93bb3c
|
/mPyPl/mdict.py
|
baf45e3e91aa96e32a3f977abd78e1d00ffb7c19
|
[
"MIT"
] |
permissive
|
dbr176/mPyPl
|
f9160333943ff6b3b4e0137a72fde72520641e8d
|
c3df1d8b1535e51783be1b44222ce10dfbf11cf1
|
refs/heads/master
| 2020-04-18T20:21:25.991259
| 2018-11-28T21:36:28
| 2018-11-28T21:36:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,993
|
py
|
# mPyPl - Monadic Pipeline Library for Python
# http://github.com/shwars/mPyPl
import enum
from .utils.coreutils import getattritem, encode_csv  # encode_csv is assumed to live in coreutils alongside getattritem
"""
Different evaluation strategies that can be used for `mdict` slots:
* `Value` - just the value stored as in normal dictionary
* `LazyMemoized` - a function is stored, which is evaluated upon calling the field with `x[...]`. Result is stored back into the field, so that in is not re-computed again.
* `OnDemand` - similar to `LazyMemoized`, but the result is not stored, and function is called each time to get the value. This is very useful for large video objects not to persist in memory.
"""
EvalStrategies = enum.Enum('EvalStrategies','Default Value LazyMemoized OnDemand')
def lazy_strategy(eval_strategy):
"""
Determines if a given eval strategy is lazy (`LazyMemoized` or `OnDemand`)
:param eval_strategy: input evaluation strategy
:return: True, if eval_strategy is lazy
"""
return eval_strategy==EvalStrategies.LazyMemoized or eval_strategy==EvalStrategies.OnDemand
class mdict(dict):
"""
Base Dictionary class that flows through the pipeline. It supports different evaluation strategies, including
lazy evaluation with or without memoization.
"""
def __init__(self,*args,**kwargs):
dict.__init__(self,*args,**kwargs)
self.eval_strategies = {}
def set(self,key,value,eval_strategy=None):
"""
Set the value of a slot and optionally its evaluation strategy
"""
        dict.__setitem__(self, key, value)
self.set_eval_strategy(key,eval_strategy)
def set_eval_strategy(self,key,eval_strategy):
if eval_strategy is not None:
self.eval_strategies[key] = eval_strategy
def __getitem__(self, item):
res = dict.__getitem__(self,item)
if callable(res) and self.eval_strategies.get(item,EvalStrategies.Default) != EvalStrategies.Value:
r = res.__call__()
if self.eval_strategies.get(item,EvalStrategies.LazyMemoized) == EvalStrategies.LazyMemoized:
self[item] = r
return r
else:
return res
def get(self, item, default=None):
        return dict.get(self, item, default)
def as_float(self,item):
return float(self[item])
def as_int(self,item):
return int(self[item])
def as_csv(self):
return ','.join(map(encode_csv,self.values()))
def as_csv_header(self):
return ','.join(map(encode_csv,self.keys()))
@staticmethod
def extract_from_object(x,fields):
"""
Create new `mdict`, extracting specified fields from a given object or dictionary
:param x: Object to use
:param fields: List of fields to extract. If a field contains `.`, complex extraction is performed.
:return: new `mdict` containing all specified fields
"""
m = mdict()
for z in fields:
m[z] = getattritem(x,z)
return m
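# Hedged usage sketch (illustrative addition, assuming the fixes above; the
# _demo_ helper name is ours): a slot holding a callable is evaluated on
# access; LazyMemoized stores the result back, OnDemand recomputes it.
def _demo_mdict():
    m = mdict()
    m.set('lazy', lambda: 6*7, eval_strategy=EvalStrategies.LazyMemoized)
    m.set('fresh', lambda: 6*7, eval_strategy=EvalStrategies.OnDemand)
    print(m['lazy'])    # 42; the callable is replaced by the value
    print(m['fresh'])   # 42; recomputed on every access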
|
[
"dmitri@soshnikov.com"
] |
dmitri@soshnikov.com
|
2fb41fcf6177477f8a6e3c023a76ca452fae2062
|
119e0fc8341c622b9f71e9de514c9674d6aa37df
|
/api/admin.py
|
71a9e32259a985348e8106a9d799ce7b51c54b8f
|
[] |
no_license
|
and2carvalho/apiDjangoMongo
|
44bcbde60f231614fd85d423c9ba866efc1d7d1b
|
3d82412646c99989920e8cdc2c11713110d88c63
|
refs/heads/master
| 2022-12-11T21:58:41.802798
| 2020-09-04T20:31:05
| 2020-09-04T20:31:05
| 292,935,800
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 125
|
py
|
from django.contrib import admin
from .models import Autor, Noticia
admin.site.register(Autor)
admin.site.register(Noticia)
|
[
"and2carvalho@gmail.com"
] |
and2carvalho@gmail.com
|
2189449b9cc2cd7a1976a6b753bfcb007e9cb6f6
|
3e61ffb39fcd79e7d7c1647ce89a6670a0416dc5
|
/2-GUI/2-window-layouts/3-grid/gui.py
|
b39a4497e4a5a161219318d5549a0e40a6a137af
|
[] |
no_license
|
adam-worley/com404
|
34b0904de32b8c361d29487f8b1e66482a15fb44
|
e01f2e38a680be0a694c886a9aa0c5fbdb10cd45
|
refs/heads/master
| 2020-07-31T13:27:18.201443
| 2019-12-10T13:01:19
| 2019-12-10T13:01:19
| 210,617,880
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,080
|
py
|
from tkinter import *
class Gui(Tk):
def __init__(self):
super().__init__()
# set window properties
self.title("Newsletter")
self.configure(bg="#ccc", padx=10, pady=10)
# add components
self.__add_outer_frame()
self.__add_heading_label()
self.__add_instruction_label()
self.__add_email_label()
self.__add_email_entry()
self.__add_subscribe_button()
def __add_outer_frame(self):
self.outer_frame = Frame()
self.outer_frame.grid(row=0, column=0)
self.outer_frame.configure( bg="#eee",
padx=10,
pady=10)
def __add_heading_label(self):
self.heading_label = Label(self.outer_frame)
self.heading_label.grid(row=0, column=0, columnspan=2)
self.heading_label.configure( bg="#eee",
font="Arial 18",
text="RECEIVE OUR NEWSLETTER")
def __add_instruction_label(self):
self.instruction_label = Label(self.outer_frame)
self.instruction_label.grid(row=1, column=0, columnspan=2, sticky=W)
self.instruction_label.configure( bg="#eee",
text="Please enter your email below to receiver our newsletter")
def __add_email_label(self):
self.email_label = Label(self.outer_frame)
self.email_label.grid(row=2, column=0, sticky=E)
self.email_label.configure( pady=20,
text="Email:")
def __add_email_entry(self):
self.email_entry = Entry(self.outer_frame)
self.email_entry.grid(row=2, column=1, sticky=W)
self.email_entry.configure(width=40)
def __add_subscribe_button(self):
self.subscribe_button = Button(self.outer_frame)
self.subscribe_button.grid(row=3, column=0, columnspan=2, sticky=N+E+S+W)
self.subscribe_button.configure(bg="#fcc",
text="Subscribe")
|
[
"50365867+adam-worley@users.noreply.github.com"
] |
50365867+adam-worley@users.noreply.github.com
|
693341e6d7d4f8346684324303360f2ab9c7e412
|
dfc0ca4068e56487939db9d1db97efb0c172055e
|
/app/core/tests/test_admin.py
|
0416862d0d3b23e736e859bbfc47c736382d527d
|
[] |
no_license
|
melisayu/recipe-app-api
|
a13753fb3c92db127f55e7d12ad1b7acfe62d8bd
|
2f021f3866060adc0c4e9f81229dd9d021cddc83
|
refs/heads/master
| 2022-12-11T03:12:14.159960
| 2020-09-13T06:24:30
| 2020-09-13T06:24:30
| 293,997,263
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,389
|
py
|
from django.test import Client, TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email='admin@yopmail.com',
password='password123'
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email='test@yopmail.com',
password='pass123',
name='Test user full name'
)
def test_users_listed(self):
"""Test that users area listed on user page"""
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
"""Test that the user edit page works"""
url = reverse('admin:core_user_change', args=[self.user.id])
# url reverse will create something like this: /admin/core/user/id
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_create_user_page(self):
"""Test that create user page works"""
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
|
[
"melisayuliakristin@gmail.com"
] |
melisayuliakristin@gmail.com
|
d0eda5cb465ef3bf2462ef042e41d7d94f39edb2
|
543ec18f1e0ba4e950f4a26eaae550f8a89617f9
|
/ETC/11652.py
|
4e9a076cc6467e6c6b8c6197b1d9e5783d709aa6
|
[] |
no_license
|
YUNKWANGYOU/Quiz
|
287e1dcb3a9851fa7c42b6931c550b7895aea112
|
92723eedfe645ae66b130685fa5a7be7ba58c293
|
refs/heads/master
| 2023-08-25T17:26:29.207440
| 2021-10-23T13:17:56
| 2021-10-23T13:17:56
| 292,081,004
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
import sys
n = int(sys.stdin.readline())
counts = {}   # renamed from `dict` to avoid shadowing the builtin
for i in range(n):
    num = int(sys.stdin.readline())
    if num not in counts:
        counts[num] = 1
    else:
        counts[num] += 1
max_num = max(counts.values())   # the highest occurrence count
a = []
for key, value in counts.items():   # one pass over the items
    if value == max_num:
        a.append(key)
print(min(a))
|
[
"1996yyk@gmail.com"
] |
1996yyk@gmail.com
|
5e0f11dfe838ed901fe64ae8ff13718f8e6d8536
|
c551be0fefd13de5f05593fcceb7f5938664dfc3
|
/Backend/config/urls.py
|
711ce5d0d0d64f67ff0d15bba262419cb64771d3
|
[] |
no_license
|
Take-Tetsu/rasp_training
|
7ef8d450a5e14b44f57a03ecc36e0bd3a9e3ecb8
|
d212e100476354a8661032a7ca480bee9f70cb9c
|
refs/heads/master
| 2021-09-23T16:09:11.390348
| 2020-09-05T02:10:44
| 2020-09-05T02:10:44
| 246,305,228
| 0
| 0
| null | 2021-09-22T19:42:08
| 2020-03-10T13:15:11
|
PLpgSQL
|
UTF-8
|
Python
| false
| false
| 894
|
py
|
"""config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from api.views import UserList, LoginView
urlpatterns = [
path('admin/', admin.site.urls),
path('users/', UserList.as_view()),
path('login/', LoginView.as_view())
]
|
[
"takeyama.tohru@sharp.co.jp"
] |
takeyama.tohru@sharp.co.jp
|
96afdf579b1e2cde2163c2b4c6b7271e7f5f7fbe
|
e2032d5efbe5b3a771a6104def770c8a66e3cc95
|
/problem3.py
|
1473f400cf175637bd6d86376cf2738d145b636e
|
[] |
no_license
|
rmvook/HW7_CMSC389R
|
d8359cd8fa54a6095751cbca2975d378650a757d
|
8c8167ca37ca3dfa29c6aedda8db3f1ce66383b1
|
refs/heads/master
| 2020-03-07T15:13:31.654545
| 2018-03-31T20:15:04
| 2018-03-31T20:15:04
| 127,548,793
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,203
|
py
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import socket, hashlib, string, sys, os, time
host = "irc.csec.umiacs.umd.edu" # IP address or URL
port = 4444 # port
def main():
# use these to connect to the service
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
for i in range (4):
# receive some data
data = s.recv(1024)
print(data)
values = data.split("\n") #split on \n to isolate the line
        print(values)
operation = values[1] #second part had equation
equation = operation.split(" ") #break up equation
        print(equation)
num1 = int(equation[0]) #first number
operator = str(equation[1]) #operator + or -
num2 = int(equation[2]) #second number
if (operator == "+"): #seemed to only have two operations
answer = num1 + num2
elif (operator == "-"):
answer = num1 - num2
else:
answer = 1337 #error, gets turned into LEET
ans = str(answer)
hex_result = hashlib.sha256(ans).hexdigest()
s.send(bytes(hex_result + "\n")) #make sure to send as bytes
# close the connection
s.close()
if __name__ == "__main__":
main()
|
[
"rmvook@gmail.com"
] |
rmvook@gmail.com
|
041d8284f15d756503d3b538adeb3acc844a7f13
|
45169e140053eb97b4cc171a4c01126536c4cc4f
|
/05+EenvoudigeFuncties/Vis viva.py
|
585786c049b80d1e91793aea0c6895275eb263d0
|
[] |
no_license
|
EmielThomaes/5WWIPython
|
e2357a5df133fdc3b50f0c807ca1c8d783f01eb4
|
3178be9d516e350ff743a68c17d0315985215243
|
refs/heads/master
| 2020-07-21T05:11:23.923329
| 2020-03-06T11:00:21
| 2020-03-06T11:00:21
| 206,760,387
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,003
|
py
|
from math import pi, sqrt
# input
afstand = float(input('distance from the satellite to the centre of the Earth = '))
snelheid = float(input('speed of the satellite relative to the Earth = '))
# calculation: the vis-viva equation gives the semi-major axis
# a = mu*r/(2*mu - r*v**2), and Kepler's third law gives the period
# T = 2*pi*sqrt(a**3/mu)
geocentrische_constante = 398600.4418 * pow(10, 9)
lengte_grote_as = (geocentrische_constante * afstand) / ((2 * geocentrische_constante) - (afstand * (pow(snelheid, 2))))
lengte_periode = 2 * pi * sqrt(pow(lengte_grote_as, 3) / geocentrische_constante)
aantal_dagen = int(lengte_periode // (24 * 60 * 60))
rest_lengte_periode = lengte_periode % (24 * 60 * 60)
aantal_uren = int(rest_lengte_periode // (60 * 60))
rest_lengte_periode %= (60 * 60)
aantal_minuten = int(rest_lengte_periode // 60)
uitvoer_1 = 'semi-major axis: {} meters'
uitvoer_2 = 'period: {} seconds'
uitvoer_3 = 'period: {} days, {} hours and {} minutes'
# output
print(uitvoer_1.format(lengte_grote_as))
print(uitvoer_2.format(lengte_periode))
print(uitvoer_3.format(aantal_dagen, aantal_uren, aantal_minuten))
|
[
"emiel.thomaes@sgsintpaulus.eu"
] |
emiel.thomaes@sgsintpaulus.eu
|
d2c8bad35b2e517726eb0b6570f47d80111ffddf
|
6cb82a6798716a99ea481637490a5ca64afa447a
|
/Visualization_all_pair_cosine.py
|
40098b11ad715f80b80959a8b335c9a22f8d0f20
|
[] |
no_license
|
BYOMKESHJHAIIITB/firstGit1
|
e453a55f87b5977d18517986c273ed705a2f446e
|
e2bf2e1393c037b1dce083a082fb66574d4feed1
|
refs/heads/master
| 2021-01-17T08:53:16.250318
| 2016-04-18T05:22:24
| 2016-04-18T05:22:24
| 33,564,283
| 0
| 1
| null | 2016-04-18T05:22:25
| 2015-04-07T19:40:30
|
Python
|
UTF-8
|
Python
| false
| false
| 2,647
|
py
|
import json
import matplotlib.pyplot as plt
import numpy as np
cluster_info_file = open("pepperfry_mean_vecotor_mar_03_with_source.json","r")
def getInfoMean(seed,cluster_info_file):
cluster_info_file_inside = open("pepperfry_mean_vecotor_mar_03_with_source.json","r")
final_position = []
urlh = []
title = []
source = []
can_angle = []
can_dist = []
mean_simi = []
for each in cluster_info_file_inside:
cluster_info = json.loads(each)
if (seed == cluster_info.keys()[0]):
# print "Match"
final_position = cluster_info[cluster_info.keys()[0]][3]
urlh = cluster_info[cluster_info.keys()[0]][0]
title = cluster_info[cluster_info.keys()[0]][1]
source = cluster_info[cluster_info.keys()[0]][2]
can_angle = cluster_info[cluster_info.keys()[0]][5]
can_dist = cluster_info[cluster_info.keys()[0]][6]
mean_simi = cluster_info[cluster_info.keys()[0]][4]
return final_position,urlh,title,source,can_angle,can_dist,mean_simi
def getInfo(seed,cluster_info_file):
cluster_info_file_inside = open("pepperfry_mean_vecotor_mar_03_with_source.json","r")
final_position = []
cosine_similarity = []
urlh = []
title = []
source = []
all_pair_dict = {}
can_angle = []
can_dist = []
mean_simi = []
for each in cluster_info_file_inside:
cluster_info = json.loads(each)
if (seed == cluster_info.keys()[0]):
# print "Match"
final_position = cluster_info[cluster_info.keys()[0]][5]
cosine_similarity = cluster_info[cluster_info.keys()[0]][1]
urlh = cluster_info[cluster_info.keys()[0]][0]
title = cluster_info[cluster_info.keys()[0]][2]
source = cluster_info[cluster_info.keys()[0]][3]
all_pair_dict = cluster_info[cluster_info.keys()[0]][6]
can_angle = cluster_info[cluster_info.keys()[0]][7]
can_dist = cluster_info[cluster_info.keys()[0]][8]
mean_simi = cluster_info[cluster_info.keys()[0]][9]
return final_position,cosine_similarity,urlh,title,source,all_pair_dict,can_angle,can_dist,mean_simi
r_out = open("input_for_r_pepperfry_mean_vector_simi_mar_04.json","w")
for idx,line in enumerate(cluster_info_file):
bundle = json.loads(line)
seed = bundle.keys()[0]
print idx
# final_position,cosine_similarity,urlh,title,source,all_pair_dict,angle,dist,mean_simi = getInfo(seed,cluster_info_file)
final_position,urlh,title,source,can_angle,can_dist,mean_simi = getInfoMean(seed,cluster_info_file)
r = {}
r["fp"] = ["seed"] + final_position
r["ms"] = [1] + mean_simi
r["title"] = ["Mean_vector"] + title
r["source"] = ["TP_clustering"] + source
r["mean_angle"] = [0] + can_angle
r["mean_dist"] = [0] + can_dist
r_out.write(json.dumps(r)+ "\n")
|
[
"byomcourses@gmail.com"
] |
byomcourses@gmail.com
|
891c6901af6494d438f31911941663f0620ea9e8
|
58ea52e6537ab0a82d106eac1955633d57301fc6
|
/authapp/urls.py
|
91a5777c3f5713429951d89dfb2c1aa5d5ef8fc5
|
[] |
no_license
|
VProgramMist/geekshop
|
8053d3f0dab483195dceca56063ad4906dad144b
|
63899385d2a187abaf06e453109d782396a82f54
|
refs/heads/master
| 2023-03-25T21:29:23.883706
| 2021-03-22T08:52:51
| 2021-03-22T08:52:51
| 339,402,876
| 0
| 0
| null | 2021-03-22T08:52:52
| 2021-02-16T13:12:18
|
CSS
|
UTF-8
|
Python
| false
| false
| 455
|
py
|
from django.urls import path
from django.contrib.auth.decorators import login_required
import authapp.views as authapp
app_name = 'authapp'
urlpatterns = [
path('login/', authapp.UserLoginView.as_view(), name='login'),
path('register/', authapp.UserCreateView.as_view(), name='register'),
path('logout/', authapp.UserLogoutView.as_view(), name='logout'),
path('profile/', login_required(authapp.UserUpdateView.as_view()), name='profile'),
]
|
[
"vvvaleriyyy@gmail.com"
] |
vvvaleriyyy@gmail.com
|
256d5b571a6a4caff57032edceb77b9f507cec59
|
15e308ddb4d03767900a7ae52b3d01e009dd5bcb
|
/yapily/models/authorisation_embedded_request_response.py
|
ab22519a92cd841460d0c9779b4782521bcd9acb
|
[] |
no_license
|
MedatechUK/yapily
|
be6d01df2a4d8a5f4ce8c79dcb098bd297fb884c
|
e4440d67a504dc6a8ec9f314e1362d5f6cf424a4
|
refs/heads/main
| 2023-07-07T16:53:41.059013
| 2021-08-23T06:57:23
| 2021-08-23T06:57:23
| 381,364,907
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,947
|
py
|
# coding: utf-8
"""
Yapily API
To access endpoints that require authentication, use your application key and secret created in the Dashboard (https://dashboard.yapily.com) # noqa: E501
The version of the OpenAPI document: 0.0.359
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from yapily.configuration import Configuration
class AuthorisationEmbeddedRequestResponse(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'user_uuid': 'str',
'application_user_id': 'str',
'reference_id': 'str',
'institution_id': 'str',
'status': 'str',
'created_at': 'datetime',
'transaction_from': 'datetime',
'transaction_to': 'datetime',
'expires_at': 'datetime',
'time_to_expire_in_millis': 'int',
'time_to_expire': 'str',
'feature_scope': 'list[str]',
'authorisation_url': 'str',
'consent_token': 'str',
'qr_code_url': 'str',
'sca_methods': 'list[ScaMethod]',
'state': 'str',
'selected_sca_method': 'ScaMethod',
'authorized_at': 'datetime',
'institution_consent_id': 'str'
}
attribute_map = {
'id': 'id',
'user_uuid': 'userUuid',
'application_user_id': 'applicationUserId',
'reference_id': 'referenceId',
'institution_id': 'institutionId',
'status': 'status',
'created_at': 'createdAt',
'transaction_from': 'transactionFrom',
'transaction_to': 'transactionTo',
'expires_at': 'expiresAt',
'time_to_expire_in_millis': 'timeToExpireInMillis',
'time_to_expire': 'timeToExpire',
'feature_scope': 'featureScope',
'authorisation_url': 'authorisationUrl',
'consent_token': 'consentToken',
'qr_code_url': 'qrCodeUrl',
'sca_methods': 'scaMethods',
'state': 'state',
'selected_sca_method': 'selectedScaMethod',
'authorized_at': 'authorizedAt',
'institution_consent_id': 'institutionConsentId'
}
def __init__(self, id=None, user_uuid=None, application_user_id=None, reference_id=None, institution_id=None, status=None, created_at=None, transaction_from=None, transaction_to=None, expires_at=None, time_to_expire_in_millis=None, time_to_expire=None, feature_scope=None, authorisation_url=None, consent_token=None, qr_code_url=None, sca_methods=None, state=None, selected_sca_method=None, authorized_at=None, institution_consent_id=None, local_vars_configuration=None): # noqa: E501
"""AuthorisationEmbeddedRequestResponse - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._user_uuid = None
self._application_user_id = None
self._reference_id = None
self._institution_id = None
self._status = None
self._created_at = None
self._transaction_from = None
self._transaction_to = None
self._expires_at = None
self._time_to_expire_in_millis = None
self._time_to_expire = None
self._feature_scope = None
self._authorisation_url = None
self._consent_token = None
self._qr_code_url = None
self._sca_methods = None
self._state = None
self._selected_sca_method = None
self._authorized_at = None
self._institution_consent_id = None
self.discriminator = None
if id is not None:
self.id = id
if user_uuid is not None:
self.user_uuid = user_uuid
if application_user_id is not None:
self.application_user_id = application_user_id
if reference_id is not None:
self.reference_id = reference_id
if institution_id is not None:
self.institution_id = institution_id
if status is not None:
self.status = status
if created_at is not None:
self.created_at = created_at
if transaction_from is not None:
self.transaction_from = transaction_from
if transaction_to is not None:
self.transaction_to = transaction_to
if expires_at is not None:
self.expires_at = expires_at
if time_to_expire_in_millis is not None:
self.time_to_expire_in_millis = time_to_expire_in_millis
if time_to_expire is not None:
self.time_to_expire = time_to_expire
if feature_scope is not None:
self.feature_scope = feature_scope
if authorisation_url is not None:
self.authorisation_url = authorisation_url
if consent_token is not None:
self.consent_token = consent_token
if qr_code_url is not None:
self.qr_code_url = qr_code_url
if sca_methods is not None:
self.sca_methods = sca_methods
if state is not None:
self.state = state
if selected_sca_method is not None:
self.selected_sca_method = selected_sca_method
if authorized_at is not None:
self.authorized_at = authorized_at
if institution_consent_id is not None:
self.institution_consent_id = institution_consent_id
@property
def id(self):
"""Gets the id of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:return: The id of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this AuthorisationEmbeddedRequestResponse.
:param id: The id of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:type: str
"""
self._id = id
@property
def user_uuid(self):
"""Gets the user_uuid of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:return: The user_uuid of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:rtype: str
"""
return self._user_uuid
@user_uuid.setter
def user_uuid(self, user_uuid):
"""Sets the user_uuid of this AuthorisationEmbeddedRequestResponse.
:param user_uuid: The user_uuid of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:type: str
"""
self._user_uuid = user_uuid
@property
def application_user_id(self):
"""Gets the application_user_id of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:return: The application_user_id of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:rtype: str
"""
return self._application_user_id
@application_user_id.setter
def application_user_id(self, application_user_id):
"""Sets the application_user_id of this AuthorisationEmbeddedRequestResponse.
:param application_user_id: The application_user_id of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:type: str
"""
self._application_user_id = application_user_id
@property
def reference_id(self):
"""Gets the reference_id of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:return: The reference_id of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:rtype: str
"""
return self._reference_id
@reference_id.setter
def reference_id(self, reference_id):
"""Sets the reference_id of this AuthorisationEmbeddedRequestResponse.
:param reference_id: The reference_id of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:type: str
"""
self._reference_id = reference_id
@property
def institution_id(self):
"""Gets the institution_id of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:return: The institution_id of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:rtype: str
"""
return self._institution_id
@institution_id.setter
def institution_id(self, institution_id):
"""Sets the institution_id of this AuthorisationEmbeddedRequestResponse.
:param institution_id: The institution_id of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:type: str
"""
self._institution_id = institution_id
@property
def status(self):
"""Gets the status of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:return: The status of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this AuthorisationEmbeddedRequestResponse.
:param status: The status of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:type: str
"""
allowed_values = ["AWAITING_AUTHORIZATION", "AWAITING_FURTHER_AUTHORIZATION", "AWAITING_RE_AUTHORIZATION", "AUTHORIZED", "CONSUMED", "REJECTED", "REVOKED", "FAILED", "EXPIRED", "UNKNOWN", "INVALID", "AWAITING_PRE_AUTHORIZATION", "PRE_AUTHORIZED", "AWAITING_DECOUPLED_AUTHORIZATION", "AWAITING_SCA_METHOD", "AWAITING_SCA_CODE"] # noqa: E501
if self.local_vars_configuration.client_side_validation and status not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `status` ({0}), must be one of {1}" # noqa: E501
.format(status, allowed_values)
)
self._status = status
@property
def created_at(self):
"""Gets the created_at of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:return: The created_at of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this AuthorisationEmbeddedRequestResponse.
:param created_at: The created_at of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:type: datetime
"""
self._created_at = created_at
@property
def transaction_from(self):
"""Gets the transaction_from of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:return: The transaction_from of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:rtype: datetime
"""
return self._transaction_from
@transaction_from.setter
def transaction_from(self, transaction_from):
"""Sets the transaction_from of this AuthorisationEmbeddedRequestResponse.
:param transaction_from: The transaction_from of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:type: datetime
"""
self._transaction_from = transaction_from
@property
def transaction_to(self):
"""Gets the transaction_to of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:return: The transaction_to of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:rtype: datetime
"""
return self._transaction_to
@transaction_to.setter
def transaction_to(self, transaction_to):
"""Sets the transaction_to of this AuthorisationEmbeddedRequestResponse.
:param transaction_to: The transaction_to of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:type: datetime
"""
self._transaction_to = transaction_to
@property
def expires_at(self):
"""Gets the expires_at of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:return: The expires_at of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:rtype: datetime
"""
return self._expires_at
@expires_at.setter
def expires_at(self, expires_at):
"""Sets the expires_at of this AuthorisationEmbeddedRequestResponse.
:param expires_at: The expires_at of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:type: datetime
"""
self._expires_at = expires_at
@property
def time_to_expire_in_millis(self):
"""Gets the time_to_expire_in_millis of this AuthorisationEmbeddedRequestResponse. # noqa: E501
Deprecated. Use `timeToExpire` instead. # noqa: E501
:return: The time_to_expire_in_millis of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:rtype: int
"""
return self._time_to_expire_in_millis
@time_to_expire_in_millis.setter
def time_to_expire_in_millis(self, time_to_expire_in_millis):
"""Sets the time_to_expire_in_millis of this AuthorisationEmbeddedRequestResponse.
Deprecated. Use `timeToExpire` instead. # noqa: E501
:param time_to_expire_in_millis: The time_to_expire_in_millis of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:type: int
"""
self._time_to_expire_in_millis = time_to_expire_in_millis
@property
def time_to_expire(self):
"""Gets the time_to_expire of this AuthorisationEmbeddedRequestResponse. # noqa: E501
ISO 8601 duration # noqa: E501
:return: The time_to_expire of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:rtype: str
"""
return self._time_to_expire
@time_to_expire.setter
def time_to_expire(self, time_to_expire):
"""Sets the time_to_expire of this AuthorisationEmbeddedRequestResponse.
ISO 8601 duration # noqa: E501
:param time_to_expire: The time_to_expire of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:type: str
"""
self._time_to_expire = time_to_expire
@property
def feature_scope(self):
"""Gets the feature_scope of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:return: The feature_scope of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:rtype: list[str]
"""
return self._feature_scope
@feature_scope.setter
def feature_scope(self, feature_scope):
"""Sets the feature_scope of this AuthorisationEmbeddedRequestResponse.
:param feature_scope: The feature_scope of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:type: list[str]
"""
allowed_values = ["INITIATE_PRE_AUTHORISATION", "INITIATE_ACCOUNT_REQUEST", "INITIATE_EMBEDDED_ACCOUNT_REQUEST", "ACCOUNT_REQUEST_DETAILS", "ACCOUNTS", "ACCOUNT", "ACCOUNT_TRANSACTIONS", "ACCOUNT_STATEMENTS", "ACCOUNT_STATEMENT", "ACCOUNT_STATEMENT_FILE", "ACCOUNT_SCHEDULED_PAYMENTS", "ACCOUNT_DIRECT_DEBITS", "ACCOUNT_PERIODIC_PAYMENTS", "ACCOUNT_TRANSACTIONS_WITH_MERCHANT", "IDENTITY", "ACCOUNTS_WITHOUT_BALANCE", "ACCOUNT_WITHOUT_BALANCE", "ACCOUNT_BALANCES", "INITIATE_SINGLE_PAYMENT_SORTCODE", "EXISTING_PAYMENT_INITIATION_DETAILS", "CREATE_SINGLE_PAYMENT_SORTCODE", "EXISTING_PAYMENTS_DETAILS", "INITIATE_DOMESTIC_SINGLE_PAYMENT", "INITIATE_EMBEDDED_DOMESTIC_SINGLE_PAYMENT", "CREATE_DOMESTIC_SINGLE_PAYMENT", "INITIATE_DOMESTIC_SINGLE_INSTANT_PAYMENT", "CREATE_DOMESTIC_SINGLE_INSTANT_PAYMENT", "INITIATE_DOMESTIC_VARIABLE_RECURRING_PAYMENT", "CREATE_DOMESTIC_VARIABLE_RECURRING_PAYMENT", "INITIATE_DOMESTIC_SCHEDULED_PAYMENT", "CREATE_DOMESTIC_SCHEDULED_PAYMENT", "INITIATE_DOMESTIC_PERIODIC_PAYMENT", "CREATE_DOMESTIC_PERIODIC_PAYMENT", "PERIODIC_PAYMENT_FREQUENCY_EXTENDED", "INITIATE_INTERNATIONAL_VARIABLE_RECURRING_PAYMENT", "CREATE_INTERNATIONAL_VARIABLE_RECURRING_PAYMENT", "INITIATE_INTERNATIONAL_SCHEDULED_PAYMENT", "CREATE_INTERNATIONAL_SCHEDULED_PAYMENT", "INITIATE_INTERNATIONAL_PERIODIC_PAYMENT", "CREATE_INTERNATIONAL_PERIODIC_PAYMENT", "INITIATE_INTERNATIONAL_SINGLE_PAYMENT", "CREATE_INTERNATIONAL_SINGLE_PAYMENT", "INITIATE_BULK_PAYMENT", "CREATE_BULK_PAYMENT", "TRANSFER", "OPEN_DATA_PERSONAL_CURRENT_ACCOUNTS", "OPEN_DATA_ATMS", "READ_DOMESTIC_SINGLE_REFUND", "READ_DOMESTIC_SCHEDULED_REFUND", "READ_DOMESTIC_PERIODIC_PAYMENT_REFUND", "READ_INTERNATIONAL_SINGLE_REFUND", "READ_INTERNATIONAL_SCHEDULED_REFUND", "ACCOUNT_BENEFICIARIES"] # noqa: E501
if (self.local_vars_configuration.client_side_validation and
not set(feature_scope).issubset(set(allowed_values))): # noqa: E501
raise ValueError(
"Invalid values for `feature_scope` [{0}], must be a subset of [{1}]" # noqa: E501
.format(", ".join(map(str, set(feature_scope) - set(allowed_values))), # noqa: E501
", ".join(map(str, allowed_values)))
)
self._feature_scope = feature_scope
@property
def authorisation_url(self):
"""Gets the authorisation_url of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:return: The authorisation_url of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:rtype: str
"""
return self._authorisation_url
@authorisation_url.setter
def authorisation_url(self, authorisation_url):
"""Sets the authorisation_url of this AuthorisationEmbeddedRequestResponse.
:param authorisation_url: The authorisation_url of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:type: str
"""
self._authorisation_url = authorisation_url
@property
def consent_token(self):
"""Gets the consent_token of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:return: The consent_token of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:rtype: str
"""
return self._consent_token
@consent_token.setter
def consent_token(self, consent_token):
"""Sets the consent_token of this AuthorisationEmbeddedRequestResponse.
:param consent_token: The consent_token of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:type: str
"""
self._consent_token = consent_token
@property
def qr_code_url(self):
"""Gets the qr_code_url of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:return: The qr_code_url of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:rtype: str
"""
return self._qr_code_url
@qr_code_url.setter
def qr_code_url(self, qr_code_url):
"""Sets the qr_code_url of this AuthorisationEmbeddedRequestResponse.
:param qr_code_url: The qr_code_url of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:type: str
"""
self._qr_code_url = qr_code_url
@property
def sca_methods(self):
"""Gets the sca_methods of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:return: The sca_methods of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:rtype: list[ScaMethod]
"""
return self._sca_methods
@sca_methods.setter
def sca_methods(self, sca_methods):
"""Sets the sca_methods of this AuthorisationEmbeddedRequestResponse.
:param sca_methods: The sca_methods of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:type: list[ScaMethod]
"""
self._sca_methods = sca_methods
@property
def state(self):
"""Gets the state of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:return: The state of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this AuthorisationEmbeddedRequestResponse.
:param state: The state of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:type: str
"""
self._state = state
@property
def selected_sca_method(self):
"""Gets the selected_sca_method of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:return: The selected_sca_method of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:rtype: ScaMethod
"""
return self._selected_sca_method
@selected_sca_method.setter
def selected_sca_method(self, selected_sca_method):
"""Sets the selected_sca_method of this AuthorisationEmbeddedRequestResponse.
:param selected_sca_method: The selected_sca_method of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:type: ScaMethod
"""
self._selected_sca_method = selected_sca_method
@property
def authorized_at(self):
"""Gets the authorized_at of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:return: The authorized_at of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:rtype: datetime
"""
return self._authorized_at
@authorized_at.setter
def authorized_at(self, authorized_at):
"""Sets the authorized_at of this AuthorisationEmbeddedRequestResponse.
:param authorized_at: The authorized_at of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:type: datetime
"""
self._authorized_at = authorized_at
@property
def institution_consent_id(self):
"""Gets the institution_consent_id of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:return: The institution_consent_id of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:rtype: str
"""
return self._institution_consent_id
@institution_consent_id.setter
def institution_consent_id(self, institution_consent_id):
"""Sets the institution_consent_id of this AuthorisationEmbeddedRequestResponse.
:param institution_consent_id: The institution_consent_id of this AuthorisationEmbeddedRequestResponse. # noqa: E501
:type: str
"""
self._institution_consent_id = institution_consent_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AuthorisationEmbeddedRequestResponse):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, AuthorisationEmbeddedRequestResponse):
return True
return self.to_dict() != other.to_dict()
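
# Sketch of the generated setter validation (instance construction elided; values
# are hypothetical): assigning an enum-backed field checks it against allowed_values.
# resp.status = "AUTHORIZED"     # accepted, stored on resp._status
# resp.status = "NOT_A_STATUS"   # ValueError when client_side_validation is enabled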
|
[
"simonbarnett@emerge-it.co.uk"
] |
simonbarnett@emerge-it.co.uk
|
2288003f49f68a310c971a85fef4031187c4e659
|
c828cd876d92b1e10309909c1d4e63ae64d5dfde
|
/simple_python_programs/howmanyrecordsbelow.1242.py
|
3af12bb214fccd2b8ea3ae085a564d25504bb60d
|
[] |
no_license
|
walkergussler/random_python
|
111770b2c06c0560145210073510568c5eeca564
|
c7d8386d906ce4c21490b651d9f3a49ab3fa24e5
|
refs/heads/main
| 2023-01-09T16:39:58.916523
| 2020-11-10T07:24:18
| 2020-11-10T07:24:18
| 311,574,297
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
#!/usr/bin/python
# Usage: howmanyrecordsbelow.1242.py <tab-separated file>
# Count the records preceding the first line whose third column exceeds 0.1242.
import sys

filename = sys.argv[1]
with open(filename, "r") as f:
    lines = f.readlines()

val = 0
for line in lines:
    splitline = line.split("\t")
    val += 1
    if float(splitline[2]) > .1242:
        print(line)
        break
print(val - 2)  # the offset of 2 is preserved from the original script's count
|
[
"mnz0@biolinux.biotech.cdc.gov"
] |
mnz0@biolinux.biotech.cdc.gov
|
2baf1a5d99f7d318b289dd5c04beb3958f6099f9
|
c27c51f5c33e0431dbe7db6e18c21b249d476cfa
|
/OpenSource_Python_Code/pyke-1.1.1/examples/towers_of_hanoi/compiled_krb/towers_of_hanoi_bc.py
|
2ba8698538eb3a35b97edcb01141d097a8eced30
|
[
"MIT"
] |
permissive
|
bopopescu/Python_Stuff
|
9bef74e0db17bb5e3ba2d908ced01ee744820d80
|
9aa94a0fa5e4e802090c7b29ec88b840e304d9e5
|
refs/heads/master
| 2022-11-20T06:54:36.581623
| 2017-12-04T18:56:02
| 2017-12-04T18:56:02
| 282,171,169
| 0
| 0
| null | 2020-07-24T08:54:37
| 2020-07-24T08:54:36
| null |
UTF-8
|
Python
| false
| false
| 19,914
|
py
|
# towers_of_hanoi_bc.py
from __future__ import with_statement
import itertools
from pyke import contexts, pattern, bc_rule
pyke_version = '1.1.1'
compiler_version = 1
def solve(rule, arg_patterns, arg_context):
engine = rule.rule_base.engine
patterns = rule.goal_arg_patterns()
if len(arg_patterns) == len(patterns):
context = contexts.bc_context(rule)
try:
if all(itertools.imap(lambda pat, arg:
pat.match_pattern(context, context,
arg, arg_context),
patterns,
arg_patterns)):
rule.rule_base.num_bc_rules_matched += 1
mark1 = context.mark(True)
if rule.pattern(0).match_data(context, context,
tuple(range(context.lookup_data('n')))):
context.end_save_all_undo()
with engine.prove(rule.rule_base.root_name, 'solve2', context,
(rule.pattern(0),
rule.pattern(1),
rule.pattern(1),
rule.pattern(1),
rule.pattern(1),
rule.pattern(0),
rule.pattern(2),
rule.pattern(3),
rule.pattern(4),)) \
as gen_2:
for x_2 in gen_2:
assert x_2 is None, \
"towers_of_hanoi.solve: got unexpected plan from when clause 2"
rule.rule_base.num_bc_rule_successes += 1
yield
else: context.end_save_all_undo()
context.undo_to_mark(mark1)
rule.rule_base.num_bc_rule_failures += 1
finally:
context.done()
def solve2_done(rule, arg_patterns, arg_context):
engine = rule.rule_base.engine
patterns = rule.goal_arg_patterns()
if len(arg_patterns) == len(patterns):
context = contexts.bc_context(rule)
try:
if all(itertools.imap(lambda pat, arg:
pat.match_pattern(context, context,
arg, arg_context),
patterns,
arg_patterns)):
rule.rule_base.num_bc_rules_matched += 1
rule.rule_base.num_bc_rule_successes += 1
yield
rule.rule_base.num_bc_rule_failures += 1
finally:
context.done()
def solve2_not_done(rule, arg_patterns, arg_context):
engine = rule.rule_base.engine
patterns = rule.goal_arg_patterns()
if len(arg_patterns) == len(patterns):
context = contexts.bc_context(rule)
try:
if all(itertools.imap(lambda pat, arg:
pat.match_pattern(context, context,
arg, arg_context),
patterns,
arg_patterns)):
rule.rule_base.num_bc_rules_matched += 1
with engine.prove(rule.rule_base.root_name, 'move', context,
(rule.pattern(0),
rule.pattern(1),
rule.pattern(2),
rule.pattern(3),
rule.pattern(4),
rule.pattern(5),
rule.pattern(6),)) \
as gen_1:
for x_1 in gen_1:
assert x_1 is None, \
"towers_of_hanoi.solve2_not_done: got unexpected plan from when clause 1"
if context.lookup_data('from') != context.lookup_data('last_move'):
mark3 = context.mark(True)
if rule.pattern(7).match_data(context, context,
(context.lookup_data('a'), context.lookup_data('b'), context.lookup_data('c'))):
context.end_save_all_undo()
if context.lookup_data('freeze') not in context.lookup_data('frozen_boards'):
with engine.prove(rule.rule_base.root_name, 'solve2', context,
(rule.pattern(3),
rule.pattern(4),
rule.pattern(5),
rule.pattern(8),
rule.pattern(9),
rule.pattern(10),
rule.pattern(11),
rule.pattern(12),
rule.pattern(13),)) \
as gen_5:
for x_5 in gen_5:
assert x_5 is None, \
"towers_of_hanoi.solve2_not_done: got unexpected plan from when clause 5"
rule.rule_base.num_bc_rule_successes += 1
yield
else: context.end_save_all_undo()
context.undo_to_mark(mark3)
rule.rule_base.num_bc_rule_failures += 1
finally:
context.done()
def move_01(rule, arg_patterns, arg_context):
engine = rule.rule_base.engine
patterns = rule.goal_arg_patterns()
if len(arg_patterns) == len(patterns):
context = contexts.bc_context(rule)
try:
if all(itertools.imap(lambda pat, arg:
pat.match_pattern(context, context,
arg, arg_context),
patterns,
arg_patterns)):
rule.rule_base.num_bc_rules_matched += 1
with engine.prove(rule.rule_base.root_name, 'ok', context,
(rule.pattern(0),
rule.pattern(1),)) \
as gen_1:
for x_1 in gen_1:
assert x_1 is None, \
"towers_of_hanoi.move_01: got unexpected plan from when clause 1"
rule.rule_base.num_bc_rule_successes += 1
yield
rule.rule_base.num_bc_rule_failures += 1
finally:
context.done()
def move_02(rule, arg_patterns, arg_context):
engine = rule.rule_base.engine
patterns = rule.goal_arg_patterns()
if len(arg_patterns) == len(patterns):
context = contexts.bc_context(rule)
try:
if all(itertools.imap(lambda pat, arg:
pat.match_pattern(context, context,
arg, arg_context),
patterns,
arg_patterns)):
rule.rule_base.num_bc_rules_matched += 1
with engine.prove(rule.rule_base.root_name, 'ok', context,
(rule.pattern(0),
rule.pattern(1),)) \
as gen_1:
for x_1 in gen_1:
assert x_1 is None, \
"towers_of_hanoi.move_02: got unexpected plan from when clause 1"
rule.rule_base.num_bc_rule_successes += 1
yield
rule.rule_base.num_bc_rule_failures += 1
finally:
context.done()
def move_10(rule, arg_patterns, arg_context):
engine = rule.rule_base.engine
patterns = rule.goal_arg_patterns()
if len(arg_patterns) == len(patterns):
context = contexts.bc_context(rule)
try:
if all(itertools.imap(lambda pat, arg:
pat.match_pattern(context, context,
arg, arg_context),
patterns,
arg_patterns)):
rule.rule_base.num_bc_rules_matched += 1
with engine.prove(rule.rule_base.root_name, 'ok', context,
(rule.pattern(0),
rule.pattern(1),)) \
as gen_1:
for x_1 in gen_1:
assert x_1 is None, \
"towers_of_hanoi.move_10: got unexpected plan from when clause 1"
rule.rule_base.num_bc_rule_successes += 1
yield
rule.rule_base.num_bc_rule_failures += 1
finally:
context.done()
def move_12(rule, arg_patterns, arg_context):
engine = rule.rule_base.engine
patterns = rule.goal_arg_patterns()
if len(arg_patterns) == len(patterns):
context = contexts.bc_context(rule)
try:
if all(itertools.imap(lambda pat, arg:
pat.match_pattern(context, context,
arg, arg_context),
patterns,
arg_patterns)):
rule.rule_base.num_bc_rules_matched += 1
with engine.prove(rule.rule_base.root_name, 'ok', context,
(rule.pattern(0),
rule.pattern(1),)) \
as gen_1:
for x_1 in gen_1:
assert x_1 is None, \
"towers_of_hanoi.move_12: got unexpected plan from when clause 1"
rule.rule_base.num_bc_rule_successes += 1
yield
rule.rule_base.num_bc_rule_failures += 1
finally:
context.done()
def move_20(rule, arg_patterns, arg_context):
engine = rule.rule_base.engine
patterns = rule.goal_arg_patterns()
if len(arg_patterns) == len(patterns):
context = contexts.bc_context(rule)
try:
if all(itertools.imap(lambda pat, arg:
pat.match_pattern(context, context,
arg, arg_context),
patterns,
arg_patterns)):
rule.rule_base.num_bc_rules_matched += 1
with engine.prove(rule.rule_base.root_name, 'ok', context,
(rule.pattern(0),
rule.pattern(1),)) \
as gen_1:
for x_1 in gen_1:
assert x_1 is None, \
"towers_of_hanoi.move_20: got unexpected plan from when clause 1"
rule.rule_base.num_bc_rule_successes += 1
yield
rule.rule_base.num_bc_rule_failures += 1
finally:
context.done()
def move_21(rule, arg_patterns, arg_context):
engine = rule.rule_base.engine
patterns = rule.goal_arg_patterns()
if len(arg_patterns) == len(patterns):
context = contexts.bc_context(rule)
try:
if all(itertools.imap(lambda pat, arg:
pat.match_pattern(context, context,
arg, arg_context),
patterns,
arg_patterns)):
rule.rule_base.num_bc_rules_matched += 1
with engine.prove(rule.rule_base.root_name, 'ok', context,
(rule.pattern(0),
rule.pattern(1),)) \
as gen_1:
for x_1 in gen_1:
assert x_1 is None, \
"towers_of_hanoi.move_21: got unexpected plan from when clause 1"
rule.rule_base.num_bc_rule_successes += 1
yield
rule.rule_base.num_bc_rule_failures += 1
finally:
context.done()
def ok_empty(rule, arg_patterns, arg_context):
engine = rule.rule_base.engine
patterns = rule.goal_arg_patterns()
if len(arg_patterns) == len(patterns):
context = contexts.bc_context(rule)
try:
if all(itertools.imap(lambda pat, arg:
pat.match_pattern(context, context,
arg, arg_context),
patterns,
arg_patterns)):
rule.rule_base.num_bc_rules_matched += 1
rule.rule_base.num_bc_rule_successes += 1
yield
rule.rule_base.num_bc_rule_failures += 1
finally:
context.done()
def ok_smaller(rule, arg_patterns, arg_context):
engine = rule.rule_base.engine
patterns = rule.goal_arg_patterns()
if len(arg_patterns) == len(patterns):
context = contexts.bc_context(rule)
try:
if all(itertools.imap(lambda pat, arg:
pat.match_pattern(context, context,
arg, arg_context),
patterns,
arg_patterns)):
rule.rule_base.num_bc_rules_matched += 1
if context.lookup_data('disc') < context.lookup_data('top'):
rule.rule_base.num_bc_rule_successes += 1
yield
rule.rule_base.num_bc_rule_failures += 1
finally:
context.done()
def populate(engine):
This_rule_base = engine.get_create('towers_of_hanoi')
bc_rule.bc_rule('solve', This_rule_base, 'solve',
solve, None,
(contexts.variable('n'),
contexts.variable('moves'),),
(),
(contexts.variable('disks'),
pattern.pattern_literal(()),
pattern.pattern_literal(1),
pattern.pattern_tuple((pattern.pattern_tuple((contexts.variable('disks'), pattern.pattern_literal(()), pattern.pattern_literal(()),), None),), None),
contexts.variable('moves'),))
bc_rule.bc_rule('solve2_done', This_rule_base, 'solve2',
solve2_done, None,
(contexts.variable('a'),
contexts.variable('b'),
contexts.variable('c'),
contexts.variable('a'),
contexts.variable('b'),
contexts.variable('c'),
contexts.anonymous('_last_move'),
contexts.anonymous('_frozen_boards'),
pattern.pattern_literal(()),),
(),
())
bc_rule.bc_rule('solve2_not_done', This_rule_base, 'solve2',
solve2_not_done, None,
(contexts.variable('a1'),
contexts.variable('b1'),
contexts.variable('c1'),
contexts.variable('a2'),
contexts.variable('b2'),
contexts.variable('c2'),
contexts.variable('last_move'),
contexts.variable('frozen_boards'),
pattern.pattern_tuple((pattern.pattern_tuple((contexts.variable('from'), contexts.variable('to'),), None),), contexts.variable('moves')),),
(),
(contexts.variable('a1'),
contexts.variable('b1'),
contexts.variable('c1'),
contexts.variable('a'),
contexts.variable('b'),
contexts.variable('c'),
pattern.pattern_tuple((contexts.variable('from'), contexts.variable('to'),), None),
contexts.variable('freeze'),
contexts.variable('a2'),
contexts.variable('b2'),
contexts.variable('c2'),
contexts.variable('to'),
pattern.pattern_tuple((contexts.variable('freeze'),), contexts.variable('frozen_boards')),
contexts.variable('moves'),))
bc_rule.bc_rule('move_01', This_rule_base, 'move',
move_01, None,
(pattern.pattern_tuple((contexts.variable('a1'),), contexts.variable('as')),
contexts.variable('b'),
contexts.variable('c'),
contexts.variable('as'),
pattern.pattern_tuple((contexts.variable('a1'),), contexts.variable('b')),
contexts.variable('c'),
pattern.pattern_literal((0, 1,)),),
(),
(contexts.variable('a1'),
contexts.variable('b'),))
bc_rule.bc_rule('move_02', This_rule_base, 'move',
move_02, None,
(pattern.pattern_tuple((contexts.variable('a1'),), contexts.variable('as')),
contexts.variable('b'),
contexts.variable('c'),
contexts.variable('as'),
contexts.variable('b'),
pattern.pattern_tuple((contexts.variable('a1'),), contexts.variable('c')),
pattern.pattern_literal((0, 2,)),),
(),
(contexts.variable('a1'),
contexts.variable('c'),))
bc_rule.bc_rule('move_10', This_rule_base, 'move',
move_10, None,
(contexts.variable('a'),
pattern.pattern_tuple((contexts.variable('b1'),), contexts.variable('bs')),
contexts.variable('c'),
pattern.pattern_tuple((contexts.variable('b1'),), contexts.variable('a')),
contexts.variable('bs'),
contexts.variable('c'),
pattern.pattern_literal((1, 0,)),),
(),
(contexts.variable('b1'),
contexts.variable('a'),))
bc_rule.bc_rule('move_12', This_rule_base, 'move',
move_12, None,
(contexts.variable('a'),
pattern.pattern_tuple((contexts.variable('b1'),), contexts.variable('bs')),
contexts.variable('c'),
contexts.variable('a'),
contexts.variable('bs'),
pattern.pattern_tuple((contexts.variable('b1'),), contexts.variable('c')),
pattern.pattern_literal((1, 2,)),),
(),
(contexts.variable('b1'),
contexts.variable('c'),))
bc_rule.bc_rule('move_20', This_rule_base, 'move',
move_20, None,
(contexts.variable('a'),
contexts.variable('b'),
pattern.pattern_tuple((contexts.variable('c1'),), contexts.variable('cs')),
pattern.pattern_tuple((contexts.variable('c1'),), contexts.variable('a')),
contexts.variable('b'),
contexts.variable('cs'),
pattern.pattern_literal((2, 0,)),),
(),
(contexts.variable('c1'),
contexts.variable('a'),))
bc_rule.bc_rule('move_21', This_rule_base, 'move',
move_21, None,
(contexts.variable('a'),
contexts.variable('b'),
pattern.pattern_tuple((contexts.variable('c1'),), contexts.variable('cs')),
contexts.variable('a'),
pattern.pattern_tuple((contexts.variable('c1'),), contexts.variable('b')),
contexts.variable('cs'),
pattern.pattern_literal((2, 1,)),),
(),
(contexts.variable('c1'),
contexts.variable('b'),))
bc_rule.bc_rule('ok_empty', This_rule_base, 'ok',
ok_empty, None,
(contexts.anonymous('_disc'),
pattern.pattern_literal(()),),
(),
())
bc_rule.bc_rule('ok_smaller', This_rule_base, 'ok',
ok_smaller, None,
(contexts.variable('disc'),
pattern.pattern_tuple((contexts.variable('top'),), contexts.anonymous('_rest')),),
(),
())
Krb_filename = '../towers_of_hanoi.krb'
Krb_lineno_map = (
((16, 20), (16, 16)),
((24, 24), (18, 18)),
((26, 39), (19, 19)),
((54, 58), (22, 22)),
((72, 76), (25, 26)),
((78, 89), (28, 28)),
((90, 90), (29, 29)),
((93, 93), (30, 30)),
((95, 95), (31, 31)),
((96, 109), (32, 33)),
((124, 128), (36, 36)),
((130, 136), (38, 38)),
((149, 153), (41, 41)),
((155, 161), (43, 43)),
((174, 178), (46, 46)),
((180, 186), (48, 48)),
((199, 203), (51, 51)),
((205, 211), (53, 53)),
((224, 228), (56, 56)),
((230, 236), (58, 58)),
((249, 253), (61, 61)),
((255, 261), (63, 63)),
((274, 278), (66, 66)),
((292, 296), (69, 69)),
((298, 298), (71, 71)),
)
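
# Hypothetical driver for this compiled rule base, following pyke's documented
# knowledge_engine API (goal string and n value are illustrative):
# from pyke import knowledge_engine
# engine = knowledge_engine.engine(__file__)
# engine.activate('towers_of_hanoi')
# vars, plan = engine.prove_1_goal('towers_of_hanoi.solve($n, $moves)', n=2)
# print(vars['moves'])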
|
[
"thelma1944@gmail.com"
] |
thelma1944@gmail.com
|
a0963ca4ea83528e9df19dcb0eb12b6b55b35657
|
dfafecd03f99aa411659e03fca3ca4233e323bf3
|
/PredictiveProject/PredictiveAcceptance/context_processors.py
|
8f605b1a0a915263358699c3e7140b7f05fe9310
|
[] |
no_license
|
Chermaraj/PredictiveProject
|
822c5c18f5e61792b3f870ada8bb0635bbd7befa
|
30485d501b98976924be5e3cb0b69c6cad4d7787
|
refs/heads/master
| 2023-01-07T16:07:17.206303
| 2019-04-01T03:57:43
| 2019-04-01T03:57:43
| 174,879,398
| 0
| 0
| null | 2022-12-27T15:35:28
| 2019-03-10T20:50:38
|
CSS
|
UTF-8
|
Python
| false
| false
| 299
|
py
|
from PredictiveAcceptance.models import PredictiveUsers
def getuserName(request):
    if request.session.get('username'):
        user = PredictiveUsers.objects.get(username=request.session['username'])
        return {'userList': user}
    else:
        user = ['NoEmptyList']
        return {'userList': user}
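
# To take effect, this processor must be registered (module path assumed) under
# TEMPLATES[0]['OPTIONS']['context_processors'] in settings.py:
#   'PredictiveAcceptance.context_processors.getuserName'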
|
[
"mchermaraj@gmail.com"
] |
mchermaraj@gmail.com
|
96e4b45ecec051d2bc35cb2a4b8873169812644b
|
0adb68bbf576340c8ba1d9d3c07320ab3bfdb95e
|
/regexlib/python_re2_test_file/regexlib_1114.py
|
109c76865def6cb38c127f4b4f59b0efaf53831b
|
[
"MIT"
] |
permissive
|
agentjacker/ReDoS-Benchmarks
|
c7d6633a3b77d9e29e0ee2db98d5dfb60cde91c6
|
f5b5094d835649e957bf3fec6b8bd4f6efdb35fc
|
refs/heads/main
| 2023-05-10T13:57:48.491045
| 2021-05-21T11:19:39
| 2021-05-21T11:19:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 559
|
py
|
# 1114
# ^p(ost)?[ |\.]*o(ffice)?[ |\.]*(box)?[ 0-9]*[^[a-z ]]*
# POLYNOMIAL
# nums:5
# POLYNOMIAL AttackString:"po"+" "*5000+"! _1_POA(i)"
import re2 as re
from time import perf_counter
regex = """^p(ost)?[ |\.]*o(ffice)?[ |\.]*(box)?[ 0-9]*[^[a-z ]]*"""
REGEX = re.compile(regex)
for i in range(0, 150000):
ATTACK = "po" + " " * i * 10000 + "! _1_POA(i)"
LEN = len(ATTACK)
BEGIN = perf_counter()
m = REGEX.search(ATTACK)
# m = REGEX.match(ATTACK)
DURATION = perf_counter() - BEGIN
print(f"{i *10000}: took {DURATION} seconds!")
|
[
"liyt@ios.ac.cn"
] |
liyt@ios.ac.cn
|
e46fa6b64d83fdaa442c5a983b25d61813819cee
|
5bea7fd1fed98c3124861d7cb142abba11146910
|
/PL0/program.py
|
2be770c7759af577d9bf3a732efe5891040899a5
|
[] |
no_license
|
handsome-fish/pl0
|
b932ef5255cebfb979f1ad348248bc11cc15b1b0
|
b0a0378773adaaf549ee48da84c4691fa24845fc
|
refs/heads/master
| 2020-05-03T00:54:10.667383
| 2019-03-29T03:27:29
| 2019-03-29T03:38:35
| 178,322,546
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 74
|
py
|
# program -> block period_token
def program_block(ast):
return ast[1]
|
[
"1658431245@qq.com"
] |
1658431245@qq.com
|
3e8959dc002cd803da25fdb901f44ec5bee9cc59
|
c4c0f5d742b88106053b63a0c0e710efeb867e79
|
/flowcells/migrations/0006_contamination_cctttccc.py
|
b8fdc76adeeda7146e84c6a9c89c6221fa37716e
|
[
"MIT"
] |
permissive
|
bihealth/digestiflow-server
|
0ed49c371cd59da5c331259a65cdd75ca71fab76
|
83f94d068d8592d83254a0b4271be9523334339d
|
refs/heads/main
| 2023-02-18T05:56:13.261282
| 2022-04-21T09:43:58
| 2022-04-21T09:52:52
| 165,990,428
| 16
| 3
| null | 2023-02-16T05:17:33
| 2019-01-16T06:56:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,500
|
py
|
# -*- coding: utf-8 -*-
"""Add contamination ``"CCTTTCCC"``"""
from __future__ import unicode_literals
from django.db import migrations
def forwards_func(apps, schema_editor):
KnownIndexContamination = apps.get_model("flowcells", "KnownIndexContamination")
db_alias = schema_editor.connection.alias
KnownIndexContamination.objects.using(db_alias).bulk_create(
[
KnownIndexContamination(
title="i7 single-index contamination #2",
sequence="CCTTTCCC",
description=(
"""Sometimes seen in i7 reads of a dual indexing library that is "contamined" with a single """
"""index library (plus a T>C read error in the first base). Also see [Illumina index """
"""sequencing – where is my sample?] """
""""(http://enseqlopedia.com/2018/01/illumina-index-sequencing-sample/) on Enseqlpedia."""
),
factory_default=True,
)
]
)
def reverse_func(apps, schema_editor):
"""Remove sequence again"""
KnownIndexContamination = apps.get_model("flowcells", "KnownIndexContamination")
db_alias = schema_editor.connection.alias
KnownIndexContamination.objects.using(db_alias).filter(sequence="CCTTTCCC").delete()
class Migration(migrations.Migration):
dependencies = [("flowcells", "0005_auto_20190121_1559")]
operations = [migrations.RunPython(forwards_func, reverse_func)]
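
# RunPython pairs forwards_func with reverse_func so the migration can also be
# unapplied cleanly: the bulk-created contamination row is deleted again.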
|
[
"manuel.holtgrewe@bihealth.de"
] |
manuel.holtgrewe@bihealth.de
|
14f6313e88a64fbfd396d42616a65189708c8b08
|
48e124e97cc776feb0ad6d17b9ef1dfa24e2e474
|
/sdk/python/pulumi_azure_native/datashare/v20201001preview/get_blob_container_data_set_mapping.py
|
29bb28811a48733545b123863e9ee6d29436f6f5
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bpkgoud/pulumi-azure-native
|
0817502630062efbc35134410c4a784b61a4736d
|
a3215fe1b87fba69294f248017b1591767c2b96c
|
refs/heads/master
| 2023-08-29T22:39:49.984212
| 2021-11-15T12:43:41
| 2021-11-15T12:43:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,363
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetBlobContainerDataSetMappingResult',
'AwaitableGetBlobContainerDataSetMappingResult',
'get_blob_container_data_set_mapping',
'get_blob_container_data_set_mapping_output',
]
@pulumi.output_type
class GetBlobContainerDataSetMappingResult:
"""
A Blob container data set mapping.
"""
def __init__(__self__, container_name=None, data_set_id=None, data_set_mapping_status=None, id=None, kind=None, name=None, provisioning_state=None, resource_group=None, storage_account_name=None, subscription_id=None, system_data=None, type=None):
if container_name and not isinstance(container_name, str):
raise TypeError("Expected argument 'container_name' to be a str")
pulumi.set(__self__, "container_name", container_name)
if data_set_id and not isinstance(data_set_id, str):
raise TypeError("Expected argument 'data_set_id' to be a str")
pulumi.set(__self__, "data_set_id", data_set_id)
if data_set_mapping_status and not isinstance(data_set_mapping_status, str):
raise TypeError("Expected argument 'data_set_mapping_status' to be a str")
pulumi.set(__self__, "data_set_mapping_status", data_set_mapping_status)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_group and not isinstance(resource_group, str):
raise TypeError("Expected argument 'resource_group' to be a str")
pulumi.set(__self__, "resource_group", resource_group)
if storage_account_name and not isinstance(storage_account_name, str):
raise TypeError("Expected argument 'storage_account_name' to be a str")
pulumi.set(__self__, "storage_account_name", storage_account_name)
if subscription_id and not isinstance(subscription_id, str):
raise TypeError("Expected argument 'subscription_id' to be a str")
pulumi.set(__self__, "subscription_id", subscription_id)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> str:
"""
BLOB Container name.
"""
return pulumi.get(self, "container_name")
@property
@pulumi.getter(name="dataSetId")
def data_set_id(self) -> str:
"""
The id of the source data set.
"""
return pulumi.get(self, "data_set_id")
@property
@pulumi.getter(name="dataSetMappingStatus")
def data_set_mapping_status(self) -> str:
"""
Gets the status of the data set mapping.
"""
return pulumi.get(self, "data_set_mapping_status")
@property
@pulumi.getter
def id(self) -> str:
"""
The resource id of the azure resource
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> str:
"""
Kind of data set mapping.
Expected value is 'Container'.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the azure resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
Provisioning state of the data set mapping.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGroup")
def resource_group(self) -> str:
"""
Resource group of storage account.
"""
return pulumi.get(self, "resource_group")
@property
@pulumi.getter(name="storageAccountName")
def storage_account_name(self) -> str:
"""
Storage account name of the source data set.
"""
return pulumi.get(self, "storage_account_name")
@property
@pulumi.getter(name="subscriptionId")
def subscription_id(self) -> str:
"""
Subscription id of storage account.
"""
return pulumi.get(self, "subscription_id")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
System Data of the Azure resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
Type of the azure resource
"""
return pulumi.get(self, "type")
class AwaitableGetBlobContainerDataSetMappingResult(GetBlobContainerDataSetMappingResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetBlobContainerDataSetMappingResult(
container_name=self.container_name,
data_set_id=self.data_set_id,
data_set_mapping_status=self.data_set_mapping_status,
id=self.id,
kind=self.kind,
name=self.name,
provisioning_state=self.provisioning_state,
resource_group=self.resource_group,
storage_account_name=self.storage_account_name,
subscription_id=self.subscription_id,
system_data=self.system_data,
type=self.type)
def get_blob_container_data_set_mapping(account_name: Optional[str] = None,
data_set_mapping_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
share_subscription_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetBlobContainerDataSetMappingResult:
"""
A Blob container data set mapping.
:param str account_name: The name of the share account.
:param str data_set_mapping_name: The name of the dataSetMapping.
:param str resource_group_name: The resource group name.
:param str share_subscription_name: The name of the shareSubscription.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['dataSetMappingName'] = data_set_mapping_name
__args__['resourceGroupName'] = resource_group_name
__args__['shareSubscriptionName'] = share_subscription_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:datashare/v20201001preview:getBlobContainerDataSetMapping', __args__, opts=opts, typ=GetBlobContainerDataSetMappingResult).value
return AwaitableGetBlobContainerDataSetMappingResult(
container_name=__ret__.container_name,
data_set_id=__ret__.data_set_id,
data_set_mapping_status=__ret__.data_set_mapping_status,
id=__ret__.id,
kind=__ret__.kind,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
resource_group=__ret__.resource_group,
storage_account_name=__ret__.storage_account_name,
subscription_id=__ret__.subscription_id,
system_data=__ret__.system_data,
type=__ret__.type)
@_utilities.lift_output_func(get_blob_container_data_set_mapping)
def get_blob_container_data_set_mapping_output(account_name: Optional[pulumi.Input[str]] = None,
data_set_mapping_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
share_subscription_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetBlobContainerDataSetMappingResult]:
"""
A Blob container data set mapping.
:param str account_name: The name of the share account.
:param str data_set_mapping_name: The name of the dataSetMapping.
:param str resource_group_name: The resource group name.
:param str share_subscription_name: The name of the shareSubscription.
"""
...
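
# Example invocation (all resource names are placeholders):
# result = get_blob_container_data_set_mapping(
#     account_name="myShareAccount",
#     data_set_mapping_name="myDataSetMapping",
#     resource_group_name="myResourceGroup",
#     share_subscription_name="myShareSubscription")
# pulumi.export("containerName", result.container_name)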
|
[
"noreply@github.com"
] |
bpkgoud.noreply@github.com
|
f0b17c9ee092eb0b078e03c22fa8042ac4abd3d1
|
ff467df3e087362642e1362f805504023b3425c7
|
/current_doctor.py
|
ac661752dfa717ed8be10defd31a3953f6b75990
|
[] |
no_license
|
Rishabh-Nagar/Doctor_appointment
|
3de18fda342f4b747a087181554ac290efd2feb4
|
5b6ce4da7ff147f9d8080af4b8b909cbb4ff21cf
|
refs/heads/main
| 2023-06-15T12:37:04.712226
| 2021-07-16T16:27:51
| 2021-07-16T16:27:51
| 386,699,116
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 643
|
py
|
import sqlite3
cur_d = sqlite3.connect("current_data_doctor.db", check_same_thread=False)
cud = cur_d.cursor()
def create_current_doctor_table():
cud.execute('CREATE TABLE IF NOT EXISTS current_doctor_table(name TEXT, age INTEGER, sex TEXT, degree TEXT, cur_hosp TEXT)')
def add_current_doctor_data(name, age, sex, degree, cur_hosp):
cud.execute('INSERT INTO current_doctor_table(name, age, sex, degree, cur_hosp) VALUES (?,?,?,?,?)', (name, age, sex, degree, cur_hosp))
cur_d.commit()
def view_all_current_doctor():
    cud.execute('SELECT * FROM current_doctor_table')  # table name must match the CREATE above
data = cud.fetchall()
return data
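
# Minimal usage sketch (sample values are made up):
# create_current_doctor_table()
# add_current_doctor_data("A. Sharma", 45, "F", "MBBS", "City Hospital")
# print(view_all_current_doctor())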
|
[
"noreply@github.com"
] |
Rishabh-Nagar.noreply@github.com
|
951b436e9ab532c7b4051aec778167f13acf0cf6
|
f38ae6d6d38aaf6123b45105ac07251d6b82a4d1
|
/tests/test_fileio.py
|
b35d23b0f650ccd0289c280dadd3913f1572f75f
|
[] |
no_license
|
brettviren/btdtwf
|
96d37a505629a52fff445244351fd90edc4071d7
|
26a5eede976c6a5287e8087cfd29f830bec6c240
|
refs/heads/master
| 2021-01-17T12:00:38.972031
| 2016-07-22T21:06:34
| 2016-07-22T21:06:34
| 11,324,629
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 85
|
py
|
#!/usr/bin/env python
from btdtwf.nodes import *
def test_fileio():
    pass  # test body missing in the source; pass keeps the module importable
|
[
"bv@bnl.gov"
] |
bv@bnl.gov
|
4c9b050caaa96eada935577fa77f13cd5fc42bce
|
1c535f8c755b9f09f550168141c42c99b7a377c6
|
/basicTxtReader.py
|
8324be32e2184b33590698fdfad9e18e140109ea
|
[] |
no_license
|
sametdlsk/python_Basics
|
bb8bdfac9f12ac0d24492e57d901789fc1dd9ec9
|
ba09429c5d36d7bd5ba96d022bb8ef23b7066307
|
refs/heads/main
| 2023-08-17T09:54:27.509739
| 2021-09-29T20:37:17
| 2021-09-29T20:37:17
| 411,822,002
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 720
|
py
|
# -*- coding:utf-8 -*-
"""
Written in Python with the PyCharm editor.
A basic text-file reader that uses the keyboard library.
"""
import keyboard

xname = input("Enter the file path, then press B to print it in upper case or K to print it in lower case: ")
with open(xname) as xopener:
    words = xopener.read()
upperwords = words.upper()
lowerwords = words.lower()
while True:
    if keyboard.is_pressed('b'):
        print("\n", upperwords)
        break
    if keyboard.is_pressed('k'):
        print("\n", lowerwords)
        break
|
[
"noreply@github.com"
] |
sametdlsk.noreply@github.com
|
63ab4c664e6390740bcf440088b411351a2e1e9a
|
9805edf2b923c74cf72a3cfb4c2c712255256f15
|
/python/322_coin_change.py
|
5bab0ce6427d8e79bee25ffbef31ab0675ca3a91
|
[
"MIT"
] |
permissive
|
jixinfeng/leetcode-soln
|
5b28e49c2879cdff41c608fc03628498939b0e99
|
24cf8d5f1831e838ea99f50ce4d8f048bd46c136
|
refs/heads/master
| 2022-10-12T17:02:53.329565
| 2022-10-06T03:21:56
| 2022-10-06T03:21:56
| 69,371,757
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,300
|
py
|
"""
You are given coins of different denominations and a total amount of money
amount. Write a function to compute the fewest number of coins that you need to
make up that amount. If that amount of money cannot be made up by any
combination of the coins, return -1.
Example 1:
coins = [1, 2, 5], amount = 11
return 3 (11 = 5 + 5 + 1)
Example 2:
coins = [2], amount = 3
return -1.
Note:
You may assume that you have an infinite number of each kind of coin.
Credits:
Special thanks to @jianchao.li.fighter for adding this problem and creating
all test cases.
"""
class Solution(object):
def coinChange(self, coins, amount):
"""
:type coins: List[int]
:type amount: int
:rtype: int
"""
if amount == 0:
return 0
if coins == []:
return -1
        # dp[i] = fewest coins needed to make amount i; -1 marks unreachable amounts
        dp = [0] + [-1] * amount
        for i in range(amount):
            if dp[i] < 0:
                continue  # amount i is unreachable, so it cannot be extended
            for coin in coins:
                if i + coin > amount:
                    continue
                if dp[i + coin] < 0 or dp[i + coin] > dp[i] + 1:
                    dp[i + coin] = dp[i] + 1
        return dp[-1]
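
# Worked trace for coins=[1, 2, 5], amount=11 (dp[i] = fewest coins for amount i):
#   dp = [0, 1, 1, 2, 2, 1, 2, 2, 3, 3, 2, 3]  ->  dp[11] == 3  (11 = 5 + 5 + 1)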
a = Solution()
print(a.coinChange([1,2,5], 11) == 3)
print(a.coinChange([2], 3) == -1)
print(a.coinChange([186,419,83,408], 6249) == 20)
"""
Notes: solved as integer linear programming
https://discuss.leetcode.com/topic/59509/solving-this-as-a-integer-linear-programming
def coinChange(self, coins, amount):
if amount == 0:
return 0
if coins == []:
return -1
import numpy as np
from cvxopt import matrix
from cvxopt.glpk import ilp
c = np.ones(len(coins))
A = np.array([coins])
b = np.array([amount])
G = np.diag(-1 * np.ones(len(coins)))
h = np.zeros(len(coins))
intVars = set(range(len(coins)))
status, isol = ilp(c = matrix(c.astype(float)),
G = matrix(G.astype(float)),
h = matrix(h.astype(float)),
A = matrix(A.astype(float)),
b = matrix(b.astype(float)),
I = intVars)
return int(sum(isol)) if status == 'optimal' else -1
"""
|
[
"ufjfeng@users.noreply.github.com"
] |
ufjfeng@users.noreply.github.com
|
94d1f5e3f98f0e32c793d9288b12588bc92de180
|
1d0a4750e216f301ec49a247bf7bf07cd61fa29f
|
/app/models/upload_application_feature.py
|
bbd46d549bb7aaf9567880cc1cba4b89566f574e
|
[] |
no_license
|
smoothbenefits/BenefitMY_Python
|
52745a11db2cc9ab394c8de7954974e6d5a05e13
|
b7e8474a728bc22778fd24fe88d1918945a8cfc8
|
refs/heads/master
| 2021-03-27T15:57:34.798289
| 2018-04-29T19:04:04
| 2018-04-29T19:04:04
| 24,351,568
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 470
|
py
|
import reversion
from django.db import models
from app.models.upload import Upload
from app.models.sys_application_feature import SysApplicationFeature
@reversion.register
class UploadApplicationFeature(models.Model):
upload = models.ForeignKey(Upload, related_name="upload_application_feature_upload")
application_feature = models.ForeignKey(SysApplicationFeature, related_name="upload_application_feature_app_feature")
feature_id = models.IntegerField()
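
# @reversion.register only records history for saves made inside a revision
# block (sketch, assuming django-reversion's documented API):
#   with reversion.create_revision():
#       instance.save()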
|
[
"eagleofpc@gmail.com"
] |
eagleofpc@gmail.com
|
e77c00645ed7ca19e6c51230353d6d52c9ec8bae
|
f13acd0d707ea9ab0d2f2f010717b35adcee142f
|
/Others/past/past202112-open/f/main.py
|
c830367c026bccfff6cb820c71d467cba13482e6
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
KATO-Hiro/AtCoder
|
126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7
|
bf43320bc1af606bfbd23c610b3432cddd1806b9
|
refs/heads/master
| 2023-08-18T20:06:42.876863
| 2023-08-17T23:45:21
| 2023-08-17T23:45:21
| 121,067,516
| 4
| 0
|
CC0-1.0
| 2023-09-14T21:59:38
| 2018-02-11T00:32:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,173
|
py
|
# -*- coding: utf-8 -*-
def main():
import sys
from collections import deque
input = sys.stdin.readline
a, b = map(int, input().split())
a -= 1
b -= 1
s = [list(input().rstrip()) for _ in range(3)]
dxy = [
[(-1, -1), (-1, 0), (-1, 1)],
[(0, -1), (0, 0), (0, 1)],
[(1, -1), (1, 0), (1, 1)],
]
ndxy = list()
for i in range(3):
for j in range(3):
if s[i][j] == "#":
ndxy.append(dxy[i][j])
size = 9
d = deque()
d.append((a, b))
visited = [[False] * size for _ in range(size)]
while d:
y, x = d.popleft()
if visited[y][x]:
continue
visited[y][x] = True
for dy, dx in ndxy:
nx = x + dx
ny = y + dy
if nx < 0 or nx >= size or ny < 0 or ny >= size:
continue
if visited[ny][nx]:
continue
d.append((ny, nx))
ans = 0
for i in range(size):
for j in range(size):
if visited[i][j]:
ans += 1
print(ans)
# print(visited)
if __name__ == "__main__":
main()
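
# Summary of main(): read a start cell (a, b) on a 9x9 board plus a 3x3 stencil
# of '#' moves, BFS-flood the cells reachable with those offsets, and print the
# count of visited cells (the start cell included).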
|
[
"k.hiro1818@gmail.com"
] |
k.hiro1818@gmail.com
|
2252f3a2885beacd6e8ad2168b39363e3b32dd50
|
feed55181037677698055628ee40b7f1d85510c9
|
/20150622/WidgetFactory_Classes.py
|
fd4614710c2978a9127eeae76d841feb008c0afc
|
[
"BSD-3-Clause"
] |
permissive
|
lily-liang12/Summer_Project_2015
|
4a725c148e0620564874bf6c72c2c886b439a6e1
|
96c6e115dda896f18738dac36ea40c6c9267070f
|
refs/heads/master
| 2021-06-03T15:01:42.536555
| 2016-08-26T08:34:07
| 2016-08-26T08:34:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,021
|
py
|
"""
Author: Delan Huang
Date: 06/22/2015
Purpose: Holds all of the Classes needed for CubeModel_FrontEnd.py
Version: 1.0, PyQt5
Notes:
- Slider Classes not fully functioning.
- To fix this: make class accept defaults, but still create a function
createSlider()
- CubeGraphic Class not universal, may be taken out and moved
into Cube_Model_PySide.py
"""
# imports
import sys
from PyQt5 import QtCore, QtOpenGL, QtWidgets
from PyQt5.QtWidgets import QHBoxLayout, QLabel, QLineEdit, QWidget
from OpenGL import GL
from OpenGL import GLU
class LabelledLineEdit(QWidget):
def __init__(self, labelText="TestLabel"):
super(LabelledLineEdit, self).__init__()
self.label = QLabel(labelText)
self.lineEdit = QLineEdit()
layout = QHBoxLayout()
layout.addWidget(self.label)
layout.addWidget(self.lineEdit)
self.setLayout(layout)
class RotSlider(QtWidgets.QWidget):
def __init__(self, minRange=0, maxRange=360,
singleStep=16, pageStep=16, tickInterval=16):
super(RotSlider, self).__init__()
        self.slider = QtWidgets.QSlider()
        self.slider.setOrientation(QtCore.Qt.Horizontal)
        self.slider.setRange(minRange, maxRange * 16)
        self.slider.setSingleStep(singleStep)
        self.slider.setPageStep(pageStep * 16)
        self.slider.setTickInterval(tickInterval * 16)
        self.slider.setTickPosition(QtWidgets.QSlider.TicksAbove)
self.rotation = 0
class Slider(QtWidgets.QWidget):
def __init__(self):
super(Slider, self).__init__()
self.minRange = 0
self.maxRange = 360
self.singleStep = 16
self.pageStep = 16
        self.tickInterval = 16
        self.slider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
        self.slider.setRange(self.minRange, self.maxRange * 16)
        self.slider.setSingleStep(self.singleStep)
        self.slider.setPageStep(self.pageStep * 16)
        self.slider.setTickInterval(self.tickInterval * 16)
        self.slider.setTickPosition(QtWidgets.QSlider.TicksAbove)
class CubeGraphic(QtOpenGL.QGLWidget):
    # New-style signal objects; PyQt5 removed the old self.emit(QtCore.SIGNAL(...)) API.
    xRotationChanged = QtCore.pyqtSignal(int)
    yRotationChanged = QtCore.pyqtSignal(int)
    zRotationChanged = QtCore.pyqtSignal(int)
    xPositionChanged = QtCore.pyqtSignal(int)
    yPositionChanged = QtCore.pyqtSignal(int)
    scaleChanged = QtCore.pyqtSignal(float)

    def __init__(self):
super(CubeGraphic, self).__init__()
self.object = 0
self.xRot = 0
self.yRot = 0
self.zRot = 0
self.xPos = 0
self.yPos = 0
self.zPos = 0
self.zoom = 5.0
self.lastPos = QtCore.QPoint()
def getXRot(self):
return self.xRot
def getYRot(self):
return self.yRot
def getZRot(self):
return self.zRot
def getXPos(self):
return self.xPos
def getYPos(self):
return self.yPos
def paintGL(self):
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
GL.glLoadIdentity()
GL.glTranslated(0.0, 0.0, -50.0)
GL.glScaled(self.zoom, self.zoom, self.zoom)
GL.glRotated(self.xRot / 16.0, 1.0, 0.0, 0.0)
GL.glRotated(self.yRot / 16.0, 0.0, 1.0, 0.0)
GL.glRotated(self.zRot / 16.0, 0.0, 0.0, 1.0)
GL.glTranslate(-0.5, -0.5, -0.5)
GL.glTranslated(self.xPos / 5.0, self.yPos / 5.0, self.zPos / 5.0)
# GL.glCallList(self.object)
GL.glEnableClientState(GL.GL_VERTEX_ARRAY)
GL.glEnableClientState(GL.GL_COLOR_ARRAY)
GL.glVertexPointerf(self.cubeVtxArray)
GL.glColorPointerf(self.cubeClrArray)
GL.glDrawElementsui(GL.GL_QUADS, self.cubeIdxArray)
def initGeometry(self):
self.cubeVtxArray = (
[[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0],
[1.0, 0.0, 1.0],
[1.0, 1.0, 1.0],
[0.0, 1.0, 1.0]])
self.cubeIdxArray = [
0, 1, 2, 3,
3, 2, 6, 7,
1, 0, 4, 5,
2, 1, 5, 6,
0, 3, 7, 4,
7, 6, 5, 4]
self.cubeClrArray = [
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0],
[1.0, 0.0, 1.0],
[1.0, 1.0, 1.0],
[0.0, 1.0, 1.0]]
def resizeGL(self, width, height):
GL.glViewport(0, 0, width, height)
GL.glMatrixMode(GL.GL_PROJECTION)
GL.glLoadIdentity()
# GL.glOrtho(-1.5, +1.5, -1.5, +1.5, 4.0, 15.0)
GLU.gluPerspective(50.0, width / height, 1.0, 100.0)
GL.glMatrixMode(GL.GL_MODELVIEW)
def initializeGL(self):
GL.glClearColor(0.0, 0.0, 0.0, 1.0)
self.initGeometry()
GL.glShadeModel(GL.GL_FLAT)
GL.glEnable(GL.GL_DEPTH_TEST)
GL.glDisable(GL.GL_CULL_FACE)
def mousePressEvent(self, event):
self.lastPos = QtCore.QPoint(event.pos())
def mouseMoveEvent(self, event):
dx = event.x() - self.lastPos.x()
dy = event.y() - self.lastPos.y()
if event.buttons() & QtCore.Qt.LeftButton:
self.setXRotation(self.xRot + 8 * dy)
self.setYRotation(self.yRot + 8 * dx)
elif event.buttons() & QtCore.Qt.RightButton:
self.setXRotation(self.xRot + 8 * dy)
self.setZRotation(self.zRot + 8 * dx)
self.lastPos = QtCore.QPoint(event.pos())
def setXRotation(self, angle):
angle = self.normalizeAngle(angle)
if angle != self.xRot:
self.xRot = angle
self.emit(QtCore.SIGNAL("xRotationChanged"), self.xRot)
self.updateGL()
def setYRotation(self, angle):
angle = self.normalizeAngle(angle)
if angle != self.yRot:
self.yRot = angle
self.emit(QtCore.SIGNAL("yRotationChanged"), self.yRot)
self.updateGL()
def setZRotation(self, angle):
angle = self.normalizeAngle(angle)
if angle != self.zRot:
self.zRot = angle
self.emit(QtCore.SIGNAL("zRotationChanged(int)"), angle)
self.updateGL()
def setXPosition(self, distance):
if distance != self.xPos:
self.xPos = distance
self.emit(QtCore.SIGNAL("xPositionChanged"), self.xPos)
self.updateGL()
def setYPosition(self, distance):
if distance != self.yPos:
self.yPos = distance
self.emit(QtCore.SIGNAL("yPositionChanged"), self.yPos)
self.updateGL()
# Unsupported Code(no Widget uses this function)
# def setZPosition(self, distance):
# self.updateGL()
def setZoom(self, new_zoom):
if new_zoom != self.zoom:
self.zoom = new_zoom
self.emit(QtCore.SIGNAL("scaleChanged"), self.zoom)
self.updateGL()
def normalizeAngle(self, angle):
while angle < 0:
angle += 360 * 16
while angle > 360 * 16:
angle -= 360 * 16
return angle
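
# Minimal manual test (assumes a desktop OpenGL context; run as a script):
# app = QtWidgets.QApplication(sys.argv)
# cube = CubeGraphic()
# cube.show()
# sys.exit(app.exec_())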
|
[
"noreply@github.com"
] |
lily-liang12.noreply@github.com
|
fed253f3f90185f4ca731d8aadc7097cf2ed874c
|
4b4beafc14f4356ddf129a489ab844ad0d5acce5
|
/flask_dynamo_session/__version__.py
|
843f7e5f46c5d0a9c3021e1d332c193e8bf82fc7
|
[
"Apache-2.0"
] |
permissive
|
usethecodeluke/flask-dynamo-session
|
c87f98253e751b88aeddf6c60c2d6e6c6af458cc
|
8e4aee30e2c5aedc57841c1a460311a7ff6e0cd1
|
refs/heads/master
| 2021-06-22T08:40:34.423007
| 2017-08-17T01:16:46
| 2017-08-17T01:33:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 83
|
py
|
# -*- coding: utf-8 -*-
"""flask_dyanmo_session version."""
__version__ = "0.0.3"
|
[
"jaustinpage@gmail.com"
] |
jaustinpage@gmail.com
|
3abf4866e270b49812155e5abb75ec40a93b17ad
|
fe9ddaa70470fd58053607a4157c37390ba764e8
|
/fixture/group.py
|
03e928138e8ffb2705a7e8eec7dfba237a5dd193
|
[
"Apache-2.0"
] |
permissive
|
yakovlev-v/python_training
|
68a311e6dd62e83154db3cfc6b289a46a4fb7305
|
7a6dcc1ff66ff5776f39a7be3a6c72d494c8db2f
|
refs/heads/master
| 2021-02-07T07:49:51.181052
| 2020-03-21T14:29:54
| 2020-03-21T14:29:54
| 243,999,518
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,595
|
py
|
class GroupHelper:
def __init__(self, app):
self.app = app
def open_groups_page(self):
driver = self.app.driver
driver.find_element_by_link_text("groups").click()
def create(self, group):
driver = self.app.driver
        # open the groups page
self.open_groups_page()
driver.find_element_by_name("new").click()
        # fill in the group data
driver.find_element_by_name("group_name").click()
driver.find_element_by_name("group_name").clear()
driver.find_element_by_name("group_name").send_keys(group.name)
driver.find_element_by_name("group_header").click()
driver.find_element_by_name("group_header").clear()
driver.find_element_by_name("group_header").send_keys(group.header)
driver.find_element_by_name("group_footer").click()
driver.find_element_by_name("group_footer").clear()
driver.find_element_by_name("group_footer").send_keys(group.footer)
driver.find_element_by_xpath("//form[@action='/addressbook/group.php']").click()
driver.find_element_by_name("submit").click()
self.open_groups_page()
def delete_first_group(self):
driver = self.app.driver
        # open the groups page
self.open_groups_page()
        # select the first group
driver.find_element_by_name("selected[]").click()
        # delete the first group
driver.find_element_by_name("delete").click()
self.open_groups_page()
|
[
"vm-win7@users.noreply.github.com"
] |
vm-win7@users.noreply.github.com
|
a35dafa3096bfc1040e290c5dbf1e2a25c12f3f8
|
008747345b9d13ba50f164319224089dd72af7d0
|
/plot_prices.py
|
a53fa42dcaf725edf4f2d7e9649cec13600c46e0
|
[] |
no_license
|
StephanHeijl/SMH500
|
2a6cd324ed42fc1573a245c717a3ff12b9235556
|
c0c9bc2e7ace057502ebfe89ba3d6e8da9a96a9a
|
refs/heads/master
| 2021-08-30T17:32:24.162962
| 2017-12-18T20:35:14
| 2017-12-18T20:35:14
| 106,842,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
import pandas
import matplotlib.pyplot as plt
# Transpose so each row of coins.csv becomes one line series in the plot.
data = pandas.read_csv("coins.csv", index_col=0).T
data.plot(kind="line")
plt.show()
|
[
"root@MEMEMACHINE.localdomain"
] |
root@MEMEMACHINE.localdomain
|
cf8868ac8a9752a524f0f70c7583769be781029b
|
2b3a7ceb8e89e730afe9438cd6b8ccf9dddfdedf
|
/perimeters/urls.py
|
b411302ffe45411acd2d4ed1620879969bf61afc
|
[] |
no_license
|
komerela/perimapgit
|
bcf3bba9fc8b58ebcf1be2fd5dd5559d25a6a66a
|
fb7977dbdd9125ad20722207e6e5260129e30085
|
refs/heads/master
| 2020-08-27T19:15:42.833353
| 2019-12-10T01:56:41
| 2019-12-10T01:56:41
| 217,468,576
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 621
|
py
|
from django.conf.urls import url, include
from django.urls import path
from .views import PerimetersView,PerimeterCreateView,PerimeterDetailQrView,PerimeterDetailView,PerimeterDetailMapView
app_name="perimeters"
urlpatterns=[
path('',PerimetersView.as_view(),name="all"),
path('create/',PerimeterCreateView.as_view(),name="create"),
url(r'^view/(?P<pk>[0-9A-Fa-f-]+)/$',PerimeterDetailView.as_view(),name="view"),
url(r'^map_view/(?P<pk>[0-9A-Fa-f-]+)/$',PerimeterDetailMapView.as_view(),name="map_view"),
url(r'^qr_view/(?P<pk>[0-9A-Fa-f-]+)/$',PerimeterDetailQrView.as_view(),name="qr_view"),
]
|
[
"kmwongera@icloud.com"
] |
kmwongera@icloud.com
|
8418197daef9b6bb644ae8cf89eb63d23e702cc2
|
46c5914f08fac28712551fd6a779052c71ed6d2c
|
/somedifferentstuff/modules/environment.py
|
b08d72cd2c76326fd4027772a7c651c5cdf82279
|
[] |
no_license
|
H4ckTh4t/somedifferentstuff
|
16c4519b2b767e1cf54d6822bf19a62cf16ead4d
|
a93033ece32d7a5aa9aa680718215d12f0e90125
|
refs/heads/master
| 2020-04-06T06:44:24.498659
| 2016-09-21T20:57:40
| 2016-09-21T20:57:40
| 68,715,462
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 103
|
py
|
import os
def run(**args):
print "[*] In environment module."
return str(os.environ)
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
6913bc6610f8a603ad9877caa3c1c4c6821c0e73
|
c0a1551a75e3f37041d704cd39dd03240fafbf1a
|
/src/contacts_api/settings.py
|
1f873dad19624e132c83080d288c51f53afb5cf1
|
[] |
no_license
|
zemanel/contact-server
|
d650d45dbbde2f7dfb0f7ccb067b6beae548ea35
|
fb521e6b59a70f55018ab40995216e98dc8d5748
|
refs/heads/master
| 2022-12-12T07:47:18.983752
| 2017-08-16T01:19:45
| 2017-08-16T01:19:45
| 100,430,568
| 1
| 0
| null | 2022-11-22T01:56:32
| 2017-08-16T00:27:59
|
Python
|
UTF-8
|
Python
| false
| false
| 4,245
|
py
|
"""
Django settings for contacts_api project.
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = os.environ['DJANGO_SECRET_KEY']
# Env vars are strings, so bool('False') would be True; compare explicitly.
DEBUG = os.environ.get('DJANGO_DEBUG', '') in ('1', 'true', 'True')
ALLOWED_HOSTS = os.environ['DJANGO_ALLOWED_HOSTS'].split()
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'contacts'
]
MIDDLEWARE = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
]
DJANGO_DEV = os.environ.get('DJANGO_DEV', '') in ('1', 'true', 'True')
INTERNAL_IPS = (
'127.0.0.1',
)
ROOT_URLCONF = 'contacts_api.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'contacts_api.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': os.environ.get('DJANGO_DB_ENGINE', 'django.db.backends.postgresql'),
'NAME': os.environ.get('DJANGO_DB_NAME', ''),
'USER': os.environ.get('DJANGO_DB_USER', ''),
'PASSWORD': os.environ.get('DJANGO_DB_PASSWORD', ''),
'HOST': os.environ.get('DJANGO_DB_HOST', ''),
'PORT': os.environ.get('DJANGO_DB_PORT', ''),
'ATOMIC_REQUESTS': True
}
}
# Password validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATIC_URL = '/static/'
CONTACTS_SPREADSHEET_URL = os.environ.get('CONTACTS_SPREADSHEET_URL')
REST_FRAMEWORK = {
'PAGE_SIZE': 100,
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
)
}
CACHE_MIDDLEWARE_SECONDS = 600
CACHES = {
'default': {
'BACKEND': os.environ.get('DJANGO_CACHE_BACKEND', ''),
'LOCATION': os.environ.get('DJANGO_CACHE_LOCATION', '')
}
}
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'media')
MEDIA_URL = '/media/'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'INFO',
'propagate': True,
}
},
'root': {
'handlers': ['console'],
'level': 'DEBUG'
}
}
|
[
"zemanel@zemanel.eu"
] |
zemanel@zemanel.eu
|
c934a24e9876e7a6dca9a1fabac5327edbc81a94
|
aac8df9f0231e3b7b58d360b394b0c9873ecf6f6
|
/src/famso/urls.py
|
c086f985351a899ee74f3ecb6da83fe0ce30e3d6
|
[] |
no_license
|
seivui/nicole
|
d6d7931b49d5048b29111175018a0ca59fc28026
|
f8e902643beb43b0fc1bade8460a3b2b8ccd3f68
|
refs/heads/master
| 2020-05-17T17:58:13.343789
| 2014-09-16T18:37:12
| 2014-09-16T18:37:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 294
|
py
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
url(r'^$', 'paris.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
)
|
[
"samaire@samaire.net"
] |
samaire@samaire.net
|
e8ff31f4eadf041a761df710a96602fabc804ed5
|
c2bf65f35ac84c93b815c64eee4bfb15e9c1a0ee
|
/315.计算右侧小于当前元素的个数.py
|
ef6cfc298f0d912983465d1b37d08ec161dd7143
|
[] |
no_license
|
hhs44/leetcode_learn
|
e7651548e41176b1fd56a1565effbe076d6b280a
|
fd4f51a4803202a2e4fe3d97ef2b54adc218e691
|
refs/heads/master
| 2022-03-06T14:35:51.891389
| 2022-02-09T14:55:13
| 2022-02-09T14:55:13
| 250,731,211
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 947
|
py
|
#
# @lc app=leetcode.cn id=315 lang=python3
#
# [315] Count of Smaller Numbers After Self
#
# @lc code=start
from typing import List

class TreeNode:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
self.count = 0
class Solution:
def countSmaller(self, nums: List[int]) -> List[int]:
root, n = None, len(nums)
res = [0 for _ in range(n)]
for i in reversed(range(n)):
root = self.insertNode(root, nums[i], res, i)
return res
def insertNode(self, root, val, res, res_index):
        if root is None:
root = TreeNode(val)
elif val <= root.val:
root.count += 1
root.left = self.insertNode(root.left, val, res, res_index)
elif val > root.val:
res[res_index] += root.count + 1
root.right = self.insertNode(root.right,val,res,res_index)
return root
# @lc code=end
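# Illustrative check (not part of the original submission):
#   Solution().countSmaller([5, 2, 6, 1])  ->  [2, 1, 1, 0]
# (2 and 1 follow 5; 1 follows 2; 1 follows 6; nothing follows 1.)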
|
[
"1159986871@qq.com"
] |
1159986871@qq.com
|
c5c3635c53983146a06cb84d5d46930ffef0b0db
|
1cc404565448f128156d640ffe5d1c4f5a1fc783
|
/socket/udp_chat2.py
|
a36ea45c0692f08f57ea4eabde5f24dd666490c0
|
[] |
no_license
|
apricotvillage/PyWithJu
|
091baa0df5750af108537de61c79522134be6567
|
2bc685778ac91c3b0db5c552f3c65947a5ffb44e
|
refs/heads/master
| 2022-03-31T18:20:57.230477
| 2020-02-26T16:19:24
| 2020-02-26T16:19:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 744
|
py
|
import threading
import socket
def rece(sk):
while 1:
data, addr = sk.recvfrom(1024)
if data == b'exit':
break
print(data.decode())
def send(sk):
while 1:
msg_input = input("please input message:")
        # send to the peer chat bound on port 9080; the original empty host ''
        # is replaced with an explicit localhost address so the destination is unambiguous
        sk.sendto(msg_input.encode(), ('127.0.0.1', 9080))
if msg_input == "exit":
print('I am leaving.')
break
def main():
    # create the UDP socket
udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # bind the local IP and port number
udp_socket.bind(('', 9081))
t1 = threading.Thread(target=rece, args=(udp_socket,))
t2 = threading.Thread(target=send, args=(udp_socket,))
t1.start()
t2.start()
if __name__ == '__main__':
main()
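# Usage sketch (an assumption, not in the original file): run a mirrored copy
# of this script as the peer, with the two ports swapped, e.g.
#   udp_socket.bind(('', 9080))                          # peer listens where we send
#   sk.sendto(msg_input.encode(), ('127.0.0.1', 9081))   # peer replies to our bound port
# Typing "exit" ends the send loop; receiving b'exit' ends the receive loop.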
|
[
"wuyichang1990@gmail.com"
] |
wuyichang1990@gmail.com
|
b46bb71bada65f6a8c042daf048b538198671f01
|
d09b549108ba28da1723896de4415246a36dffa0
|
/LGBM/Optimimzacion_LGBM.py
|
8a1f16326ca9b3e75750924dd93c3fe3a90a10e8
|
[] |
no_license
|
JorgeAsmat/Ia_Predictor
|
17a500e21a71a4a095fd151201221d0c5b185b8d
|
cbce6ddc2a2b55e8339f9b624b38697b7002e6fa
|
refs/heads/main
| 2023-02-08T11:43:11.071540
| 2020-12-30T18:13:25
| 2020-12-30T18:13:25
| 300,683,843
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,555
|
py
|
# Libraries
import numpy as np  # linear algebra
import pandas as pd  # data manipulation
import matplotlib.pyplot as plt  # flexible plotting
import seaborn as sns  # common plots
from scipy.stats import variation  # coefficient of variation
import pdb  # debugging library, if needed
import warnings
#Fe
DataFrame_filtrado = pd.read_csv("Data_Base/Alpha 1.csv")  # forward slash avoids the "\A" escape pitfall
DataFrame_filtrado_CMPX2 = DataFrame_filtrado.copy()
# Filter the output column: we only want the non-anomalous CMPX rows
DataFrame_filtrado_CMPX2 = DataFrame_filtrado_CMPX2.loc[(0 < DataFrame_filtrado_CMPX2['CMPX DE PARO POR URDIMBRE'])
                                                        & (DataFrame_filtrado_CMPX2['CMPX DE PARO POR URDIMBRE'] < 7.5)
                                                        ]
# Keep the filtered frame for the rest of the script
DataFrame_filtrado = DataFrame_filtrado_CMPX2.copy()
list_predictors = [
'TITULO_U','NUM_CABOS_U','Telar','ES_PEINADO_NO_CARDADO_U','ES_PEINADO_NO_CARDADO_T1','LIGAMENTO_FONDO','LIGAMENTO_ORILLO',
'DIENTES/CM_PEINE','HILOS/DIENTE_FONDO','HILOS/DIENTE_ORILLO','ANCHO_PEINE','ANCHO_CRUDO','%E_URDIMBRE','CUADROS_FONDO',
'PORC_HILOS_FONDO','TOTAL_HILOS/ANCHO_CRUDO','PASADAS/CM_T1','PORC_PASADAS/CM_T1' ,'RATIO_CONS1_CONS2',
'GR/MTL_U','GR/MTL_T1','TOTAL_PASADAS','PORC_GR/MTL_U','PORC_GR/MTL_T1','TOTAL_GR/MTL', 'MAQUINA_PINZAS',
'NUM_COLORES_U','NUM_COLORES_T','AGUA ','LUMINOSIDAD_T_1', 'LUMINOSIDAD_U_1', 'LUMINOSIDAD_T_2', 'LUMINOSIDAD_U_2', 'LUMINOSIDAD_T_3',
'LUMINOSIDAD_U_3', 'LUMINOSIDAD_T_4', 'LUMINOSIDAD_U_4', 'LUMINOSIDAD_T_5', 'LUMINOSIDAD_U_5','LUMINOSIDAD_T_6', 'LUMINOSIDAD_U_6',
'FACT_COB_U', 'FACT_COB_T', 'PORC_FACT_COB_U','FACT_COB_TOTAL_REAL', 'TUPIDEZ','Ne_prom','CV% Ne_prom','cN/tex_prom','TPI_prom','FT_prom','CV% TPI_prom',
'E%_prom','CV% E_prom','CV%R_prom','CVm%_prom','I_prom','PD(-40%)_prom','PD(-50%)_prom','PG(+35%)_prom','PG(+50%)_prom','NEPS(+140%)_prom','NEPS(+200%)_prom',
'H_prom','Sh_prom','var_Ne_prom','var_cN/tex_prom','var_TPI_prom','var_E%_prom','%falla_R_prom','%falla_E_prom'
]
list_targets = ['CMPX DE PARO POR URDIMBRE']
predictores_numericos = [i for i in list_predictors if 'float' in str(DataFrame_filtrado[i].dtype) or 'int' in str(DataFrame_filtrado[i].dtype)]
predictores_categoricos = [i for i in list_predictors if i not in predictores_numericos]
##########################
# Check the correlations
##########################
# drop highly correlated variables
df=DataFrame_filtrado.copy()
# Create correlation matrix
corr_matrix = df[predictores_categoricos+predictores_numericos].corr().abs()
# Select upper triangle of correlation matrix
upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))  # plain bool: np.bool is deprecated
# Find index of feature columns with correlation greater than 0.95
to_drop = [column for column in upper.columns if any(upper[column] > 0.95)]
for i in to_drop:
if i in predictores_numericos:
predictores_numericos.remove(i)
elif i in predictores_categoricos:
predictores_categoricos.remove(i)
# df.drop(df[to_drop], axis=1,inplace=True)
######################################################
# Split the variables into training and validation sets
######################################################
from sklearn.model_selection import train_test_split
X=df[list_predictors]
y=df[list_targets[0]]
X_train, X_test, y_train, y_test = train_test_split(X , y , test_size=0.33, random_state=0)
"""Tratamiento de variables"""
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
# Prepare the categorical predictors as strings (OneHotEncoder is imported above but never actually applied)
OH_cols_train = pd.DataFrame(X_train[predictores_categoricos].astype(str))
OH_cols_test = pd.DataFrame(X_test[predictores_categoricos].astype(str))
# Rebuilding the frames drops the indices, so restore them
OH_cols_train.index = X_train.index
OH_cols_test.index = X_test.index
# Keep only the numeric columns; the categorical ones are re-attached below
num_X_train = X_train[predictores_numericos]
num_X_test = X_test[predictores_numericos]
# Re-attach the categorical columns next to the numerical features
OH_X_train = pd.concat([num_X_train, OH_cols_train], axis=1)
OH_X_test = pd.concat([num_X_test, OH_cols_test], axis=1)
# LightGBM identifies categorical features by dtype, so mark them as "category"
for feature in predictores_categoricos:
    OH_X_train[feature] = pd.Series(OH_X_train[feature], dtype="category")
    OH_X_test[feature] = pd.Series(OH_X_test[feature], dtype="category")  # fixed: was copying from the train frame
#####################################
# Start the LightGBM optimization
#####################################
SEED = 314159265
from hyperopt import STATUS_OK, Trials, fmin, hp, tpe
from sklearn.metrics import r2_score
import lightgbm
from sklearn.model_selection import cross_validate
print(lightgbm.__version__)
def percentage_error(actual, predicted):
res = np.empty(actual.shape)
for j in range(actual.shape[0]):
if actual[j] != 0:
res[j] = (actual[j] - predicted[j]) / actual[j]
else:
res[j] = predicted[j] / np.mean(actual)
return res
def mean_absolute_percentage_error(y_true, y_pred):
return np.mean(np.abs(percentage_error(np.asarray(y_true), np.asarray(y_pred)))) * 100
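# Worked example (illustrative, not in the original script): with actuals
# [100, 0] and predictions [90, 10], percentage_error yields
# [ (100-90)/100, 10/mean([100, 0]) ] = [0.1, 0.2], so
# mean_absolute_percentage_error([100, 0], [90, 10]) == 15.0; the
# zero-actual case divides by the mean instead of blowing up.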
def mape_objective_function(dtrain, preds):
labels = dtrain.get_label()
grad = (preds - labels) / (0.2 + labels * np.abs(preds - labels))
hess = 0.1 + np.zeros(len(preds))
return grad, hess
def score(params):
    # Pick the boosting type out of the nested dict built in the search space
    subsample = params['boosting_type'].get('subsample', 1.0)
    # assign the subsample value that matches the chosen boosting type
    params['boosting_type'] = params['boosting_type']['boosting_type']
    params['subsample'] = subsample
    # cast these hyperparameters to int
    for parameter_name in ['num_leaves', 'subsample_for_bin', 'min_child_samples']:
        params[parameter_name] = int(params[parameter_name])
    print("Training with parameters: ")
print(params)
lgbm_model = lightgbm.LGBMRegressor(class_weight= params['class_weight'],
boosting_type= params['boosting_type'],
num_leaves= params['num_leaves'],
subsample= params['subsample'],
learning_rate= params['learning_rate'],
subsample_for_bin=params['subsample_for_bin'],
min_child_samples= params['min_child_samples'],
reg_alpha=params['reg_alpha'],
reg_lambda=params['reg_lambda'],
colsample_bytree=params['colsample_bytree']
,n_jobs= -1 )
# lgbm_model.fit(OH_X_train,y_train)
# predictions = lgbm_model.predict(OH_X_test)
# score = mean_absolute_percentage_error(y_test, predictions)
CrossValMean = 0
score_rmse = {}
score_rmse = cross_validate(estimator = lgbm_model, X = OH_X_train, y = y_train, cv = 3
, scoring= 'neg_root_mean_squared_error' , n_jobs= -1)
CrossValMean = -1 * score_rmse['test_score'].mean()
score = CrossValMean
print("\tScore {0}\n\n".format(score))
loss = score
return {'loss': loss, 'status': STATUS_OK}
def optimize(
#trials,
random_state=SEED):
"""
Esta es una funcion de optimizacion dado un espacio de busqueda
para encontrar los mejores hyperparametros de un lightgbm con un
evaluacion de mape
"""
    # To evaluate the LGBM parameters, see:
# https://lightgbm.readthedocs.io/en/latest/Parameters.html
space = {
'class_weight': hp.choice('class_weight', [None, 'balanced']),
'boosting_type': hp.choice('boosting_type', [{'boosting_type': 'gbdt', 'subsample': hp.uniform('gdbt_subsample', 0.5, 1)},
{'boosting_type': 'dart', 'subsample': hp.uniform('dart_subsample', 0.5, 1)},
{'boosting_type': 'goss', 'subsample': 1.0}]),
'num_leaves': hp.quniform('num_leaves', 30, 150, 1),
'learning_rate': hp.loguniform('learning_rate', np.log(0.01), np.log(0.2)),
'subsample_for_bin': hp.quniform('subsample_for_bin', 20000, 300000, 20000),
'min_child_samples': hp.quniform('min_child_samples', 20, 500, 5),
'reg_alpha': hp.uniform('reg_alpha', 0.0, 1.0),
'reg_lambda': hp.uniform('reg_lambda', 0.0, 1.0),
'colsample_bytree': hp.uniform('colsample_by_tree', 0.6, 1.0)
}
    # Use fmin to find the best hyperparameters
best = fmin(score, space, algo=tpe.suggest,
# trials=trials,
max_evals=80)
return best
best_hyperparams = optimize(
#trials
)
print("Los mejores hiperparametros son: ", "\n")
print(best_hyperparams)
##################################
# Write out the best results
##################################
best_params = best_hyperparams  # renamed: `dict` shadowed the built-in
f = open("Mejores_resultados_LGBM.txt", "w")
f.write(str(best_params))
f.close()
|
[
"asmat.jorge@pucp.edu.pe"
] |
asmat.jorge@pucp.edu.pe
|
27ba9b528c7aa59135899b8ba009ff18cdc8f98c
|
8da45a19b8f02ddabb8a2ffba7cacaafd7f46cfc
|
/passenger_wsgi.py
|
2e05e5eba87bf375ce1e5ad4b0dd76b1c463daa5
|
[] |
no_license
|
gregcodethem/blogsite_project
|
2cff5627c5a4f2dad207717dd2a5b41137612b3a
|
58b3a44f8646c48b0756354a6dc68ff75289e3e0
|
refs/heads/master
| 2023-01-09T21:45:15.428459
| 2019-12-01T21:47:16
| 2019-12-01T21:47:16
| 215,317,158
| 1
| 0
| null | 2022-12-27T15:36:12
| 2019-10-15T14:17:40
|
CSS
|
UTF-8
|
Python
| false
| false
| 726
|
py
|
import sys, os
cwd = os.getcwd()
INTERP = cwd+"/env/bin/python"
#INTERP is present twice so that the new python interpreter
#knows the actual executable path
if sys.executable != INTERP:
os.execl(INTERP, INTERP, *sys.argv)
cwd = os.getcwd()
sys.path.append(cwd)
sys.path.append(cwd + '/blogsite_project/blogsite') #You must add your project here
sys.path.insert(0, cwd + '/env/bin')
sys.path.insert(0, cwd + '/env/lib/python3.6/site-packages')
os.environ['DJANGO_SETTINGS_MODULE'] = "blogsite.settings.production"
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
#if sys.hexversion < 0x2060000: os.execl("/home/shellgreg/.envs/lucidcooking/bin/python", "python3.6", *sys.argv)
|
[
"gregname@reliant.dreamhost.com"
] |
gregname@reliant.dreamhost.com
|
625da2286613765bcf9ffe9e039458ad95265791
|
437fe22e3474686608135254fd82db79a49f8660
|
/depthai-speaks_OAK-D.py
|
e0519a6eaa1fa7b4b4b3ff9cd7069fd08e58dd85
|
[] |
no_license
|
bharath5673/depthai-speaks
|
c74eb9575d8fc83ed9e0d0ef5d68ae3a55427b1e
|
b6f9b0d525251ec667f26c91d56389c58a1a118d
|
refs/heads/main
| 2023-05-14T15:33:20.350986
| 2021-06-05T11:14:33
| 2021-06-05T11:14:33
| 372,469,813
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,046
|
py
|
#!/usr/bin/env python3
from pathlib import Path
import sys
import cv2
import depthai as dai
import numpy as np
import time
import os
import imutils
import subprocess
from gtts import gTTS
from pydub import AudioSegment
'''
Spatial Tiny-yolo example
Performs inference on RGB camera and retrieves spatial location coordinates: x,y,z relative to the center of depth map.
Can be used for tiny-yolo-v3 or tiny-yolo-v4 networks
'''
# Tiny yolo v3/4 label texts
labelMap = [
"person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train",
"truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench",
"bird", "cat", "dog", "horse", "sheep", "cow", "elephant",
"bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie",
"suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat",
"baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup",
"fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich",
"orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake",
"chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor",
"laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven",
"toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors",
"teddy bear", "hair drier", "toothbrush"
]
syncNN = True
# Get argument first
nnBlobPath = 'tiny-yolo-v4_openvino_2021.2_6shave.blob'
if not Path(nnBlobPath).exists():
import sys
raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"')
# Start defining a pipeline
pipeline = dai.Pipeline()
# Define a source - color camera
colorCam = pipeline.createColorCamera()
spatialDetectionNetwork = pipeline.createYoloSpatialDetectionNetwork()
monoLeft = pipeline.createMonoCamera()
monoRight = pipeline.createMonoCamera()
stereo = pipeline.createStereoDepth()
colorCam.setPreviewSize(416, 416)
colorCam.setBoardSocket(dai.CameraBoardSocket.RGB)
colorCam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) #1080
# colorCam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_12_MP) #3040
# colorCam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K) #2160
colorCam.setVideoSize(1000, 1000)
colorCam.setInterleaved(False)
colorCam.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
xoutRgb = pipeline.createXLinkOut()
xoutNN = pipeline.createXLinkOut()
xoutBoundingBoxDepthMapping = pipeline.createXLinkOut()
xoutDepth = pipeline.createXLinkOut()
xoutRgb1 = pipeline.createXLinkOut()
xoutRgb.setStreamName("rgb")
xoutNN.setStreamName("detections")
xoutBoundingBoxDepthMapping.setStreamName("boundingBoxDepthMapping")
xoutDepth.setStreamName("depth")
xoutRgb1.setStreamName("video")
xoutRgb1.input.setBlocking(False)
xoutRgb1.input.setQueueSize(1)
colorCam.video.link(xoutRgb1.input)
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)
# setting node configs
stereo.setOutputDepth(True)
stereo.setConfidenceThreshold(255)
spatialDetectionNetwork.setBlobPath(nnBlobPath)
spatialDetectionNetwork.setConfidenceThreshold(0.5)
spatialDetectionNetwork.input.setBlocking(False)
spatialDetectionNetwork.setBoundingBoxScaleFactor(0.5)
spatialDetectionNetwork.setDepthLowerThreshold(100)
spatialDetectionNetwork.setDepthUpperThreshold(5000)
# Yolo specific parameters
spatialDetectionNetwork.setNumClasses(80)
spatialDetectionNetwork.setCoordinateSize(4)
spatialDetectionNetwork.setAnchors(np.array([10,14, 23,27, 37,58, 81,82, 135,169, 344,319]))
spatialDetectionNetwork.setAnchorMasks({ "side26": np.array([1,2,3]), "side13": np.array([3,4,5]) })
spatialDetectionNetwork.setIouThreshold(0.5)
# Create outputs
monoLeft.out.link(stereo.left)
monoRight.out.link(stereo.right)
colorCam.preview.link(spatialDetectionNetwork.input)
if syncNN:
spatialDetectionNetwork.passthrough.link(xoutRgb.input)
else:
colorCam.preview.link(xoutRgb.input)
spatialDetectionNetwork.out.link(xoutNN.input)
spatialDetectionNetwork.boundingBoxMapping.link(xoutBoundingBoxDepthMapping.input)
stereo.depth.link(spatialDetectionNetwork.inputDepth)
spatialDetectionNetwork.passthroughDepth.link(xoutDepth.input)
texts=[]
detected=[]
# Pipeline is defined, now we can connect to the device
with dai.Device(pipeline) as device:
# Start pipeline
device.startPipeline()
# Output queues will be used to get the rgb frames and nn data from the outputs defined above
previewQueue = device.getOutputQueue(name="rgb", maxSize=1, blocking=False)
detectionNNQueue = device.getOutputQueue(name="detections", maxSize=4, blocking=False)
xoutBoundingBoxDepthMapping = device.getOutputQueue(name="boundingBoxDepthMapping", maxSize=4, blocking=False)
depthQueue = device.getOutputQueue(name="depth", maxSize=4, blocking=False)
out = device.getOutputQueue(name="video", maxSize=4, blocking=False)
print("\n\npress 'd' on screen for Speech synthesis")
frame = None
detections = []
startTime = time.monotonic()
counter = 0
fps = 0
color = (255, 255, 255)
while True:
inPreview = previewQueue.get()
inNN = detectionNNQueue.get()
depth = depthQueue.get()
output = out.get()
k=0xFF & cv2.waitKey(1)
counter+=1
current_time = time.monotonic()
if (current_time - startTime) > 1 :
fps = counter / (current_time - startTime)
counter = 0
startTime = current_time
# frame = inPreview.getCvFrame()
depthFrame = depth.getFrame()
frame = output.getCvFrame()
# frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
depthFrameColor = cv2.equalizeHist(depthFrameColor)
depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)
detections = inNN.detections
if len(detections) != 0:
boundingBoxMapping = xoutBoundingBoxDepthMapping.get()
roiDatas = boundingBoxMapping.getConfigData()
for roiData in roiDatas:
roi = roiData.roi
roi = roi.denormalize(depthFrameColor.shape[1], depthFrameColor.shape[0])
topLeft = roi.topLeft()
bottomRight = roi.bottomRight()
xmin = int(topLeft.x)
ymin = int(topLeft.y)
xmax = int(bottomRight.x)
ymax = int(bottomRight.y)
cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX)
# If the frame is available, draw bounding boxes on it and show the frame
height = frame.shape[0]
width = frame.shape[1]
for detection in detections:
# Denormalize bounding box
x1 = int(detection.xmin * width)
x2 = int(detection.xmax * width)
y1 = int(detection.ymin * height)
y2 = int(detection.ymax * height)
try:
label = labelMap[detection.label]
except:
label = detection.label
cv2.putText(frame, str(label), (x1 + 10, y1 + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)
cv2.putText(frame, "{:.2f}".format(detection.confidence*100), (x1 + 10, y1 + 35), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,0))
cv2.putText(frame, f"X: {int(detection.spatialCoordinates.x)} mm", (x1 + 10, y1 + 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255))
cv2.putText(frame, f"Y: {int(detection.spatialCoordinates.y)} mm", (x1 + 10, y1 + 65), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255))
cv2.putText(frame, f"Z: {int(detection.spatialCoordinates.z)} mm", (x1 + 10, y1 + 80), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255))
cv2.rectangle(frame, (x1, y1), (x2, y2), color, cv2.FONT_HERSHEY_SIMPLEX)
if k == ord('d'):
centerX, centerY = x1,y1
if centerX <= width/3:
W_pos = "left "
elif centerX <= (width/3 * 2):
W_pos = "center "
else:
W_pos = "right "
if centerY <= height/3:
H_pos = "top "
elif centerY <= (height/3 * 2):
H_pos = "mid "
else:
H_pos = "bottom "
# texts.append("obstical detected at "+H_pos + W_pos + "as" +label)
# texts.append(label +" detected at "+H_pos + W_pos )
texts.append(label +" at "+H_pos + W_pos )
detected.append(label)
print(texts)
description = ', '.join(texts)
tts = gTTS(description, lang='en')
tts.save('tts.mp3')
tts = AudioSegment.from_mp3("tts.mp3")
subprocess.call(["ffplay", "-nodisp", "-autoexit", "tts.mp3"])
else:
pass
cv2.putText(frame, "NN fps: {:.2f}".format(fps), (2, frame.shape[0] - 4), cv2.FONT_HERSHEY_SIMPLEX, 0.4, color)
# cv2.imshow("depth", depthFrameColor)
# cv2.imshow("rgb", frame)
cv2.imshow("video", frame)
if k == ord('q'):
break
    cv2.destroyAllWindows()
    if os.path.exists("tts.mp3"):
        os.remove("tts.mp3")  # clean up the synthesized audio, if any was created
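# Illustrative helper (an assumption, not in the original script): the
# left/center/right and top/mid/bottom labels above are just thirds of the frame.
def region_label(x, y, width, height):
    col = "left " if x <= width / 3 else ("center " if x <= 2 * width / 3 else "right ")
    row = "top " if y <= height / 3 else ("mid " if y <= 2 * height / 3 else "bottom ")
    return row + col  # e.g. region_label(100, 50, 900, 900) -> "top left "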
|
[
"s.bharath.2000@gmail.com"
] |
s.bharath.2000@gmail.com
|
74207abb588e1340233fa59944d778fb9dbab69b
|
f41471a3bff66c763b8d60f0280ac67235ecbb62
|
/gewittergefahr/gg_utils/general_utils.py
|
63593eb721d5952ee8c90c94b05d850762a398cc
|
[
"MIT"
] |
permissive
|
cil0834/GewitterGefahr
|
0f43878ba40921881c077c2218446f5fab18ba9f
|
699b995b1b90344022b1644d4b758e790402894e
|
refs/heads/master
| 2020-06-23T15:59:22.718914
| 2019-07-24T03:59:31
| 2019-07-24T03:59:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,621
|
py
|
"""General helper methods (ones that don't belong in another "utils" module)."""
import math
import numpy
from gewittergefahr.gg_utils import error_checking
def find_nearest_value(sorted_input_values, test_value):
"""Finds nearest value in array to test value.
This method is based on the following:
https://stackoverflow.com/posts/26026189/revisions
:param sorted_input_values: 1-D numpy array. Must be sorted in ascending
order.
:param test_value: Test value.
:return: nearest_value: Nearest value in `sorted_input_values` to
`test_value`.
:return: nearest_index: Array index of nearest value.
"""
nearest_index = numpy.searchsorted(
sorted_input_values, test_value, side='left')
subtract_one = nearest_index > 0 and (
nearest_index == len(sorted_input_values) or
math.fabs(test_value - sorted_input_values[nearest_index - 1]) <
math.fabs(test_value - sorted_input_values[nearest_index])
)
if subtract_one:
nearest_index -= 1
return sorted_input_values[nearest_index], nearest_index
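# Illustrative usage (not part of the original module):
#   find_nearest_value(numpy.array([1., 3., 7.]), 4.)  ->  (3.0, 1)
# 4 is closer to 3 (index 1) than to 7, so the left neighbour wins.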
def split_array_by_nan(input_array):
"""Splits numpy array into list of contiguous subarrays without NaN.
:param input_array: 1-D numpy array.
:return: list_of_arrays: 1-D list of 1-D numpy arrays. Each numpy array is
without NaN.
"""
error_checking.assert_is_real_numpy_array(input_array)
error_checking.assert_is_numpy_array(input_array, num_dimensions=1)
return [
input_array[i] for i in
numpy.ma.clump_unmasked(numpy.ma.masked_invalid(input_array))
]
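# Illustrative usage (not part of the original module):
#   split_array_by_nan(numpy.array([1., 2., numpy.nan, 4.]))
# returns [array([1., 2.]), array([4.])]; the NaN splits the array in two.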
|
[
"ryan.lagerquist@ou.edu"
] |
ryan.lagerquist@ou.edu
|
7fdc6939764e2354c648bee798e14b5694bbc243
|
ae5eb4b2e9cfd53273bf1c4fcb6b75b2dc09ddfb
|
/andelabs/MaxMinTest.py
|
50d37d99608f93ae3f77d22106357ef367cf303d
|
[] |
no_license
|
m-amoit/Bootcamp-7
|
4e0307d2819d631a8ba78d1777d509492d34a988
|
33db32c56bd49a6809ec03bd8c4dafa7d91c35e3
|
refs/heads/master
| 2021-01-01T03:54:43.030784
| 2016-06-02T15:37:37
| 2016-06-02T15:37:37
| 57,123,199
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,243
|
py
|
from unittest import TestCase
from find_max_min import find_max_min
class MaxMinTest(TestCase):
"""docstring for MaxMinTest"""
def test_find_max_min_four(self):
self.assertListEqual([1, 4],
find_max_min([1, 2, 3, 4]),
msg='should return [1,4] for [1, 2, 3, 4]')
def test_find_max_min_one(self):
self.assertListEqual([4, 6],
find_max_min([6, 4]),
msg='should return [4, 6] for [6, 4]')
def test_find_max_min_two(self):
self.assertListEqual([2, 78],
find_max_min([4, 66, 6, 44, 7, 78, 8, 68, 2]),
msg='should return [2, 78] for [4, 66, 6, 44, 7, 78, 8, 68, 2]')
def test_find_max_min_three(self):
self.assertListEqual([1, 4],
find_max_min([1, 2, 3, 4]),
msg='should return [1,4] for [1, 2, 3, 4]')
def test_find_max_min_identity(self):
self.assertListEqual([4],
find_max_min([4, 4, 4, 4]),
msg='Return the number of elements in the list in a new list if the `min` and `max` are equal')
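# A hypothetical implementation consistent with the assertions above (the
# real find_max_min module is not included in this file):
def find_max_min_sketch(values):
    lo, hi = min(values), max(values)
    return [lo] if lo == hi else [lo, hi]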
|
[
"amoitjmiriam@gmail.com"
] |
amoitjmiriam@gmail.com
|
b228652b795046586f1ccc4eab807ab62a674da6
|
2cbd2f11cdee79dc13734bbaf38ef660bfb26f24
|
/main.py
|
ec0da20e468269917f385a6eeda870086101e1a6
|
[
"Apache-2.0"
] |
permissive
|
CoderXDY/NoPeekNN
|
2ac92a864c838ad2ac9c3a0a122ed4d64921e792
|
d1ae349bc023d7c0353f8d67708add8325a78b3c
|
refs/heads/master
| 2022-12-08T08:45:57.157218
| 2020-08-30T14:48:44
| 2020-08-30T14:48:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,390
|
py
|
"""Script for training a SplitNN"""
import argparse
from pathlib import Path
from typing import Tuple
import torch
import torchvision.transforms as transforms
from torch.utils.tensorboard import SummaryWriter
from torchvision.datasets import MNIST
from tqdm import tqdm
import syft as sy
from src import SplitNN, NoPeekLoss, model_part1, model_part2
# Set torch-hook
hook = sy.TorchHook(torch)
def train_epoch(model, criterion, train_loader, device) -> Tuple[float, float]:
train_loss = 0.0
correct = 0
total = 0
first_model_location = model.location
last_model_location = model.models[-1].location
model.train()
for batch_idx, (inputs, targets) in enumerate(train_loader):
inputs = inputs.to(device).send(first_model_location)
targets = targets.to(device).send(last_model_location)
model.zero_grads()
outputs, intermediates = model(inputs)
losses = criterion(inputs, intermediates, outputs, targets)
_step_loss = 0.0
for loss in losses:
loss.backward()
_step_loss += loss.get().item()
model.backward()
model.step()
train_loss += _step_loss
outputs = outputs.get()
targets = targets.get()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
return 100 * correct / total, train_loss
def test(model, test_loader, device) -> float:
# Evaluate on test data
correct_test = 0
total_test = 0
first_model_location = model.location
last_model_location = model.models[-1].location
model.eval()
for test_inputs, test_targets in test_loader:
test_inputs = test_inputs.to(device).send(first_model_location)
test_targets = test_targets.to(device).send(last_model_location)
with torch.no_grad():
outputs, _ = model(test_inputs)
outputs = outputs.get()
test_targets = test_targets.get()
_, predicted = outputs.max(1)
total_test += test_targets.size(0)
correct_test += predicted.eq(test_targets).sum().item()
return 100 * correct_test / total_test
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Train a SplitNN with NoPeek loss")
parser.add_argument(
"--nopeek_weight",
type=float,
required=True,
help="Weighting of NoPeek loss term. If 0.0, NoPeek is not used. Required.",
)
parser.add_argument(
"--epochs", default=5, type=int, help="Number of epochs to run for (default 5)",
)
parser.add_argument(
"--batch_size", default=64, type=int, help="Batch size (default 64)"
)
parser.add_argument(
"--learning_rate",
default=0.6,
type=float,
help="Starting learning rate (default 0.6)",
)
parser.add_argument(
"--saveas",
default="nopeek",
type=str,
help="Name of model to save as (default is 'nopeek')."
"Note that '_{nopeek_weight}weight' will be appended to the end of the name",
)
parser.add_argument(
"--n_train_data",
default=10_000,
type=int,
help="Number of training points to use (default 10'000)",
)
args = parser.parse_args()
weighting = args.nopeek_weight
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# File paths
project_root = Path(__file__).resolve().parent
data_dir = project_root / "data"
root_model_path = project_root / "models"
# Model name
model_name = args.saveas + f"_{weighting}weight".replace(".", "")
MODEL_SAVE_PATH = (root_model_path / model_name).with_suffix(".pth")
summary_writer_path = project_root / "models" / ("tb_" + model_name)
# ----- Model Parts -----
models = [model_part1, model_part2]
optims = [torch.optim.SGD(model.parameters(), lr=args.learning_rate,) for model in models]
# ----- Users -----
alice = sy.VirtualWorker(hook, id="alice")
bob = sy.VirtualWorker(hook, id="bob")
for model, location in zip(models, [alice, bob]):
model.send(location)
# Create model
split_model = SplitNN([model_part1, model_part2], optims)
split_model.train()
# ----- Data -----
data_transform = transforms.Compose(
[
transforms.ToTensor(),
# PyTorch examples; https://github.com/pytorch/examples/blob/master/mnist/main.py
transforms.Normalize((0.1307,), (0.3081,)),
]
)
train_data = MNIST(data_dir, download=True, train=True, transform=data_transform)
# We only want to use a subset of the data to force overfitting
train_data.data = train_data.data[: args.n_train_data]
train_data.targets = train_data.targets[: args.n_train_data]
train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size)
# Test data
test_data = MNIST(data_dir, download=True, train=False, transform=data_transform)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=1024)
# ----- Train -----
n_epochs = args.epochs
best_accuracy = 0.0
# writer = SummaryWriter(summary_writer_path)
criterion = NoPeekLoss(weighting)
epoch_pbar = tqdm(total=n_epochs)
print("Starting training...")
for epoch in range(n_epochs):
train_acc, train_loss = train_epoch(
split_model, criterion, train_loader, DEVICE
)
test_acc = test(split_model, test_loader, DEVICE)
# Update tensorboard
# writer.add_scalars("Accuracy", {"train": train_acc, "test": test_acc}, epoch)
# writer.add_scalar("Loss/train", train_loss, epoch)
# Save model if it's an improvement
if test_acc > best_accuracy:
best_accuracy = test_acc
state_dict = {
"model_state_dict": split_model.state_dict(),
"epoch": epoch,
"train_acc": train_acc,
"test_acc": test_acc,
}
torch.save(state_dict, MODEL_SAVE_PATH)
# Update prog bar text
epoch_pbar.set_description(
f"Train {train_acc: .2f}%; "
f"Test {test_acc : .2f}%; "
f"Best test {best_accuracy : .2f}%"
)
epoch_pbar.update(1)
epoch_pbar.close()
# writer.close()
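    # Example invocation (based on the argparse flags defined above):
    #   python main.py --nopeek_weight 0.1 --epochs 5 --batch_size 64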
|
[
"t.j.titcombe@gmail.com"
] |
t.j.titcombe@gmail.com
|
a56bdd9716876164c6213827df6a4999c6674ca0
|
ce8a2fd8ec1cd1dd9e65c4da2dd23e00e5c9056b
|
/Trees/convert_nary_binary.py
|
7d9911911ec519af17692a8a5548d7e0af1c0606
|
[] |
no_license
|
tdavchev/algorithms
|
ad1e76b50a67228157cc7795f80e3b3b4df350eb
|
60fbee3663b2933b621fb227274adc898606df2f
|
refs/heads/master
| 2021-09-22T15:26:43.051746
| 2018-09-11T09:32:57
| 2018-09-11T09:32:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 94
|
py
|
def convert_nary_binary(root):
return None
def convert_binary_nary(root):
return None
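# A minimal sketch of the standard left-child/right-sibling encoding, under
# the assumption that n-ary nodes expose .val and .children (the file itself
# only contains stubs, so the node shapes here are hypothetical):
class BNode:
    """Hypothetical binary node used only by the sketch below."""
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

def convert_nary_binary_sketch(root):
    # First child hangs off .left; remaining siblings chain down .right.
    if root is None:
        return None
    bnode = BNode(root.val)
    if getattr(root, "children", None):
        bnode.left = convert_nary_binary_sketch(root.children[0])
        cur = bnode.left
        for child in root.children[1:]:
            cur.right = convert_nary_binary_sketch(child)
            cur = cur.right
    return bnode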
|
[
"t.davchev@gmail.com"
] |
t.davchev@gmail.com
|
db12920dd2e491b3601268ab40c4f0e08264022b
|
31b85e2abc35fa633257cc372c8fa059c75241d9
|
/superset/views/database/forms.py
|
647d580edb0504c1eac8f3d635fbeff32bacf1ca
|
[
"Apache-2.0"
] |
permissive
|
Zandut/Superset-Funnel
|
b28cf1c0768bfcf7ab630c622c7ca3755212bfe8
|
cff832b2d584f859ceb025349b615c25afa524b7
|
refs/heads/master
| 2022-12-04T07:10:29.164996
| 2020-08-24T08:05:16
| 2020-08-24T08:05:16
| 288,700,792
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,812
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
"""Contains the logic to create cohesive forms on the explore view"""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_appbuilder.forms import DynamicForm
from flask_babel import lazy_gettext as _
from flask_wtf.file import FileAllowed, FileField, FileRequired
from wtforms import BooleanField, IntegerField, SelectField, StringField
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.validators import DataRequired, Length, NumberRange, Optional
from superset import app, db, security_manager
from superset.forms import CommaSeparatedListField, filter_not_empty_values
from superset.models import core as models
config = app.config
class CsvToDatabaseForm(DynamicForm):
# pylint: disable=E0211
def csv_allowed_dbs():
csv_allowed_dbs = []
csv_enabled_dbs = (
db.session.query(models.Database).filter_by(allow_csv_upload=True).all()
)
for csv_enabled_db in csv_enabled_dbs:
if CsvToDatabaseForm.at_least_one_schema_is_allowed(csv_enabled_db):
csv_allowed_dbs.append(csv_enabled_db)
return csv_allowed_dbs
@staticmethod
def at_least_one_schema_is_allowed(database):
"""
If the user has access to the database or all datasource
1. if schemas_allowed_for_csv_upload is empty
a) if database does not support schema
user is able to upload csv without specifying schema name
b) if database supports schema
user is able to upload csv to any schema
2. if schemas_allowed_for_csv_upload is not empty
a) if database does not support schema
This situation is impossible and upload will fail
b) if database supports schema
user is able to upload to schema in schemas_allowed_for_csv_upload
        elif the user does not have access to the database or all datasource
1. if schemas_allowed_for_csv_upload is empty
a) if database does not support schema
user is unable to upload csv
b) if database supports schema
user is unable to upload csv
2. if schemas_allowed_for_csv_upload is not empty
a) if database does not support schema
This situation is impossible and user is unable to upload csv
b) if database supports schema
user is able to upload to schema in schemas_allowed_for_csv_upload
"""
if (
security_manager.database_access(database)
or security_manager.all_datasource_access()
):
return True
schemas = database.get_schema_access_for_csv_upload()
if schemas and security_manager.schemas_accessible_by_user(
database, schemas, False
):
return True
return False
name = StringField(
_("Table Name"),
description=_("Name of table to be created from csv data."),
validators=[DataRequired()],
widget=BS3TextFieldWidget(),
)
csv_file = FileField(
_("CSV File"),
description=_("Select a CSV file to be uploaded to a database."),
validators=[FileRequired(), FileAllowed(["csv"], _("CSV Files Only!"))],
)
con = QuerySelectField(
_("Database"),
query_factory=csv_allowed_dbs,
get_pk=lambda a: a.id,
get_label=lambda a: a.database_name,
)
schema = StringField(
_("Schema"),
description=_("Specify a schema (if database flavor supports this)."),
validators=[Optional()],
widget=BS3TextFieldWidget(),
)
sep = StringField(
_("Delimiter"),
description=_("Delimiter used by CSV file (for whitespace use \\s+)."),
validators=[DataRequired()],
widget=BS3TextFieldWidget(),
)
if_exists = SelectField(
_("Table Exists"),
description=_(
"If table exists do one of the following: "
"Fail (do nothing), Replace (drop and recreate table) "
"or Append (insert data)."
),
choices=[
("fail", _("Fail")),
("replace", _("Replace")),
("append", _("Append")),
],
validators=[DataRequired()],
)
header = IntegerField(
_("Header Row"),
description=_(
"Row containing the headers to use as "
"column names (0 is first line of data). "
"Leave empty if there is no header row."
),
validators=[Optional(), NumberRange(min=0)],
widget=BS3TextFieldWidget(),
)
index_col = IntegerField(
_("Index Column"),
description=_(
"Column to use as the row labels of the "
"dataframe. Leave empty if no index column."
),
validators=[Optional(), NumberRange(min=0)],
widget=BS3TextFieldWidget(),
)
mangle_dupe_cols = BooleanField(
_("Mangle Duplicate Columns"),
description=_('Specify duplicate columns as "X.0, X.1".'),
)
skipinitialspace = BooleanField(
_("Skip Initial Space"), description=_("Skip spaces after delimiter.")
)
skiprows = IntegerField(
_("Skip Rows"),
description=_("Number of rows to skip at start of file."),
validators=[Optional(), NumberRange(min=0)],
widget=BS3TextFieldWidget(),
)
nrows = IntegerField(
_("Rows to Read"),
description=_("Number of rows of file to read."),
validators=[Optional(), NumberRange(min=0)],
widget=BS3TextFieldWidget(),
)
skip_blank_lines = BooleanField(
_("Skip Blank Lines"),
description=_(
"Skip blank lines rather than interpreting them " "as NaN values."
),
)
parse_dates = CommaSeparatedListField(
_("Parse Dates"),
description=_(
"A comma separated list of columns that should be " "parsed as dates."
),
filters=[filter_not_empty_values],
)
infer_datetime_format = BooleanField(
_("Infer Datetime Format"),
description=_("Use Pandas to interpret the datetime format " "automatically."),
)
decimal = StringField(
_("Decimal Character"),
default=".",
description=_("Character to interpret as decimal point."),
validators=[Optional(), Length(min=1, max=1)],
widget=BS3TextFieldWidget(),
)
index = BooleanField(
_("Dataframe Index"), description=_("Write dataframe index as a column.")
)
index_label = StringField(
_("Column Label(s)"),
description=_(
"Column label for index column(s). If None is given "
"and Dataframe Index is True, Index Names are used."
),
validators=[Optional()],
widget=BS3TextFieldWidget(),
)
class ExcelToDatabaseForm(DynamicForm):
# pylint: disable=E0211
def excel_allowed_dbs(): # type: ignore
excel_allowed_dbs = []
# TODO: change allow_csv_upload to allow_file_upload
excel_enabled_dbs = (
db.session.query(models.Database).filter_by(allow_csv_upload=True).all()
)
for excel_enabled_db in excel_enabled_dbs:
if ExcelToDatabaseForm.at_least_one_schema_is_allowed(excel_enabled_db):
excel_allowed_dbs.append(excel_enabled_db)
return excel_allowed_dbs
@staticmethod
def at_least_one_schema_is_allowed(database):
"""
If the user has access to the database or all datasource
1. if schemas_allowed_for_csv_upload is empty
a) if database does not support schema
user is able to upload excel without specifying schema name
b) if database supports schema
user is able to upload excel to any schema
2. if schemas_allowed_for_csv_upload is not empty
a) if database does not support schema
This situation is impossible and upload will fail
b) if database supports schema
user is able to upload to schema in schemas_allowed_for_csv_upload
        elif the user does not have access to the database or all datasource
1. if schemas_allowed_for_csv_upload is empty
a) if database does not support schema
user is unable to upload excel
b) if database supports schema
user is unable to upload excel
2. if schemas_allowed_for_csv_upload is not empty
a) if database does not support schema
This situation is impossible and user is unable to upload excel
b) if database supports schema
user is able to upload to schema in schemas_allowed_for_csv_upload
"""
if (
security_manager.database_access(database)
or security_manager.all_datasource_access()
):
return True
schemas = database.get_schema_access_for_csv_upload()
if schemas and security_manager.schemas_accessible_by_user(
database, schemas, False
):
return True
return False
name = StringField(
_("Table Name"),
description=_("Name of table to be created from excel data."),
validators=[DataRequired()],
widget=BS3TextFieldWidget(),
)
excel_file = FileField(
_("Excel File"),
description=_("Select a Excel file to be uploaded to a database."),
validators=[
FileRequired(),
FileAllowed(
config["EXCEL_EXTENSIONS"],
_(
"Only the following file extensions are allowed: "
"%(allowed_extensions)s",
allowed_extensions=", ".join(config["EXCEL_EXTENSIONS"]),
),
),
],
)
sheet_name = StringField(
_("Sheet Name"), description="Sheet Name", validators=[Optional()]
)
con = QuerySelectField(
_("Database"),
query_factory=excel_allowed_dbs,
get_pk=lambda a: a.id,
get_label=lambda a: a.database_name,
)
schema = StringField(
_("Schema"),
description=_("Specify a schema (if database flavor supports this)."),
validators=[Optional()],
widget=BS3TextFieldWidget(),
)
if_exists = SelectField(
_("Table Exists"),
description=_(
"If table exists do one of the following: "
"Fail (do nothing), Replace (drop and recreate table) "
"or Append (insert data)."
),
choices=[
("fail", _("Fail")),
("replace", _("Replace")),
("append", _("Append")),
],
validators=[DataRequired()],
)
header = IntegerField(
_("Header Row"),
description=_(
"Row containing the headers to use as "
"column names (0 is first line of data). "
"Leave empty if there is no header row."
),
validators=[Optional(), NumberRange(min=0)],
widget=BS3TextFieldWidget(),
)
index_col = IntegerField(
_("Index Column"),
description=_(
"Column to use as the row labels of the "
"dataframe. Leave empty if no index column."
),
validators=[Optional(), NumberRange(min=0)],
widget=BS3TextFieldWidget(),
)
mangle_dupe_cols = BooleanField(
_("Mangle Duplicate Columns"),
description=_('Specify duplicate columns as "X.0, X.1".'),
)
skipinitialspace = BooleanField(
_("Skip Initial Space"), description=_("Skip spaces after delimiter.")
)
skiprows = IntegerField(
_("Skip Rows"),
description=_("Number of rows to skip at start of file."),
validators=[Optional(), NumberRange(min=0)],
widget=BS3TextFieldWidget(),
)
nrows = IntegerField(
_("Rows to Read"),
description=_("Number of rows of file to read."),
validators=[Optional(), NumberRange(min=0)],
widget=BS3TextFieldWidget(),
)
decimal = StringField(
_("Decimal Character"),
default=".",
description=_("Character to interpret as decimal point."),
validators=[Optional(), Length(min=1, max=1)],
widget=BS3TextFieldWidget(),
)
index = BooleanField(
_("Dataframe Index"), description=_("Write dataframe index as a column.")
)
index_label = StringField(
_("Column Label(s)"),
description=_(
"Column label for index column(s). If None is given "
"and Dataframe Index is True, Index Names are used."
),
validators=[Optional()],
widget=BS3TextFieldWidget(),
)
|
[
"mfauzan613110035@gmail.com"
] |
mfauzan613110035@gmail.com
|
87c09b0473150398ede397ba39ccd0d6e70d2da2
|
92190f39b9019571d3b5df881cf9b26882ec972d
|
/core/fit/migrations/0007_auto_20170112_2111.py
|
6b87f670c7e4e7de3791da1ec4ca1119d5541763
|
[] |
no_license
|
nolanrbrady/FIT_Experiance
|
0432192c91c575f7f6465197536a96f313ccd42f
|
f1bb2bb6e45ed377882ce455330782e35f870cf3
|
refs/heads/master
| 2021-05-14T22:48:12.671638
| 2017-10-04T23:35:59
| 2017-10-04T23:35:59
| 105,830,080
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 760
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-01-12 21:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fit', '0006_auto_20170112_2059'),
]
operations = [
migrations.AlterField(
model_name='resource',
name='thumbnail',
field=models.FileField(blank=True, default='', upload_to='lib/%Y/%m/%d/thumbnail'),
preserve_default=False,
),
migrations.AlterField(
model_name='resource',
name='upload',
field=models.FileField(blank=True, default='', upload_to='lib/%Y/%m/%d/'),
preserve_default=False,
),
]
|
[
"nolanrbrady@gmail.com"
] |
nolanrbrady@gmail.com
|
f3bca0873dd5c520792e27a0a366b79e878b917c
|
5ab2b2a6c5e8420fa35dab332c9b9c3453d5641a
|
/viz/sgd_ez.py
|
9b16120fa2301e346f0adb7d9fc90b7204775ded
|
[] |
no_license
|
Drummersbrother/rays-2019-teamwork
|
d80705001722165d33c6de523e0eca372499a6e0
|
ae49ff5a297c9f96f1049225af87d34ad93af1eb
|
refs/heads/master
| 2022-04-10T19:11:53.525916
| 2020-03-05T12:28:03
| 2020-03-05T12:28:03
| 193,438,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 899
|
py
|
import matplotlib.pyplot as plt
import numpy as np
import mpl_toolkits.mplot3d.axes3d as axes3d
np.random.seed(1)
fig, ax = plt.subplots(subplot_kw=dict(projection='3d'))
N = 15
step = 0.1
X, Y = np.meshgrid(np.arange(-N/2, N/2, step), np.arange(-N/2, N/2, step))
# A bowl-like loss surface with a sinusoidal ripple along X.
heights = (2+np.sin(X)) * (0.2 * (Y**2)) + X**2
ax.set_zlim3d(0, 100)
ax.plot_surface(X, Y, heights, cmap=plt.get_cmap('jet'), alpha=0.8)
for a in ax.w_xaxis.get_ticklines()+ax.w_xaxis.get_ticklabels():
a.set_visible(False)
for a in ax.w_yaxis.get_ticklines()+ax.w_yaxis.get_ticklabels():
a.set_visible(False)
for a in ax.w_zaxis.get_ticklines()+ax.w_zaxis.get_ticklabels():
a.set_visible(False)
# Convert the argmin's flat index back to grid coordinates, then recenter.
min_pos = [x*step for x in np.unravel_index(heights.argmin(), heights.shape)]
min_pos = [x - N/2 for x in min_pos]
print(min_pos)
ax.plot([min_pos[0]], [min_pos[1]], [heights.min()], c='r', marker='.', zorder=10)
plt.show()
|
[
"ilykxar@gmail.com"
] |
ilykxar@gmail.com
|
737c4b442cc3c836116fe4b83ad0f17e908ad474
|
662207b37b6d77b43c164b9feacdbf928790bf17
|
/day3/dictionary2.py
|
54ecdf16fead40bde31b032ba2d41d65ebb9c461
|
[] |
no_license
|
shamanthaka/mypython_work
|
fc26983488059eb59462f8ab966aaad2ab93532a
|
410f655939387b709cefa4d452aa323aede1c4d7
|
refs/heads/master
| 2020-07-10T03:12:59.246227
| 2019-08-24T12:34:34
| 2019-08-24T12:34:34
| 204,152,832
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 343
|
py
|
monthConversions = {
"Jan": "January",
"Feb": "February",
"Mar": "March",
"Apr": "April",
"May": "May",
"Jun": "June",
"Jul": "July",
"Aug": "August",
"Sep": "September",
"Oct": "October",
"Nov": "November",
"Dec": "December"
}
print(monthConversions["Nov"])
print(monthConversions.get("Dec"))
|
[
"shamanthaka.veerareddy@gmail.com"
] |
shamanthaka.veerareddy@gmail.com
|
62bf2861525dfc6f66b4f89ffe33502afbec465f
|
f4bf81d4e80468331a09401dbaeef12465aca853
|
/lib/python/helpers/coveragepy/coverage/parser.py
|
a1d48185a2cf936ffd2056132698e92ff57f008a
|
[] |
no_license
|
nottyo/intellibot
|
45c41d673608a0a1291c6387f9d33ef449f18837
|
0547d987deaad90260abe33db5284eae9704eb9b
|
refs/heads/master
| 2020-12-30T23:59:29.795725
| 2017-04-10T07:53:59
| 2017-04-10T07:53:59
| 86,574,980
| 1
| 0
| null | 2017-03-29T11:37:54
| 2017-03-29T11:37:53
| null |
UTF-8
|
Python
| false
| false
| 40,569
|
py
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Code parsing for coverage.py."""
import ast
import collections
import os
import re
import token
import tokenize
from coverage import env
from coverage.backward import range # pylint: disable=redefined-builtin
from coverage.backward import bytes_to_ints, string_class
from coverage.bytecode import CodeObjects
from coverage.debug import short_stack
from coverage.misc import contract, new_contract, nice_pair, join_regex
from coverage.misc import CoverageException, NoSource, NotPython
from coverage.phystokens import compile_unicode, generate_tokens, neuter_encoding_declaration
class PythonParser(object):
"""Parse code to find executable lines, excluded lines, etc.
This information is all based on static analysis: no code execution is
involved.
"""
@contract(text='unicode|None')
def __init__(self, text=None, filename=None, exclude=None):
"""
Source can be provided as `text`, the text itself, or `filename`, from
which the text will be read. Excluded lines are those that match
`exclude`, a regex.
"""
assert text or filename, "PythonParser needs either text or filename"
self.filename = filename or "<code>"
self.text = text
if not self.text:
from coverage.python import get_python_source
try:
self.text = get_python_source(self.filename)
except IOError as err:
raise NoSource(
"No source for code: '%s': %s" % (self.filename, err)
)
self.exclude = exclude
# The text lines of the parsed code.
self.lines = self.text.split('\n')
# The normalized line numbers of the statements in the code. Exclusions
# are taken into account, and statements are adjusted to their first
# lines.
self.statements = set()
# The normalized line numbers of the excluded lines in the code,
# adjusted to their first lines.
self.excluded = set()
# The raw_* attributes are only used in this class, and in
# lab/parser.py to show how this class is working.
# The line numbers that start statements, as reported by the line
# number table in the bytecode.
self.raw_statements = set()
# The raw line numbers of excluded lines of code, as marked by pragmas.
self.raw_excluded = set()
# The line numbers of class and function definitions.
self.raw_classdefs = set()
# The line numbers of docstring lines.
self.raw_docstrings = set()
# Internal detail, used by lab/parser.py.
self.show_tokens = False
# A dict mapping line numbers to lexical statement starts for
# multi-line statements.
self._multiline = {}
# Lazily-created ByteParser, arc data, and missing arc descriptions.
self._byte_parser = None
self._all_arcs = None
self._missing_arc_fragments = None
@property
def byte_parser(self):
"""Create a ByteParser on demand."""
if not self._byte_parser:
self._byte_parser = ByteParser(self.text, filename=self.filename)
return self._byte_parser
def lines_matching(self, *regexes):
"""Find the lines matching one of a list of regexes.
Returns a set of line numbers, the lines that contain a match for one
of the regexes in `regexes`. The entire line needn't match, just a
part of it.
"""
combined = join_regex(regexes)
if env.PY2:
# pylint: disable=redefined-variable-type
combined = combined.decode("utf8")
regex_c = re.compile(combined)
matches = set()
for i, ltext in enumerate(self.lines, start=1):
if regex_c.search(ltext):
matches.add(i)
return matches
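    # Illustrative usage (not part of the original coverage.py source): with
    # self.lines == ["a = 1", "b = 2  # pragma: no cover"], calling
    # self.lines_matching(r"no cover") returns {2} (enumeration starts at 1).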
def _raw_parse(self):
"""Parse the source to find the interesting facts about its lines.
A handful of attributes are updated.
"""
# Find lines which match an exclusion pattern.
if self.exclude:
self.raw_excluded = self.lines_matching(self.exclude)
# Tokenize, to find excluded suites, to find docstrings, and to find
# multi-line statements.
indent = 0
exclude_indent = 0
excluding = False
excluding_decorators = False
prev_toktype = token.INDENT
first_line = None
empty = True
first_on_line = True
tokgen = generate_tokens(self.text)
for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen:
if self.show_tokens: # pragma: not covered
print("%10s %5s %-20r %r" % (
tokenize.tok_name.get(toktype, toktype),
nice_pair((slineno, elineno)), ttext, ltext
))
if toktype == token.INDENT:
indent += 1
elif toktype == token.DEDENT:
indent -= 1
elif toktype == token.NAME:
if ttext == 'class':
# Class definitions look like branches in the bytecode, so
# we need to exclude them. The simplest way is to note the
# lines with the 'class' keyword.
self.raw_classdefs.add(slineno)
elif toktype == token.OP:
if ttext == ':':
should_exclude = (elineno in self.raw_excluded) or excluding_decorators
if not excluding and should_exclude:
# Start excluding a suite. We trigger off of the colon
# token so that the #pragma comment will be recognized on
# the same line as the colon.
self.raw_excluded.add(elineno)
exclude_indent = indent
excluding = True
excluding_decorators = False
elif ttext == '@' and first_on_line:
# A decorator.
if elineno in self.raw_excluded:
excluding_decorators = True
if excluding_decorators:
self.raw_excluded.add(elineno)
elif toktype == token.STRING and prev_toktype == token.INDENT:
# Strings that are first on an indented line are docstrings.
# (a trick from trace.py in the stdlib.) This works for
# 99.9999% of cases. For the rest (!) see:
# http://stackoverflow.com/questions/1769332/x/1769794#1769794
self.raw_docstrings.update(range(slineno, elineno+1))
elif toktype == token.NEWLINE:
if first_line is not None and elineno != first_line:
# We're at the end of a line, and we've ended on a
# different line than the first line of the statement,
# so record a multi-line range.
for l in range(first_line, elineno+1):
self._multiline[l] = first_line
first_line = None
first_on_line = True
if ttext.strip() and toktype != tokenize.COMMENT:
# A non-whitespace token.
empty = False
if first_line is None:
# The token is not whitespace, and is the first in a
# statement.
first_line = slineno
# Check whether to end an excluded suite.
if excluding and indent <= exclude_indent:
excluding = False
if excluding:
self.raw_excluded.add(elineno)
first_on_line = False
prev_toktype = toktype
# Find the starts of the executable statements.
if not empty:
self.raw_statements.update(self.byte_parser._find_statements())
def first_line(self, line):
"""Return the first line number of the statement including `line`."""
return self._multiline.get(line, line)
def first_lines(self, lines):
"""Map the line numbers in `lines` to the correct first line of the
statement.
Returns a set of the first lines.
"""
return set(self.first_line(l) for l in lines)
def translate_lines(self, lines):
"""Implement `FileReporter.translate_lines`."""
return self.first_lines(lines)
def translate_arcs(self, arcs):
"""Implement `FileReporter.translate_arcs`."""
return [(self.first_line(a), self.first_line(b)) for (a, b) in arcs]
def parse_source(self):
"""Parse source text to find executable lines, excluded lines, etc.
Sets the .excluded and .statements attributes, normalized to the first
line of multi-line statements.
"""
try:
self._raw_parse()
except (tokenize.TokenError, IndentationError) as err:
if hasattr(err, "lineno"):
lineno = err.lineno # IndentationError
else:
lineno = err.args[1][0] # TokenError
raise NotPython(
u"Couldn't parse '%s' as Python source: '%s' at line %d" % (
self.filename, err.args[0], lineno
)
)
self.excluded = self.first_lines(self.raw_excluded)
ignore = self.excluded | self.raw_docstrings
starts = self.raw_statements - ignore
self.statements = self.first_lines(starts) - ignore
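    # Illustrative sketch (an addition): a multi-line statement is normalized
    # to its first line after parse_source().
    #
    #     p = PythonParser(text=u"a = (\n    1,\n)\nb = 2\n")
    #     p.parse_source()
    #     p.statements   # -> {1, 4}; lines 1-3 are one statement, mapped to 1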
def arcs(self):
"""Get information about the arcs available in the code.
Returns a set of line number pairs. Line numbers have been normalized
to the first line of multi-line statements.
"""
if self._all_arcs is None:
self._analyze_ast()
return self._all_arcs
def _analyze_ast(self):
"""Run the AstArcAnalyzer and save its results.
`_all_arcs` is the set of arcs in the code.
"""
aaa = AstArcAnalyzer(self.text, self.raw_statements, self._multiline)
aaa.analyze()
self._all_arcs = set()
for l1, l2 in aaa.arcs:
fl1 = self.first_line(l1)
fl2 = self.first_line(l2)
if fl1 != fl2:
self._all_arcs.add((fl1, fl2))
self._missing_arc_fragments = aaa.missing_arc_fragments
def exit_counts(self):
"""Get a count of exits from that each line.
Excluded lines are excluded.
"""
exit_counts = collections.defaultdict(int)
for l1, l2 in self.arcs():
if l1 < 0:
# Don't ever report -1 as a line number
continue
if l1 in self.excluded:
# Don't report excluded lines as line numbers.
continue
if l2 in self.excluded:
# Arcs to excluded lines shouldn't count.
continue
exit_counts[l1] += 1
# Class definitions have one extra exit, so remove one for each:
for l in self.raw_classdefs:
# Ensure key is there: class definitions can include excluded lines.
if l in exit_counts:
exit_counts[l] -= 1
return exit_counts
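    # Worked example (an addition): for the source
    #     1  if a:
    #     2      b = 1
    #     3  c = 2
    # the arcs include (1, 2) and (1, 3), so exit_counts()[1] == 2 while
    # exit_counts()[2] == 1.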
def missing_arc_description(self, start, end, executed_arcs=None):
"""Provide an English sentence describing a missing arc."""
if self._missing_arc_fragments is None:
self._analyze_ast()
actual_start = start
if (
executed_arcs and
end < 0 and end == -start and
(end, start) not in executed_arcs and
(end, start) in self._missing_arc_fragments
):
# It's a one-line callable, and we never even started it,
# and we have a message about not starting it.
start, end = end, start
fragment_pairs = self._missing_arc_fragments.get((start, end), [(None, None)])
msgs = []
for fragment_pair in fragment_pairs:
smsg, emsg = fragment_pair
if emsg is None:
if end < 0:
# Hmm, maybe we have a one-line callable, let's check.
if (-end, end) in self._missing_arc_fragments:
return self.missing_arc_description(-end, end)
emsg = "didn't jump to the function exit"
else:
emsg = "didn't jump to line {lineno}"
emsg = emsg.format(lineno=end)
msg = "line {start} {emsg}".format(start=actual_start, emsg=emsg)
if smsg is not None:
msg += ", because {smsg}".format(smsg=smsg.format(lineno=actual_start))
msgs.append(msg)
return " or ".join(msgs)
class ByteParser(object):
"""Parse bytecode to understand the structure of code."""
@contract(text='unicode')
def __init__(self, text, code=None, filename=None):
self.text = text
if code:
self.code = code
else:
try:
self.code = compile_unicode(text, filename, "exec")
except SyntaxError as synerr:
raise NotPython(
u"Couldn't parse '%s' as Python source: '%s' at line %d" % (
filename, synerr.msg, synerr.lineno
)
)
# Alternative Python implementations don't always provide all the
# attributes on code objects that we need to do the analysis.
for attr in ['co_lnotab', 'co_firstlineno', 'co_consts']:
if not hasattr(self.code, attr):
raise CoverageException(
"This implementation of Python doesn't support code analysis.\n"
"Run coverage.py under CPython for this command."
)
def child_parsers(self):
"""Iterate over all the code objects nested within this one.
The iteration includes `self` as its first value.
"""
children = CodeObjects(self.code)
return (ByteParser(self.text, code=c) for c in children)
def _bytes_lines(self):
"""Map byte offsets to line numbers in `code`.
Uses co_lnotab described in Python/compile.c to map byte offsets to
line numbers. Produces a sequence: (b0, l0), (b1, l1), ...
Only byte offsets that correspond to line numbers are included in the
results.
"""
# Adapted from dis.py in the standard library.
byte_increments = bytes_to_ints(self.code.co_lnotab[0::2])
line_increments = bytes_to_ints(self.code.co_lnotab[1::2])
last_line_num = None
line_num = self.code.co_firstlineno
byte_num = 0
for byte_incr, line_incr in zip(byte_increments, line_increments):
if byte_incr:
if line_num != last_line_num:
yield (byte_num, line_num)
last_line_num = line_num
byte_num += byte_incr
line_num += line_incr
if line_num != last_line_num:
yield (byte_num, line_num)
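    # Worked example (an addition): with co_firstlineno == 1 and
    # co_lnotab == b"\x00\x01\x06\x01", the (byte_incr, line_incr) pairs are
    # (0, 1) and (6, 1), and this generator yields (0, 2) then (6, 3):
    # byte offset 0 holds line 2, and offset 6 starts line 3.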
def _find_statements(self):
"""Find the statements in `self.code`.
Produce a sequence of line numbers that start statements. Recurses
into all code objects reachable from `self.code`.
"""
for bp in self.child_parsers():
# Get all of the lineno information from this code.
for _, l in bp._bytes_lines():
yield l
#
# AST analysis
#
class LoopBlock(object):
"""A block on the block stack representing a `for` or `while` loop."""
def __init__(self, start):
self.start = start
self.break_exits = set()
class FunctionBlock(object):
"""A block on the block stack representing a function definition."""
def __init__(self, start, name):
self.start = start
self.name = name
class TryBlock(object):
"""A block on the block stack representing a `try` block."""
def __init__(self, handler_start=None, final_start=None):
self.handler_start = handler_start
self.final_start = final_start
self.break_from = set()
self.continue_from = set()
self.return_from = set()
self.raise_from = set()
class ArcStart(collections.namedtuple("Arc", "lineno, cause")):
"""The information needed to start an arc.
`lineno` is the line number the arc starts from. `cause` is a fragment
used as the startmsg for AstArcAnalyzer.missing_arc_fragments.
"""
def __new__(cls, lineno, cause=None):
return super(ArcStart, cls).__new__(cls, lineno, cause)
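# For example (an addition): ArcStart(17, cause="the condition on line {lineno} was never true")
# later renders as "... because the condition on line 17 was never true" in
# missing_arc_description().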
# Define contract words that PyContract doesn't have.
# ArcStarts is for a list or set of ArcStart's.
new_contract('ArcStarts', lambda seq: all(isinstance(x, ArcStart) for x in seq))
class AstArcAnalyzer(object):
"""Analyze source text with an AST to find executable code paths."""
@contract(text='unicode', statements=set)
def __init__(self, text, statements, multiline):
self.root_node = ast.parse(neuter_encoding_declaration(text))
# TODO: I think this is happening in too many places.
self.statements = set(multiline.get(l, l) for l in statements)
self.multiline = multiline
if int(os.environ.get("COVERAGE_ASTDUMP", 0)): # pragma: debugging
# Dump the AST so that failing tests have helpful output.
print("Statements: {}".format(self.statements))
print("Multiline map: {}".format(self.multiline))
ast_dump(self.root_node)
self.arcs = set()
# A map from arc pairs to a pair of sentence fragments: (startmsg, endmsg).
# For an arc from line 17, they should be usable like:
# "Line 17 {endmsg}, because {startmsg}"
self.missing_arc_fragments = collections.defaultdict(list)
self.block_stack = []
self.debug = bool(int(os.environ.get("COVERAGE_TRACK_ARCS", 0)))
def analyze(self):
"""Examine the AST tree from `root_node` to determine possible arcs.
This sets the `arcs` attribute to be a set of (from, to) line number
pairs.
"""
for node in ast.walk(self.root_node):
node_name = node.__class__.__name__
code_object_handler = getattr(self, "_code_object__" + node_name, None)
if code_object_handler is not None:
code_object_handler(node)
def add_arc(self, start, end, smsg=None, emsg=None):
"""Add an arc, including message fragments to use if it is missing."""
if self.debug:
print("\nAdding arc: ({}, {}): {!r}, {!r}".format(start, end, smsg, emsg))
print(short_stack(limit=6))
self.arcs.add((start, end))
if smsg is not None or emsg is not None:
self.missing_arc_fragments[(start, end)].append((smsg, emsg))
def nearest_blocks(self):
"""Yield the blocks in nearest-to-farthest order."""
return reversed(self.block_stack)
@contract(returns=int)
def line_for_node(self, node):
"""What is the right line number to use for this node?
This dispatches to _line__Node functions where needed.
"""
node_name = node.__class__.__name__
handler = getattr(self, "_line__" + node_name, None)
if handler is not None:
return handler(node)
else:
return node.lineno
def _line__Assign(self, node):
return self.line_for_node(node.value)
def _line__Dict(self, node):
# Python 3.5 changed how dict literals are made.
if env.PYVERSION >= (3, 5) and node.keys:
if node.keys[0] is not None:
return node.keys[0].lineno
else:
# Unpacked dict literals `{**{'a':1}}` have None as the key,
# use the value in that case.
return node.values[0].lineno
else:
return node.lineno
def _line__List(self, node):
if node.elts:
return self.line_for_node(node.elts[0])
else:
return node.lineno
def _line__Module(self, node):
if node.body:
return self.line_for_node(node.body[0])
else:
# Modules have no line number, they always start at 1.
return 1
OK_TO_DEFAULT = set([
"Assign", "Assert", "AugAssign", "Delete", "Exec", "Expr", "Global",
"Import", "ImportFrom", "Nonlocal", "Pass", "Print",
])
@contract(returns='ArcStarts')
def add_arcs(self, node):
"""Add the arcs for `node`.
Return a set of ArcStarts, exits from this node to the next.
"""
node_name = node.__class__.__name__
handler = getattr(self, "_handle__" + node_name, None)
if handler is not None:
return handler(node)
if 0:
node_name = node.__class__.__name__
if node_name not in self.OK_TO_DEFAULT:
print("*** Unhandled: {0}".format(node))
return set([ArcStart(self.line_for_node(node), cause=None)])
@contract(returns='ArcStarts')
def add_body_arcs(self, body, from_start=None, prev_starts=None):
"""Add arcs for the body of a compound statement.
`body` is the body node. `from_start` is a single `ArcStart` that can
be the previous line in flow before this body. `prev_starts` is a set
of ArcStarts that can be the previous line. Only one of them should be
given.
Returns a set of ArcStarts, the exits from this body.
"""
if prev_starts is None:
prev_starts = set([from_start])
for body_node in body:
lineno = self.line_for_node(body_node)
first_line = self.multiline.get(lineno, lineno)
if first_line not in self.statements:
continue
for prev_start in prev_starts:
self.add_arc(prev_start.lineno, lineno, prev_start.cause)
prev_starts = self.add_arcs(body_node)
return prev_starts
def is_constant_expr(self, node):
"""Is this a compile-time constant?"""
node_name = node.__class__.__name__
if node_name in ["NameConstant", "Num"]:
return True
elif node_name == "Name":
if env.PY3 and node.id in ["True", "False", "None"]:
return True
return False
# tests to write:
# TODO: while EXPR:
# TODO: while False:
# TODO: listcomps hidden deep in other expressions
# TODO: listcomps hidden in lists: x = [[i for i in range(10)]]
# TODO: nested function definitions
@contract(exits='ArcStarts')
def process_break_exits(self, exits):
"""Add arcs due to jumps from `exits` being breaks."""
for block in self.nearest_blocks():
if isinstance(block, LoopBlock):
block.break_exits.update(exits)
break
elif isinstance(block, TryBlock) and block.final_start is not None:
block.break_from.update(exits)
break
@contract(exits='ArcStarts')
def process_continue_exits(self, exits):
"""Add arcs due to jumps from `exits` being continues."""
for block in self.nearest_blocks():
if isinstance(block, LoopBlock):
for xit in exits:
self.add_arc(xit.lineno, block.start, xit.cause)
break
elif isinstance(block, TryBlock) and block.final_start is not None:
block.continue_from.update(exits)
break
@contract(exits='ArcStarts')
def process_raise_exits(self, exits):
"""Add arcs due to jumps from `exits` being raises."""
for block in self.nearest_blocks():
if isinstance(block, TryBlock):
if block.handler_start is not None:
for xit in exits:
self.add_arc(xit.lineno, block.handler_start, xit.cause)
break
elif block.final_start is not None:
block.raise_from.update(exits)
break
elif isinstance(block, FunctionBlock):
for xit in exits:
self.add_arc(
xit.lineno, -block.start, xit.cause,
"didn't except from function '{0}'".format(block.name),
)
break
@contract(exits='ArcStarts')
def process_return_exits(self, exits):
"""Add arcs due to jumps from `exits` being returns."""
for block in self.nearest_blocks():
if isinstance(block, TryBlock) and block.final_start is not None:
block.return_from.update(exits)
break
elif isinstance(block, FunctionBlock):
for xit in exits:
self.add_arc(
xit.lineno, -block.start, xit.cause,
"didn't return from function '{0}'".format(block.name),
)
break
## Handlers
@contract(returns='ArcStarts')
def _handle__Break(self, node):
here = self.line_for_node(node)
break_start = ArcStart(here, cause="the break on line {lineno} wasn't executed")
self.process_break_exits([break_start])
return set()
@contract(returns='ArcStarts')
def _handle_decorated(self, node):
"""Add arcs for things that can be decorated (classes and functions)."""
last = self.line_for_node(node)
if node.decorator_list:
for dec_node in node.decorator_list:
dec_start = self.line_for_node(dec_node)
if dec_start != last:
self.add_arc(last, dec_start)
last = dec_start
# The definition line may have been missed, but we should have it
# in `self.statements`. For some constructs, `line_for_node` is
# not what we'd think of as the first line in the statement, so map
# it to the first one.
body_start = self.line_for_node(node.body[0])
body_start = self.multiline.get(body_start, body_start)
for lineno in range(last+1, body_start):
if lineno in self.statements:
self.add_arc(last, lineno)
last = lineno
# The body is handled in collect_arcs.
return set([ArcStart(last, cause=None)])
_handle__ClassDef = _handle_decorated
@contract(returns='ArcStarts')
def _handle__Continue(self, node):
here = self.line_for_node(node)
continue_start = ArcStart(here, cause="the continue on line {lineno} wasn't executed")
self.process_continue_exits([continue_start])
return set()
@contract(returns='ArcStarts')
def _handle__For(self, node):
start = self.line_for_node(node.iter)
self.block_stack.append(LoopBlock(start=start))
from_start = ArcStart(start, cause="the loop on line {lineno} never started")
exits = self.add_body_arcs(node.body, from_start=from_start)
# Any exit from the body will go back to the top of the loop.
for xit in exits:
self.add_arc(xit.lineno, start, xit.cause)
my_block = self.block_stack.pop()
exits = my_block.break_exits
from_start = ArcStart(start, cause="the loop on line {lineno} didn't complete")
if node.orelse:
else_exits = self.add_body_arcs(node.orelse, from_start=from_start)
exits |= else_exits
else:
# no else clause: exit from the for line.
exits.add(from_start)
return exits
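    # Illustrative arcs (an addition): for
    #     1  for i in x:
    #     2      f(i)
    #     3  else:
    #     4      g()
    # this handler records (1, 2) into the body, (2, 1) back to the loop
    # header, and (1, 4) into the else clause taken when the loop completes.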
_handle__AsyncFor = _handle__For
_handle__FunctionDef = _handle_decorated
_handle__AsyncFunctionDef = _handle_decorated
@contract(returns='ArcStarts')
def _handle__If(self, node):
start = self.line_for_node(node.test)
from_start = ArcStart(start, cause="the condition on line {lineno} was never true")
exits = self.add_body_arcs(node.body, from_start=from_start)
from_start = ArcStart(start, cause="the condition on line {lineno} was never false")
exits |= self.add_body_arcs(node.orelse, from_start=from_start)
return exits
@contract(returns='ArcStarts')
def _handle__Raise(self, node):
here = self.line_for_node(node)
raise_start = ArcStart(here, cause="the raise on line {lineno} wasn't executed")
self.process_raise_exits([raise_start])
# `raise` statement jumps away, no exits from here.
return set()
@contract(returns='ArcStarts')
def _handle__Return(self, node):
here = self.line_for_node(node)
return_start = ArcStart(here, cause="the return on line {lineno} wasn't executed")
self.process_return_exits([return_start])
# `return` statement jumps away, no exits from here.
return set()
@contract(returns='ArcStarts')
def _handle__Try(self, node):
if node.handlers:
handler_start = self.line_for_node(node.handlers[0])
else:
handler_start = None
if node.finalbody:
final_start = self.line_for_node(node.finalbody[0])
else:
final_start = None
try_block = TryBlock(handler_start=handler_start, final_start=final_start)
self.block_stack.append(try_block)
start = self.line_for_node(node)
exits = self.add_body_arcs(node.body, from_start=ArcStart(start, cause=None))
# We're done with the `try` body, so this block no longer handles
# exceptions. We keep the block so the `finally` clause can pick up
# flows from the handlers and `else` clause.
if node.finalbody:
try_block.handler_start = None
if node.handlers:
# If there are `except` clauses, then raises in the try body
# will already jump to them. Start this set over for raises in
# `except` and `else`.
try_block.raise_from = set([])
else:
self.block_stack.pop()
handler_exits = set()
if node.handlers:
last_handler_start = None
for handler_node in node.handlers:
handler_start = self.line_for_node(handler_node)
if last_handler_start is not None:
self.add_arc(last_handler_start, handler_start)
last_handler_start = handler_start
from_cause = "the exception caught by line {lineno} didn't happen"
from_start = ArcStart(handler_start, cause=from_cause)
handler_exits |= self.add_body_arcs(handler_node.body, from_start=from_start)
if node.orelse:
exits = self.add_body_arcs(node.orelse, prev_starts=exits)
exits |= handler_exits
if node.finalbody:
self.block_stack.pop()
final_from = ( # You can get to the `finally` clause from:
exits | # the exits of the body or `else` clause,
try_block.break_from | # or a `break`,
try_block.continue_from | # or a `continue`,
try_block.raise_from | # or a `raise`,
try_block.return_from # or a `return`.
)
exits = self.add_body_arcs(node.finalbody, prev_starts=final_from)
if try_block.break_from:
break_exits = self._combine_finally_starts(try_block.break_from, exits)
self.process_break_exits(break_exits)
if try_block.continue_from:
continue_exits = self._combine_finally_starts(try_block.continue_from, exits)
self.process_continue_exits(continue_exits)
if try_block.raise_from:
raise_exits = self._combine_finally_starts(try_block.raise_from, exits)
self.process_raise_exits(raise_exits)
if try_block.return_from:
return_exits = self._combine_finally_starts(try_block.return_from, exits)
self.process_return_exits(return_exits)
return exits
def _combine_finally_starts(self, starts, exits):
"""Helper for building the cause of `finally` branches."""
causes = []
for lineno, cause in sorted(starts):
if cause is not None:
causes.append(cause.format(lineno=lineno))
cause = " or ".join(causes)
exits = set(ArcStart(ex.lineno, cause) for ex in exits)
return exits
@contract(returns='ArcStarts')
def _handle__TryExcept(self, node):
# Python 2.7 uses separate TryExcept and TryFinally nodes. If we get
# TryExcept, it means there was no finally, so fake it, and treat as
# a general Try node.
node.finalbody = []
return self._handle__Try(node)
@contract(returns='ArcStarts')
def _handle__TryFinally(self, node):
# Python 2.7 uses separate TryExcept and TryFinally nodes. If we get
# TryFinally, see if there's a TryExcept nested inside. If so, merge
# them. Otherwise, fake fields to complete a Try node.
node.handlers = []
node.orelse = []
first = node.body[0]
if first.__class__.__name__ == "TryExcept" and node.lineno == first.lineno:
assert len(node.body) == 1
node.body = first.body
node.handlers = first.handlers
node.orelse = first.orelse
return self._handle__Try(node)
@contract(returns='ArcStarts')
def _handle__While(self, node):
constant_test = self.is_constant_expr(node.test)
start = to_top = self.line_for_node(node.test)
if constant_test:
to_top = self.line_for_node(node.body[0])
self.block_stack.append(LoopBlock(start=start))
from_start = ArcStart(start, cause="the condition on line {lineno} was never true")
exits = self.add_body_arcs(node.body, from_start=from_start)
for xit in exits:
self.add_arc(xit.lineno, to_top, xit.cause)
exits = set()
my_block = self.block_stack.pop()
exits.update(my_block.break_exits)
from_start = ArcStart(start, cause="the condition on line {lineno} was never false")
if node.orelse:
else_exits = self.add_body_arcs(node.orelse, from_start=from_start)
exits |= else_exits
else:
# No `else` clause: you can exit from the start.
if not constant_test:
exits.add(from_start)
return exits
@contract(returns='ArcStarts')
def _handle__With(self, node):
start = self.line_for_node(node)
exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
return exits
_handle__AsyncWith = _handle__With
def _code_object__Module(self, node):
start = self.line_for_node(node)
if node.body:
exits = self.add_body_arcs(node.body, from_start=ArcStart(-start))
for xit in exits:
self.add_arc(xit.lineno, -start, xit.cause, "didn't exit the module")
else:
# Empty module.
self.add_arc(-start, start)
self.add_arc(start, -start)
def _code_object__FunctionDef(self, node):
start = self.line_for_node(node)
self.block_stack.append(FunctionBlock(start=start, name=node.name))
exits = self.add_body_arcs(node.body, from_start=ArcStart(-start))
self.process_return_exits(exits)
self.block_stack.pop()
_code_object__AsyncFunctionDef = _code_object__FunctionDef
def _code_object__ClassDef(self, node):
start = self.line_for_node(node)
self.add_arc(-start, start)
exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
for xit in exits:
self.add_arc(
xit.lineno, -start, xit.cause,
"didn't exit the body of class '{0}'".format(node.name),
)
def _make_oneline_code_method(noun): # pylint: disable=no-self-argument
"""A function to make methods for online callable _code_object__ methods."""
def _code_object__oneline_callable(self, node):
start = self.line_for_node(node)
self.add_arc(-start, start, None, "didn't run the {0} on line {1}".format(noun, start))
self.add_arc(
start, -start, None,
"didn't finish the {0} on line {1}".format(noun, start),
)
return _code_object__oneline_callable
_code_object__Lambda = _make_oneline_code_method("lambda")
_code_object__GeneratorExp = _make_oneline_code_method("generator expression")
_code_object__DictComp = _make_oneline_code_method("dictionary comprehension")
_code_object__SetComp = _make_oneline_code_method("set comprehension")
if env.PY3:
_code_object__ListComp = _make_oneline_code_method("list comprehension")
SKIP_DUMP_FIELDS = ["ctx"]
def _is_simple_value(value):
"""Is `value` simple enough to be displayed on a single line?"""
return (
value in [None, [], (), {}, set()] or
isinstance(value, (string_class, int, float))
)
# TODO: a test of ast_dump?
def ast_dump(node, depth=0):
"""Dump the AST for `node`.
This recursively walks the AST, printing a readable version.
"""
indent = " " * depth
if not isinstance(node, ast.AST):
print("{0}<{1} {2!r}>".format(indent, node.__class__.__name__, node))
return
lineno = getattr(node, "lineno", None)
if lineno is not None:
linemark = " @ {0}".format(node.lineno)
else:
linemark = ""
head = "{0}<{1}{2}".format(indent, node.__class__.__name__, linemark)
named_fields = [
(name, value)
for name, value in ast.iter_fields(node)
if name not in SKIP_DUMP_FIELDS
]
if not named_fields:
print("{0}>".format(head))
elif len(named_fields) == 1 and _is_simple_value(named_fields[0][1]):
field_name, value = named_fields[0]
print("{0} {1}: {2!r}>".format(head, field_name, value))
else:
print(head)
if 0:
print("{0}# mro: {1}".format(
indent, ", ".join(c.__name__ for c in node.__class__.__mro__[1:]),
))
next_indent = indent + " "
for field_name, value in named_fields:
prefix = "{0}{1}:".format(next_indent, field_name)
if _is_simple_value(value):
print("{0} {1!r}".format(prefix, value))
elif isinstance(value, list):
print("{0} [".format(prefix))
for n in value:
ast_dump(n, depth + 8)
print("{0}]".format(next_indent))
else:
print(prefix)
ast_dump(value, depth + 8)
print("{0}>".format(indent))
|
[
"traitanit.hua@ascendcorp.com"
] |
traitanit.hua@ascendcorp.com
|
5592fc6db35c8c402bec95f4a492be1c4bd8cc56
|
c0dfc5ccb760df1de0eec99cf9bfa4a982736096
|
/nebularpy/__init__.py
|
7f10e67d74d89431c6ac0dc5931d7896f074a358
|
[
"MIT"
] |
permissive
|
Alymantara/nebularpy
|
f5da127854491b1525c29a9bb1dd218d9d8ccdbf
|
28b8f226e1de12cfbc3703f1fe6d37732271b84c
|
refs/heads/master
| 2022-12-04T18:48:30.223716
| 2020-08-24T13:30:49
| 2020-08-24T13:30:49
| 289,933,912
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 82
|
py
|
__modules__ = ['nebularpy']
from .nebularpy import nebular
__version__ = "0.1.0"
|
[
"noreply@github.com"
] |
Alymantara.noreply@github.com
|
ae64e4a0629f0895ab9860abf49c0cb067d71b2d
|
19e236f55645852c89e4b938d76a7d50bb41e857
|
/ml/preprocessing/preprocessing.py
|
927192b4b34416f314932dfd445463cfaf19a467
|
[] |
no_license
|
rodrigo-veloso/hypothesis_testing
|
5f543dc83201ca356d26d4d15c1620394abab979
|
8aa7c96ea738bd8a297ae1b7b563aca40b5e7b5a
|
refs/heads/main
| 2023-06-20T10:35:20.386357
| 2021-07-19T14:46:21
| 2021-07-19T14:46:21
| 374,710,516
| 0
| 0
| null | 2021-07-19T14:40:10
| 2021-06-07T15:15:20
|
Python
|
UTF-8
|
Python
| false
| false
| 2,226
|
py
|
import pandas as pd
from ml.preprocessing.normalization import Normalizer
import logging
logging.getLogger().setLevel(logging.INFO)
class Preprocessing:
"""
Class to perform data preprocessing before training
"""
def __init__(self, normalizer_dic = None):
self.processes = []
        if normalizer_dic is None:
self.normalizer = None
else:
self.normalizer = Normalizer(normalizer_dic)
def clean_data(self, df: pd.DataFrame, append = True, **kwargs):
"""
Perform data cleansing.
Parameters
----------
df : pd.Dataframe
Dataframe to be processed
append : boolean
if clean_data should be added to processes
Returns
-------
pd.Dataframe
Cleaned Data Frame
"""
logging.info("Cleaning data")
if append:
self.processes.append([self.clean_data, kwargs])
return df.dropna()
def categ_encoding(self, df: pd.DataFrame, append = True, **kwargs):
"""
Perform encoding of the categorical variables
Parameters
----------
df : pd.Dataframe
Dataframe to be processed
append : boolean
if categ_encoding should be added to processes
encoder:
            encoding method; if None, pd.get_dummies is used
columns: list
list of columns to be encoded, if None all columns are encoded
Returns
-------
pd.Dataframe
Cleaned Data Frame
"""
logging.info("Category encoding")
encoder = kwargs.get('encoder')
columns = kwargs.get('columns')
        if append:
            self.processes.append([self.categ_encoding, kwargs])
        if encoder:
            # category_encoders-style interface: instantiate, then fit_transform.
            encoder = encoder(cols=columns, verbose=False)
            return encoder.fit_transform(df)
        else:
            return pd.get_dummies(df)
def apply_all(self, df):
for process in self.processes:
df = process[0](df,False,**process[1])
if self.normalizer != None:
df = self.normalizer.transform(df)
return df
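# Illustrative usage sketch (an addition; assumes only pandas is needed):
#
#     df = pd.DataFrame({"color": ["red", None, "blue"]})
#     prep = Preprocessing()
#     clean = prep.clean_data(df)           # drops the NaN row, records the step
#     encoded = prep.categ_encoding(clean)  # one-hot encodes via pd.get_dummies
#     prep.apply_all(df)                    # replays both recorded steps in order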
|
[
"rodrigoveloso1t@gmail.com"
] |
rodrigoveloso1t@gmail.com
|
979ff07cd6bd943528d457138fde2c379f63bf1f
|
8ac0c16341476758e2de8bfad724b27d661fc134
|
/appengine/db_objects.py
|
1613b618e5fa9f90e0556dd5eea3fa201421faee
|
[] |
no_license
|
mithro/chrome-buildbot-sprint
|
bc9e70d75e9f577fc309884cd023709b521bde84
|
e41171676c9b84441a3818d08380d0a7a11cd682
|
refs/heads/master
| 2020-06-04T03:01:34.831129
| 2015-02-03T12:54:22
| 2015-02-03T12:54:22
| 29,504,049
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
from google.appengine.ext import db
class TestResults(db.Model):
"""|key_name| is the test run id."""
xml_data = db.StringProperty()
timestamp = db.DateTimeProperty()
|
[
"jackhou@google.com"
] |
jackhou@google.com
|
d0b16a9682282c2fb53d031f24c8e3ae8eb6b10c
|
7546f5995ffab909ccd87da8576461bdc58951c1
|
/Scrapy/test1/test1/pipelines.py
|
9cd764550453fc875521558f33d58b5c3f953bbc
|
[] |
no_license
|
wskai1/Python_Study
|
fa6f818df3de31a37272dc013004cc982d61a6c1
|
72e668e94e3bc6b769dfc103ac62fa387d733306
|
refs/heads/master
| 2020-03-25T01:42:26.553100
| 2018-08-06T09:48:51
| 2018-08-06T09:48:51
| 143,252,364
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 707
|
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import codecs
import json
import os
class Test1Pipeline(object):
def __init__(self):
self.file = codecs.open('items.json', 'w', encoding='utf-8')
self.file.write('[')
def process_item(self, item, spider):
line = json.dumps(dict(item), ensure_ascii=False) + "\n"
self.file.write(line + ',')
return item
    def close_spider(self, spider):
        # Strip the trailing comma written by the last process_item call,
        # then terminate the JSON array.
        self.file.seek(-1, os.SEEK_END)
        self.file.truncate()
        self.file.write(']')
        self.file.close()
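# Resulting items.json (an illustration, not from the original repo): after two
# items the file reads
#     [{"title": "a"}
#     ,{"title": "b"}
#     ]
# close_spider() removes the final trailing comma before appending ']'.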
|
[
"1729253813@qq.com"
] |
1729253813@qq.com
|
a0144fa673f04897fed9dcc53b67cbd388609a2f
|
4652cea6f1aa3f29e281616ed868a07fd1865ee8
|
/logics/check_pdf.py
|
2557279eb60edda7efc6b93a009db59e8e2f7bef
|
[] |
no_license
|
foggydae/PDFHighlight
|
448db8765ea6c649cdebf5c4d3cf196b92a390a8
|
c74061addbf0bf2f6472c8381dffcce2f6ea4020
|
refs/heads/master
| 2020-04-29T03:50:38.161352
| 2019-03-15T13:37:50
| 2019-03-15T13:37:50
| 175,825,702
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 955
|
py
|
# coding: utf-8
import re
import os
import glob
import json
import xlsxwriter
import pypandoc
import subprocess
from pprint import pprint
path = '../dataset/20170627-rerun/'
path_done = '../dataset/20170627/'
# tex_set = set([])
# pdf_set = set([])
# for fileName in os.listdir(path):
# if re.match(r'.*\.tex', fileName):
# tex_set.add(fileName)
# elif re.match(r'.*\.pdf', fileName):
# pdf_set.add(fileName)
# else:
# pass
# for fileName in tex_set:
# if not (fileName.split(".")[0] + ".pdf") in pdf_set:
# os.system("mv " + path + fileName + " " + path_rerun + fileName)
tex_set = set([])
txt_set = set([])
for fileName in os.listdir(path):
if re.match(r'.*\.tex', fileName):
tex_set.add(fileName)
elif re.match(r'.*\.txt', fileName):
txt_set.add(fileName)
else:
pass
for fileName in txt_set:
if not (fileName.split(".")[0] + "_processed.tex") in tex_set:
os.system("mv " + path + fileName + " " + path_done + fileName)
|
[
"foggydew3@gmail.com"
] |
foggydew3@gmail.com
|
c5df04801d2d1943ea345747e123517bdfb99504
|
fdd9e3131ead660db9485304438993a2a249fb1f
|
/tests/test_npc/test_settings/test_tags/test_tag_spec_class/test_tag_spec_repr.py
|
d5a124a1277353e8ab982a1d48b05b2b5dbc29ea
|
[
"MIT"
] |
permissive
|
aurule/npc
|
6807aa0723e765cb33fe5f5b49b0f579a6207153
|
2e1b2e92e2a4908d791846f184ee7e4de2f6682e
|
refs/heads/develop
| 2023-09-02T02:46:47.900892
| 2023-08-30T17:31:00
| 2023-08-30T17:31:00
| 47,045,977
| 14
| 2
|
MIT
| 2023-08-18T20:49:12
| 2015-11-29T01:40:18
|
Python
|
UTF-8
|
Python
| false
| false
| 371
|
py
|
import pytest
from npc.settings import TagSpec
def test_includes_tag_name():
tag_def = {"desc": "A testing tag"}
tag = TagSpec("test", tag_def)
assert "test" in repr(tag)
def test_includes_subtag_names():
tag_def = {"desc": "A testing tag", "subtags": {"with": {"desc": "A subtag"}}}
tag = TagSpec("test", tag_def)
assert "with" in repr(tag)
|
[
"pmandrews@gmail.com"
] |
pmandrews@gmail.com
|
181272a485fdc0a93b159303fa3e28cd2e1e5b66
|
d6c596d487221b1ba9e2543f11d240748dd64af5
|
/queryChinese/migrations/0002_auto_20150503_1925.py
|
b4fe6bd95fffa3677ea04776f4547f0e928d9976
|
[] |
no_license
|
noeleon930/taigiGameDB
|
7b8fdd86167ebad5ee53030532e94e11bf91156d
|
c665f4dc3371fae73167a850dc3b7fc89c6f7b9c
|
refs/heads/master
| 2021-01-19T06:47:51.760602
| 2015-05-04T14:30:40
| 2015-05-04T14:30:40
| 34,998,416
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('queryChinese', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='ChineseWords',
new_name='ChineseWord',
),
]
|
[
"noeleon930@gmail.com"
] |
noeleon930@gmail.com
|
cb01c5cae00b84dc908fc529a6cea38d36dec0b2
|
3043a21e89b67e7f3d1e420358ab6af41fc203b4
|
/0x09-Unittests_and_integration_tests/client.py
|
7c6dd375d31d82a006c4013e1b13dd5da6be56cc
|
[] |
no_license
|
zacwoll/holbertonschool-web_back_end
|
846ece845725d702d8a6ee0a1696e6d823362fd6
|
ece925eabc1d1e22055f1b4d3f052b571e1c4400
|
refs/heads/main
| 2023-05-20T12:54:07.215103
| 2021-06-03T21:21:53
| 2021-06-03T21:21:53
| 348,202,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,530
|
py
|
#!/usr/bin/env python3
"""A github org client
"""
from typing import (
List,
Dict,
)
from utils import (
get_json,
access_nested_map,
memoize,
)
class GithubOrgClient:
"""A Githib org client
"""
ORG_URL = "https://api.github.com/orgs/{org}"
def __init__(self, org_name: str) -> None:
"""Init method of GithubOrgClient"""
self._org_name = org_name
@memoize
def org(self) -> Dict:
"""Memoize org"""
return get_json(self.ORG_URL.format(org=self._org_name))
@property
def _public_repos_url(self) -> str:
"""Public repos URL"""
return self.org["repos_url"]
@memoize
def repos_payload(self) -> Dict:
"""Memoize repos payload"""
return get_json(self._public_repos_url)
def public_repos(self, license: str = None) -> List[str]:
"""Public repos"""
json_payload = self.repos_payload
public_repos = [
repo["name"] for repo in json_payload
if license is None or self.has_license(repo, license)
]
return public_repos
@staticmethod
def has_license(repo: Dict[str, Dict], license_key: str) -> bool:
"""Static: has_license"""
assert license_key is not None, "license_key cannot be None"
try:
has_license = access_nested_map(repo, ("license", "key")) \
== license_key
except KeyError:
return False
return has_license
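# Illustrative usage sketch (an addition; relies on the companion `utils`
# module used by this exercise):
#
#     client = GithubOrgClient("google")
#     client.org                          # memoized org payload (one GET)
#     client.public_repos()               # every repo name for the org
#     client.public_repos("apache-2.0")   # only repos under that license key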
|
[
"zacwoll@gmail.com"
] |
zacwoll@gmail.com
|
0ca98942dcdc12e213d88dec4d599ebcf14faab8
|
625940efcfe2fe4962bef1266fbe926598547bff
|
/zipcodes.py
|
0bcea81ee21f0812851fbed72744e14dcb449a76
|
[] |
no_license
|
pranavr/map-tasks
|
42bd5aa68dabce06caec36e8736f0e4d55633f3b
|
f770d1ee35c40460bafa9ec442273f6411dee6f1
|
refs/heads/master
| 2020-05-18T18:22:20.858880
| 2014-08-15T16:32:09
| 2014-08-15T16:32:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,364
|
py
|
import json
import geojson
def processNYregions():
basePath = '/Users/pranavramkrishnan/Desktop/Research/maps/static/data/zipcodes/'
filename = basePath + 'TEST_data.json'
jsonData = json.load(open(filename, 'r'))
zipcode_initial = {}
ny_geojson = geojson.load(open(basePath+'nyzipcodes.json','r'))
ny_regions = ny_geojson['features']
for zipcode in jsonData:
numCompanies = len(jsonData[zipcode])
#inefficient but works
for region in ny_regions:
if str(region['properties']['ZCTA']) == str(zipcode):
region['properties']['numCompanies'] = numCompanies
	featureCollection = geojson.FeatureCollection(ny_regions)
	geojson_data = geojson.dumps(featureCollection)
output_file = open(basePath + 'ny_zip_regions.geojson','w')
output_file.write(geojson_data)
def processCountries():
basePath = '/Users/pranavramkrishnan/Desktop/Research/maps/static/data/zipcodes/'
filename = basePath + 'manhattan_byzip.json'
jsonData = json.load(open(filename, 'r'))
# world_geojson = geojson.load(open(basePath+'world.geojson','r'))
# country_list =[]
# for country in world_geojson["features"]:
# name = country["properties"]["name"]
# country_list.append(name)
final_json = {}
	for zipcode in jsonData:
		jurisdiction_initial = {}
		print(zipcode)
		#jurisdiction = jsonData[zipcode]['jurisdiction']
		# Tally each record's jurisdiction once per zipcode.
		for z in jsonData[zipcode]:
			jurisdiction = str(z['jurisdiction'])
			if jurisdiction in jurisdiction_initial:
				jurisdiction_initial[jurisdiction] += 1
			else:
				jurisdiction_initial[jurisdiction] = 1
		final_json[str(zipcode)] = jurisdiction_initial
# featureColleciton = geojson.FeatureCollection(ny_regions)
# geojson_data = geojson.dumps(featureColleciton)
json_data = json.dumps(final_json)
output_file = open(basePath + 'zip_to_country.json','w')
output_file.write(json_data)
def combineGeoJSONS():
basePath = '/Users/pranavramkrishnan/Desktop/Research/maps/static/data/zipcodes/'
world_geojson = geojson.load(open(basePath+'world.geojson','r'))
countries = world_geojson["features"]
filtered= []
for country in countries:
if str(country["properties"]["name"]) != "United States":
filtered.append(country)
us_states_geojson = geojson.load(open(basePath+'us_states.geojson','r'))
states = us_states_geojson["features"]
for state in states:
state["properties"]["name"] = state['properties']['NAME']
filtered.append(state)
	featureCollection = geojson.FeatureCollection(filtered)
	geojson_data = geojson.dumps(featureCollection)
output_file = open(basePath + 'world_filtered.geojson','w')
output_file.write(geojson_data)
def zipCountryCounts():
basePath = '/Users/pranavramkrishnan/Desktop/Research/maps/static/data/zipcodes/'
data = open(basePath+'zip_jur_count.csv','r').readlines()
jsonValues = {}
maxValues = {}
for val in data:
val = val.split(',')
[zipcode, region, count] = [str(val[0]), str(val[1]), int(val[2])]
region = region.replace(' ', '_')
if zipcode in jsonValues.keys():
jsonValues[zipcode]['values'][region]=count
maxValues[zipcode].append(count)
jsonValues[zipcode]['regions'].append(region)
else:
jsonValues[zipcode] = {'values': {region:count}, 'regions':[region]}
maxValues[zipcode] = [count]
for zipcode in maxValues.keys():
jsonValues[zipcode]['max'] = max(maxValues[zipcode])
json_data = json.dumps(jsonValues)
output_file = open(basePath + 'zip_region_counts.json','w')
output_file.write(json_data)
def region_to_zip():
basePath = '/Users/pranavramkrishnan/Desktop/Research/maps/static/data/zipcodes/'
data = open(basePath+'zip_jur_count.csv','r').readlines()
jsonValues = {}
maxValues = {}
for val in data:
val = val.split(',')
[zipcode, region, count] = [str(val[0]), str(val[1]), int(val[2])]
region = region.replace(' ', '_')
if region in jsonValues.keys():
jsonValues[region]['values'][zipcode]=count
maxValues[region].append(count)
jsonValues[region]['zipcodes'].append(zipcode)
else:
jsonValues[region] = {'values': {zipcode:count}, 'zipcodes':[zipcode]}
maxValues[region] = [count]
for region in maxValues.keys():
jsonValues[region]['max'] = max(maxValues[region])
json_data = json.dumps(jsonValues)
output_file = open(basePath + 'region_zip_counts_updated.json','w')
output_file.write(json_data)
zipCountryCounts()
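# Illustrative shape (an addition): a zip_jur_count.csv row "10001,New York,7"
# becomes, in zip_region_counts.json,
#     {"10001": {"values": {"New_York": 7}, "regions": ["New_York"], "max": 7}}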
|
[
"pranavr@mit.edu"
] |
pranavr@mit.edu
|
f800559aa836755668ce25da6565da4cb733d81c
|
66d590651f7faf2f45f7268f24d9803831f2db4c
|
/sejavoluntario/apps/users/migrations/0009_auto__add_field_beneficiario_description.py
|
ea04f8161955aeecba5332027f7abfcb014b6a37
|
[] |
no_license
|
gustavofarias/sejavoluntario
|
0a7efe82d187ebd836db568c5983b14eb3536e0a
|
8002d1ce807782dcf9a8c07291d3ec46c531c8d3
|
refs/heads/master
| 2021-01-23T02:29:35.643764
| 2013-06-26T20:54:04
| 2013-06-26T20:54:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,382
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Beneficiario.description'
db.add_column('users_beneficiario', 'description',
self.gf('django.db.models.fields.CharField')(default='descri\xc3\xa7\xc3\xa3o teste', max_length=500),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Beneficiario.description'
db.delete_column('users_beneficiario', 'description')
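    # For reference (an added note, not generated by South): the column above
    # corresponds to a model field roughly like
    #     description = models.CharField(max_length=500)
    # on Beneficiario, with the literal default supplied only for the migration.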
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'users.area': {
'Meta': {'object_name': 'Area'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'users.banco': {
'Meta': {'object_name': 'Banco'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nome': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'users.beneficiario': {
'Meta': {'object_name': 'Beneficiario', '_ormbases': ['users.UserProfile']},
'banco': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['users.DadosBancarios']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'site': ('django.db.models.fields.URLField', [], {'default': 'None', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'userprofile_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['users.UserProfile']", 'unique': 'True', 'primary_key': 'True'})
},
'users.cidade': {
'Meta': {'object_name': 'Cidade'},
'estado': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.Estado']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nome': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'users.dadosbancarios': {
'Meta': {'object_name': 'DadosBancarios'},
'agencia': ('django.db.models.fields.IntegerField', [], {}),
'banco': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.Banco']"}),
'conta': ('django.db.models.fields.IntegerField', [], {}),
'favorecido': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'users.endereco': {
'Meta': {'object_name': 'Endereco'},
'cep': ('django.db.models.fields.IntegerField', [], {'max_length': '6'}),
'cidade': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['users.Cidade']", 'null': 'True', 'blank': 'True'}),
'complemento': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'estado': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['users.Estado']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logradouro': ('django.db.models.fields.TextField', [], {'max_length': '255'}),
'numero': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'pais': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['users.Pais']", 'null': 'True', 'blank': 'True'})
},
'users.estado': {
'Meta': {'object_name': 'Estado'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nome': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.Pais']"}),
'sigla': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'users.pais': {
'Meta': {'object_name': 'Pais'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nome': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'users.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'areas': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['users.Area']", 'symmetrical': 'False'}),
'celphone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'document': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'endereco': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['users.Endereco']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'photo': ('django.db.models.fields.files.FileField', [], {'default': 'None', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'users.voluntario': {
'Meta': {'object_name': 'Voluntario', '_ormbases': ['users.UserProfile']},
'is_volunteer': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_volunteering_at': ('django.db.models.fields.related.ForeignKey', [], {'default': 'False', 'to': "orm['users.Beneficiario']", 'null': 'True', 'blank': 'True'}),
'nascimento': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sexo': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'userprofile_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['users.UserProfile']", 'unique': 'True', 'primary_key': 'True'})
}
}
complete_apps = ['users']
|
[
"bug.ufrj@gmail.com"
] |
bug.ufrj@gmail.com
|
408729b8d20955feb61ea2cb9afeb17aa55d5d6b
|
487c8a48b3868b66c0ae7c714436855c87968eaa
|
/node_modules/dtrace-provider/build/config.gypi
|
4fd117e2ec28ac0050d8c77f9a8eeaef96bafb8d
|
[
"BSD-2-Clause",
"MIT"
] |
permissive
|
nguyenndm1901/Spotify_app
|
f1cbc59b5a4de9b6a48fbae04bbd8d466b6a434c
|
27dc3e1aaa744750ee9fc8df587a2427249b48f4
|
refs/heads/main
| 2023-02-13T16:58:59.756562
| 2021-01-03T14:27:59
| 2021-01-03T14:27:59
| 307,390,714
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,765
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"error_on_warn": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-tmp/icudt67l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_path": "deps/icu-small",
"icu_small": "false",
"icu_ver_major": "67",
"is_debug": 0,
"llvm_version": "11.0",
"napi_build_version": "7",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 83,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"ossfuzz": "false",
"shlib_suffix": "83.dylib",
"target_arch": "x64",
"v8_enable_31bit_smis_on_64bit_arch": 0,
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_enable_object_print": 1,
"v8_enable_pointer_compression": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"want_separate_host_toolset": 0,
"xcode_version": "11.0",
"nodedir": "/Users/nguyenducminhnguyen/Library/Caches/node-gyp/14.15.3",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/zsh",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/nguyenducminhnguyen/.npm-init.js",
"userconfig": "/Users/nguyenducminhnguyen/.npmrc",
"cidr": "",
"node_version": "14.15.3",
"user": "",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"before": "",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/nguyenducminhnguyen/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"fund": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.14.9 node/v14.15.3 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/vn/7rrzx5d16pqf8nq1jxjlwnm00000gn/T",
"unsafe_perm": "true",
"format_package_lock": "true",
"link": "",
"prefix": "/usr/local"
}
}
|
[
"43044839+nguyenndm1901@users.noreply.github.com"
] |
43044839+nguyenndm1901@users.noreply.github.com
|
f7a8a74c867b86bd82d1fe8a4bb8a8e6fbe22882
|
0bebe0a05537a060a7f107f21b5a2adc67475d04
|
/mac_virtual_camera/cyclegan.py
|
54499618397b32888e5b5470521e48b9011ccab5
|
[] |
no_license
|
christopher-hesse/mac-virtual-camera
|
c9b4aa41658f33686823c47e2ad480681a27f158
|
9762a260d45b80dedecd7e3839b7e228f8ef8a6f
|
refs/heads/master
| 2022-11-13T15:03:28.229079
| 2020-07-06T00:37:47
| 2020-07-06T00:38:03
| 277,175,745
| 11
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,800
|
py
|
# from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
import argparse
import functools
import cv2
import torch
import torch.nn as nn
IMAGE_SIZE = 256
class ResnetGenerator(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
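# A tiny shape-check sketch (an assumption, not part of the original file):
# the generator is fully convolutional, so spatial sizes divisible by 4
# survive the two stride-2 down/up-sampling stages unchanged, e.g.
#   g = ResnetGenerator(input_nc=3, output_nc=3, n_blocks=9)
#   assert g(torch.zeros(1, 3, 256, 256)).shape == (1, 3, 256, 256)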
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
return x + self.conv_block(x) # add skip connections
def __patch_instance_norm_state_dict(state_dict, module, keys, i=0):
"""Fix InstanceNorm checkpoints incompatibility (prior to 0.4)"""
key = keys[i]
if i + 1 == len(keys): # at the end, pointing to a parameter/buffer
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'running_mean' or key == 'running_var'):
if getattr(module, key) is None:
state_dict.pop('.'.join(keys))
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'num_batches_tracked'):
state_dict.pop('.'.join(keys))
else:
__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
def load_checkpoint(checkpoint_path, device):
    norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
net = ResnetGenerator(input_nc=3,output_nc=3, ngf=64, norm_layer=norm_layer, use_dropout=False, n_blocks=9)
net.to(device)
state_dict = torch.load(checkpoint_path, map_location=device)
for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop
__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
net.load_state_dict(state_dict)
return net
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--input-path", required=True)
parser.add_argument("--checkpoint-path", required=True)
args = parser.parse_args()
img = cv2.imread(args.input_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (IMAGE_SIZE, IMAGE_SIZE), interpolation=cv2.INTER_LINEAR)
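    # scale uint8 [0, 255] to float [-1, 1] so the input matches the generator's
    # Tanh output range, then add a batch dimension and reorder HWC -> NCHW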
input_hwc = torch.from_numpy(img).to(dtype=torch.float32) * 2 / 255 - 1
input_nhwc = input_hwc.unsqueeze(dim=0)
input_nchw = input_nhwc.permute(0, 3, 1, 2)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = load_checkpoint(args.checkpoint_path, device)
with torch.no_grad():
        output_nchw = net(input_nchw.to(device))
    # rescale the Tanh output [-1, 1] back to [0, 255] and reorder NCHW -> NHWC
    output_nhwc = ((output_nchw + 1) / 2 * 255).to(dtype=torch.uint8).permute(0, 2, 3, 1)
    output_hwc = output_nhwc[0]
    # the network works in RGB; convert back to BGR for cv2.imwrite
    output_img = cv2.cvtColor(output_hwc.cpu().numpy(), cv2.COLOR_RGB2BGR)
cv2.imwrite("out.png", output_img)
if __name__ == '__main__':
main()
|
[
"48501609+cshesse@users.noreply.github.com"
] |
48501609+cshesse@users.noreply.github.com
|
e1622255e97690979ddfaddd311178e9efd352fa
|
ff2d555425c8e32f15570d3b4b404a60f80c06d3
|
/scripts/manage_docker.py
|
d1354353bc5ce145d96dba30a834acc0e107e2cf
|
[] |
no_license
|
theydonthaveit/neo4j-admin-panel
|
61c34976a3d781af155f6fd11f184124bc2f8af7
|
637d6864a7413f126e97502f9d7036d476d16467
|
refs/heads/master
| 2020-03-21T07:26:19.478759
| 2018-06-28T15:18:02
| 2018-06-28T15:18:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 637
|
py
|
import docker
CLIENT = docker.from_env()
# these KEY=value strings are environment variables for the neo4j image, so they
# are passed via `environment` rather than `command`; `detach=True` keeps the
# call from blocking until the server container exits
CLIENT.containers.run('neo4j:3.0',
                      environment=['NEO4J_dbms_connector_bolt_listen__address=:7688',
                                   'NEO4J_dbms_connector_bolt_advertised__address=:7688'],
                      ports={'7475': '7474', '7688': '7687'},
                      volumes={
                          '/home/neo4j/logs/template': {'bind': '/logs'},
                          '/home/neo4j/data/template': {'bind': '/data'}
                      },
                      detach=True)
containers = CLIENT.containers.list()
for container in containers:
    # print each container's IP instead of silently discarding the lookup
    print(container.attrs['NetworkSettings'].get('IPAddress'))
|
[
"alan@trykiroku.com"
] |
alan@trykiroku.com
|
51d40a2f746f1ba2d1a272ca82f3ddfc7825e94d
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02621/s208244624.py
|
9b225e7a79b85cb100f6ca7376b02b761256ec12
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 37
|
py
|
a = int(input())
print(a*(1+a*(1+a)))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
82a3d385936b474181f00928cdd99b6cb6334b49
|
5649a06053ea9835e84e74805d8f6228399d88ec
|
/renren/pipelines.py
|
c99b965b153f7a7e032fa67141b23cee37cfe648
|
[
"MIT"
] |
permissive
|
tengbozhang/renren
|
cd5298aa6123241903d49d5b3c729d9f9edb900a
|
4218b9306f81144ca1f8dddb71eebef511a19be8
|
refs/heads/master
| 2020-04-05T06:42:54.699139
| 2018-11-21T02:01:31
| 2018-11-21T02:01:31
| 156,647,657
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 802
|
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
class RenrenPipeline(object):
def __init__(self):
        self.filet = open('movie.html', 'w', encoding="utf8")
def close_spider(self, spider):
self.filet.close()
def process_item(self, item, spider):
for i in range(len(item["movie_name"])):
self.filet.write(item["movie_name"][i]+"\n\t\n"\
+item["movie_link"][i]+"\n\n\t\n\n")
# self.filet.write(item["movie_name"][i]+"\n\t\n123\n\t\n123\n\t\n"\
# +item["movie_link"][i]+"\n\t\n"+"omjj"+"\n\t\n" + 'dzp'+"\n\n\t\n\n")
return item
|
[
"tengbozhang@gmail.com"
] |
tengbozhang@gmail.com
|
a1394f88c68305e26ce9afb48bbe90f6a4f2a0b9
|
025f92558f8bbe06cbeaeeebe28cec47a7c2621e
|
/high_performance_python_2e/figures/hll_single_reg.py
|
23820e05c633872221f917014c290f15bc11917d
|
[] |
no_license
|
cataluna84/hpp-book
|
1df8cf340398ed3976d9d28fd7ff8db839b080d4
|
2053520824896c6d79e0655f9c5940cbd1ec51f8
|
refs/heads/main
| 2023-03-15T00:51:45.245591
| 2021-03-30T07:04:43
| 2021-03-30T07:04:43
| 352,901,887
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,193
|
py
|
#!/usr/bin/env python
import countmemaybe
import numpy as np
import pylab as py
def leading_set_bit(number):
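    # despite its name, this returns the 1-based position of the *lowest* set
    # bit (rfind locates the last '1' in the binary string); for uniform hash
    # values this follows the same geometric distribution HLL relies on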
number_binary = bin(number)
return len(number_binary) - number_binary.rfind("1")
class HLL(object):
max_index = 0
def add(self, number):
index = leading_set_bit(number)
self.max_index = max(self.max_index, index)
def __len__(self):
return 2 ** self.max_index
if __name__ == "__main__":
data_list = []
h1 = HLL()
h = countmemaybe.HyperLogLog()
for i in range(100000):
item = "seee%seeeed234rsdaf" % i
x = h._hash(item)
h1.add(x)
h.add(x)
data_list.append((i + 1, len(h1), len(h)))
data_numpy = np.asarray(data_list)
py.plot(data_numpy[:, 0], data_numpy[:, 1], ":", label="Single HLL Register")
py.plot(data_numpy[:, 0], data_numpy[:, 2], "--", label="HLL with 16 registers")
py.plot(data_numpy[:, 0], data_numpy[:, 0], label="Actual Size")
py.legend(loc="upper left")
py.title("Performance of a single HLL Register")
py.xlabel("Size of the set")
py.ylabel("Predicted size of the set")
# py.show()
py.savefig("../hll_single_reg.png")
|
[
"mayankbhaskar007@gmail.com"
] |
mayankbhaskar007@gmail.com
|
8717b2e18dfd31bbca18c81aec109b4ae0ddfda3
|
fb9fefb573d51e7f263ffbd0db07155996264665
|
/9.py
|
cae41fda0a83b1c95320b146f111139aa4a4dfa8
|
[] |
no_license
|
alan7816/1206
|
7a890b957206a63d660f95130d3bc8e06751d7d4
|
3e5ef105a06b6b5f2dcebdea494a972a656c2a14
|
refs/heads/master
| 2020-04-09T21:26:16.875592
| 2018-12-19T09:15:18
| 2018-12-19T09:15:18
| 160,602,245
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,054
|
py
|
import os
# create the test directory
os.mkdir('test')
# change into the test directory
os.chdir('test')
# create a file named qytang1
qytang1 = open('qytang1', 'w')
# write "test file" into qytang1
qytang1.write('test file\n')
# write "this is qytang" into qytang1
qytang1.write('this is qytang\n')
# close the qytang1 file
qytang1.close()
qytang2 = open('qytang2', 'w')
qytang2.write('test file\n')
qytang2.write('qytang python\n')
qytang2.close()
qytang3 = open('qytang3', 'w')
qytang3.write('test file\n')
qytang3.write('this is python\n')
qytang3.close()
# create the qytang4 directory
os.mkdir('qytang4')
os.mkdir('qytang5')
qytang_file_list = []
# list every entry in the current directory
file_list = os.listdir()
for file in file_list:
    # check which entries are regular files
    if os.path.isfile(file):
        # read the file contents line by line
        for file_line in open(file):
            if 'qytang' in file_line:
                qytang_file_list.append(file)
                break  # one match is enough; avoids duplicate list entries
print('Files whose contents contain the keyword "qytang":')
for file in qytang_file_list:
    print('\t', end='')
    print(file)
print('\n')
|
[
"alan7816@gmail.com"
] |
alan7816@gmail.com
|
3343a8f4eb52d4e0bc68ead2311a3a6bb57112ff
|
3073d7f912b6bd8d28e56a57d3cb9d7d8f2020cf
|
/Leetcode--Python-master/Directory2/majorityElement.py
|
ae3dc21c09a988665e39010ddc34be35d6868b50
|
[] |
no_license
|
sanaydevi/leetCodeSolutions
|
0ffbbcc7bf0b6c1e6b0de94148a8851481d07963
|
c9c0d4dbeb583eaf8ec7899310bb4665ec5035d0
|
refs/heads/master
| 2020-04-29T00:58:09.596042
| 2019-04-19T02:18:42
| 2019-04-19T02:18:42
| 175,714,148
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 859
|
py
|
'''
Given an array of size n, find the majority element. The majority element is the element that appears more than ⌊ n/2 ⌋ times. You may assume that the array is non-empty and the majority element always exists in the array.
Example 1:
Input: [3,2,3]
Output: 3
Example 2:
Input: [2,2,1,1,1,2,2]
Output: 2
'''
class Solution(object):
def majorityElement(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
num_len = len(nums)
threshold = num_len // 2
# for very large inputs count only unique elements.
nums_unique = list(set(nums))
for i in nums_unique:
if nums.count(i) > threshold:
return i
if __name__ == "__main__":
obj = Solution()
nums = [3, 2, 3]
print(obj.majorityElement(nums))
'''
test case:
[2,2,1,1,1,2,2]
[3,2,3]
'''
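# A hedged alternative sketch (not the original solution above): the
# Boyer-Moore majority vote finds the answer in O(n) time and O(1) space,
# relying on the problem's guarantee that a majority element exists.
def majority_element_boyer_moore(nums):
    candidate, count = None, 0
    for n in nums:
        if count == 0:
            candidate = n  # adopt a new candidate when votes cancel out
        count += 1 if n == candidate else -1
    return candidate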
|
[
"sanaydevi@gmail.com"
] |
sanaydevi@gmail.com
|
8df2d8c01df005e689e69136f8335f9ca2f9df6f
|
4a0c047f73458d089dc62bc2be7c3bd098a08ee2
|
/data_structor/time_series.py
|
906cb145585b282406f42320c92345442e3babef
|
[] |
no_license
|
sunghyungi/pandas_study
|
b53e53d88abe733b292c06e2658e2fa21428ffca
|
b861724995914a4a4644c8b08b3b38070d5abc51
|
refs/heads/master
| 2020-11-28T02:05:22.565760
| 2020-01-08T03:09:37
| 2020-01-08T03:09:37
| 229,675,769
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 581
|
py
|
import pandas as pd
df = pd.read_csv('stock-data.csv')
print(df, '\n')
df.info()  # df.info() prints its report itself and returns None, so don't wrap it in print()
print()
df['new_Date'] = pd.to_datetime(df['Date'])
print("# Convert the string data (a Series object) to pandas Timestamps and check the data contents and dtypes")
print(df, '\n')
df.info()
print(type(df['new_Date'][0]), '\n')
print()
df = df.set_index('new_Date')
df = df.drop('Date', axis=1)
print("# Set the column converted to time-series values as the new row index, drop the original date column, and check the data contents and dtypes")
print(df, '\n')
df.info()
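# A minimal follow-on sketch (hypothetical dates, assuming the DatetimeIndex
# set above): a time-series index enables date-based selection, e.g.
# print(df.loc['2018-06'])                  # every row from June 2018
# print(df.loc['2018-06-25':'2018-06-29'])  # an inclusive date-range slice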
|
[
"tjdgusrlek@gmail.com"
] |
tjdgusrlek@gmail.com
|
a5b1aeae9be59ff7de6ac8cd37fdce13f064e7a1
|
afbae26b958b5ef20548402a65002dcc8e55b66a
|
/release/stubs.min/Autodesk/Revit/DB/__init___parts/DataExchangeMessageSeverity.py
|
6de342709fc4f481fa4443648c2bb90bb6b9eecf
|
[
"MIT"
] |
permissive
|
gtalarico/ironpython-stubs
|
d875cb8932c7644f807dc6fde9dd513d159e4f5c
|
c7f6a6cb197e3949e40a4880a0b2a44e72d0a940
|
refs/heads/master
| 2023-07-12T01:43:47.295560
| 2022-05-23T18:12:06
| 2022-05-23T18:12:06
| 95,340,553
| 235
| 88
|
NOASSERTION
| 2023-07-05T06:36:28
| 2017-06-25T05:30:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,011
|
py
|
class DataExchangeMessageSeverity(Enum,IComparable,IFormattable,IConvertible):
"""
Error levels for DataExchangeLog
enum DataExchangeMessageSeverity,values: Error (2),FatalError (3),Info (0),Warning (1)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Error=None
FatalError=None
Info=None
value__=None
Warning=None
|
[
"gtalarico@gmail.com"
] |
gtalarico@gmail.com
|
d768a00891dd24f46827533205ca11971033d5b5
|
237cc38de0cf7a6e3661ed552ae771bd972d7438
|
/utils/test_trans.py
|
8f70192757c490fd0ed4f38758fe0316c82cf8b1
|
[] |
no_license
|
chydream/python
|
af5ad8a98c78de71e255f7b776f936c4b89c616e
|
e5bfef53a7770d4f323bd2877f93c8166c563695
|
refs/heads/master
| 2020-05-07T17:00:33.558178
| 2020-05-05T13:45:19
| 2020-05-05T13:45:19
| 180,708,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
from datetime import datetime
from utils.trans.tools import gen_trans_id
from utils.work.tools import get_file_type
def test_trans_tool():
id1 = gen_trans_id()
print(id1)
date = datetime(2015, 10, 2, 12, 30, 45)
id2 = gen_trans_id(date)
print(id2)
def test_work_tool():
rest = get_file_type('G:\\python\\python\\demo.py')
print(rest)
if __name__ == '__main__':
test_trans_tool()
|
[
"yong.chen@doone.com.cn"
] |
yong.chen@doone.com.cn
|
df894dc5d1405344d71709b8a594ce781f643515
|
12ce6095072be27e330a5e968db7b52c12f1a6a9
|
/Santhosh/SecondApp/views.py
|
d106aa00ccdb091d27ab6874518f5a7bac46f8c3
|
[] |
no_license
|
pythonlabexp/Django
|
79827bfb79c2b724c5a8869dde3de213d45eba43
|
6080db48f47ad0eea6a6b18fa9b982da2ed87248
|
refs/heads/master
| 2020-04-15T11:03:06.689973
| 2019-01-12T06:09:19
| 2019-01-12T06:09:19
| 164,611,169
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
from django.shortcuts import render
# Create your views here.
def home(request):
return render(request,'SecondApp/index.html')
|
[
"noreply@github.com"
] |
pythonlabexp.noreply@github.com
|
8247af0d1e8661c1113f5f7ae7a6d7be8953ddee
|
194003429076a85e3cdbf89ed3c28a538ead22e9
|
/csv_processor.py
|
8f7b87ee4f63059069e5bbda55358d25d6caded1
|
[] |
no_license
|
GustawOhler/FootballBetsPredictor
|
17a825ee5b07e127359ad5e34102ad7dce49f268
|
885649f7f644bb73d0f2010397685cd1dbb82788
|
refs/heads/master
| 2023-08-23T13:30:32.484975
| 2021-09-28T23:01:22
| 2021-09-28T23:01:22
| 296,716,475
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,189
|
py
|
from enum import Enum
from typing import List, Tuple
from collections import Counter
import numpy as np
import pandas
import pandas as pd
from datetime import datetime, timedelta
from peewee import fn
from models import Season, Team, Match, League, TeamSeason, Table, TableTeam, MatchResult
from database_helper import db
import traceback
class TieBreakerType(Enum):
ONLY_GOALS = 1
H2H_THEN_GOALS = 2
GOALS_THEN_H2H = 3
class H2HTableRecord:
def __init__(self, points, goal_diff):
self.points = points
self.goal_diff = goal_diff
def add(self, points, goal_diff):
self.points = self.points + points
self.goal_diff = self.goal_diff + goal_diff
tie_breaker_dict = {
'Premier League': TieBreakerType.ONLY_GOALS,
'Ligue 1': TieBreakerType.ONLY_GOALS,
'Ligue 2': TieBreakerType.ONLY_GOALS,
'La Liga': TieBreakerType.H2H_THEN_GOALS,
'Segunda Division': TieBreakerType.H2H_THEN_GOALS,
'Serie A': TieBreakerType.H2H_THEN_GOALS,
'Serie B': TieBreakerType.H2H_THEN_GOALS,
'Eredivisie': TieBreakerType.H2H_THEN_GOALS,
'Primeira Liga': TieBreakerType.H2H_THEN_GOALS,
'Bundesliga': TieBreakerType.GOALS_THEN_H2H,
'Championship': TieBreakerType.GOALS_THEN_H2H,
'2. Bundesliga': TieBreakerType.GOALS_THEN_H2H,
'Super Lig': TieBreakerType.H2H_THEN_GOALS,
'Superleague Ellada': TieBreakerType.H2H_THEN_GOALS,
'Jupiler League': TieBreakerType.H2H_THEN_GOALS
}
league_name_dict = {
"E0": lambda: League.get_or_create(league_name='Premier League', defaults={'country': 'EN', 'division': 1}),
"E1": lambda: League.get_or_create(league_name='Championship', defaults={'country': 'EN', 'division': 2}),
"D1": lambda: League.get_or_create(league_name='Bundesliga', defaults={'country': 'DE', 'division': 1}),
"D2": lambda: League.get_or_create(league_name='2. Bundesliga', defaults={'country': 'DE', 'division': 2}),
"F1": lambda: League.get_or_create(league_name='Ligue 1', defaults={'country': 'FR', 'division': 1}),
"F2": lambda: League.get_or_create(league_name='Ligue 2', defaults={'country': 'FR', 'division': 2}),
"SP1": lambda: League.get_or_create(league_name='La Liga', defaults={'country': 'ES', 'division': 1}),
"SP2": lambda: League.get_or_create(league_name='Segunda Division', defaults={'country': 'ES', 'division': 2}),
"I1": lambda: League.get_or_create(league_name='Serie A', defaults={'country': 'IT', 'division': 1}),
"I2": lambda: League.get_or_create(league_name='Serie B', defaults={'country': 'IT', 'division': 2}),
"N1": lambda: League.get_or_create(league_name='Eredivisie', defaults={'country': 'NL', 'division': 1}),
"B1": lambda: League.get_or_create(league_name='Jupiler League', defaults={'country': 'BE', 'division': 1}),
"P1": lambda: League.get_or_create(league_name='Primeira Liga', defaults={'country': 'PT', 'division': 1}),
"G1": lambda: League.get_or_create(league_name='Superleague Ellada', defaults={'country': 'GR', 'division': 1}),
"T1": lambda: League.get_or_create(league_name='Super Lig', defaults={'country': 'TR', 'division': 1})
}
def add_stats_to_h2h_records(home_team, away_team, home_points, away_points, home_goals, away_goals, h2h_table):
if home_team in h2h_table:
h2h_table[home_team].add(home_points, home_goals - away_goals)
else:
h2h_table[home_team] = H2HTableRecord(home_points, home_goals - away_goals)
if away_team in h2h_table:
h2h_table[away_team].add(away_points, away_goals - home_goals)
else:
h2h_table[away_team] = H2HTableRecord(away_points, away_goals - home_goals)
def process_match_to_head_to_head_table(match: Match, head_to_head_table):
if match.full_time_result == MatchResult.HOME_WIN:
add_stats_to_h2h_records(match.home_team, match.away_team, 3, 0, match.full_time_home_goals,
match.full_time_away_goals, head_to_head_table)
elif match.full_time_result == MatchResult.DRAW:
add_stats_to_h2h_records(match.home_team, match.away_team, 1, 1, match.full_time_home_goals,
match.full_time_away_goals, head_to_head_table)
elif match.full_time_result == MatchResult.AWAY_WIN:
add_stats_to_h2h_records(match.home_team, match.away_team, 0, 3, match.full_time_home_goals,
match.full_time_away_goals, head_to_head_table)
def accurate_sort_by_league(teams_with_same_points: List[TableTeam], season: Season, date, league_of_table: League):
if tie_breaker_dict[league_of_table.league_name] == TieBreakerType.ONLY_GOALS:
return sorted(teams_with_same_points, key=lambda x: (x.goal_difference, x.goals_scored), reverse=True)
else:
tied_teams = [team.team for team in teams_with_same_points]
matches_between_tied_teams = Match.select().where((Match.season == season) & (Match.date < date)
& (Match.home_team << tied_teams) & (Match.away_team << tied_teams))
if len(matches_between_tied_teams) == len(tied_teams) * (len(tied_teams) - 1):
head_to_head_table = {}
for match in matches_between_tied_teams:
process_match_to_head_to_head_table(match, head_to_head_table)
tuple_to_sort = ((team, head_to_head_table[team.team]) for team in teams_with_same_points)
if tie_breaker_dict[league_of_table.league_name] == TieBreakerType.H2H_THEN_GOALS:
sorted_tuples = sorted(tuple_to_sort, key=lambda x: (x[1].points, x[1].goal_diff, x[0].goal_difference,
x[0].goals_scored), reverse=True)
elif tie_breaker_dict[league_of_table.league_name] == TieBreakerType.GOALS_THEN_H2H:
sorted_tuples = sorted(tuple_to_sort, key=lambda x: (x[0].goal_difference, x[0].goals_scored, x[1].points),
reverse=True)
return [single_tuple[0] for single_tuple in sorted_tuples]
else:
return sorted(teams_with_same_points, key=lambda x: (x.goal_difference, x.goals_scored), reverse=True)
def find_first_and_last_index(teams_in_table: List[TableTeam], searched_points_value):
first_index = -1
last_index = -1
for index, item in enumerate(teams_in_table):
if item.points == searched_points_value:
if first_index == -1:
first_index = index
elif last_index < index:
last_index = index
return first_index, last_index
def sort_teams_in_table(teams_in_table: List[TableTeam], season: Season, date, league_of_table: League):
sorted_teams = teams_in_table
if any(team.matches_played > 0 for team in teams_in_table):
sorted_teams = sorted(teams_in_table, key=lambda x: x.points, reverse=True)
same_points_count = Counter(getattr(item, 'points') for item in sorted_teams)
for item in same_points_count:
if same_points_count[item] > 1:
teams_to_accurate_sorting = [team for team in sorted_teams if team.points == item]
teams_after_acc_sort = accurate_sort_by_league(teams_to_accurate_sorting, season, date, league_of_table)
indexes = find_first_and_last_index(sorted_teams, item)
sorted_teams[indexes[0]: indexes[1] + 1] = teams_after_acc_sort
return sorted_teams
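# Note on the helper above: sorting is two-phase, first a plain descending sort
# by points, then each group of point-tied teams is re-ordered in place using
# the league-specific tie-breaker selected via tie_breaker_dict.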
def table_creation(season, date, league):
db_table = Table.create(season=season, date=date)
teams_in_season = Team.select().join(TeamSeason).where(TeamSeason.season == season)
table_teams = []
for team_in_season in teams_in_season:
team_in_table = TableTeam(team=team_in_season, table=db_table, points=0, loses=0, draws=0, wins=0,
goals_scored=0, goals_conceded=0, matches_played=0, goal_difference=0)
table_teams.append(team_in_table)
matches_this_season = Match.select().where((Match.season == season) & (Match.date < date))
all_team_matches = matches_this_season.where((Match.home_team == team_in_season) | (Match.away_team == team_in_season))
team_in_table.matches_played = all_team_matches.count()
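        # per-venue goal sums: each query below yields a (full_time_home_goals,
        # full_time_away_goals) tuple summed over this team's home matches and
        # away matches respectively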
home_team_goals = Match.select(fn.Sum(Match.full_time_home_goals), fn.Sum(Match.full_time_away_goals)) \
.where((Match.season == season) & (Match.date < date) & (Match.home_team == team_in_season)).scalar(as_tuple=True)
away_team_goals = Match.select(fn.Sum(Match.full_time_home_goals), fn.Sum(Match.full_time_away_goals)) \
.where((Match.season == season) & (Match.date < date) & (Match.away_team == team_in_season)).scalar(as_tuple=True)
wins = matches_this_season.where(((Match.home_team == team_in_season) & (Match.full_time_result == MatchResult.HOME_WIN))
| ((Match.away_team == team_in_season) & (Match.full_time_result == MatchResult.AWAY_WIN))).count()
loses = matches_this_season.where(((Match.home_team == team_in_season) & (Match.full_time_result == MatchResult.AWAY_WIN))
| ((Match.away_team == team_in_season) & (Match.full_time_result == MatchResult.HOME_WIN))).count()
draws = matches_this_season.where(((Match.home_team == team_in_season) & (Match.full_time_result == MatchResult.DRAW))
| ((Match.away_team == team_in_season) & (Match.full_time_result == MatchResult.DRAW))).count()
team_in_table.wins = wins
team_in_table.draws = draws
team_in_table.loses = loses
team_in_table.points = wins * 3 + draws
team_in_table.goals_scored = (home_team_goals[0] or 0) + (away_team_goals[1] or 0)
team_in_table.goals_conceded = (home_team_goals[1] or 0) + (away_team_goals[0] or 0)
team_in_table.goal_difference = team_in_table.goals_scored - team_in_table.goals_conceded
teams_in_table_sorted = sort_teams_in_table(table_teams, season, date, league)
bulk_dictionary = []
for index, sorted_team in enumerate(teams_in_table_sorted):
sorted_team.position = index + 1
bulk_dictionary.append(sorted_team.__data__)
TableTeam.insert_many(bulk_dictionary).execute()
def get_if_key_exists(single_row: pandas.Series, key, columns, default_value=None):
if key in columns:
return single_row[key]
return default_value
def save_match(season: Season, league: League, matches_data: pd.DataFrame):
numeric_columns = matches_data.select_dtypes('number').columns
matches_data.loc[:, numeric_columns] = matches_data.loc[:, numeric_columns].fillna(0)
matches_data = matches_data.fillna('undef')
matches_to_save = []
for index, single_match_row in matches_data.iterrows():
try:
match_date = datetime.strptime(single_match_row["Date"] + ' ' + single_match_row["Time"],
"%d/%m/%Y %H:%M")
        except ValueError:
match_date = datetime.strptime(single_match_row["Date"] + ' ' + single_match_row["Time"],
"%d/%m/%y %H:%M")
matches_to_save.append({
'date': match_date,
'home_team': Team.get(Team.name == single_match_row["HomeTeam"]),
'away_team': Team.get(Team.name == single_match_row["AwayTeam"]),
'season': season,
'full_time_home_goals': single_match_row["FTHG"],
'full_time_away_goals': single_match_row["FTAG"],
'full_time_result': MatchResult(single_match_row["FTR"]),
'half_time_home_goals': single_match_row["HTHG"],
'half_time_away_goals': single_match_row["HTAG"],
'half_time_result': MatchResult(single_match_row["HTR"] if single_match_row["HTR"] != "undef" else 'D'),
'home_team_shots': single_match_row["HS"],
'home_team_shots_on_target': single_match_row["HST"],
'home_team_woodwork_hits': single_match_row["HHW"] if 'HHW' in matches_data.columns else None,
'home_team_corners': single_match_row["HC"],
'home_team_fouls_committed': get_if_key_exists(single_match_row, "HF", matches_data.columns),
'home_team_free_kicks_conceded': single_match_row["HFKC"] if 'HFKC' in matches_data.columns else None,
'home_team_offsides': single_match_row["HO"] if 'HO' in matches_data.columns else None,
'home_team_yellow_cards': single_match_row["HY"],
'home_team_red_cards': single_match_row["HR"],
'away_team_shots': single_match_row["AS"],
'away_team_shots_on_target': single_match_row["AST"],
'away_team_woodwork_hits': single_match_row["AHW"] if 'AHW' in matches_data.columns else None,
'away_team_corners': single_match_row["AC"],
'away_team_fouls_committed': get_if_key_exists(single_match_row, "AF", matches_data.columns),
'away_team_free_kicks_conceded': single_match_row["AFKC"] if 'AFKC' in matches_data.columns else None,
'away_team_offsides': single_match_row["AO"] if 'AO' in matches_data.columns else None,
'away_team_yellow_cards': single_match_row["AY"],
'away_team_red_cards': single_match_row["AR"],
'average_home_odds': (
single_match_row["AvgH"] if 'AvgH' in matches_data.columns else single_match_row["BbAvH"]),
'average_draw_odds': (
single_match_row["AvgD"] if 'AvgD' in matches_data.columns else single_match_row["BbAvD"]),
'average_away_odds': (
single_match_row["AvgA"] if 'AvgA' in matches_data.columns else single_match_row["BbAvA"])})
Match.insert_many(matches_to_save).execute()
for matchDate in matches_data["Date"].unique():
try:
table_creation(season, datetime.strptime(matchDate, "%d/%m/%Y"), league)
except ValueError:
table_creation(season, datetime.strptime(matchDate, "%d/%m/%y"), league)
def save_league_data_to_db(matches_data):
db_league, is_league_created = league_name_dict[matches_data["Div"].iloc[0]]()
dates = matches_data["Date"]
if 'Time' not in matches_data:
matches_data['Time'] = "00:00"
times = matches_data["Time"]
try:
league_start_date = datetime.strptime(dates.iloc[0] + ' ' + times.iloc[0], "%d/%m/%Y %H:%M")
except ValueError:
league_start_date = datetime.strptime(dates.iloc[0] + ' ' + times.iloc[0], "%d/%m/%y %H:%M")
try:
league_end_date = datetime.strptime(dates.iloc[-1] + ' ' + times.iloc[-1], "%d/%m/%Y %H:%M")
except ValueError:
league_end_date = datetime.strptime(dates.iloc[-1] + ' ' + times.iloc[-1], "%d/%m/%y %H:%M")
db_season, is_season_created = Season.get_or_create(league=db_league,
years=league_start_date.strftime(
"%y") + "/" + league_end_date.strftime("%y"),
defaults={'start_date': league_start_date,
'end_date': league_end_date})
if is_season_created:
for team_name in np.unique(np.concatenate((matches_data["HomeTeam"].unique(), matches_data["AwayTeam"].unique()))):
team_tuple = Team.get_or_create(name=team_name)
TeamSeason.create(team=team_tuple[0], season=db_season)
save_match(db_season, db_league, matches_data)
# Table for the end of the season
table_creation(db_season, league_end_date + timedelta(days=1), db_league)
else:
if db_season.end_date < league_end_date:
for team_name in matches_data["HomeTeam"].unique():
team_tuple = Team.get_or_create(name=team_name)
TeamSeason.get_or_create(team=team_tuple[0], season=db_season)
# Deleting last table in unfinished season (as it is not the final one)
table_to_delete = Table.select().where(Table.season == db_season).order_by(Table.date.desc()).limit(1).get()
            # the delete query must be executed, otherwise nothing is removed
            TableTeam.delete().where(TableTeam.table == table_to_delete).execute()
table_to_delete.delete_instance()
db_season.end_date = league_end_date
db_season.save()
match_to_save_indexes = []
for index, single_match_row in matches_data.iterrows():
try:
match_date = datetime.strptime(single_match_row["Date"] + ' ' + single_match_row["Time"],
"%d/%m/%Y %H:%M")
            except ValueError:
match_date = datetime.strptime(single_match_row["Date"] + ' ' + single_match_row["Time"],
"%d/%m/%y %H:%M")
home_team_alias = Team.alias()
away_team_alias = Team.alias()
query = Match.select(Match, home_team_alias, away_team_alias) \
.join(home_team_alias, on=Match.home_team) \
.switch(Match).join(away_team_alias, on=Match.away_team) \
.where((Match.date == match_date) & (home_team_alias.name == single_match_row["HomeTeam"]) &
(away_team_alias.name == single_match_row["AwayTeam"]))
if not query.exists():
match_to_save_indexes.append(index)
save_match(db_season, db_league, matches_data.iloc[match_to_save_indexes])
# Table for the end of the season
table_creation(db_season, league_end_date + timedelta(days=1), db_league)
def process_csv_and_save_to_db(csv_file_path):
matches_data = pd.read_csv(csv_file_path)
with db.transaction() as txn:
try:
save_league_data_to_db(matches_data)
txn.commit()
print("League data committed to database")
except BaseException as e:
print("Transaction rolling back because of encountered exception:\n" + traceback.format_exc(limit=4))
txn.rollback()
|
[
"gustaw@ohler.pl"
] |
gustaw@ohler.pl
|
5679f87ee2971ed8a994e1847ac44500ad6a53c5
|
cd627f0c85e697a78a77ea9d6150a78eafbb6a7e
|
/profiles_api/migrations/0001_initial.py
|
e6f18968e30468255705b0655bb61a3755c01d32
|
[
"MIT"
] |
permissive
|
mantu0314/profiles-rest-api
|
09ac8a299733d983e9f921e9898b7ec6eb76a68b
|
2da0237e505053b84b706cf1afdf1c98f425c2eb
|
refs/heads/master
| 2023-07-30T06:43:48.044675
| 2020-08-07T16:51:08
| 2020-08-07T16:51:08
| 283,555,622
| 0
| 0
|
MIT
| 2021-09-22T19:32:28
| 2020-07-29T17:00:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,708
|
py
|
# Generated by Django 3.0.8 on 2020-07-31 06:55
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
|
[
"mantu031495@gmail.com"
] |
mantu031495@gmail.com
|
e4616a4f551a702638c5b140b6f474216a4d5515
|
db8b535f422749f3606b8e085da7e908d264a85f
|
/ProjectStudantPython/SimpleListExercicios.py
|
7ec1cf89c67ffc364216363a403e76b048c4889b
|
[] |
no_license
|
thallys-moura/studies
|
30497ea13ebabbdedcfd54b5051b3a8998ed04c0
|
0fcf83afc8d29c56915fa9331488420a08f895b8
|
refs/heads/main
| 2023-03-24T00:11:05.582993
| 2021-03-29T04:48:59
| 2021-03-29T04:48:59
| 343,241,028
| 1
| 0
| null | 2021-03-14T22:43:15
| 2021-02-28T23:47:07
|
Python
|
UTF-8
|
Python
| false
| false
| 29,537
|
py
|
from random import randint, random, choices
def criaExibiLista():
    M = []
    i = 0
    try:
        # read the dimensions inside the try so invalid input is caught below
        Lin = int(input("Number of rows: "))
        Col = int(input("Number of columns: "))
        while i < Lin:
            M.append([])
            j = 0
            while j < Col:
                M[i].append(randint(0, 20))
                j += 1
            i += 1
        print("This is the generated list M")
        print("M =", M)
        print("Displayed as a matrix: ")
        i = 0
        while i < Lin:
            j = 0
            print('|', end='')
            while j < Col:
                print("{0:4}".format(M[i][j]), end='')
                j += 1
            print(" |")
            i += 1
    except ValueError:
        print("Value must be of type INT")
# Fibonacci sequence function using a FOR loop
def sequenciaFibonacci():
    print("Fibonacci sequence \n")
    N = 0
    while N < 2:
        try:
            N = int(input("Enter N (>1): "))
            if N < 2:
                print("Enter N >= 2")
        except ValueError:
            print("The value entered is invalid.")
    # build the list with the Fibonacci sequence
    L = [0, 1]
    # loop over the input value minus 2 (the two slots already occupied
    # by the default starting values of the Fibonacci sequence)
    for i in range(N - 2):
        L.append(L[i] + L[i + 1])
    print("Generated sequence", L)
    print("End of program")
def gerarListaOrdemCrescenteLoop():
    print("Ascending-order list generator \n")
    L = []
    x = int(input("Enter a value: "))
    while x != 0:
        p = 0
        # advance p past every element smaller than x, then insert x there,
        # which keeps the list sorted at all times
        while p < len(L) and L[p] < x:
            p += 1
        L.insert(p, x)
        # the keyboard answer drives the first while by giving x a new value at the end of the pass
        x = int(input("Enter a value: "))
    print("Generated list:", L)
    print("End of program")
def pesquisaSequencial():
    # despite the name, this implements a binary search over a sorted list
    print("Search in a sorted list \n")
    N = int(input("Enter a value for the list size: "))
    x = int(input("Enter a value: "))
    L = list(range(2, N + 1, 2))
    print(L)
    while x != 0:
        ini = 0
        fim = len(L) - 1  # index of the last element of the sorted list
        meio = (ini + fim) // 2  # index of the middle element
        while ini <= fim:
            print("This is x: {0} ".format(x))
            print("This is the middle value of the list: {0} ".format(L[meio]))
            print("This is the middle index: {0} ".format(meio))
            if x == L[meio]:  # check whether x equals the middle value of the list
                print("The value {0} is already in the list".format(x))
                break  # found the value
            if x < L[meio]:  # check whether x is below the middle value of the list
                fim = meio - 1  # the search window now ends just before the middle
            else:
                ini = meio + 1
            meio = (ini + fim) // 2  # recompute the middle index of the new window
        else:
            print("{0} is not in the list".format(x))
        x = int(input("Enter a value for x: "))
def algoritimoDeOrdenacao():
    print("Sorting algorithm (bubble sort) \n")
    L = [14, 55, 43, 1, 2, 76]
    print("Generated list:", L)
    mudouPosicao = 1
    while mudouPosicao:
        mudouPosicao = 0
        i = 0
        while i < len(L) - 1:  # working with list indices,
            # so iterate up to the list length - 1 (indices run from 0 to 5 (KEYS))
            if L[i] > L[i + 1]:
                L[i], L[i + 1] = L[i + 1], L[i]
                mudouPosicao = 1
            i += 1
        print("Partial state of L:", L)
    print("\n Final situation")
    print("Sorted list:", L)
# criaExibiLista()
# sequenciaFibonacci()
# gerarListaOrdemCrescenteLoop()
# pesquisaSequencial()
# algoritimoDeOrdenacao()
# THE EXERCISES FOR THE CHAPTER ON SEQUENTIAL TYPES AND STRUCTURES START HERE.
##########################################################################################
##########################################################################################
# 1. Write a program that reads a list of 10 elements from the keyboard and
# displays it on screen in the reverse of the reading order.
def ordemInversa():
    try:
        L = []
        tam = 10
        while len(L) < tam:  # keep reading until the list reaches the size in "tam"
            x = int(input("Enter a value for the list: \n"))
            L.append(x)
        print("List in insertion order: ")
        print(L)
        print("\n")
        print("List in reverse order: ")
        L.reverse()
        print(L)
    except ValueError:
        print("Enter an integer, e.g. 1, 2, 3...")
    finally:
        print("Reached the end of the operation. \n")
        print("")
# ordemInversa()
##########################################################################################
##########################################################################################
# 2. Write a program that reads two lists of size 10 with integers from the keyboard.
# The program must then merge the two lists into a single one of size 20.
def mergedListas():
    try:
        # variables
        L1 = []
        L2 = []
        L3 = []
        tam = 10  # the exercise asks for lists of size 10
        i = 0
        while True:  # loop continuously until every interaction
            # needed by the loop has been carried out
            if len(L1) < tam:  # check the size of list 1 against the adopted standard
                value = int(input("Enter 10 values for the first list \n"))
                L1.append(value)
            elif len(L2) < tam:  # check the size of list 2 against the adopted standard
                value = int(input("Enter 10 values for the second list \n"))
                L2.append(value)
            else:
                L3 = L1 + L2  # feed a third list
                print(L3)
                print("\n")
                break
    except ValueError:
        print("Values must be of type integer (INT)")
    finally:
        print("Sorted list: \n")
        L3.sort()  # display the list with its values in ascending order
        print(L3)
# mergedListas()
##########################################################################################
##########################################################################################
# 3. Write a program that fills two lists named A and B, with different sizes nA and
# nB respectively, with integers. The program must then merge the two into a single
# list of size nA + nB. Display the resulting list on screen.
def gerarListasDiferentesTamanhos():
    import random  # import needed inside the function to generate random numbers (random.randrange)
    # initialise the lists before the try so the finally block can use them safely
    L1 = []
    L2 = []
    L3 = []
    try:
        # variables
        tam1 = int(input("Enter the size of the first list \n"))
        tam2 = int(input("Enter the size of the second list \n"))
        i = 0
        while True:  # loop continuously until every interaction
            # needed by the loop has been carried out
            if len(L1) < tam1:  # check the size of list 1 against the adopted standard
                value = random.randrange(0, 100)
                L1.append(value)
            elif len(L2) < tam2:  # check the size of list 2 against the adopted standard
                value = random.randrange(0, 100)
                L2.append(value)
            else:
                print("List 1 with size {0} and elements: {1} \n".format(tam1, L1))
                print("List 2 with size {0} and elements: {1} \n".format(tam2, L2))
                break
    except ValueError:
        print("Values must be of type integer (INT)")
    else:
        # after the try block succeeds, perform the operation below
        L3 = L1 + L2  # feed a third list
    finally:
        # finish up; display the processed data at the end
        if len(L3) > 0:
            print("Sorted list: ")
            L3.sort()  # display the list with its values in ascending order
            print(L3)
            callback = str(input(
                'Do you want to run the operation again? S OR N'))  # give the user the chance to return to the start of the function
            if callback == 'S':
                gerarListasDiferentesTamanhos()
        print("Log of operations carried out in the METHOD!!!")
### Interesting to note that finally runs for every try statement executed inside
### the method wrapped by the TRY structure; useful for walking through something
### and later recording the processes carried out for that instruction
# gerarListasDiferentesTamanhos()
##########################################################################################
##########################################################################################
# 4. Write a program that reads a list of N integers, where N is an integer previously
# typed by the user. The program must not accept a typed number that is already in the
# list; when that situation occurs, a message must be shown to the user.
# Finally, display the resulting list on screen.
def gerarListaByTamanho():
    N = int(input('Enter the size of the list: '))
    L1 = []
    aux1 = 0
    try:
        while aux1 < N:  # check whether the list-size value entered is still above aux1
            x = int(input("Enter a value to be inserted into the list\n"))
            if x not in L1:  # check whether the value already exists in list L1
                L1.append(x)
                aux1 += 1
            else:
                print("Value already exists in the list.")
    except ValueError:
        print('The values requested by this method must be of type INT.')
    else:
        callback = str(input(
            'Do you want to run the operation again? S OR N'))  # give the user the chance to return to the start of the function
        if callback == 'S':
            gerarListaByTamanho()
    finally:
        print(L1)  # display the final result of the work
### Interesting to note that finally runs for every try statement executed inside
### the method wrapped by the TRY structure; useful for walking through something
### and later recording the processes carried out for that instruction
# gerarListaByTamanho()
##########################################################################################
##########################################################################################
##########################################################################################
##########################################################################################
# 5. Write a program that reads two integers nA and nB from the keyboard and also reads
# two lists named A and B with sizes nA and nB, respectively. While reading each list it
# is mandatory that repeated values are not accepted. The program must then merge the two
# into a single resulting list R, taking care that R has no duplicate values.
def gerarListasDiferentesTamanhosElementoUnico():
    # initialise the lists before the try so the finally block can use them safely
    L1 = []
    L2 = []
    L3 = []
    try:
        # variables
        tam1 = int(input("Enter the size of the first list \n"))
        tam2 = int(input("Enter the size of the second list \n"))
        aux1 = 0
        aux2 = 0
        while True:  # loop until every interaction needed
            # by the loop has been carried out
            if aux1 < tam1:  # check the size of list 1 against the adopted standard
                value = int(input("Enter a value to be inserted into the first list\n"))
                if value not in L1:
                    L1.append(value)
                    aux1 += 1
                else:
                    print("Value already exists in list 1.")
            elif aux2 < tam2:  # check the size of list 2 against the adopted standard
                value = int(input("Enter a value to be inserted into the second list\n"))
                if value not in L2:
                    L2.append(value)
                    aux2 += 1
                else:
                    print("Value already exists in list 2.")
            else:
                print("List 1 with size {0} and elements: {1} \n".format(tam1, L1))
                print("List 2 with size {0} and elements: {1} \n".format(tam2, L2))
                break
        # merge keeping only the first occurrence of each value so R has no duplicates
        for v in L1 + L2:
            if v not in L3:
                L3.append(v)
    except ValueError:
        print("Values must be of type integer (INT)")
    else:
        callback = str(input(
            'Do you want to run the operation again? S OR N'))  # give the user the chance to return to the start of the function
        if callback == 'S':
            gerarListasDiferentesTamanhosElementoUnico()
    finally:
        # finish up; display the processed data at the end
        if len(L3) > 0:
            print("Sorted list: ")
            L3.sort()  # display the list with its values in ascending order
            print(L3)
        print("Log of operations carried out in the METHOD!!!")
### Interesting to note that finally runs for every try statement executed inside
### the method wrapped by the TRY structure; useful for walking through something
### and later recording the processes carried out for that instruction
# gerarListasDiferentesTamanhosElementoUnico()
##########################################################################################
##########################################################################################
# 6. Write a program that reads three inputs: the first term, the common difference and
# the number of terms of an arithmetic progression (A.P.), all integers. The program
# must compute every term, place them in a list, and display it at the end.
# helper function (acts as a reusable prompt)
def functionCallBack():
    visualizarLog = str(input(
        'Do you want to view the log of operations/exceptions? S OR N'))  # give the user the chance to return to the start of the function
    return visualizarLog
def progressaoAritimetica():
    # variables
    Q = int
    Termo = int
    Razao = int
    callback = str
    try:
        # the method's action validates/allocates the values in memory
        Q = int(input("Enter the number of terms to generate: "))
        Termo = int(input("Enter the first term: "))
        Razao = int(input("Enter the common difference: "))
        listaPA = []
        Count = 0
        while Count < Q:
            # n-th term of an A.P.: first term plus n times the common difference
            PA = Termo + Count * Razao
            Count = Count + 1
            listaPA.append(PA)
        print("Arithmetic progression list")
        print(listaPA)
    except ValueError:
        print("Values must be of type integer (INT)")
    else:
        callback = functionCallBack()
        if callback == 'S':
            progressaoAritimetica()
    finally:
        # finish up; display the processed data at the end
        visualizarLog = functionCallBack()  # function that calls an input to request data
        if visualizarLog == 'S':
            # check whether none of the three items in the tuple is of type int
            if int not in (type(Termo), type(Razao),
                           type(Q)):  # switch to a string check to identify whether any of the items is of type string
                print("################LOG##############Values must be of type integer (INT)\n")
                callback = str(input(
                    'Do you want to run a new operation? S OR N \n'))  # give the user the chance to return to the start of the function
                if callback == 'S':
                    progressaoAritimetica()
    # Study `with` > finally: this feature makes it possible to log what was
    # executed in the method in question.
# progressaoAritimetica()
##########################################################################################
##########################################################################################
# 7. Write a program that reads a number N, strictly between 0 and 50, and then reads
# N real numbers into a list A. The program must split the values read into A into two
# other lists NEG and POS: the first containing only the negative values and the second
# containing the positive values and zero. Display the lists NEG and POS on screen along
# with the number of values contained in each.
def numerosNegativosPositivosa():
    N = int(input("Enter a number between 0 and 50"))
    listaTotal = []
    listaNmerosNegativos = []
    listaNumerosPositivos = []
    aux1 = 0
    if 0 < N < 50:  # simplified chained comparison in the if condition
        while aux1 < N:  # add the value to the list
            value = int(input("Enter a value for the list: "))
        if value == 0:
                print("The value entered cannot be 0")
            else:
                listaTotal.append(value)
                aux1 += 1
        # variables for working with the list indices
        count = 0
        while count < len(listaTotal):
            # check whether the value at the current index in the loop is below 0 (negative) or above 0 (positive)
            if listaTotal[count] < 0:
                listaNmerosNegativos.append(listaTotal[count])
            elif listaTotal[count] > 0:
                listaNumerosPositivos.append(listaTotal[count])
            else:
                print("The number cannot be 0")
            count += 1
        print("Full list entered")
        print(listaTotal)
        print("\n")
        print("Positive numbers ")
        print(listaNumerosPositivos)
        print("\n")
        print("Negative numbers")
        print(listaNmerosNegativos)
    else:
        print("The number entered must be greater than 0 and less than 50")
        # if the first step fails, call the function again
        numerosNegativosPositivosa()
# numerosNegativosPositivosa()
# IN THIS EXERCISE I DECIDED TO MAKE THE CODE DIFFERENT FROM THE ONE WRITTEN
# ABOVE, JUST TO SEE MORE CLEARLY A DIFFERENCE BETWEEN THE STRUCTURES
# AND TRY TO UNDERSTAND THEIR FEATURES MORE CLEARLY AND HOW
# TO USE THEM.
#####################################################################################
####################################################################################
# 8. Write a program that reads a number N (between 0 and 50) and then defines a
# list V filled with N random integers (use the randint function). Display it on
# screen. Start a loop that reads a number X and ends when X is zero. Search for X
# in the list V and, if it is there, eliminate all of its occurrences.
def delElementoListaDinamica():
    N = int(input('Enter a number from 0 to 50'))
    listaV = []
    X = 0
    tamanhoInicial = 0
    if N > 0:
        count = 0
        while count < N:
            listaV.append(randint(0, 100))  # generate a random number in the range 0-100 for each of the N elements
            count += 1
        tamanhoInicial = len(
            listaV)  # store the list size in a variable for later use in a second action/step
        print(listaV)
        while True:  # loop in the spirit of a do-while, repeating until every operation needed by this action/step is done
            if (len(listaV) < tamanhoInicial):
                print(listaV)
            X = int(input("Enter a number to remove from the list; type 0 (ZERO) to cancel the operation: "))
            if X == 0:
                print("Loop finished")
                break
            elif X in listaV:
                # eliminate every occurrence, as the exercise asks (list.remove only drops the first match)
                while X in listaV:
                    listaV.remove(
                        X)  # IMPORTANT: THIS LINE REMOVES FROM MEMORY THE ELEMENT ADDED TO THE LIST THAT MATCHES THE CONDITION,
                    # SO THE DATA CEASES TO EXIST. FOR SUCH CASES, IDEALLY USE AN AUXILIARY VARIABLE
                    # TO RECYCLE THE DATA IF IT IS NEEDED AGAIN.
    print("\n")
    print("This is the final list: ")
    print(listaV)
# delElementoListaDinamica()
#####################################################################################
####################################################################################
# 9. The program must read two integers called Min and Max. Min can be any value and
# Max must, obligatorily, be greater than Min. Then fill a list with every value
# divisible by 7 contained in the closed interval [Min, Max]. Display the resulting
# list on screen.
def retornaDivisiveisPor7():
    # note: min and max shadow the built-ins of the same name inside this function
    min = int(input("Enter the minimum value: "))
    max = int(input("Enter a value for the maximum: "))
    while max <= min:
        max = int(input("Enter a maximum value greater than the minimum: "))
    listaIntervalo = []
    valor = min
    # walk the closed interval [min, max], keeping every value divisible by 7
    while valor <= max:
        if valor % 7 == 0:
            listaIntervalo.append(valor)
        valor += 1
    print(listaIntervalo)  # display the list with the stored data
# retornaDivisiveisPor7()
#####################################################################################
####################################################################################
# 10. Write a program that reads a list of N elements from the keyboard. The program
# must then eliminate the repeated elements, keeping only the first occurrence of
# each. Display the resulting list on screen. The removed values must be stored in
# another list, which must also be displayed.
def deleteExibeResultado():
    N = int(input('Enter the number of elements you will insert into the list'))
    listaV, listaRemovidos, listaTotal = [], [], []
    if N > 0:
        count = 0
        while count < N:
            value = int(input("Enter a number: "))
            listaTotal.append(value)  # insert into the list with every value entered
            if value not in listaV:
                listaV.append(value)  # insert into the final resulting list, which holds only non-repeated values
                count += 1
            else:
                listaRemovidos.append(value)  # insert into the list of removed values
                count += 1
    # display the three lists produced
    print("\n")
    print("This is the final list with unique values: ")
    print(listaV)
    print("\n")
    print("This is the list with all the removed values")
    print(listaRemovidos)
    print("\n")
    print("This is the list with all the values processed")
    print(listaTotal)
# deleteExibeResultado()
#####################################################################################
####################################################################################
# 11. Write a program that reads a fairly large integer N (above 5,000). Fill a list
# of size N with random positive integers. Then start a search loop in which the value
# to search for is read from the keyboard, and the program must say whether that value
# is in the list, as well as report its position. In the case of several occurrences,
# display all of them. The search loop ends when zero is typed.
# Use the sequential search algorithm.
def algoritimoSequencial20():
    print("Sequential search list \n")
    N = int(input("Enter a value for the list size > 5000: "))
    while N < 5000:  # keep asking until the size is actually above 5000
        N = int(input("Enter a value for the list size > 5000: "))
    L = choices(range(0, 1000), k=N)
    L.sort()
    print(L)
    valor = int(input("Enter a value (0 ends the search): "))
    while valor != 0:  # the search loop ends when zero is typed
        ocorrencias = []
        # sequential search: visit every index and compare with the searched value
        for i, X in enumerate(L):
            if X == valor:
                ocorrencias.append((i, valor))
        if ocorrencias:
            print("List of (index, element) tuples for every occurrence:")
            print(ocorrencias)
        else:
            print("{0} is not in the list".format(valor))
        valor = int(input("Enter a value (0 ends the search): "))
# algoritimoSequencial20()
########################################################################################
########################################################################################
# 12. Write a program that reads two 2x2 matrices from the keyboard and shows the
# sum of the two matrices on screen.
# FUNCTION CREATED OUTSIDE THE ORIGINAL SCOPE SO IT CAN BE REUSED.
# THE MATRIX ADDITION HAPPENS HERE.
def somarMatrizes(matriz1, matriz2):
    if (len(matriz1) != len(matriz2) or len(matriz1[0]) != len(matriz2[0])):
        return None
    result = []
    for i in range(len(matriz1)):
        result.append([])  # start a new row of the result matrix
        for j in range(len(matriz1[0])):
            result[i].append(matriz1[i][j] + matriz2[i][j])
    return result
def MatrizesSoma():
    aux1 = 0
    matrizes = []
    while aux1 <= 1:  # NUMBER OF MATRICES: BY DEFAULT TWO MATRICES MUST BE FILLED IN
        print("Matrix {0}".format(aux1))
        y = 1  # dimensions > column
        x = 1  # dimensions > row
        i = 0
        matriz = []
        while i <= x:
            j = 0
            matriz.append([])
            while j <= y:
                value = int(input("Value for row {0} and column {1}".format(i, j)))
                matriz[i].append(value)
                j += 1
            i += 1
        matrizes.append(matriz)
        # DISPLAY THE MATRICES.
        print('Display matrix')
        i = 0
        while i <= x:
            j = 0
            print('|', end='')
            while j <= y:
                print("{0:4}".format(matriz[i][j]), end='')
                j += 1
            print(' | ')
            i += 1
        aux1 += 1
    soma = somarMatrizes(matrizes[0], matrizes[1])  # soma is our return value, result
    if soma is not None:
        for i in soma:
            print(i)
    else:
        print('Matrices must have the same number of rows and columns')
# MatrizesSoma()
########################################################################################
########################################################################################
# 13. Write a program that reads two 2x2 matrices from the keyboard and shows
# their product on the screen.
def multiplicarMatrizes(matriz1, matriz2):
    def getLinha(matriz, n):
        return [i for i in matriz[n]]  # or simply: return matriz[n]
    def getColuna(matriz, n):
        return [i[n] for i in matriz]
    mat1 = matriz1
    mat1lin = len(mat1)     # number of rows of matriz1
    mat1col = len(mat1[0])  # number of columns of matriz1
    mat2 = matriz2
    mat2lin = len(mat2)     # number of rows of matriz2
    mat2col = len(mat2[0])  # number of columns of matriz2
    if mat1col != mat2lin:  # the dimensions must be compatible for multiplication
        return None
    matRes = []  # result matrix of size mat1lin x mat2col
    for i in range(mat1lin):
        matRes.append([])
        for j in range(mat2col):
            # multiply each row of mat1 by each column of mat2;
            listMult = [x * y for x, y in zip(getLinha(mat1, i), getColuna(mat2, j))]
            # then append the sum of those products to matRes
            matRes[i].append(sum(listMult))
    return matRes
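# A quick sanity check (hypothetical input):
# multiplicarMatrizes([[1, 2], [3, 4]], [[5, 6], [7, 8]])
# -> [[1*5 + 2*7, 1*6 + 2*8], [3*5 + 4*7, 3*6 + 4*8]] = [[19, 22], [43, 50]]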
def MatrizesMultiplicacao():
    aux1 = 0
    matrizes = []
    while aux1 <= 1:  # BY DEFAULT TWO MATRICES MUST BE FILLED IN
        print("Matrix {0}".format(aux1))
        y = 1  # dimensions > column
        x = 1  # dimensions > row
        i = 0
        matriz = []
        while i <= x:
            j = 0
            matriz.append([])
            while j <= y:
                value = int(input("Value for row {0} and column {1}: ".format(i, j)))
                matriz[i].append(value)
                j += 1
            i += 1
        matrizes.append(matriz)
        # DISPLAY THE MATRIX.
        print('Displaying matrix')
        i = 0
        while i <= x:
            j = 0
            print('|', end='')
            while j <= y:
                print("{0:4}".format(matriz[i][j]), end='')
                j += 1
            print(' | ')
            i += 1
        aux1 += 1
    multiplicao = multiplicarMatrizes(matrizes[0], matrizes[1])  # multiplicao holds the value returned by multiplicarMatrizes
    if multiplicao is not None:
        for i in multiplicao:
            print(i)
    else:
        print('Matrices must have the same number of rows and columns')
#MatrizesMultiplicacao()
|
[
"thallys-moura@outlook.com"
] |
thallys-moura@outlook.com
|
95b2b1a4f44b34c7d410349f928103cebabd7558
|
5bf025dc67b233a70225c194c7b37b94e719777e
|
/7 week/3.py
|
3424435f1256ffd586021afc9fa89f89ce5e47af
|
[] |
no_license
|
SunnyGrapefruit/Course
|
b9dc3fdb1bcc98fae14b7b639e41b843146bc210
|
241fe94973f72195f1dc0dec2ef7af4e45ae8d16
|
refs/heads/master
| 2020-09-16T07:55:11.002300
| 2020-01-08T17:56:33
| 2020-01-08T17:56:33
| 219,424,229
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,038
|
py
|
# In this task you will learn how to create links.
# You need to generate HTML code in Python and submit an html file containing a 10x10 table
# that holds the multiplication table for the numbers from 1 to 10.
# Every number in the table must be a link to the page http://<that number>.ru.
# For example, the number 12 must be a link to the page http://12.ru
#
# When your file is opened in a browser, it should look roughly like this:
#
# Your file must begin with the tags <html> and <body> and end with </body> and </html>.
#
# To build the table you can use the tags <table> (creates a table), <tr> (creates a row
# in the table) and <td> (creates a single cell).
# All opened tags must be closed, and in the correct order.
#
# To create a link, use the <a> tag. For example, a link to the page http://hse.ru with the
# text "Higher School of Economics" should look like: <a href=http://hse.ru>Higher School of Economics</a>.
data = open('output3.html', 'w', encoding='utf8')
print('<html>', '<body>', '<table>', file=data, sep='\n')
for i in range(1, 11):
print('<tr>', file=data)
for j in range(1, 11):
print('<td>', '<a href=http://' + str(i*j) + '.ru>', i * j, '</a>', '</td>', file=data)
print('</tr>', file=data)
print('</table>', '</body>', '</html>', file=data, sep='\n')
data.close()
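# For instance, the cell for 3 * 4 is written out as:
# <td> <a href=http://12.ru> 12 </a> </td>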
|
[
"wir_die_traumer@mail.ru"
] |
wir_die_traumer@mail.ru
|
1e1dea19b3032c109d58c3449d15ab7e38671d6e
|
adafae386bbffd96c5df3a29d789f8d4f68aa43e
|
/positivenumber.py
|
742ce50962e739612117598baf20336edcd6d647
|
[] |
no_license
|
nikhilgeroge/pythonbasics
|
44041d58c8a61a3358f74f268e38e636b6449635
|
6f49556e468da4b8f2e6985efe5606b12034ca20
|
refs/heads/master
| 2022-11-26T06:26:42.344096
| 2020-08-07T11:22:10
| 2020-08-07T11:22:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 200
|
py
|
amal = []
n = int(input("Enter the number of terms: "))
for i in range(0, n):
    amal.append(int(input("Enter the term: ")))
print(amal)
for i in amal:
    if i >= 0:
        print(i)
|
[
"noreply@github.com"
] |
nikhilgeroge.noreply@github.com
|
a9855530169d7b4d62b67459b4a77e7716eb9f61
|
c51b9b183b7bfb41219be0868bb01f5e166c5a7c
|
/server.py
|
54cbca9e9b207a0e56fe5aadf72916af4b8cef0e
|
[] |
no_license
|
Barathwaja/Status_Update_Bot
|
2f05eb5b8b48fd8a7105a889ee5dd9aa7f087390
|
abcf08cdfe547ba164917e1bd7e9188ace8ecf0d
|
refs/heads/master
| 2020-04-01T20:38:26.838644
| 2018-10-18T12:30:23
| 2018-10-18T12:30:23
| 153,613,525
| 1
| 0
| null | 2018-10-18T11:33:25
| 2018-10-18T11:33:25
| null |
UTF-8
|
Python
| false
| false
| 34,056
|
py
|
# -*- coding: utf-8 -*-
from flask import Flask, request, abort, Response, send_from_directory
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import requests
import json
import MySQLdb
import random
from date_methods import clean_arg_for_date, clean_date_for_plot, get_month_for_plot, get_today_date, is_valid_date, is_smaller_than_today
import datetime
import os
from bot import send_message, send_admin_message, send_error_message, send_message_to_all, reset_webhook
from database import connect, isadmin, exists_in_db, add_entry_to_user_in_db
app = Flask(__name__)
def get_pending_requests(chatId):
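    # Sends the caller the number of pending access requests along with each requester's Telegram username.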
conn,cursor = connect()
query = "select * from pending_requests"
cursor.execute(query)
requests = cursor.fetchall()
message = str(len(requests)) + " pending requests\n"
for request in requests:
message = message + request[3] + '\n'
send_message(chatId,message)
def remove_blank_mails(new_following): #This method is used to remove all blank mails
new_following = list(filter((" ").__ne__, new_following)) #removing all space emails
new_following = list(filter(('').__ne__, new_following)) #removing all blank emails
return new_following
def get_following(chatId):
conn,cursor = connect()
query = "select following from user where chatId = '" + chatId + "';"
try:
cursor.execute(query)
conn.close()
following = list(cursor.fetchall()[0][0].split(' ')) #must convert to list format
following = remove_blank_mails(following)
return True,following
except (MySQLdb.Error, MySQLdb.Warning) as e:
app.logger.info(e)
conn.close()
return False,[]
def is_valid_mail(mailid):
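    # A mail id counts as valid only if it already exists in the mails table.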
query = "select * from mails where mail_id = '" + mailid + "';"
try:
conn,cursor = connect()
result = cursor.execute(query)
conn.close()
except (MySQLdb.Error, MySQLdb.Warning) as e:
app.logger.info(e)
conn.close()
if (result > 0):
return True
return False
def follow(chatId,new_following):
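    # Adds every new, valid mail to the user's space-separated "following" column and reports exactly what changed.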
new_following = list(set(new_following))
if (len(new_following) == 0):
message = "No input given\n"
send_message(chatId,message)
return
valid,following = get_following(chatId)
if (not valid):
send_error_message(chatId)
return
already_following = []
invalid_mail = []
addedmail = []
changes_made = False
for mail in new_following:
if (mail not in following):
if (is_valid_mail(mail)):
changes_made = True
following.append(mail)
addedmail.append(mail)
else:
invalid_mail.append(mail)
else:
already_following.append(mail)
try:
conn,cursor = connect()
query = "update user set following = '" + ' '.join(following) + "' where chatId = '" + chatId + "'"
cursor.execute(query)
if (changes_made):
message = "Your preferences have been updated\n"
else:
message = "No changes have been made\n"
if (len(addedmail) > 0):
message = message + "You are now also following\n"
for mail in addedmail:
message = message + " -> " +mail + '\n'
if (len(already_following) > 0):
message = message + "You were already following\n"
for mail in already_following:
message = message + " -> " +mail + '\n'
if (len(invalid_mail) > 0):
message = message + "The following mails do not exist in the database\n"
for mail in invalid_mail:
message = message + " -> " + mail + '\n'
conn.commit()
conn.close()
send_message(chatId,message)
except (MySQLdb.Error, MySQLdb.Warning) as e:
app.logger.info(e)
send_error_message(chatId)
conn.close()
def unfollow(chatId,new_following):
new_following = list(set(new_following))
if (len(new_following) == 0):
message = "No input given\n"
send_message(chatId,message)
return
valid,following = get_following(chatId)
if (not valid):
send_error_message(chatId)
return
not_following = []
invalid_mail = []
removedmails = []
changes_made = False
for mail in new_following:
if (mail in following):
changes_made = True
following.remove(mail)
removedmails.append(mail)
else:
if (is_valid_mail(mail)):
not_following.append(mail)
else:
invalid_mail.append(mail)
try:
conn,cursor = connect()
query = "update user set following = '" + ' '.join(following) + "' where chatId = '" + chatId + "'"
cursor.execute(query)
if (changes_made):
message = "Selected Emails have been successfully dropped\n"
else:
message = "No changes made\n"
if (len(removedmails) > 0):
message = message + "You are no longer following\n"
for mail in removedmails:
message = message + " -> " + mail + '\n'
if (len(not_following) > 0):
message = message + "You were not following the following mails\n"
for mail in not_following:
message = message + " -> " + mail + '\n'
if (len(invalid_mail) > 0):
message = message + "The following mails do not exist in the database\n"
for mail in invalid_mail:
message = message + " -> " +mail + '\n'
conn.commit()
send_message(chatId,message)
conn.close()
except (MySQLdb.Error, MySQLdb.Warning) as e:
        app.logger.info(e)
send_error_message(chatId)
conn.close()
def unfollow_all(chatId):
conn,cursor = connect()
try:
query = "update user set following = ' ' where chatId = '" + chatId + "'"
cursor.execute(query)
message = "Unfollowed all users\n"
conn.commit()
send_message(chatId,message)
conn.close()
except (MySQLdb.Error, MySQLdb.Warning) as e:
app.logger.info(e)
send_error_message(chatId)
conn.close()
def list_following(chatId):
valid,following = get_following(chatId)
if (not valid):
send_error_message(chatId)
return
if (len(following) == 0):
message = "You are currently not following anyone\n"
else:
message = "You are currently following\n"
for mail in following:
message = message +" -> " +mail + '\n'
send_message(chatId,message)
def revoke_access(chatId,usernames):
usernames = list(set(usernames))
conn,cursor = connect()
revoked_for = []
for user in usernames:
cId = ""
granted = False
requesting = False
message = ""
try:
query = "select chatId from accepted_users where tusername = '{0}'".format(user)
result = cursor.execute(query)
if (result > 0):
granted = True
cId = cursor.fetchall()[0][0]
revoked_for.append(user)
query = "delete from accepted_users where tusername = '{0}'".format(user)
cursor.execute(query)
query = "select chatId from pending_requests where tusername = '{0}'".format(user)
result = cursor.execute(query)
if (result > 0):
requesting = True
cId = cursor.fetchall()[0][0]
revoked_for.append(user)
query = "delete from pending_requests where tusername = '{0}'".format(user)
cursor.execute(query)
conn.commit()
conn.close()
if (granted and not requesting):
message = "Your access has been removed by the Admin\n"
if (requesting and not granted):
message = "Your request has been cancelled by the Admin\n"
if (requesting and granted):
message = "Your access has been removed\n"
send_message(chatId,"Data Redundancy in the server database!!!!")
if (granted or requesting):
send_message(cId,message)
except (MySQLdb.Error, MySQLdb.Warning) as e:
app.logger.info(e)
send_error_message(chatId)
conn.close()
return
revoked_for = list(set(revoked_for))
message = ""
if (len(revoked_for) > 0):
message = "Access has been revoked for \n"
for user in usernames:
message = message + " -> " + user + '\n'
else:
message = "No valid users to revoke access"
send_message(chatId,message)
def list_all_mails(chatId):
try:
conn,cursor = connect()
cursor.execute("select * from mails")
result = cursor.fetchall()
conn.close()
except (MySQLdb.Error, MySQLdb.Warning) as e:
app.logger.info(e)
conn.close()
return
mails = [x[0] for x in result]
message = "The available mails are \n"
for mail in mails:
message = message + " ->" + mail + '\n'
send_message(chatId,message)
def request_access(request_data):
conn,cursor = connect()
chatId = str(request_data['message']['chat']['id'])
firstName = ""
if ('first_name' in request_data['message']['chat']):
firstName = request_data['message']['chat']['first_name']
lastName = ""
if ('last_name' in request_data['message']['chat']):
lastName = request_data['message']['chat']['last_name']
tusername = request_data['message']['from']['username']
message = ""
try:
query = "select * from pending_requests where chatId = {0}".format(chatId)
result = cursor.execute(query)
if (result > 0):
message = "You have already requested for access. Please wait patiently the admin will soon accept your request\n"
else:
query = "select * from accepted_users where chatId = {0}".format(chatId)
result = cursor.execute(query)
if (result > 0):
message = "You already have access\n"
else:
query = "insert into pending_requests (chatId,firstName,lastName,tusername) values ('{0}','{1}','{2}','{3}');".format(chatId,firstName,lastName,tusername)
cursor.execute(query)
conn.commit()
send_admin_message("You have a pending request from " + tusername)
message = "The admin will be notified about your request to use this bot. Once he accepts it you will be able to get emails. You can keep modifiying your preferences till then"
send_message(chatId,message)
conn.close()
except (MySQLdb.Error, MySQLdb.Warning) as e:
app.logger.info(e)
send_error_message(chatId)
conn.close()
def grant_access(chatId,usernames):
usernames = list(set(usernames))
if (len(usernames) == 0):
message = "No input given\n"
send_message(chatId,message)
return
try:
conn,cursor = connect()
query = "select * from pending_requests"
cursor.execute(query)
pending_requests = cursor.fetchall()
query = "select tusername from accepted_users"
cursor.execute(query)
accepted_users = cursor.fetchall()
conn.commit()
except (MySQLdb.Error, MySQLdb.Warning) as e:
app.logger.info(e)
send_error_message(chatId)
return
accepted_users = [x[0] for x in accepted_users]
toaccept = []
alreadydone = []
invalid_handle = []
for user in usernames:
flag = False
for request in pending_requests:
if (request[3] == user or user == '*'):
toaccept.append(request)
flag = True
if (not flag):
if (user in accepted_users):
alreadydone.append(user)
else:
invalid_handle.append(user)
try:
message = ""
if (len(toaccept) > 0):
for row in toaccept:
query = "insert into accepted_users (chatId,firstName,lastName,tusername) values ('{0}','{1}','{2}','{3}')".format(row[0],row[1],row[2],row[3])
cursor.execute(query)
conn.commit()
for row in toaccept:
query = "delete from pending_requests where chatId = {0}".format(row[0])
cursor.execute(query)
conn.commit()
for row in toaccept:
message = "Congratulation's you are granted access to use this bot!!!!"
send_message(row[0],message)
message = "granted access to\n"
for row in toaccept:
message = message + " ->" + row[3] +'\n'
else:
message = "No changes done\n"
if (len(alreadydone) > 0):
message = message + "The following users already have access\n"
for user in alreadydone:
message = message + " ->" + user + '\n'
if (len(invalid_handle) > 0):
message = message + "The following users either have not requested for access or they do not exist in our database\n"
for user in invalid_handle:
message = message + " ->" + user + '\n'
conn.commit()
conn.close()
send_message(chatId,message)
except (MySQLdb.Error, MySQLdb.Warning) as e:
app.logger.info(e)
send_error_message(chatId)
conn.close()
def clean_message(message):
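    # Lowercases the leading command token (everything before the first space) and strips surrounding whitespace.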
index = 0
message = message.strip()
newmessage = ""
while (index < len(message)):
if (message[index] == ' '):
break
else:
newmessage = newmessage + message[index].lower()
index = index + 1
newmessage = newmessage + message[index:]
return newmessage
def add_mail(chatId,mails):
mails = list(set(mails))
conn,cursor = connect()
already_exist = []
added_mail = []
for mail in mails:
try:
if (is_valid_mail(mail)):
already_exist.append(mail)
else:
query = "insert into mails (mail_id) values ('{0}');".format(mail)
cursor.execute(query)
conn.commit()
added_mail.append(mail)
except (MySQLdb.Error, MySQLdb.Warning) as e:
app.logger.info(e)
send_error_message(chatId)
conn.close()
return
message = "No changes done\n"
if (len(added_mail) > 0):
message = "Inserted the following into the database\n"
for mail in added_mail:
message = message + " ->" + mail + '\n'
if (len(already_exist)):
message = message + "The following already exist in the database\n"
for mail in already_exist:
message = message + " ->" + mail
send_message(chatId,message)
return
def remove_mail(chatId,mails):
mails = list(set(mails))
conn,cursor = connect()
not_present = []
removed_mail = []
for mail in mails:
try:
if (is_valid_mail(mail)):
query = "delete from mail where mail_id = '{0}';".format(mail)
cursor.execute(query)
conn.commit()
removed_mail.append(mail)
else:
not_present.append(mail)
except (MySQLdb.Error, MySQLdb.Warning) as e:
app.logger.info(e)
send_error_message(chatId)
conn.close()
return
message = "No changes done\n"
if (len(removed_mail) > 0):
message = "Deleted the following from the database\n"
for mail in removed_mail:
message = message + " ->" + mail + '\n'
if (len(not_present)):
message = message + "The following do not exist in the database\n"
for mail in not_present:
message = message + " ->" + mail + '\n'
send_message(chatId,message)
return
def view_accepted_users(chatId):
try:
conn,cursor = connect()
query = "select firstName,lastName from accepted_users"
cursor.execute(query)
names = cursor.fetchall()
conn.close()
except (MySQLdb.Error, MySQLdb.Warning) as e:
app.logger.info(e)
send_error_message(chatId)
conn.close()
return
message = "Accepted Users are \n"
for name in names:
message = message + name[0] + " " +name[1] + '\n'
send_message(chatId,message)
def draw_chart(chart_data,recieved_mails,expected_mails,pie):
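    # Plots actual vs. expected mails per day (line chart) or sent vs. not-sent totals (pie chart),
    # saves the figure under images/ with a random five-letter name, and returns its public URL.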
expectedy_axis_data = []
y_axis_data = []
x_axis_data = []
x_axis_ticks = []
x_axis_ticks_indices = []
index = 0
current_month = 0
for tup in chart_data:
date = tup[0]
value = tup[1]
expected_value = tup[2]
if (index == 0 or index == len(chart_data) - 1):
current_month = get_month_for_plot(date)
x_axis_ticks.append(clean_date_for_plot(date))
x_axis_ticks_indices.append(index)
elif (get_month_for_plot(date) == current_month + 1):
current_month = current_month + 1
x_axis_ticks.append(clean_date_for_plot(date))
x_axis_ticks_indices.append(index)
x_axis_data.append(clean_date_for_plot(date))
y_axis_data.append(value)
expectedy_axis_data.append(expected_value)
index = index + 1
if (not pie):
ax = plt.figure().gca()
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
plt.xticks(x_axis_ticks_indices,x_axis_ticks,rotation='horizontal')
plt.tight_layout()
plt.plot(x_axis_data,y_axis_data,label="actual")
plt.plot(x_axis_data,expectedy_axis_data,linestyle='dashed', label="expected")
else:
sizes = [recieved_mails,expected_mails-recieved_mails]
labels = 'Sent', 'Did Not Send'
explode = (0,0.1)
fig1, ax1 = plt.subplots()
ax1.pie(sizes,explode=explode,labels=labels, autopct='%1.1f%%',shadow=True, startangle=90)
ax1.axis('equal')
stringtmp = "abcdefghijklmnopqrstuvwxyz"
imgname = random.choice(stringtmp) + random.choice(stringtmp) + random.choice(stringtmp) + random.choice(stringtmp) + random.choice(stringtmp) + ".jpeg"
f = open('images/'+imgname,'w+')
f.close()
app.logger.info(imgname)
plt.savefig('images/'+imgname,format='jpeg')
pic_url = "https://sandeshghanta.pythonanywhere.com/{0}/files/".format(values['bot_token']) + imgname
return pic_url
def list_statistics(chatId,message):
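    # Parses "/statistics <-a|-i|-b> [target] <-d|-p|-h> [dates] [-line|-pie]", tallies received
    # vs. expected mails over the chosen period, and replies with a summary plus a chart URL.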
batches = values['batches']
summary_message = "Statistics of "
os.chdir('/home/sandeshghanta/mysite/')
args = list(message.strip().split())
try:
if (args[0] == '-a'):
summary_message = summary_message + "all users "
args.insert(1,"allmail")
elif (args[0] == '-i'):
try:
is_valid = is_valid_mail(args[1])
if (not is_valid):
message = str(args[1]) + " is not a valid mail"
send_message(chatId,message)
return
except IndexError:
message = "Argument for -i not provided"
send_message(chatId,message)
return
summary_message = summary_message + "of the individual "+args[1] + " "
elif (args[0] == '-b'):
try:
if (args[1] not in batches):
message = str(args[1]) + " is not a valid batch. The available batches are " + ', '.join(batches)
send_message(chatId,message)
return
except IndexError:
message = "Argument for -b not provided"
send_message(chatId,message)
return
summary_message = summary_message + "of the batch " + args[1] + " "
else:
errormsg = """Please check your arguments """ + args[0] + """ is not a valid option. The available options are -i, -g, -a
-i stands for individual report. You can give an email as input
-b stands for batch report. You can give a batch name as inputself.
-a stands for overall report"""
send_message(chatId,errormsg)
return
except IndexError:
errormsg = "The first argument for /statistics method i.e -i,-a,-b is not provided"
send_message(chatId,errormsg)
return
try:
if (args[2] == '-h'):
start_date = "01-07-18"
args.insert(3,start_date)
args.insert(4,get_today_date())
summary_message = summary_message + "from " + start_date + " to " + str(get_today_date()) + ". "
elif (args[2] == '-d'):
if (len(args) == 3):
args.insert(3,get_today_date())
args.insert(4,get_today_date())
else:
is_valid,errormsg = is_valid_date(args[3])
if (not is_valid):
user_provided_chart_flag = ' '.join(args[3:])
user_provided_chart_flag = user_provided_chart_flag.lower()
if not(user_provided_chart_flag == '-line' or user_provided_chart_flag == '-pie'):
errormsg = errormsg + ". Also {0} is not a chart flag. The availabe chart flags are -line and -pie".format(user_provided_chart_flag)
message = errormsg
send_message(chatId,message)
return
else: #The user has provided a valid chart flag as the third argument. It means that the command is of the format -d -pie/-line
args.insert(3,get_today_date())
else:
args[3] = errormsg #If the date is valid then the is_valid_date method returns the modified date
is_valid,errormsg = is_smaller_than_today(args[3])
if (not is_valid):
message = errormsg
send_message(chatId,message)
return
else:
args[3] = errormsg #If the date is valid then the is_valid_date method returns the modified date
args.insert(4,args[3]) #To maintain uniformity all the dates are in the -p format. If there is only one day 'x' then the program will search from x to x
summary_message = summary_message + "on the day " + str(get_today_date()) + ". "
elif (args[2] == '-p'):
try:
for i in range(3,5): #i takes the values 3,4 args[i] denotes the dates
is_valid,errormsg = is_valid_date(args[i])
if (not is_valid):
message = errormsg
send_message(chatId,message)
return
else:
args[i] = errormsg #if is_valid is true then errormsg is actually the formatted value of the date
is_valid,errormsg = is_smaller_than_today(args[i])
if (not is_valid):
message = errormsg
send_message(chatId,message)
return
else:
args[i] = errormsg
summary_message = summary_message + "from " + str(args[3]) + " to " + str(args[4]) + ". "
except IndexError:
message = "-p flag requires two dates to be given. Two dates were not given"
send_message(chatId,message)
return
else:
errormsg = "The avaiable options for time are -d -p -a. " + args[2] + " is not a valid option " + """
-d stands for a particular day. You can give a date input after this. The format is dd-mm-yy If no input is given after the -d flag today's date is taken by default
-p stands for a period. You can give a two dates after the -p flag. Both the dates should be in dd-mm-yy format. The first date stands for the start date and the second date stands for end date. If the end date is not given the default value is the current date
-h stands for history. The statistics returned will be from the start of time to the current date """
send_message(chatId,errormsg)
return
except IndexError:
errormsg = "The /statistics must require a time flag. The avaiable options are -d, -p, -h"
send_message(chatId,errormsg)
return
#The Code below gets the statistics
user_provided_chart_flag = ""
if (len(args) > 5):
user_provided_chart_flag = ''.join(args[5:])
user_provided_chart_flag = user_provided_chart_flag.lower()
if not(user_provided_chart_flag == '-line' or user_provided_chart_flag == '-pie'):
errormsg = user_provided_chart_flag + " is not a valid chart flag. The valid options are -line and -pie"
send_message(chatId,errormsg)
return
start_date = datetime.datetime.strptime(args[3],'%d-%m-%y')
end_date = datetime.datetime.strptime(args[4],'%d-%m-%y')
if (start_date > end_date):
error_msg = "Start Date is greater than End Date. Please recheck your input!!"
send_message(chatId,error_msg)
return
file = open('maildata.json','r')
mail_json_data = json.load(file)
file.close()
count_of_students_in_batch = {}
for batch in batches:
count_of_students_in_batch[batch] = len(mail_json_data[batch])
chart_data = []
recieved_mails = 0
expected_mails = 0
while (start_date <= end_date):
file_name = start_date.strftime('%d-%m-%y')
try:
with open('jsondata/'+file_name+'.txt') as fileobj:
json_file_data = json.load(fileobj)
if (args[0] == '-b'):
recieved_mails = recieved_mails + len(json_file_data[args[1]])
expected_mails = expected_mails + count_of_students_in_batch[args[1]]
chart_data.append((file_name,len(json_file_data[args[1]]),count_of_students_in_batch[args[1]]))
elif (args[0] == '-i'):
sent = 0
for batch in json_file_data:
if (args[1] in json_file_data[batch]):
sent = 1
break
recieved_mails = recieved_mails + sent
expected_mails = expected_mails + 1
chart_data.append((file_name,sent,1))
elif (args[0] == '-a'):
sent = 0
for batch in json_file_data:
sent = sent + len(json_file_data[batch])
total_no_of_students_in_batches = 0
for batch in count_of_students_in_batch:
total_no_of_students_in_batches = total_no_of_students_in_batches + count_of_students_in_batch[batch]
recieved_mails = recieved_mails + sent
expected_mails = expected_mails + total_no_of_students_in_batches
chart_data.append((file_name,sent,total_no_of_students_in_batches))
except FileNotFoundError:
#send_admin_message(file_name+'.txt' + " is not there in the server!!!")
            pass
start_date = start_date + datetime.timedelta(days=1)
    summary_message = summary_message + str(recieved_mails) + " mails received, " + str(expected_mails) + " were expected."
#Below we are checking what type of chart the user asked for
if (user_provided_chart_flag == ""):
if (args[2] != '-d'): #If -d flag is given the default option is pie and it cannot be changed
pie = False
if (args[0] == '-i'):
pie = True
else:
pie = True
else:
if (user_provided_chart_flag == "-line"):
pie = False
else:
pie = True
pic_url = draw_chart(chart_data,recieved_mails,expected_mails,pie)
summary_message = summary_message + " " + pic_url
send_message(chatId,summary_message)
def list_admin_methods(chatId):
message = " /grant_access *\n /view_accepted_users\n /send_message_to_all\n /add_mail\n /remove_mail\n /revoke_access\n /get_pending_requests\n"
send_message(chatId,message)
def send_help(request_data):
chat_id = str(request_data['message']['chat']['id'])
message = request_data['message']['text'].strip()
args = message.split(' ')
file = open('help.json','r')
help = json.load(file)
file.close()
if (len(args) == 1):
send_message(chat_id,'\n'.join(help['welcome_message']),False,False)
methods_help_message = ""
for method in help['methods_help']:
methods_help_message = methods_help_message + '\n'.join(help['methods_help'][method]) + '\n\n'
send_message(chat_id,methods_help_message,False,False)
send_message(chat_id,'\n'.join(help['warning_message']),False,False)
else:
args[1] = args[1].lower()
message = "The method " + args[1] +" is not a valid method. Please check the list of availabe methods"
if (args[1] in help['methods_help']):
message = '\n'.join(help['methods_help'][args[1]])
send_message(chat_id,message,False,False)
def handle_request(request_data):
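    # Dispatches an incoming Telegram message to the matching command handler; admin-only commands are gated by isadmin(chatId).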
message = request_data['message']['text']
chatId = str(request_data['message']['chat']['id'])
message = clean_message(message)
if ('username' not in request_data['message']['chat']):
send_message(chatId,"You do not have a telegram username please do create one in settings and then use this bot!!!")
return
if (message.startswith('/start') or message.startswith('/help')):
send_help(request_data)
elif (message.startswith('/follow')):
if (len(message) > 8):
emails = list(message[8:].split(' '))
follow(chatId,emails)
else:
message = "The /follow command requires some input to be given"
send_message(chatId,message)
elif (message.startswith('/unfollow')):
if (len(message) > 10):
emails = list(message[10:].split(' '))
unfollow(chatId,emails)
else:
message = "The /unfollow command requires some input to be given"
send_message(chatId,message)
elif (message.startswith('/unfollow_all')):
unfollow_all(chatId)
elif (message.startswith('/list_following')):
list_following(chatId)
elif (message.startswith('/request_access')):
request_access(request_data)
elif (message.startswith('/list_all_mails')):
list_all_mails(chatId)
elif (message.startswith('/statistics')):
if (len(message) == 11):
send_message(chatId,"No input given for statistics")
else:
list_statistics(chatId,message[11:])
elif (isadmin(chatId)):
if (message.startswith('/get_pending_requests')):
get_pending_requests(chatId)
elif (message.startswith('/grant_access ')):
usernames = list(message[14:].split(' '))
grant_access(chatId,usernames)
elif (message.startswith('/revoke_access ')):
usernames = list(message[15:].split(' '))
revoke_access(chatId,usernames)
elif (message.startswith('/add_mail ')):
mails = list(message[10:].split(' '))
add_mail(chatId,mails)
elif (message.startswith('/remove_mail ')):
mails = list(message[13:].split(' '))
remove_mail(chatId,mails)
elif (message.startswith('/view_accepted_users')):
view_accepted_users(chatId)
elif (message.startswith('/reset_webhook')):
reset_webhook(chatId)
elif (message.startswith('/list_admin_methods')):
list_admin_methods(chatId)
        elif (message.startswith('/send_message_to_all')):
            if (len(message) > 21):  # skip past '/send_message_to_all ' (21 characters)
                message = message[21:]
                send_message_to_all(message)
            else:
                send_message(chatId,"Blank Message Provided")
else:
message = "Invalid input"
send_message(chatId,message)
else:
message = "Invalid Input"
send_message(chatId,message)
values = {} # Dictionary holding all the confidential data; defined at global scope so every function can access it
with open("values.json","r") as file:
values = json.load(file)
@app.route("/"+values['bot_token'], methods=['GET','POST'])
def webhook():
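    # Telegram webhook endpoint: extract the chat id, register first-time users, then hand the update to handle_request.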
request_data = request.get_json()
chatId = ""
if ('message' in request_data):
try:
chatId = str(request_data['message']['chat']['id'])
except KeyError:
try:
chatId = str(request_data['message']['from']['id'])
except KeyError:
send_admin_message(str(request_data))
return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
else:
return json.dumps({'success':True}), 200, {'ContentType':'application/json'} #As of now edited messages are not supported
if (not exists_in_db(chatId)):
add_entry_to_user_in_db(request_data)
handle_request(request_data)
return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
@app.route("/"+values['bot_token']+'/files/<path:path>')
def get_file(path):
"""Download a file."""
UPLOAD_DIRECTORY = 'images'
return send_from_directory(UPLOAD_DIRECTORY, path, as_attachment=True)
if __name__ == '__main__':
os.chdir('/home/sandeshghanta/mysite/')
app.run(host='0.0.0.0',port="1234")
|
[
"sghanta05@gmail.com"
] |
sghanta05@gmail.com
|
7245dad299d3e839230546bf0a637f08d25cee06
|
155347f086810df5dcfdae2aceb691fcaf9286e8
|
/tools/train.py
|
7e8240d33c091d468a9c083e274e48b2a95d02c3
|
[
"Apache-2.0"
] |
permissive
|
Ixuanzhang/mmdet
|
8458c6def6b627218e6093cb7d1fcfe12232f2e1
|
a8f8397e49e3091f23262f8ace9641ee2c6be746
|
refs/heads/main
| 2022-12-27T15:03:51.696797
| 2020-10-09T12:11:20
| 2020-10-09T12:11:20
| 302,585,593
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,765
|
py
|
from __future__ import division
import argparse
import copy
import os
import os.path as osp
import time
import mmcv
import torch
from mmcv import Config
from mmcv.runner import init_dist
from mmdet import __version__
from mmdet.apis import set_random_seed, train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.utils import collect_env, get_root_logger
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work_dir', help='the dir to save logs and models')
parser.add_argument(
'--resume_from', help='the checkpoint file to resume from')
parser.add_argument(
'--validate',
action='store_true',
help='whether to evaluate the checkpoint during training')
parser.add_argument(
'--gpus',
type=int,
default=1,
help='number of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument(
'--autoscale-lr',
action='store_true',
help='automatically scale lr with the number of gpus')
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# update configs according to CLI args
if args.work_dir is not None:
cfg.work_dir = args.work_dir
if args.resume_from is not None:
cfg.resume_from = args.resume_from
cfg.gpus = args.gpus
if args.autoscale_lr:
# apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, '{}.log'.format(timestamp))
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([('{}: {}'.format(k, v))
for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
# log some basic info
logger.info('Distributed training: {}'.format(distributed))
logger.info('Config:\n{}'.format(cfg.text))
# set random seeds
if args.seed is not None:
logger.info('Set random seed to {}, deterministic: {}'.format(
args.seed, args.deterministic))
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
model = build_detector(
cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmdet version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmdet_version=__version__,
config=cfg.text,
CLASSES=datasets[0].CLASSES)
# add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
    train_detector(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=args.validate,
        timestamp=timestamp,
        meta=meta)
if __name__ == '__main__':
main()
|
[
"919747402@qq.com"
] |
919747402@qq.com
|
ce66b11d446572509c9d39ceaeacd60871c5eb81
|
d5697fea4c5a31ac5deb25691b1c59e041b9a177
|
/Data_analysis/MI_analysis.py
|
065b912bfc49aec6a2597d2fa505dc1032721195
|
[] |
no_license
|
YGJW27/CV-pytorch
|
63b26282f129e4198fd68747505091c26f799c48
|
d5f549507e5af2783ed12cef5d7e312d57d6bb81
|
refs/heads/master
| 2021-06-11T11:31:27.323854
| 2021-03-30T13:11:38
| 2021-03-30T13:11:38
| 154,324,519
| 1
| 0
| null | 2019-12-11T01:39:25
| 2018-10-23T12:30:26
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,609
|
py
|
import os
import glob
import pandas as pd
import networkx as nx
import numpy as np
import torch
import scipy.stats
import matplotlib.pyplot as plt
from sklearn.preprocessing import MaxAbsScaler, KBinsDiscretizer
import warnings
warnings.filterwarnings("ignore")
DATA_PATH = "D:/code/DTI_data/network_FN/"
def data_list(sample_path):
sub_dirs = [x[0] for x in os.walk(sample_path)]
sub_dirs.pop(0)
data_list = []
for sub_dir in sub_dirs:
file_list = []
dir_name = os.path.basename(sub_dir)
file_glob = os.path.join(sample_path, dir_name, '*')
file_list.extend(glob.glob(file_glob))
for file_name in file_list:
data_list.append([file_name, dir_name])
return np.array(data_list)
class MRI_Dataset(torch.utils.data.Dataset):
def __init__(self, data_list):
self.data_list = data_list
def __getitem__(self, idx):
filepath, target = self.data_list[idx][0], int(self.data_list[idx][1])
dataframe = pd.read_csv(filepath, sep="\s+", header=None)
pic = dataframe.to_numpy()
return pic, target, idx
def __len__(self):
return len(self.data_list)
filelist = data_list(DATA_PATH)
dataset = MRI_Dataset(filelist)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1000, shuffle=False)
for data, target, idx in dataloader:
x = data.numpy()
y = target.numpy()
idx = idx.numpy()
x = x.reshape(x.shape[0], -1)
# x = x[:, np.any(x, axis=0)]
def Shannon_entropy(A):
unique, counts = np.unique(A, return_counts=True)
p = counts/counts.sum()
ent = -np.sum(p * np.log2(p))
return ent
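# e.g. Shannon_entropy(np.array([0, 0, 1, 1])) == 1.0 (two equally likely symbols)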
def mutual_information(A, B):
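    # MI(A;B) = H(A) - H(A|B); H(A|B) is estimated as the entropy of A within each
    # value of B, weighted by how often that value occurs.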
H_A = Shannon_entropy(A)
    unique, counts = np.unique(B, return_counts=True)
H_A1B = 0
for idx, status in enumerate(unique):
H_A1B += Shannon_entropy(A[B==status]) * counts[idx]/counts.sum()
MI_AB = H_A - H_A1B
return MI_AB
est = KBinsDiscretizer(n_bins=10, encode='ordinal', strategy='quantile')
est.fit(x)
x_d = est.transform(x)
MI_array = np.zeros(x_d.shape[1])
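# Note: each entry filled in below is the Shannon entropy of one (flattened) connection across subjects.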
for i, e in enumerate(MI_array):
MI_array[i] = Shannon_entropy(x_d[:, i])
MI_array = MI_array.reshape(data.shape[1], -1)
MI_array[MI_array>3] = 0
G_MI = nx.from_numpy_array(MI_array)
from sklearn.svm import SVC, LinearSVC
from sklearn.model_selection import KFold
from sklearn.preprocessing import MaxAbsScaler
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
# seed = 1
# cv = 20
# kf = KFold(n_splits=cv, shuffle=True, random_state=seed)
# acc_sum = 0
# for idx, (train_idx, test_idx) in enumerate(kf.split(dataset)):
# x_t = x[train_idx]
# y_t = y[train_idx]
# est = KBinsDiscretizer(n_bins=10, encode='ordinal', strategy='quantile')
# est.fit(x_t)
# x_d = est.transform(x_t)
# MI_array = np.zeros(x_d.shape[1])
# for i, e in enumerate(MI_array):
# MI_array[i] = Shannon_entropy(x_d[:, i])
# x_d = x[:, MI_array>2.8]
# ynew_train = y[train_idx]
# ynew_test = y[test_idx]
# # Norm
# scaler = MaxAbsScaler()
# scaler.fit(x_d[train_idx])
# xnew_train = scaler.transform(x_d[train_idx])
# xnew_test = scaler.transform(x_d[test_idx])
# print(xnew_train.shape[1])
# # SVC
# svc = SVC(kernel='rbf', random_state=1, gamma=0.01, C=10)
# model = svc.fit(xnew_train, ynew_train)
# predict = model.predict(xnew_test)
# correct = np.sum(predict == ynew_test)
# accuracy = correct / test_idx.size
# print("cv: {}/{}, acc.: {:.1f}\n".format(idx, cv, accuracy*100))
# acc_sum += accuracy
# print("total acc.: {:.1f}\n".format(acc_sum / cv * 100))
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
node_path = 'D:/code/DTI_data/network_distance/AAL_90.node'
nodes = pd.read_csv(node_path, sep=' ', header=None)
nodes = nodes.iloc[:, 0:3]
avg = torch.mean(data, axis=0).numpy()
G = nx.from_numpy_array(avg)
pos = dict(zip(range(nodes.shape[0]), [list(row) for row in nodes.to_numpy()]))
nx.set_node_attributes(G, pos, 'coord')
nx.set_node_attributes(G_MI, pos, 'coord')
from mayavi import mlab
def draw_network(G):
mlab.clf()
pos = np.array([pos for key, pos in G.nodes('coord')])
pts = mlab.points3d(pos[:, 0], pos[:, 1], pos[:, 2], resolution=20, scale_factor=5)
pts.mlab_source.dataset.lines = np.array([row for row in G.edges(data='weight')])
tube = mlab.pipeline.tube(pts, tube_radius=0.5)
mlab.pipeline.surface(tube)
mlab.show()
draw_network(G_MI)
|
[
"sheart2v@outlook.com"
] |
sheart2v@outlook.com
|
4733c4fada16561b0df6c63c121b491434349d75
|
d3df80cdb8fff177aae5b6b2ef4fcb8a5d67dab8
|
/run.py
|
3837044b961ae841e19790f0708cafc242d52702
|
[] |
no_license
|
connormahern/Caled
|
001aeac88f276e3f8c291b42205d2376222fd8a8
|
7fce7e9c45ec338d50a3e3dfef4cec9e56124f29
|
refs/heads/master
| 2023-04-07T02:16:37.201410
| 2021-04-15T18:31:58
| 2021-04-15T18:31:58
| 342,975,585
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
import os
from main import create_app
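# FLASK_CONFIG selects the configuration name handed to the app factory; create_app presumably falls back to a default when it is unset.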
config_name = os.getenv('FLASK_CONFIG')
app = create_app(config_name)
if __name__ == '__main__':
app.run()
|
[
"cjmahern@iu.edu"
] |
cjmahern@iu.edu
|
0b6a85e3dbcc6b4859a504f53d7a75018557849e
|
eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7
|
/google/cloud/dialogflow/cx/v3beta1/dialogflow-cx-v3beta1-py/google/cloud/dialogflowcx_v3beta1/services/pages/async_client.py
|
440ca336ad8b82928c46637a73125a29c4707dba
|
[
"Apache-2.0"
] |
permissive
|
Tryweirder/googleapis-gen
|
2e5daf46574c3af3d448f1177eaebe809100c346
|
45d8e9377379f9d1d4e166e80415a8c1737f284d
|
refs/heads/master
| 2023-04-05T06:30:04.726589
| 2021-04-13T23:35:20
| 2021-04-13T23:35:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27,354
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.dialogflowcx_v3beta1.services.pages import pagers
from google.cloud.dialogflowcx_v3beta1.types import fulfillment
from google.cloud.dialogflowcx_v3beta1.types import page
from google.cloud.dialogflowcx_v3beta1.types import page as gcdc_page
from google.protobuf import field_mask_pb2 as field_mask # type: ignore
from .transports.base import PagesTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import PagesGrpcAsyncIOTransport
from .client import PagesClient
class PagesAsyncClient:
"""Service for managing
[Pages][google.cloud.dialogflow.cx.v3beta1.Page].
"""
_client: PagesClient
DEFAULT_ENDPOINT = PagesClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = PagesClient.DEFAULT_MTLS_ENDPOINT
entity_type_path = staticmethod(PagesClient.entity_type_path)
parse_entity_type_path = staticmethod(PagesClient.parse_entity_type_path)
flow_path = staticmethod(PagesClient.flow_path)
parse_flow_path = staticmethod(PagesClient.parse_flow_path)
intent_path = staticmethod(PagesClient.intent_path)
parse_intent_path = staticmethod(PagesClient.parse_intent_path)
page_path = staticmethod(PagesClient.page_path)
parse_page_path = staticmethod(PagesClient.parse_page_path)
transition_route_group_path = staticmethod(PagesClient.transition_route_group_path)
parse_transition_route_group_path = staticmethod(PagesClient.parse_transition_route_group_path)
webhook_path = staticmethod(PagesClient.webhook_path)
parse_webhook_path = staticmethod(PagesClient.parse_webhook_path)
common_billing_account_path = staticmethod(PagesClient.common_billing_account_path)
parse_common_billing_account_path = staticmethod(PagesClient.parse_common_billing_account_path)
common_folder_path = staticmethod(PagesClient.common_folder_path)
parse_common_folder_path = staticmethod(PagesClient.parse_common_folder_path)
common_organization_path = staticmethod(PagesClient.common_organization_path)
parse_common_organization_path = staticmethod(PagesClient.parse_common_organization_path)
common_project_path = staticmethod(PagesClient.common_project_path)
parse_common_project_path = staticmethod(PagesClient.parse_common_project_path)
common_location_path = staticmethod(PagesClient.common_location_path)
parse_common_location_path = staticmethod(PagesClient.parse_common_location_path)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
PagesAsyncClient: The constructed client.
"""
return PagesClient.from_service_account_info.__func__(PagesAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
PagesAsyncClient: The constructed client.
"""
return PagesClient.from_service_account_file.__func__(PagesAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@property
def transport(self) -> PagesTransport:
"""Return the transport used by the client instance.
Returns:
PagesTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(type(PagesClient).get_transport_class, type(PagesClient))
def __init__(self, *,
credentials: credentials.Credentials = None,
transport: Union[str, PagesTransport] = 'grpc_asyncio',
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the pages client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.PagesTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = PagesClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def list_pages(self,
request: page.ListPagesRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListPagesAsyncPager:
r"""Returns the list of all pages in the specified flow.
Args:
request (:class:`google.cloud.dialogflowcx_v3beta1.types.ListPagesRequest`):
The request object. The request message for
[Pages.ListPages][google.cloud.dialogflow.cx.v3beta1.Pages.ListPages].
parent (:class:`str`):
Required. The flow to list all pages for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3beta1.services.pages.pagers.ListPagesAsyncPager:
The response message for
[Pages.ListPages][google.cloud.dialogflow.cx.v3beta1.Pages.ListPages].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
request = page.ListPagesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_pages,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('parent', request.parent),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListPagesAsyncPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
async def get_page(self,
request: page.GetPageRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> page.Page:
r"""Retrieves the specified page.
Args:
request (:class:`google.cloud.dialogflowcx_v3beta1.types.GetPageRequest`):
The request object. The request message for
[Pages.GetPage][google.cloud.dialogflow.cx.v3beta1.Pages.GetPage].
name (:class:`str`):
Required. The name of the page. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/pages/<Page ID>``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3beta1.types.Page:
A Dialogflow CX conversation (session) can be described and visualized as a
state machine. The states of a CX session are
represented by pages.
For each flow, you define many pages, where your
combined pages can handle a complete conversation on
the topics the flow is designed for. At any given
moment, exactly one page is the current page, the
current page is considered active, and the flow
associated with that page is considered active. Every
flow has a special start page. When a flow initially
becomes active, the start page page becomes the
current page. For each conversational turn, the
current page will either stay the same or transition
to another page.
You configure each page to collect information from
the end-user that is relevant for the conversational
state represented by the page.
For more information, see the [Page
guide](\ https://cloud.google.com/dialogflow/cx/docs/concept/page).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
request = page.GetPageRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_page,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('name', request.name),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def create_page(self,
request: gcdc_page.CreatePageRequest = None,
*,
parent: str = None,
page: gcdc_page.Page = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcdc_page.Page:
r"""Creates a page in the specified flow.
Args:
request (:class:`google.cloud.dialogflowcx_v3beta1.types.CreatePageRequest`):
The request object. The request message for
[Pages.CreatePage][google.cloud.dialogflow.cx.v3beta1.Pages.CreatePage].
parent (:class:`str`):
Required. The flow to create a page for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
page (:class:`google.cloud.dialogflowcx_v3beta1.types.Page`):
Required. The page to create.
This corresponds to the ``page`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3beta1.types.Page:
A Dialogflow CX conversation (session) can be described and visualized as a
state machine. The states of a CX session are
represented by pages.
For each flow, you define many pages, where your
combined pages can handle a complete conversation on
the topics the flow is designed for. At any given
moment, exactly one page is the current page, the
current page is considered active, and the flow
associated with that page is considered active. Every
flow has a special start page. When a flow initially
                becomes active, the start page becomes the
current page. For each conversational turn, the
current page will either stay the same or transition
to another page.
You configure each page to collect information from
the end-user that is relevant for the conversational
state represented by the page.
For more information, see the [Page
guide](\ https://cloud.google.com/dialogflow/cx/docs/concept/page).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, page])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
request = gcdc_page.CreatePageRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if page is not None:
request.page = page
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_page,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('parent', request.parent),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def update_page(self,
request: gcdc_page.UpdatePageRequest = None,
*,
page: gcdc_page.Page = None,
update_mask: field_mask.FieldMask = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcdc_page.Page:
r"""Updates the specified page.
Args:
request (:class:`google.cloud.dialogflowcx_v3beta1.types.UpdatePageRequest`):
The request object. The request message for
[Pages.UpdatePage][google.cloud.dialogflow.cx.v3beta1.Pages.UpdatePage].
page (:class:`google.cloud.dialogflowcx_v3beta1.types.Page`):
Required. The page to update.
This corresponds to the ``page`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
The mask to control which fields get
updated. If the mask is not present, all
fields will be updated.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3beta1.types.Page:
A Dialogflow CX conversation (session) can be described and visualized as a
state machine. The states of a CX session are
represented by pages.
For each flow, you define many pages, where your
combined pages can handle a complete conversation on
the topics the flow is designed for. At any given
moment, exactly one page is the current page, the
current page is considered active, and the flow
associated with that page is considered active. Every
flow has a special start page. When a flow initially
                becomes active, the start page becomes the
current page. For each conversational turn, the
current page will either stay the same or transition
to another page.
You configure each page to collect information from
the end-user that is relevant for the conversational
state represented by the page.
For more information, see the [Page
guide](\ https://cloud.google.com/dialogflow/cx/docs/concept/page).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([page, update_mask])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
request = gcdc_page.UpdatePageRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if page is not None:
request.page = page
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_page,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('page.name', request.page.name),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def delete_page(self,
request: page.DeletePageRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes the specified page.
Args:
request (:class:`google.cloud.dialogflowcx_v3beta1.types.DeletePageRequest`):
The request object. The request message for
[Pages.DeletePage][google.cloud.dialogflow.cx.v3beta1.Pages.DeletePage].
name (:class:`str`):
Required. The name of the page to delete. Format:
                ``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/pages/<Page ID>``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
request = page.DeletePageRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_page,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('name', request.name),
)),
)
# Send the request.
await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-cloud-dialogflowcx',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = (
'PagesAsyncClient',
)
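
# ---------------------------------------------------------------------------
# Editor's note: a minimal, hedged usage sketch, not part of the generated
# client. It assumes application-default credentials and hypothetical
# project/agent/flow resource IDs, and relies on the ``gcdc_page`` and
# ``field_mask`` names imported earlier in this module. It exercises the
# flattened-parameter form of create_page/update_page/delete_page shown
# above, including a FieldMask that restricts the update to display_name.
async def _example_page_lifecycle() -> None:
    client = PagesAsyncClient()
    # Hypothetical parent flow; substitute real resource IDs.
    parent = (
        "projects/my-project/locations/global/"
        "agents/my-agent/flows/my-flow"
    )
    # Create a page; display_name is the only field set here.
    created = await client.create_page(
        parent=parent,
        page=gcdc_page.Page(display_name="Collect order number"),
    )
    # Rename the page; without the mask, every field would be overwritten.
    created.display_name = "Collect order ID"
    updated = await client.update_page(
        page=created,
        update_mask=field_mask.FieldMask(paths=["display_name"]),
    )
    # Clean up the example page.
    await client.delete_page(name=updated.name)

# To run (requires valid credentials):
#   import asyncio; asyncio.run(_example_page_lifecycle())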
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
40baaa3338fe05e60770d2d4e7e16777ddef95b6
|
6a6ecc0cd30257ef5fc08353a6ffecc9b5ee2ac4
|
/manage.py
|
f0a047c4a97c2ba552198ff1df0fc6063c38a9d8
|
[
"MIT"
] |
permissive
|
rozap/aircooledrescue
|
d144f5beb1d3be4c031f7722eaa4c00ae0b60a4a
|
95c0fec2a8452205019db7e721a7da1fcc4d5507
|
refs/heads/master
| 2021-01-02T04:53:16.111189
| 2014-09-23T02:42:18
| 2014-09-23T02:42:18
| 22,563,461
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "buspeople.settings")

    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
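
# Editor's note (hedged usage sketch, not part of the original file): with this
# entry point, management commands for the hypothetical "buspeople" project are
# run from a shell, for example:
#   python manage.py migrate
#   python manage.py runserver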
|
[
"chrisd1891@gmail.com"
] |
chrisd1891@gmail.com
|