blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7f96c49a29500bb4d3ece3d0ed3f017aaf3d77cf
|
451460fae427477bdd94ca98131fe896b150f7a6
|
/VideoSummarization-master/predict.py
|
f54f33c0a8668bdbc980a4b42ade26c04ebc9a20
|
[] |
no_license
|
sundar-paul/VideoSummarization-master
|
b751a91015412c10ef14258b8cdb4ae50b2edcb9
|
b9f658e6b7efc0e73723d426c33a5aeb5bd262f8
|
refs/heads/master
| 2022-12-12T15:54:31.061301
| 2020-02-14T12:22:22
| 2020-02-14T12:22:22
| 240,499,633
| 0
| 0
| null | 2022-12-08T03:36:51
| 2020-02-14T12:06:49
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,328
|
py
|
import imageio
from utils import *
from model import get_model,get_vgg_model
import re
import numpy as np
from env_variable import *
def extract_feature_single(video_path):
    """Score every frame of one video and return the indices of key frames.

    Loads the trained summarization model and the VGG feature extractor,
    preprocesses the video, and thresholds the per-frame scores at 0.5.
    Returns a numpy array of frame indices (0-based) judged "important".
    """
    summarizer = get_model(mode="test")
    vgg = get_vgg_model()
    frames = read_single_video(video_path)
    frames = resize_image(frames)
    frames = pad_data(frames).squeeze()
    features = extract_feature(frames, vgg)
    # Model expects a batch dimension; squeeze it back off the output.
    scores = summarizer.predict(np.expand_dims(features, 0)).squeeze()
    print(scores)
    return np.nonzero(scores >= 0.5)[0]
def get_summarized_video(video_path, jupyter=None):
    """Build a GIF summary of *video_path* from its predicted key frames.

    The video id is taken as the first run of digits in the path; frames are
    read from allframe_directory/<video_id>/ and the selected ones are saved
    to ./results/result video.gif.  The *jupyter* parameter is accepted for
    interface compatibility but currently unused.
    """
    video_id = re.findall(r'\d+', video_path)[0]
    # Frame files on disk are 1-indexed, model output is 0-indexed.
    key_frame = extract_feature_single(video_path) + 1
    print("important frames", key_frame)
    gifs = []
    frame_dir = allframe_directory + "/" + video_id
    for entry in os.listdir(frame_dir):
        frame_id = int(re.findall(r'\d+', entry)[0])
        if frame_id not in key_frame:
            continue
        filename = frame_dir + "/" + entry
        print(filename)
        gifs.append(imageio.imread(filename))
    imageio.mimsave('./results/result video.gif', gifs)
if __name__=="__main__":
    print("prediction started")
    # Hard-coded demo input; get_summarized_video derives the video id from
    # the first number appearing in this path ("1" here).
    path= "/media/vinodarjun/Storage/deeplearning Projects/computer vision/summary/dataset/videos/1.mp4"
    get_summarized_video(path)
    print("summary video saved in results folder")
|
[
"noreply@github.com"
] |
sundar-paul.noreply@github.com
|
f86419f5e1ab1cc1e28ca282380f2e27842b7486
|
564616220919877203922adc34e98bb137ed5680
|
/src/bot/bot.py
|
033899fecca3e3ad16f6e076117a6a00e48bb50a
|
[] |
no_license
|
Fumaz/LyricsBot
|
264485f95aaaf27f695645c777176661af97d212
|
964a16c8a9ce62ef1550b78fbe1fe37cbc5a8667
|
refs/heads/master
| 2023-07-25T21:45:07.968888
| 2021-09-07T17:08:32
| 2021-09-07T17:08:32
| 341,815,884
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 382
|
py
|
from pyrogram import Client
from . import config
from .db import models
# Pyrogram bot client, configured entirely from the project's config module.
# Handlers are auto-discovered from the plugins folder; 8 worker threads
# process incoming updates.
client = Client(session_name=config.SESSION_NAME,
                api_id=config.API_ID,
                api_hash=config.API_HASH,
                bot_token=config.BOT_TOKEN,
                plugins=dict(root=config.PLUGINS_FOLDER),
                workers=8)
def run():
    """Initialize the database models, then start the bot (blocks until stopped)."""
    models.setup()
    client.run()
|
[
"mail@fumaz.dev"
] |
mail@fumaz.dev
|
ebdf0537fcfac3b3f8d113d97fe4078e961967ca
|
d72dd4af40e7f60df15d22c25df124f552a551b5
|
/values.py
|
0a4be4a55c3e7794e66556bf52c087230955f514
|
[] |
no_license
|
smrnmakhija/Ciser
|
80ecfd2d2499723802a76041e93fd02c4f76b841
|
8f610fe701b60ad9351e80d702010033597212a9
|
refs/heads/master
| 2021-09-10T14:12:25.483389
| 2018-03-27T16:19:27
| 2018-03-27T16:19:27
| 108,571,785
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,068
|
py
|
import random as rnd
import numpy
# Reference parameter set from the original model:
#bet,eps,ro,gam,sigma,wa,taw,mu=0.6,0.084,0.95,0.1,0.0714,0.0588, 9.1324e-4,6.8493e-5
# Sweep bounds: each (MIN, MAX) pair below bounds one model parameter; the
# grid below computes the basic reproduction number R0 for every combination.
BET_MIN=1
BET_MAX=1.01
EPS_MIN=0.1
EPS_MAX=1
RHO_MIN=0.9
RHO_MAX=0.95
MU_MIN=5.4795e-5
MU_MAX=6.0883e-5
GAM_MIN=0.0714
GAM_MAX=0.1
SIG_MIN=0.0357
SIG_MAX=0.1429
TAW_MIN=9.1324e-4
TAW_MAX=0.0014
OMEG_MIN=0.0333
OMEG_MAX=0.0588
dataset = []
# Exhaustive grid sweep (very large: hundreds of millions of combinations).
# Each combination is printed as comma-separated text and stored in `dataset`.
for bet in numpy.arange(BET_MIN, BET_MAX, 0.01):
    for eps in numpy.arange(EPS_MIN, EPS_MAX, 0.01):
        for ro in numpy.arange(RHO_MIN, RHO_MAX, 0.001):
            for mu in numpy.arange(MU_MIN, MU_MAX, 0.0001):
                for gam in numpy.arange(GAM_MIN, GAM_MAX, 0.001):
                    for sigma in numpy.arange(SIG_MIN, SIG_MAX, 0.001):
                        for taw in numpy.arange(TAW_MIN, TAW_MAX, 0.0001):
                            for wa in numpy.arange(OMEG_MIN, OMEG_MAX, 0.01):
                                R0=bet*sigma*((taw+mu)+eps*ro*gam)/((sigma+mu)*(gam+mu)*(taw+mu))
                                #if R0 < 1:
                                print(bet, ",", eps, ",", ro, ",", mu, ",", gam, ",", sigma, ",", taw, ",",wa, ",", R0)
                                # NOTE(review): `wa` is printed above but NOT stored in
                                # datarow (R0 does not depend on it either) — confirm
                                # whether that omission is intentional.
                                datarow = [bet, eps, ro, mu, gam, sigma, taw, R0]
                                dataset.append(datarow)
|
[
"noreply@github.com"
] |
smrnmakhija.noreply@github.com
|
3cd0c9805c1dfb9762cfea9de878c47f9f967626
|
851d1f57a992b69d15f0f5191aca433b507d20ef
|
/funcoes_def/velocidade_media.py
|
a093b841b2e5439fd04655434feab0e3cc578fed
|
[] |
no_license
|
brgermano/estudo_python
|
af12d1c2e808c62c18088c82dce13c798cc8bb61
|
28664a3298e5707db756e1afb65e232038f65881
|
refs/heads/main
| 2023-05-28T09:50:51.662686
| 2021-06-01T23:30:29
| 2021-06-01T23:30:29
| 355,721,908
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 555
|
py
|
# Function that computes and reports the average speed
def calcularVelocidadeMedia(distancia, tempo):
    """Print the average speed (km/h) for the given distance (km) and time (h)."""
    print("A velocidade média é {} km/h".format(distancia/tempo))
# Main program: read distance and time from the user, then report the speed.
distancia_informada = float(input("Informe a distância"))
tempo_informado = float(input("Informe o tempo"))
# Call with the user-supplied values...
calcularVelocidadeMedia(distancia_informada, tempo_informado)
# ...and once more with fixed sample values.
calcularVelocidadeMedia(15, 2)
|
[
"brunoalvesgermano@gmail.com"
] |
brunoalvesgermano@gmail.com
|
379647d2a87d46df07f34d2423a3f858627dcab4
|
0f7a52c838ec76fefff058a33737fbfacb168e9a
|
/read_csv.py
|
549b06aaa86fbd77ce26d9d66363d62605485cd0
|
[] |
no_license
|
palla714/my-repo
|
8c76f22bbf58c3d3af050eeb6c32d48b3d073591
|
4fb9e03d9b572d96ba15226fdbcb6d6ea64b4e5c
|
refs/heads/master
| 2021-11-23T08:48:19.379907
| 2021-06-30T19:31:16
| 2021-06-30T19:31:16
| 199,654,017
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 142
|
py
|
import csv

# Dump every row of file1.csv to stdout as a list of field strings.
with open('file1.csv', 'r') as csv_file:
    reader = csv.reader(csv_file)
    #next(reader)  # uncomment to skip a header row
    for row in reader:
        print(row)
|
[
"das.pallavi355@gmail.com"
] |
das.pallavi355@gmail.com
|
9238c3ab30fc51de5c4783e6192f517d7c209557
|
ff0dc4533cfacada33b410470ef17846c70ccb5a
|
/shop/shop/settings.py
|
e5c48d8f5a1b8c3f3dd1c11b473a381f3476fa00
|
[] |
no_license
|
km123abca/django-shop
|
170c9141e0fc5caff3fabd9d7fec8b671747a789
|
097cfccfabb57b0de142f484d74a0781ac519a35
|
refs/heads/master
| 2023-08-03T07:34:05.474977
| 2020-07-11T12:43:32
| 2020-07-11T12:43:32
| 270,274,812
| 0
| 0
| null | 2021-09-22T19:22:35
| 2020-06-07T10:47:41
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,361
|
py
|
"""
Django settings for shop project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&dfbkfu=$$(w&1$ek%$%-%5bmqjn8yxq!&!exdlec@dowca!qf'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'billing',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'shop.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'shop.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
'''
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
'''
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'shop',
'HOST':'127.0.0.1',
'PORT':'3306',
'USER': 'root',
'PASSWORD': 'sonja',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"km123abca"
] |
km123abca
|
8cbadcfd4408fc8969b97069613353f6f16b1d5a
|
985336505d59daeb0b543f246199a6deef9f4d64
|
/lucien/__init__.py
|
7f822805d208c95a59ed12c4a323f4791334b0a3
|
[] |
no_license
|
sholiday/lucien-python-client
|
776978ec1202502e9ed65ba8f8ad6dc25b65bb39
|
8eda60e0aafee647a0ecd0638abcee6c4489d2b6
|
refs/heads/master
| 2021-01-10T21:43:43.108002
| 2011-12-29T03:02:50
| 2011-12-29T03:02:50
| 3,066,597
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 44
|
py
|
# Public API of the lucien package: the thrift-generated modules and the client.
__all__ = [
    'ttypes',
    'constants',
    'Lucien',
]
|
[
"stephen.holiday@gmail.com"
] |
stephen.holiday@gmail.com
|
924b5c4a0e2cf3d8dfc8aecfabb65d2b9f5bde9e
|
2b3e08faaa4edb548ef9bd386247f35f50a06766
|
/djangoProject1/djangoProject1/settings.py
|
4d5c51c1b17d9b76ad05eaa428573e4d2cb9dee6
|
[] |
no_license
|
aliciawill/pythonproject
|
e495d3a9e5d65768a69ea6ac01ff559e7112dd75
|
f6d166c366522f0e3c5c74fdd11ca7a7b5489ee1
|
refs/heads/master
| 2023-07-08T04:39:59.597131
| 2021-08-22T05:15:08
| 2021-08-22T05:15:08
| 360,455,729
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,420
|
py
|
"""
Django settings for djangoProject1 project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): key is committed to source control — rotate and load from the
# environment before deploying.
SECRET_KEY = 'django-insecure-vg(5k3xwlb=qkgj#%#h92bzkb&)sss0*9(d7(_=h)lx3no$7&3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangoProject1.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory in addition to per-app templates.
        'DIRS': [BASE_DIR / 'templates']
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'djangoProject1.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"noreply@github.com"
] |
aliciawill.noreply@github.com
|
0028b042057bd74a52591f222c35959cadf416b9
|
52025f56d547b5f602e451b32e75057a4e893928
|
/ziphon/forms.py
|
fbbefd3a509c98b23cc071527173bebad690236f
|
[] |
no_license
|
deone/thedebaters
|
a15a7323190ac9eb7b9b8570a192a03e13ae0a2f
|
e56d981d2ac7268c07cebf93d8e11230fb8840e3
|
refs/heads/master
| 2021-03-12T19:59:21.814716
| 2010-05-21T09:11:33
| 2010-05-21T09:11:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,011
|
py
|
from datetime import date
from django import forms
from django.conf import settings
from thedebaters.ziphon.models import Person
class DateOfBirthField(forms.Field):
    """Form field that parses a DD-MM-YYYY string into a datetime.date.

    Rejects empty input and years outside
    [current_year - settings.PARTICIPANT_AGE_LIMIT, current_year].
    """
    def clean(self, value):
        if not value:
            raise forms.ValidationError("Please enter your date of birth")
        try:
            # value is expected as "DD-MM-YYYY": r_date = [day, month, year].
            r_date = value.split("-")
            this_year = date.today().year
            if int(r_date[2]) < this_year-settings.PARTICIPANT_AGE_LIMIT or int(r_date[2]) > this_year:
                raise forms.ValidationError("You are too old to participate")
            date_of_birth = date(int(r_date[2]), int(r_date[1]), int(r_date[0]))
            return date_of_birth
        # Fixed: `except Exception, e:` is Python-2-only syntax; the bound name
        # was unused anyway.  Any parse failure — and, by design of the original
        # code, the age ValidationError raised above — is reported as the single
        # combined message below.
        except Exception:
            raise forms.ValidationError("Your date of birth is wrong or you're too old to participate")
class PersonForm(forms.ModelForm):
    """ModelForm for Person that replaces date_of_birth with the custom
    DateOfBirthField (DD-MM-YYYY) and creates the Person on save()."""

    def __init__(self, *args, **kwargs):
        super(PersonForm, self).__init__(*args, **kwargs)
        self.fields['date_of_birth'] = DateOfBirthField(help_text="Format: DD-MM-YYYY")

    class Meta:
        model = Person

    def save(self):
        """Create, persist and return a Person built from cleaned_data."""
        field_names = ("first_name", "last_name", "sex", "home_phone",
                       "office_phone", "mobile_phone", "email", "address",
                       "date_of_birth", "state_of_origin",
                       "relationship_status", "occupation", "hobbies")
        values = dict((name, self.cleaned_data[name]) for name in field_names)
        new_person = Person.objects.create(**values)
        new_person.save()
        return new_person
|
[
"alwaysdeone@gmail.com"
] |
alwaysdeone@gmail.com
|
4cb68d4167c5f99d64445b38aec91c682c1d41f4
|
ce00c3f8e2a7aeb0a79b9ce551ecd53c9a4a8d88
|
/Abhi.py
|
2c9b1eca3cf75a3b0a0011a489f8022579acb514
|
[] |
no_license
|
abhi330/hand-gestured-mouse-controller
|
d25cbb11f98cde6af80188787d20e681f7c37109
|
6bdbf6c66c68bd272b74156c7e684f893ea5cbb0
|
refs/heads/master
| 2022-04-14T00:55:40.110777
| 2020-04-10T07:09:30
| 2020-04-10T07:09:30
| 254,568,158
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,027
|
py
|
# Hand-gesture mouse controller: tracks colored markers in the webcam feed.
# Two visible blobs -> move the cursor (button released); one blob (the two
# markers "pinched" together) -> hold the left button and drag.
import cv2
import numpy as np
from pynput.mouse import Button, Controller
import wx
mouse=Controller()
app=wx.App(False)
# Screen resolution, used to map camera coordinates to screen coordinates.
(sx,sy)=wx.GetDisplaySize()
# Camera frame size.
(camx,camy)=(640,480)
# HSV bounds of the tracked marker color.
lowerBound=np.array([33,80,40])
upperBound=np.array([102,255,255])
cam= cv2.VideoCapture(0)
# Morphology kernels: small to remove speckle noise (open), large to fill holes (close).
kernelOpen=np.ones((5,5))
kernelClose=np.ones((20,20))
# 1 while the left button is held down (pinch active), else 0.
pinchFlag=0
while True:
    ret, img=cam.read()
    img=cv2.resize(img,(640,480))
    #convert BGR to HSV
    imgHSV= cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
    # create the Mask
    mask=cv2.inRange(imgHSV,lowerBound,upperBound)
    #morphology
    maskOpen=cv2.morphologyEx(mask,cv2.MORPH_OPEN,kernelOpen)
    maskClose=cv2.morphologyEx(maskOpen,cv2.MORPH_CLOSE,kernelClose)
    maskFinal=maskClose
    conts,h=cv2.findContours(maskFinal.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
    # Two blobs: cursor movement mode. Release the button if it was held.
    if(len(conts)==2):
        if(pinchFlag==1):
            pinchFlag=0
            mouse.release(Button.left)
        x1,y1,w1,h1=cv2.boundingRect(conts[0])
        x2,y2,w2,h2=cv2.boundingRect(conts[1])
        cv2.rectangle(img,(x1,y1),(x1+w1,y1+h1),(255,0,0),2)
        cv2.rectangle(img,(x2,y2),(x2+w2,y2+h2),(255,0,0),2)
        # Midpoint between the two blob centers drives the cursor.
        cx1=int(x1+w1/2)
        cy1=int(y1+h1/2)
        cx2=int(x2+w2/2)
        cy2=int(y2+h2/2)
        cx=int((cx1+cx2)/2)
        cy=int((cy1+cy2)/2)
        cv2.line(img, (cx1,cy1),(cx2,cy2),(255,0,0),2)
        cv2.circle(img, (cx,cy),2,(0,0,255),2)
        # Map camera coords to screen coords; x is mirrored (sx - ...).
        mouseLoc=(sx-(cx*sx/camx), cy*sy/camy)
        mouse.position=mouseLoc
        # Busy-wait until the OS reports the cursor at the target position.
        while mouse.position!=mouseLoc:
            pass
    # One blob: the markers overlap ("pinch") -> press and drag.
    elif(len(conts)==1):
        x,y,w,h=cv2.boundingRect(conts[0])
        if(pinchFlag==0):
            pinchFlag=1
            mouse.press(Button.left)
        cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
        cx=int(x+w/2)
        cy=int(y+h/2)
        cv2.circle(img,(cx,cy),int((w+h)/4),(0,0,255),2)
        mouseLoc=(sx-(cx*sx/camx), cy*sy/camy)
        mouse.position=mouseLoc
        while mouse.position!=mouseLoc:
            pass
    cv2.imshow("cam",img)
    cv2.waitKey(5)
|
[
"noreply@github.com"
] |
abhi330.noreply@github.com
|
e675e5aa191195322c959abaabe6b8820ce8d582
|
751d898301143adbc0b34bdc3bcc600a57b5b19f
|
/Code/src/Main.py
|
077cae765538fecc21146261094f8acfde573507
|
[] |
no_license
|
LuatVu/Hand-written-digits
|
dbf48d4aaff6ddd538b46d1b9ae13e810cc11b4c
|
ee8f4c6630f88ded90aad6cedcbc2bf82be2ea46
|
refs/heads/master
| 2021-04-12T09:12:01.037967
| 2018-04-23T14:55:27
| 2018-04-23T14:55:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,012
|
py
|
import sys
import os
import numpy as np
# sys.path.append('./src/')
sys.path.append('../fig/')
import mnist_loader
import network
import mnist
import gzip
import network2
import json
import random
# Load the raw MNIST arrays; only the third element (test set) is used below.
temp1, temp2, image_tests = mnist_loader.load_data()
# Wrapped (input, label) form used by the network training code.
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
# create model by network1 module
def create_model_1():
    """Build a 784-30-10 network using the `network` module."""
    return network.Network([784, 30, 10])
# training model by network1 module
def training_model_1(net):
    """Train *net* with SGD: 30 epochs, mini-batch size 10, learning rate 3.0."""
    epochs, batch_size, eta = 30, 10, 3.0
    net.SGD(training_data, epochs, batch_size, eta, test_data=test_data)
# create model by network_2 module
def create_model_2():
    """Build a 784-30-10 network2.Network with cross-entropy cost."""
    return network2.Network([784, 30, 10], cost=network2.CrossEntropyCost)
# training model by network_2 module
def training_model_2(net):
    """Train *net*: 30 epochs, batches of 10, eta 0.5, L2 lambda 5.0,
    monitoring cost and accuracy on both training and validation data."""
    monitors = dict(monitor_evaluation_accuracy=True,
                    monitor_evaluation_cost=True,
                    monitor_training_accuracy=True,
                    monitor_training_cost=True)
    net.SGD(training_data, 30, 10, 0.5,
            lmbda=5.0,
            evaluation_data=validation_data,
            **monitors)
def predict(net, image):
    """Return the predicted digit for *image* (a 784x1 numpy array):
    the argmax of the network's feedforward output."""
    return np.argmax(net.feedforward(image))
def plot_digit_image(image):
    """Display one digit image via the mnist plotting helper."""
    #image argument is one image
    mnist.plot_mnist_digit(image)
def get_image(image_tests):
    """Reshape each flattened 784-vector in image_tests[0] into a 2-D array
    with 28 columns (i.e. 28x28 digit images); returns the list of arrays."""
    return [np.reshape(vec, (-1, 28)) for vec in image_tests[0]]
# Reshape the flattened test images for plotting, then pair each image with
# its (input, label) test sample.
image_tests = get_image(image_tests)
# NOTE(review): the print statements in main() indicate Python 2, where zip
# returns a list; under Python 3 this would be a one-shot iterator.
datas = zip(image_tests, test_data)
def save_Model_1(net, filename):
    """Serialize a network.Network to *filename* as JSON.

    Stores the layer sizes plus weights and biases (numpy arrays converted
    to nested lists).  Readable back with load_1().
    """
    data = {
        "sizes": net.sizes,
        "weights": [w.tolist() for w in net.weights],
        "biases": [b.tolist() for b in net.biases],
    }
    # 'with' guarantees the file is closed even if json.dump raises
    # (the original leaked the handle on error).
    with open(filename, "w") as f:
        json.dump(data, f)
def save_Model_2(net, filename):
    """Persist a network2.Network using its own save() serializer."""
    net.save(filename)
def load_1(filename):
    """Rebuild a network.Network from JSON written by save_Model_1.

    Restores layer sizes, then overwrites the freshly initialized weights
    and biases with the stored values (converted back to numpy arrays).
    """
    # 'with' closes the file even if json.load raises.
    with open(filename, "r") as f:
        data = json.load(f)
    net = network.Network(data["sizes"])
    net.weights = [np.array(w) for w in data["weights"]]
    net.biases = [np.array(b) for b in data["biases"]]
    return net
def load_2(filename):
    """Load a network2.Network previously written with save_Model_2."""
    net = network2.load(filename)
    return net
# def main():
# net = load_1("./data/Network1.bin")
# for x, y in datas:
# print "Predicted Result: ", np.argmax( net.feedforward(y[0]) )
# plot_digit_image(x)
# try:
# input ("Press enter to continue...")
# except SyntaxError:
# pass
# os.system('clear')
# def main():
# net = create_model_2()
# training_model_2(net)
# save_Model_2(net,"./data/Network2.bin")
def main():
    # NOTE: Python 2 code (print statements below).
    # Load the trained network2 model, then show the prediction and plot for
    # each test image, pausing for Enter between images.
    net = load_2("../data/Network2.bin")
    for x, y in datas:
        result = net.feedforward(y[0])
        print "Result: \n", result
        print "Digit: ", np.argmax( result )
        plot_digit_image(x)
        try:
            input("Press enter to continue...")
        except SyntaxError:
            # Python 2 input() evals the typed text; a bare Enter raises
            # SyntaxError, which just means "continue".
            pass
        os.system("clear")
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
LuatVu.noreply@github.com
|
993a9811bc7a0b0ae28573e2d345504dde81a6c9
|
200f61d4536406c706694c8efd931eb1febd28d6
|
/leetcode/dc/lc_315.py
|
f6b1bcabb7952c36c3f9bacc6fd883a16e2f15f6
|
[] |
no_license
|
shoppon/leetcode
|
ba6747125b53f2229d6db41372b5959344387715
|
193c27273eeacc7ceb159154fd87f7d7d9a70ae0
|
refs/heads/master
| 2022-04-25T19:01:18.701344
| 2020-04-23T16:31:35
| 2020-04-23T16:31:35
| 255,083,107
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,357
|
py
|
from typing import List
class Solution:
    """LeetCode 315: for each nums[i], count the later elements smaller than it."""

    def countSmaller1(self, nums: List[int]) -> List[int]:
        """Pruned brute force.

        Suffix min/max arrays let a whole suffix be skipped (everything is
        >= nums[i]) or counted at once (everything is < nums[i]).
        Worst case remains O(n^2).
        """
        n = len(nums)
        ans = []
        mins = []
        maxs = []
        min_v = float('inf')
        max_v = float('-inf')
        # mins[i] / maxs[i] = min / max of nums[i:].
        for i in range(n-1, -1, -1):
            min_v = min(min_v, nums[i])
            max_v = max(max_v, nums[i])
            mins.insert(0, min_v)
            maxs.insert(0, max_v)
        for i in range(n):
            count = 0
            for j in range(i+1, n):
                # Smaller than the suffix minimum: nothing smaller remains.
                if nums[i] < mins[j]:
                    break
                # Larger than the suffix maximum: every remaining element is smaller.
                if nums[i] > maxs[j]:
                    count += (n-j)
                    break
                if nums[j] < nums[i]:
                    count += 1
            ans.append(count)
        return ans

    def countSmaller(self, nums: List[int]) -> List[int]:
        """O(n log n): merge sort over an index array, counting, for each
        element, the right-half elements merged before it."""
        size = len(nums)
        if size == 0:
            return []
        if size == 1:
            return [0]
        tmp = [None] * size
        # list(range(...)) is the idiomatic form of [i for i in range(...)].
        indexes = list(range(size))
        ans = [0] * size
        self.merge_sort(nums, 0, size - 1, tmp, indexes, ans)
        return ans

    def merge_sort(self, nums, left, right, tmp, indexes, ans):
        """Sort indexes[left..right] by nums value, accumulating counts in ans."""
        if left == right:
            return
        mid = left + (right - left) // 2
        self.merge_sort(nums, left, mid, tmp, indexes, ans)
        self.merge_sort(nums, mid + 1, right, tmp, indexes, ans)
        # Halves already in order: no cross pairs to count, skip the merge.
        if nums[indexes[mid]] <= nums[indexes[mid + 1]]:
            return
        self.merge(nums, left, mid, right, tmp, indexes, ans)

    def merge(self, nums, left, mid, right, tmp, indexes, ans):
        """Merge the two sorted index runs; whenever a left-run element is
        placed, every right-run element consumed so far is smaller than it."""
        for i in range(left, right + 1):
            tmp[i] = indexes[i]
        l = left
        r = mid + 1
        for i in range(left, right + 1):
            if l > mid:
                # Left run exhausted: right elements add no counts.
                indexes[i] = tmp[r]
                r += 1
            elif r > right:
                # Right run exhausted: the whole right run is smaller than tmp[l].
                indexes[i] = tmp[l]
                l += 1
                ans[indexes[i]] += (right - mid)
            elif nums[tmp[l]] <= nums[tmp[r]]:
                indexes[i] = tmp[l]
                l += 1
                # r - (mid + 1) right-run elements were merged before this one.
                ans[indexes[i]] += (r - mid - 1)
            else:
                indexes[i] = tmp[r]
                r += 1
|
[
"shopppon@gmail.com"
] |
shopppon@gmail.com
|
5a622fa1d137fb50af46a7f4200d7f165bb4b736
|
09895d4ae31e91b371b005ab1cc41dc8700f7810
|
/SSCClient/core.py
|
1958ff9192466fd3b237185e585f2d9a0bc884f0
|
[
"MIT"
] |
permissive
|
qweraqq/SimplePythonFortifyWrapper
|
1ec6984264455a4c79e5fb5128f0a6af1d95febc
|
fb2bae56b93f367264223657315775a1951fa091
|
refs/heads/master
| 2022-10-23T15:41:42.494200
| 2020-06-10T07:06:00
| 2020-06-10T07:06:00
| 194,633,690
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,206
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import requests
import json
import logging
from collections import defaultdict
from .structs import Version_Info, Project_Info
from .utils import *
logger = logging.getLogger(__name__)
class SSCClient(object):
    """ Major component of python3 wrapper for func_simple_sca_cli ssc restful api
    Test on Fortify SSC 19.1.0
    More notes to be added.
    """
    _auth_cookie = ""
    _ssc_url = ""  # base URL of the Fortify SSC server
    _ssc_api_base = ""  # _SSC_URL + "/api/v1"
    # Default Fortify SSC role ids; refreshed by func_get_fortify_roles().
    _ssc_roles_list = ["admin", "appsectester", "developer",
                       "manager", "securitylead", "viewonly", "wiesystem"]
    _session = None  # requests session shared by all API calls
    _requests_headers = {"Accept": "application/json",
                         "Content-Type": "application/json;charset=UTF-8"}
    _requests_cookies = {"JSESSIONID": _auth_cookie}
    # project -> set of versions; NOTE(review): not touched by the methods
    # visible in this chunk — confirm where it is populated.
    _project_version_mapping = defaultdict(set)
def __init__(self, url=None):
    """Create a client bound to the SSC server at *url*."""
    logger.info('init with url: {0}'.format(url))
    self._ssc_url = url
    self._ssc_api_base = self._ssc_url + "/api/v1"
    self._session = requests.session()
    # Cookies start unset; set_auth_cookie() installs the JSESSIONID cookie.
    self._requests_cookies = None
def __del__(self):
    # Close the underlying requests session to release pooled connections.
    if self._session is not None:
        self._session.close()
def set_auth_cookie(self, auth_cookie=None):
    """Authenticate subsequent requests with a JSESSIONID session cookie."""
    self._auth_cookie = auth_cookie
    self._requests_cookies = {"JSESSIONID": self._auth_cookie}
def set_auth_token(self, auth_token=None):
    """Authenticate subsequent requests with a FortifyToken Authorization header."""
    self._requests_headers['Authorization'] = "FortifyToken {}".format(
        auth_token)
def func_get_fortify_roles(self):
    """Fetch the list of role ids currently defined on the SSC server.

    Side effect: refreshes self._ssc_roles_list on success.
    :return: list of role ids, or None on a non-200 response
    """
    url = self._ssc_api_base + "/roles"
    r = self._session.get(
        url, headers=self._requests_headers, cookies=self._requests_cookies)
    # Consistency fix: use the module-level logger (as __init__ does)
    # instead of the root logger.
    logger.debug(
        "func_get_fortify_roles\r\nraw response: {}".format(r.content))
    if r.status_code != 200:
        logger.error("func_get_fortify_roles error getting roles")
        return None
    # Comprehension replaces the manual append loop.
    roles_list = [role["id"] for role in json.loads(r.content)["data"]]
    self._ssc_roles_list = roles_list
    return roles_list
def func_get_fortify_ldap_user_info_by_name(self, user_name):
    """Look up an existing Fortify user's info by exact user name.

    The name must match exactly one user.
    :param user_name: exact user name
    :return: user-info dict, or None on error / zero / multiple matches
    """
    payloads = {"limit": 2, "start": "0", "q": user_name}
    url = self._ssc_api_base + '/ldapObjects'
    r = self._session.get(url, headers=self._requests_headers,
                          cookies=self._requests_cookies, params=payloads)
    # Consistency fix: module-level logger instead of the root logger.
    logger.debug("func_get_fortify_ldap_user_info_by_name {}\r\nraw response: {}".format(
        user_name, r.content))
    if r.status_code != 200:
        logger.error(
            "func_get_fortify_ldap_user_info_by_name error getting user info")
        return None
    user_info_from_ssc = json.loads(r.content)
    if user_info_from_ssc["count"] > 1:
        logger.error(
            "func_get_fortify_ldap_user_info_by_name: more than one user matched for {}".format(user_name))
        return None
    if user_info_from_ssc["count"] < 1:
        logger.error(
            "func_get_fortify_ldap_user_info_by_name: no user matched for {}".format(user_name))
        return None
    return user_info_from_ssc["data"][0]
def func_get_fortify_ldap_user_info_by_id(self, user_id):
    """Fetch an existing Fortify user's info by numeric id.

    :param user_id: user id
    :return: user-info dict, or None on a non-200 response
    """
    url = self._ssc_api_base + '/ldapObjects/' + str(user_id)
    r = self._session.get(
        url, headers=self._requests_headers, cookies=self._requests_cookies)
    # Consistency fix: module-level logger instead of the root logger.
    logger.debug("func_get_fortify_ldap_user_info_by_id {} \r\nraw response: {}".format(
        user_id, r.content))
    if r.status_code != 200:
        logger.error(
            "func_get_fortify_ldap_user_info_by_id error getting user info")
        return None
    user_info_from_ssc = json.loads(r.content)
    return user_info_from_ssc["data"]
def func_get_fortify_ldap_user_id_by_name(self, user_name):
"""
根据用户名查询条件获取当前Fortify已有用户的id
限制条件: 必须是精准匹配用户名,也就是有且仅有一个匹配结果
:param user_name: 精准匹配用户名
:return: 用户id, 如果失败返回None
"""
r = self.func_get_fortify_ldap_user_info_by_name(user_name)
if r is None:
return None
else:
return int(r["_href"].split("/")[-1])
def func_get_non_fortify_ldap_user_info(self, user_name):
    """Search ldapObjects (ldaptype=USER) for *user_name*.

    Requires exactly one match.  Presumably targets LDAP users not yet
    registered in Fortify (per the method name) — confirm against usage.
    :return: the user-info dict, or None on error / zero / multiple matches
    """
    url = self._ssc_api_base + '/ldapObjects'
    payloads = {"ldaptype": "USER", "limit": 2, "q": user_name, "start": 0}
    r = self._session.get(url, headers=self._requests_headers,
                          cookies=self._requests_cookies, params=payloads)
    # Consistency fix: module-level logger instead of the root logger.
    logger.debug("func_get_non_fortify_ldap_user_info {}\r\nraw response: {}".format(
        user_name, r.content))
    if r.status_code != 200:
        logger.error(
            "func_get_non_fortify_ldap_user_info error getting user info")
        return None
    user_info_from_ssc = json.loads(r.content)["data"]
    if len(user_info_from_ssc) > 1:
        logger.error(
            "func_get_non_fortify_ldap_user_info: more than one user matched for {}".format(user_name))
        return None
    if len(user_info_from_ssc) < 1:
        logger.error(
            "func_get_non_fortify_ldap_user_info: no user matched for {}".format(user_name))
        return None
    return user_info_from_ssc[0]
def func_add_roles_to_ldap_user(self, user_info, roles=['viewonly']):
    """Register an LDAP user in Fortify SSC with the given roles so they can log in.

    By default LDAP users have no roles and cannot log in to SSC.
    :param user_info: user-info dict returned by the ldap lookup helpers
    :param roles: role ids, whitelist-checked against _ssc_roles_list; unknown
        roles are logged and skipped (falls back to viewonly if none survive).
        The shared list default is harmless here: *roles* is only read.
    :return: True if the user was created (HTTP 201), False otherwise
    """
    url = self._ssc_api_base + "/ldapObjects"
    user_info["roles"] = []
    # role validation
    for role in roles:
        if role not in self._ssc_roles_list:
            # Consistency fix throughout: module-level logger, not the root logger.
            logger.error("Role {} not allowed".format(role))
        else:
            user_info["roles"].append({"id": role})
    if len(user_info["roles"]) < 1:
        user_info["roles"] = [{"id": "viewonly"}]
    logger.debug("Adding user to Fortify SSC: \r\n {}".format(user_info))
    r = self._session.post(url, headers=self._requests_headers,
                           cookies=self._requests_cookies, json=user_info)
    if r.status_code == 201:  # created
        logger.debug(
            "User {} added successfully".format(user_info['name']))
        return True
    else:
        logger.error("Failed adding user {}, error message \r\n {}".format(
            user_info['name'], r.content))
        return False
def func_del_ldap_user_by_id(self, user_id):
    """Delete a user by id.

    :param user_id: id of the user to delete
    :return: True on success, False on failure
    """
    url = self._ssc_api_base + "/ldapObjects"
    payloads = {"ids": user_id}
    r = self._session.delete(
        url, headers=self._requests_headers, cookies=self._requests_cookies, params=payloads)
    # Consistency fix: module-level logger instead of the root logger.
    logger.debug(
        "func_del_ldap_user_by_id {}\r\nraw response {}".format(user_id, r.content))
    if r.status_code == 200:
        return True
    else:
        logger.error("Error func_del_ldap_user_by_id {}".format(user_id))
        return False
def func_del_ldap_user_by_username(self, user_name):
"""
通过user_name删除用户
限制条件: 必须是精准匹配用户名,也就是有且仅有一个匹配结果
:param user_name: 精准匹配用户名
:return: True表示删除成功, False表示删除成功
"""
user_id = self.func_get_fortify_ldap_user_id_by_name(user_name)
if user_id is None:
return False
return self.func_del_ldap_user_by_id(user_id)
    def func_update_roles_for_ldap_user_by_user_id(self, user_id, roles=['viewonly']):
        """Replace the role list of an existing LDAP user.

        :param user_id: numeric SSC id of the LDAP user
        :param roles: list of role-name strings, validated against the
            whitelist self._ssc_roles_list (default SSC_ROLES_LIST =
            ["admin", "appsectester", "developer", "manager",
            "securitylead", "viewonly", "wiesystem"])
        :return: True if the update succeeded, False otherwise
        """
        url = self._ssc_api_base + "/ldapObjects/" + str(user_id)
        user_info = self.func_get_fortify_ldap_user_info_by_id(user_id)
        if user_info is None:
            logging.error(
                "func_update_roles_for_ldap_user_by_user_id failed: invalid user id {}".format(user_id))
            return False
        user_info["roles"] = []
        # Role validation: any invalid role aborts the whole update here,
        # unlike func_add_roles_to_ldap_user which merely skips it.
        for _ in roles:
            if _ not in self._ssc_roles_list:
                logging.error("Role {} not allowed".format(_))
                return False
            else:
                user_info["roles"].append({"id": _})
        # Fall back to view-only when the role list ends up empty.
        if len(user_info["roles"]) < 1:
            user_info["roles"] = [{"id": "viewonly"}]
        logging.debug(
            "Updating user id {} to Fortify SSC: \r\n {}".format(user_id, user_info))
        r = self._session.put(url, headers=self._requests_headers,
                              cookies=self._requests_cookies, json=user_info)
        logging.debug("func_update_roles_for_ldap_user_by_user_id {} \r\nraw response {}".format(
            user_id, r.content))
        if r.status_code == 200:  # updated
            logging.debug(
                "func_update_roles_for_ldap_user_by_user_id {} updated successfully".format(user_id))
            return True
        else:
            logging.error("Failed: func_update_roles_to_ldap_user_by_user_id {}, error message \r\n {}".
                          format(user_id, r.content))
            return False
    def func_update_roles_for_ldap_user_by_user_name(self, user_name, roles=['viewonly']):
        """Replace the role list of an existing LDAP user, looked up by name.

        Constraint: the name must match exactly one user.

        :param user_name: exact user name
        :param roles: list of role-name strings, validated against the
            whitelist self._ssc_roles_list (see
            func_update_roles_for_ldap_user_by_user_id)
        :return: True if the update succeeded, False otherwise
        """
        user_id = self.func_get_fortify_ldap_user_id_by_name(user_name)
        if user_id is None:
            logging.error(
                "func_update_roles_for_ldap_user_by_user_name failed: invalid user name {}".format(user_name))
            return False
        return self.func_update_roles_for_ldap_user_by_user_id(user_id, roles)
    def func_get_project_version_dict(self):
        """Build and cache a mapping of every SSC project to its versions.

        In Fortify, "project" corresponds to the UI's "application" and
        "version" to the UI's version; the API exposes them as a flat
        /projectVersions list, which is regrouped here.

        :return: dict {Project_Info: set(Version_Info, ...)}; on any request
            failure the previously cached self._project_version_mapping is
            returned unchanged.
        """
        url = self._ssc_api_base + "/projectVersions"
        # First request with limit=1 only to learn the total count.
        payloads = {"limit": 1, "start": 0, "includeInactive": True, "myAssignedIssues": False, "orderby": "id",
                    "fields": "id,name,project,active,committed,owner,description,creationDate,currentState"}
        r = self._session.get(url, headers=self._requests_headers,
                              cookies=self._requests_cookies, params=payloads)
        logging.debug(
            "func_get_project_version_dict get counts\r\nraw content: {}".format(r.content))
        if r.status_code != 200:
            logging.error(
                "func_get_project_version_dict failed: {}".format(r.content))
            return self._project_version_mapping
        project_version_count = int(json.loads(r.content)["count"])
        # Second request fetches the full list in a single page of
        # exactly `count` entries.
        payloads = {"limit": project_version_count, "start": 0, "includeInactive": True, "myAssignedIssues": False,
                    "orderby": "id",
                    "fields": "id,name,project,active,committed,owner,description,creationDate,currentState"}
        r = self._session.get(url, headers=self._requests_headers,
                              cookies=self._requests_cookies, params=payloads)
        logging.debug(
            "func_get_project_version_dict get full list\r\nraw content: {}".format(r.content))
        if r.status_code != 200:
            logging.error(
                "func_get_project_version_dict failed: {}".format(r.content))
            return self._project_version_mapping
        data = json.loads(r.content)["data"]
        # Rebuild the cache from scratch on every successful refresh.
        self._project_version_mapping = defaultdict(set)
        for _ in data:
            project_info = Project_Info(
                int(_["project"]["id"]), _["project"]["name"])
            version_info = Version_Info(int(_["id"]), _["name"])
            self._project_version_mapping[project_info].add(version_info)
        logging.debug("func_get_project_version_dict raw_mapping\r\n{}".format(
            self._project_version_mapping))
        return self._project_version_mapping
    def func_update_project_version_dict(self):
        """Refresh the cached project->versions mapping (delegates to func_get_project_version_dict)."""
        self.func_get_project_version_dict()
    def func_get_project_version_by_user_id(self, user_id):
        """List the project versions a user may access (the ACCESS list in
        the SSC user-management UI).

        :param user_id: numeric SSC id of the user
        :return: dict {Project_Info: set(Version_Info, ...)}, or None when
            the initial count request fails
        """
        url = self._ssc_api_base + "/authEntities/" + \
            str(user_id) + "/projectVersions"
        # First request with limit=1 only to learn the total count.
        payloads = {"limit": 1, "start": 0}
        r = self._session.get(url, headers=self._requests_headers,
                              cookies=self._requests_cookies, params=payloads)
        logging.debug(
            "func_get_project_version_by_user_id get counts\r\nraw content: {}".format(r.content))
        if r.status_code != 200:
            logging.error(
                "func_get_project_version_by_user_id failed: {}".format(r.content))
            return None
        project_version_count = int(json.loads(r.content)["count"])
        ret_dict = defaultdict(set)
        payloads = {"limit": project_version_count, "start": 0}
        r = self._session.get(url, headers=self._requests_headers,
                              cookies=self._requests_cookies, params=payloads)
        # NOTE(review): unlike the first request, this second request's
        # status code is not checked before json.loads.
        data = json.loads(r.content)["data"]
        for _ in data:
            project_info = Project_Info(
                int(_["project"]["id"]), _["project"]["name"])
            version_info = Version_Info(int(_["id"]), _["name"])
            ret_dict[project_info].add(version_info)
        logging.debug(
            "func_get_project_version_by_user_id raw_mapping\r\n{}".format(ret_dict))
        return ret_dict
    def func_get_project_version_by_ldap_user_name(self, user_name):
        """List the project versions an LDAP user may access, by exact name.

        Constraint: the name must match exactly one user.

        :param user_name: exact user name
        :return: dict {Project_Info: set(Version_Info, ...)}, or None when
            the user cannot be resolved or the request fails
        """
        user_id = self.func_get_fortify_ldap_user_id_by_name(user_name)
        if user_id is None:
            return None
        return self.func_get_project_version_by_user_id(user_id)
    def func_add_project_version_auth_by_user_id(self, user_id, version_ids):
        """Grant a user access to additional project versions (the Add
        button under Access in the SSC UI).

        :param user_id: numeric SSC id of the user
        :param version_ids: list of version ids to grant, e.g. [1, 2, 3]
        :return: True on success, False on failure -- including when any
            requested id is already granted (the whole call is refused)
        """
        url = self._ssc_api_base + "/authEntities/" + \
            str(user_id) + "/projectVersions/action"
        # Refuse the whole request if any id is already authorized.
        already_auth_versions = func_extract_all_version_ids(
            self.func_get_project_version_by_user_id(user_id))
        if len(set(already_auth_versions).intersection(set(version_ids))) > 0:
            logging.error("func_add_project_version_auth_by_user_id error: conflict version ids {}".
                          format(set(already_auth_versions).intersection(set(version_ids))))
            return False
        # TODO: validate that every id in version_ids actually exists
        payloads = {"ids": version_ids, "type": "assign"}
        r = self._session.post(url, headers=self._requests_headers,
                               cookies=self._requests_cookies, json=payloads)
        logging.debug("func_add_project_version_auth_by_user_id {} version ids {}\r\nraw response{}".
                      format(user_id, version_ids, r.content))
        if r.status_code == 200:
            return True
        else:
            return False
    def func_add_project_version_auth_by_ldap_user_name(self, user_name, version_ids):
        """Grant an LDAP user (looked up by exact name) access to project versions.

        :param user_name: exact user name
        :param version_ids: list of version ids to grant, e.g. [1, 2, 3]
        :return: True on success, False on failure
        """
        user_id = self.func_get_fortify_ldap_user_id_by_name(user_name)
        if user_id is None:
            logging.error(
                "func_add_project_version_auth_by_ldap_user_name {} invalid user name".format(user_name))
            return False
        return self.func_add_project_version_auth_by_user_id(user_id, version_ids)
    def func_get_issue_count_by_id(self, version_id, showsuppressed="false", showhidden="false"):
        """Fetch issue groups (grouped by FOLDER) for one project version.

        :param version_id: projectVersion id
        :param showsuppressed: "true"/"false" string passed verbatim to the API
        :param showhidden: "true"/"false" string passed verbatim to the API
        :return: the "data" payload of the issueGroups response, or None on failure
        """
        url = self._ssc_api_base + \
            "/projectVersions/{}/issueGroups".format(version_id)
        # NOTE(review): the filterset GUID is hard-coded -- presumably the
        # server's default filter set; confirm against the target SSC instance.
        payloads = {"groupingtype": "FOLDER", "filterset": "a243b195-0a59-3f8b-1403-d55b7a7d78e6",
                    "showhidden": showhidden, "showremoved": "false", "showshortfileNames": "false", "showsuppressed": showsuppressed}
        r = self._session.get(url, headers=self._requests_headers,
                              cookies=self._requests_cookies, params=payloads)
        if r.status_code == 200:
            data = json.loads(r.content)['data']
            return data
        else:
            return None
    def func_delete_by_id(self, version_id):
        """Delete a project version by id.

        :param version_id: projectVersion id
        :return: True if the deletion succeeded, False otherwise
        """
        url = self._ssc_api_base + "/projectVersions/{}?hideProgress=true"
        url = url.format(version_id)
        r = self._session.delete(
            url, headers=self._requests_headers, cookies=self._requests_cookies)
        if r.status_code == 200:
            return True
        else:
            return False
    def func_get_artifact_info(self, version_id, start=0):
        """Fetch one artifact (with embedded scans) of a project version.

        :param version_id: projectVersion id
        :param start: pagination offset; the page size is fixed at 1
        :return: the "data" payload of the artifacts response, or None on failure
        """
        url = self._ssc_api_base + \
            "/projectVersions/{}/artifacts?hideProgress=true&embed=scans&limit=1&start={}"
        url = url.format(version_id, start)
        r = self._session.get(
            url, headers=self._requests_headers, cookies=self._requests_cookies)
        if r.status_code == 200:
            data = json.loads(r.content)['data']
            return data
        else:
            return None
    def func_get_project_version_authentities(self, version_id):
        """List the auth entities (users) that can access a project version.

        Sample response entry::

            {
                "firstName": "default", "lastName": "user",
                "_href": ".../projectVersions/2/authEntities/1",
                "userPhoto": null, "isLdap": false,
                "displayName": "default user", "entityName": "admin",
                "id": 1, "ldapDn": null, "type": "User",
                "email": "my_email@fortify.com"
            }

        :param version_id: projectVersion id
        :return: the "data" payload (list of entity dicts), or None on failure
        """
        url = self._ssc_api_base + "/projectVersions/{}/authEntities?limit=-1"
        url = url.format(version_id)
        r = self._session.get(
            url, headers=self._requests_headers, cookies=self._requests_cookies)
        if r.status_code == 200:
            data = json.loads(r.content)['data']
            return data
        else:
            return None
    def func_add_ladpuser_to_projectverion_by_user_id(self, version_id, user_id):
        """Add an LDAP user to a project version's access list.

        The PUT replaces the whole access list, so the currently authorized
        entities are fetched first and re-submitted together with the new user.

        NOTE(review): the "ladp"/"projectverion" typos in the name are kept
        deliberately -- renaming would break existing callers.

        :param version_id: projectVersion id
        :param user_id: numeric SSC id of the LDAP user to add
        :return: True on success, False on failure
        """
        url = self._ssc_api_base + "/projectVersions/{}/authEntities"
        url = url.format(version_id)
        user_info = []
        authed_user_list = self.func_get_project_version_authentities(
            version_id)
        if authed_user_list is not None:
            for _ in authed_user_list:
                user_info.append({"id": _['id'], "isLdap": _["isLdap"]})
        user_info.append({"id": user_id, "isLdap": True})
        r = self._session.put(
            url, headers=self._requests_headers, cookies=self._requests_cookies, json=user_info)
        if r.status_code == 200:
            return True
        else:
            return False
    def func_add_ladpuser_to_projectverion_by_user_name(self, version_id, user_name):
        """Add an LDAP user (looked up by exact name) to a project version's access list."""
        user_id = self.func_get_fortify_ldap_user_id_by_name(user_name)
        return self.func_add_ladpuser_to_projectverion_by_user_id(version_id, user_id)
    def func_get_folders_by_version_id(self, version_id):
        """List the issue folders of a project version.

        :param version_id: projectVersion id
        :return: the "data" payload (list of folder dicts with "name" and
            "guid" keys), or None on failure
        """
        url = self._ssc_api_base + "/projectVersions/{}/folders"
        url = url.format(version_id)
        r = self._session.get(
            url, headers=self._requests_headers, cookies=self._requests_cookies)
        if r.status_code == 200:
            data = json.loads(r.content)['data']
            return data
        else:
            return None
def func_suppress_all_issues_by_folder(self, version_id, folder_name, page_size=50):
folder_infos = self.func_get_folders_by_version_id(version_id)
if folder_infos is None:
return False
folder_uuid = None
for folder_info in folder_infos:
if folder_info["name"] == folder_name:
folder_uuid = folder_info["guid"]
if folder_uuid is None:
logging.error("folder name not found")
while True:
url = self._ssc_api_base + "/projectVersions/{}/issues?start=0&limit={}&showhidden=false&showremoved=false&showsuppressed=false&showshortfilenames=false&filter={}"
url = url.format(version_id, page_size, "FOLDER:" + folder_uuid)
r = self._session.get(
url, headers=self._requests_headers, cookies=self._requests_cookies)
if r.status_code != 200:
return False
issues_remained = json.loads(r.content)["count"]
if issues_remained == 0:
break
data = json.loads(r.content)['data']
payloads = {"issues": [], "suppressed": True}
for _ in data:
payloads["issues"].append({"id": _["id"], "revision": _["revision"]})
url = self._ssc_api_base + "/projectVersions/{}/issues/action/suppress"
url = url.format(version_id)
r = self._session.post(url, headers=self._requests_headers,
cookies=self._requests_cookies, json=payloads)
if r.status_code != 200:
return False
return True
|
[
"shenxiangxiang@gmail.com"
] |
shenxiangxiang@gmail.com
|
309ae3dd4b82f59472559bad92aae23b9e6edd2c
|
e571b3139a08ac26d04ded28efbf59eacdebb4c5
|
/WordNet.py
|
d977a5b251565be7b7f64100abee30bb5a02adac
|
[] |
no_license
|
fayedraza/Natural-Language-Processing
|
b60cd9210ebab5c614339571251571b8744480a8
|
24eaa0e5bbebeb0b4f6ae02acc8d14a284fa0036
|
refs/heads/master
| 2021-03-30T08:45:32.046295
| 2020-03-20T01:59:07
| 2020-03-20T01:59:07
| 248,033,581
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 550
|
py
|
from nltk.corpus import wordnet

# WordNet exploration: look up synsets, lemmas, definitions and examples,
# then collect synonyms and antonyms for a word.

syns = wordnet.synsets("program")

# Full synset identifier (e.g. "plan.n.01").
print(syns[0].name())
# Just the lemma word itself.
print(syns[0].lemmas()[0].name())
# Dictionary definition.
print(syns[0].definition())
# Usage examples.
print(syns[0].examples())

synonyms = []
antonyms = []
for synset in wordnet.synsets("good"):
    for lemma in synset.lemmas():
        synonyms.append(lemma.name())
        if lemma.antonyms():
            antonyms.append(lemma.antonyms()[0].name())

print(set(synonyms))
print(set(antonyms))
|
[
"fayed1234@hotmail.com"
] |
fayed1234@hotmail.com
|
e8c74c07e3c8317d303f9de9f951b89ac3546170
|
b76abacd12671de7b3e8ab0207293f527e3d4738
|
/eshop/migrations/0001_initial.py
|
3db336b99c99b52a8d74ca1705ec504e18a0fe87
|
[] |
no_license
|
krishnamurtidutta/Online-costmetic-store
|
5f7929630714552425f3625f9b3a2c65ae2723f9
|
0dc965e0ef91bce00959e820b7944c63ae8e5e3b
|
refs/heads/main
| 2023-08-11T07:20:41.057375
| 2021-09-20T15:57:47
| 2021-09-20T15:57:47
| 408,501,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,909
|
py
|
# Generated by Django 3.2.6 on 2021-08-31 17:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the eshop app.

    Creates Customer (1:1 with the auth user), Feedback, Product, and
    Orders (FKs to Customer and Product).
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Customer',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('profile_pic', models.ImageField(blank=True, null=True, upload_to='profile_pic/CustomerProfilePic/')),
                ('address', models.CharField(max_length=40)),
                ('mobile', models.CharField(max_length=20)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Feedback',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=40)),
                ('feedback', models.CharField(max_length=500)),
                ('date', models.DateField(auto_now_add=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=40)),
                ('product_image', models.ImageField(blank=True, null=True, upload_to='product_image/')),
                ('price', models.PositiveIntegerField()),
                ('description', models.CharField(max_length=40)),
            ],
        ),
        migrations.CreateModel(
            name='Orders',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.CharField(max_length=50, null=True)),
                ('address', models.CharField(max_length=500, null=True)),
                ('mobile', models.CharField(max_length=20, null=True)),
                ('order_date', models.DateField(auto_now_add=True, null=True)),
                ('status', models.CharField(choices=[('Pending', 'Pending'), ('Order Confirmed', 'Order Confirmed'), ('out of delivery', 'out of delivery'), ('Delivered', 'Delivered')], max_length=50, null=True)),
                ('Customer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='eshop.customer')),
                ('product', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='eshop.product')),
            ],
        ),
    ]
|
[
"noreply@github.com"
] |
krishnamurtidutta.noreply@github.com
|
44e5fdd2aa6336d434f44d58ccdf9323afcd14ba
|
107b1edcc0e17e6e85247b309695f2f914e4fc2b
|
/flowpatrol/wsgi.py
|
f1f3a3a0b96539361fa6d1c1aada45fe9871f97b
|
[] |
no_license
|
chrxr/flowpatrol
|
90b0c512aeb9410d2a632e8f4fe7779678209fbb
|
cb3094ca6aa446aae1f0ce69a2b337ccce250fde
|
refs/heads/master
| 2020-06-04T04:14:58.000947
| 2015-05-22T21:24:32
| 2015-05-22T21:24:32
| 29,037,844
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
"""
WSGI config for flowpatrol project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "flowpatrol.settings.production")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
[
"chris.rogers@torchbox.com"
] |
chris.rogers@torchbox.com
|
c450c9cc3b9f7602a096adf6d3a38c8f8b8ae0e0
|
d11d0d6120386a5cd6bdc8de083a27710abd59d2
|
/dprojx/dappx/migrations/0008_auto_20181221_2124.py
|
68e80b03f784a1cb55e76067cb38f84f319948d0
|
[] |
no_license
|
RahilGupta148/ProjectsCode
|
2c54020f58ff9235deba407d055286b8b0b9f42e
|
b50f3dc340d87ed6ebfa8e5429b9bc8744426507
|
refs/heads/master
| 2020-04-13T08:04:54.937359
| 2019-01-27T10:34:11
| 2019-01-27T10:34:11
| 163,071,352
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 642
|
py
|
# Generated by Django 2.1.1 on 2018-12-21 15:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: drops the Project model and relaxes the
    Document fields (optional description; upload path without user subdir)."""
    dependencies = [
        ('dappx', '0007_auto_20181221_2121'),
    ]
    operations = [
        migrations.DeleteModel(
            name='Project',
        ),
        migrations.AlterField(
            model_name='document',
            name='description',
            field=models.CharField(blank=True, max_length=1000),
        ),
        migrations.AlterField(
            model_name='document',
            name='document',
            field=models.FileField(upload_to='documents/'),
        ),
    ]
|
[
"rahilgupta148@gmail.com"
] |
rahilgupta148@gmail.com
|
606fab937f98b33f758559b69fcf0d893e603ba5
|
dc3d85086039719aef44c82fe6fd91d29b6c876e
|
/trainer/wa.py
|
112d68205bd1f3505b1cbb587754114def1b59d3
|
[] |
no_license
|
hongjoon0805/Incremental
|
7a92b81cc5f64892b3101c8773f0b7401d20d46b
|
38d1495ea1b8a898183dec345720e3ebe90db8e0
|
refs/heads/master
| 2021-07-10T12:25:12.560456
| 2020-11-05T12:41:11
| 2020-11-05T12:41:11
| 214,089,063
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,680
|
py
|
from __future__ import print_function
import copy
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
import networks
import trainer
class Trainer(trainer.GenericTrainer):
    """WA-style trainer for class-incremental learning.

    Combines cross-entropy on the current task with knowledge distillation
    from the frozen previous model (self.model_fixed), clamps classifier
    weights to be non-negative after every step, and offers post-hoc
    alignment of new-class weight norms (weight_align).
    """
    def __init__(self, IncrementalLoader, model, args):
        super().__init__(IncrementalLoader, model, args)
        # Mean-reduced cross-entropy over the currently known classes.
        self.loss = torch.nn.CrossEntropyLoss(reduction='mean')
    def train(self, epoch):
        """Run one training epoch: CE + temperature-T distillation, mixed by
        lamb = (#old classes)/(#all classes), with bias reset and
        non-negative weight clamping after every optimizer step."""
        T=2  # distillation temperature
        self.model.train()
        print("Epochs %d"%epoch)
        tasknum = self.incremental_loader.t
        end = self.incremental_loader.end
        start = end-self.args.step_size
        # Mixing factor: grows with the share of previously seen classes.
        lamb = start / end
        for data, target in tqdm(self.train_iterator):
            data, target = data.cuda(), target.cuda()
            output = self.model(data)[:,:end]
            loss_CE = self.loss(output, target)
            loss_KD = 0
            if tasknum > 0:
                # Distill only over the classes the frozen model knew.
                end_KD = start
                start_KD = end_KD - self.args.step_size
                score = self.model_fixed(data)[:,:end_KD].data
                soft_target = F.softmax(score / T, dim=1)
                output_log = F.log_softmax(output[:,:end_KD] / T, dim=1)
                loss_KD = F.kl_div(output_log, soft_target, reduction='batchmean')
            self.optimizer.zero_grad()
            (lamb*loss_KD + (1-lamb)*loss_CE).backward()
            self.optimizer.step()
            self.model.module.fc.bias.data[:] = 0
            # Weight clipping: zero out negative classifier weights.
            weight = self.model.module.fc.weight.data
            #print(weight.shape)
            weight[weight < 0] = 0
            #for p in self.model.module.fc.weight:
                #print(p)
                #print((p==0).sum())
    def weight_align(self):
        """Rescale the new-class weight rows so their mean L2 norm matches
        the old-class mean norm (the WA correction step)."""
        # NOTE(review): reads self.train_data_iterator here while train()
        # uses self.incremental_loader / self.train_iterator -- confirm both
        # attributes exist on GenericTrainer.
        end = self.train_data_iterator.dataset.end
        start = end-self.args.step_size
        weight = self.model.module.fc.weight.data
        prev = weight[:start, :]
        new = weight[start:end, :]
        print(prev.shape, new.shape)
        mean_prev = torch.mean(torch.norm(prev, dim=1)).item()
        mean_new = torch.mean(torch.norm(new, dim=1)).item()
        gamma = mean_prev/mean_new
        print(mean_prev, mean_new, gamma)
        new = new * gamma
        result = torch.cat((prev, new), dim=0)
        weight[:end, :] = result
        print(torch.mean(torch.norm(self.model.module.fc.weight.data[:start], dim=1)).item())
        print(torch.mean(torch.norm(self.model.module.fc.weight.data[start:end], dim=1)).item())
|
[
"hongjoon0805@gmail.com"
] |
hongjoon0805@gmail.com
|
6f23c2b2c5f270efb0abe266f2e98db76484813f
|
faafe1ee67a5daae75c397a4c4100a4046e5f564
|
/colorwebs/colorwebs/urls.py
|
ecd4f243f43ff83a581e80aeb202c285d10facb8
|
[] |
no_license
|
DileepKommineni/colorwebs_msd
|
54c1d953ca08acdc52059d896255bc5ee1a82d99
|
d6db0a84ce5cc964bedb7a98b6238d4929374dfc
|
refs/heads/master
| 2020-12-07T01:35:33.603747
| 2020-02-06T17:30:37
| 2020-02-06T17:30:37
| 232,602,442
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,467
|
py
|
"""colorwebs URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls.static import static
from colorapp import views
from colorwebs import settings
# URL routes for the colorwebs journal site; static files are appended so
# they are served in development via STATIC_URL.
urlpatterns = [
    path('admin/', admin.site.urls),
    # Sign up page
    path('signup/', views.signup_user, name="signup-user"),
    # Signup submit
    path('signup/submit/', views.signup_submit, name="signup-submit"),
    # Content pages (journal info, archives, articles, guidelines, ...).
    path('',views.home,name="home"),
    path('home',views.home,name="home"),
    path('about_journal',views.about_journal,name="about_journal"),
    path('about_open',views.about_open,name="about_open"),
    path('about_peer',views.about_peer,name="about_peer"),
    path('archive_fulltext',views.archive_fulltext,name="archive_fulltext"),
    path('archive',views.archive,name="archive"),
    path('article_processing',views.article_processing,name="article_processing"),
    path('articles_fulltext',views.articles_fulltext,name="articles_fulltext"),
    path('articles',views.articles,name="articles"),
    path('articlesinpress',views.articlesinpress,name="articlesinpress"),
    path('author_guidelines',views.author_guidelines,name="author_guidelines"),
    path('benefits',views.benefits,name="benefits"),
    path('collaborations',views.collaborations,name="collaborations"),
    path('company_information',views.company_information,name="company_information"),
    path('contact',views.contact,name="contact"),
    path('current_issue',views.current_issue,name="current_issue"),
    path('current_issue_fulltext',views.current_issue_fulltext,name="current_issue_fulltext"),
    # NOTE(review): duplicate route to the same view under a misspelled path
    # ('issuse') -- presumably kept for old links; confirm before removing.
    path('current_issuse_fulltext',views.current_issue_fulltext,name='issuessss'),
    path('ebooks',views.ebooks,name="ebooks"),
    path('editor_guidelines',views.editor_guidelines,name="editor_guidelines"),
    path('editorial_board',views.editor_board,name="editoral_board"),
    path('faqs',views.faqs,name="faqs"),
    path('indexing',views.indexing,name="indexing"),
    path('membership',views.membership,name="membership"),
    path('peer_review',views.peer_review,name="peer_review"),
    path('publication_ethics',views.publication_ethics,name="publication_ethics"),
    path('publications',views.publications,name="publications"),
    path('registration_details',views.registration_details,name="registration_details"),
    path('reprints',views.reprints,name="reprints"),
    path('reviewer_guidelines',views.review_guidelines,name="review_guidelines"),
    path('signin',views.signin,name="signin"),
    path('special_issue',views.special_issue,name="special_issue"),
    path('submit_articles',views.submit_articles,name="submit_articles"),
    # Form-submission endpoints.
    path('contact_submit',views.contact_submit,name="contact_submit"),
    path('article_submit',views.article_submit,name="article_submit"),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
[
"dileepk@s7works.io"
] |
dileepk@s7works.io
|
ce223c6360dfff29015eb8b4f13ec5423c7c5965
|
a3b359f5bc233e5fb85e80b0194de1b2c2bd3c4b
|
/RNN/lstm-static-mnist.py
|
65ea6946c632a0b668bfd1bda3e534d21414e84a
|
[] |
no_license
|
CodeAchieveDream/Tensorflow-RNN
|
47ee0c7d3e62459595388d0ebf790a5ddde2aa3d
|
9764b644bcc2096fe05589b73f203cfb910e3b36
|
refs/heads/master
| 2020-06-06T12:10:59.120270
| 2019-06-19T13:25:01
| 2019-06-19T13:25:01
| 192,736,275
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,676
|
py
|
# /usr/bin/python
# -*- encoding:utf-8 -*-
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.contrib.rnn import BasicLSTMCell
from tensorflow.contrib.rnn import static_rnn
from tensorflow.contrib.layers import fully_connected
import numpy as np
# TF1-style static-LSTM classifier for MNIST: each 28x28 image is fed as a
# 28-step sequence of 28 features.
mnist = input_data.read_data_sets('../MNIST_data/', one_hot=True)
n_input = 28
n_steps = 28
n_hidden = 128
n_classes = 10
x = tf.placeholder('float', [None, n_steps*n_input])
y = tf.placeholder('float', [None, n_classes])
# Reshape the flat 784-vector into a 28-step sequence and unstack it into a
# Python list of per-step tensors, as static_rnn requires.
x1 = tf.reshape(x, [-1, 28, 28])
x1 = tf.unstack(x1, n_steps, 1)
lstm_cell = BasicLSTMCell(n_hidden, forget_bias=1.0)
outputs, states = static_rnn(lstm_cell, x1, dtype=tf.float32)
# Classify from the final time step's output only.
pred = fully_connected(outputs[-1], n_classes, activation_fn=None)
# NOTE(review): sum-of-squares loss on raw logits vs one-hot labels;
# softmax cross-entropy would be the conventional choice.
cost = tf.reduce_mean(tf.reduce_sum(tf.square(pred - y)))
global_step = tf.Variable(0, trainable=False)
initial_learning_rate = 0.01
learning_rate = tf.train.exponential_decay(initial_learning_rate,
                                           global_step=global_step,
                                           decay_steps=3,
                                           decay_rate=0.9)
# NOTE(review): add_global is never run in the session below, so global_step
# stays 0 and the decayed learning rate never actually advances.
add_global = global_step.assign_add(1)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
training_epochs = 10
batch_size = 100
display_step = 1
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(training_epochs):
        avg_cost = 0
        total_batch = int(mnist.train.num_examples/batch_size)
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # outputs, states = sess.run([outputs, states], feed_dict={x: batch_xs, y: batch_ys})
            # print('outputs shape:', np.shape(outputs))
            # print(outputs)
            # print('states shape:', np.shape(states))
            # print(states)
            # y_pred = sess.run(pred, feed_dict={x: batch_xs, y: batch_ys})
            # print('predicted y:\n', y_pred.shape, '\n')
            #
            # print(batch_xs.shape)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs, y: batch_ys})
            avg_cost += c / total_batch
        if (epoch + 1) % display_step == 0:
            print('epoch= ', epoch+1, ' cost= ', avg_cost)
    print('finished')
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print('test accuracy: ', accuracy.eval({x:mnist.test.images, y:mnist.test.labels}))
    print('train accuracy: ', accuracy.eval({x: mnist.train.images, y: mnist.train.labels}))
|
[
"1311817413@qq.com"
] |
1311817413@qq.com
|
bc58f9ebc246d2490ce21fb6399685568bb9c9c5
|
acc73beb5ce211c06335785490ae495969eb3fc5
|
/src/code_completion_final.py
|
9d93a16115c9880e37051324eb72fbfcad05b7a2
|
[] |
no_license
|
salisjoshi/CodeCompletion
|
b7e9637dddac994d4877091717f3da7cf74c7d8d
|
3969b1d45a6103add3258ba87b84ba2502d891aa
|
refs/heads/master
| 2020-05-23T16:38:16.186793
| 2019-05-15T15:26:08
| 2019-05-15T15:26:08
| 186,853,459
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,792
|
py
|
import tflearn
import numpy
import tensorflow as tf
class Code_Completion_Baseline:
    """LSTM code-completion model: predicts a missing token from up to
    self.max_sequence tokens of prefix and suffix context."""
    def __init__(self):
        # Number of context tokens taken from each side of the hole.
        self.max_sequence = 5
    def token_to_string(self, token):
        """Serialize a token dict into a 'type-@@-value' string key."""
        return token["type"] + "-@@-" + token["value"]
    def string_to_token(self, string):
        """Inverse of token_to_string."""
        splitted = string.split("-@@-")
        return {"type": splitted[0], "value": splitted[1]}
    def one_hot(self, string):
        """One-hot encode a token string over the known vocabulary."""
        vector = [0] * len(self.string_to_number)
        vector[self.string_to_number[string]] = 1
        return vector
    def two_hot(self, prefix, suffix):
        """Vector marking both a prefix and a suffix token string.

        NOTE(review): defined but not called anywhere in this class.
        """
        vector = [0] * len(self.string_to_number)
        vector[self.string_to_number[prefix]] = 1
        vector[self.string_to_number[suffix]] = 1
        return vector
    def zero_hot(self):
        """All-zero vector of vocabulary size (padding).

        NOTE(review): also unused within this class.
        """
        return [0] * len(self.string_to_number)
    def prepare_data(self, token_lists):
        """Build the token vocabulary and the (x, y) training pairs.

        x rows are lists of vocabulary indices (max_sequence prefix tokens
        followed by max_sequence suffix tokens); y rows are one-hot targets.

        NOTE(review): tokens within max_sequence of either end of a token
        list get empty context (temp_xs == []), so xs mixes rows of length
        0 and 2*max_sequence -- confirm this ragged shape is intended
        before the numpy reshape in train().
        """
        # encode tokens into one-hot vectors
        all_token_strings = set()
        for token_list in token_lists:
            for token in token_list:
                all_token_strings.add(self.token_to_string(token))
        all_token_strings = list(all_token_strings)
        all_token_strings.sort()
        print("Unique tokens: " + str(len(all_token_strings)))
        # Bidirectional token-string <-> index maps.
        self.string_to_number = dict()
        self.number_to_string = dict()
        max_number = 0
        for token_string in all_token_strings:
            self.string_to_number[token_string] = max_number
            self.number_to_string[max_number] = token_string
            max_number += 1
        print(self.number_to_string)
        # prepare x,y pairs
        xs = []
        ys = []
        for token_list in token_lists:
            for idx, token in enumerate(token_list):
                prefix_sequences = []
                suffix_sequences = []
                if self.max_sequence <= idx < len(token_list) - self.max_sequence:
                    prefix_sequences = token_list[idx - self.max_sequence:idx]
                    suffix_sequences = token_list[idx + 1: idx + self.max_sequence + 1]
                output_token_string = self.token_to_string(token_list[idx])
                temp_xs = prefix_sequences + suffix_sequences
                xs.append([self.string_to_number[self.token_to_string(token)] for token in temp_xs])
                ys.append(self.one_hot(output_token_string))
        print("x,y pairs: " + str(len(xs)))
        return (xs, ys)
    def create_network(self):
        """Build the two-layer LSTM + softmax network over the vocabulary."""
        self.net = tflearn.input_data(shape=[None, self.max_sequence * 2, 1])
        self.net = tflearn.lstm(self.net, 192, return_seq=True)
        self.net = tflearn.lstm(self.net, 192)
        self.net = tflearn.fully_connected(self.net, len(self.string_to_number), activation='softmax')
        self.net = tflearn.regression(self.net, optimizer='adam', loss='categorical_crossentropy', learning_rate=0.001)
        self.model = tflearn.DNN(self.net)
    def load(self, token_lists, model_file):
        """Rebuild the vocabulary/network and restore saved weights."""
        print("Loading saved model")
        self.prepare_data(token_lists)
        self.create_network()
        self.model.load(model_file)
        print("model loaded")
    def train(self, token_lists, model_file):
        """Train the network on token_lists and save weights to model_file."""
        print("training model")
        (xs, ys) = self.prepare_data(token_lists)
        self.create_network()
        xs = numpy.reshape(numpy.array(xs), [-1, self.max_sequence * 2, 1])
        self.model.fit(xs, ys, show_metric=True, validation_set=0.1, shuffle=True, batch_size=512, run_id='lstm_all_data')
        self.model.save(model_file)
        print("model saved")
    def query(self, prefix, suffix):
        """Predict the most likely token for the hole between prefix and suffix.

        Missing context positions are padded with -1 so the model input
        always has exactly 2 * max_sequence entries.

        :return: single-element list containing the best token dict
        """
        input_token = []
        prefix_len = len(prefix)
        suffix_len = len(suffix)
        pre_seq_len = post_seq_len = self.max_sequence
        # Left-pad the prefix context with -1 when the prefix is short.
        while pre_seq_len > prefix_len:
            input_token.append(-1)
            pre_seq_len -= 1
        if pre_seq_len > 0:
            temp = prefix[prefix_len - pre_seq_len:]
            input_token.extend([self.string_to_number[self.token_to_string(token)] for token in temp])
        temp = []
        # Right-pad the suffix context with -1 when the suffix is short.
        while post_seq_len > suffix_len:
            temp.append(-1)
            post_seq_len -= 1
        if post_seq_len > 0:
            temp = [self.string_to_number[self.token_to_string(token)] for token in suffix[:post_seq_len]] + temp
        input_token.extend(temp)
        input_token = numpy.reshape(numpy.array(input_token), [-1, self.max_sequence * 2, 1])
        y = self.model.predict(input_token)
        predicted_seq = y[0]
        if type(predicted_seq) is numpy.ndarray:
            predicted_seq = predicted_seq.tolist()
        # Pick the highest-probability vocabulary entry and decode it.
        best_number = predicted_seq.index(max(predicted_seq))
        best_string = self.number_to_string[best_number]
        best_token = self.string_to_token(best_string)
        return [best_token]
|
[
"salis.joshi@gmail.com"
] |
salis.joshi@gmail.com
|
c809c9d03f49c079542f1c8d509ff55a9f4f4907
|
38f605389d8e0f774150479f83ab622639c02cb8
|
/visualize.py
|
c344d4794d08f34ccf360965419e6c0a4081fd7d
|
[] |
no_license
|
1050265390/FTANet-melodic
|
bc159afa1b140d3a9be07b28fb0eb63085f98e09
|
a0998e34b9efe853079cd16b8e518ca274c3ac42
|
refs/heads/main
| 2023-05-15T00:43:11.446892
| 2021-05-31T06:44:46
| 2021-05-31T06:44:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,468
|
py
|
import os
import math
import librosa
import librosa.display
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.optimizers import Adam
from tensorflow.python.framework import ops
from PIL import Image
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from constant import *
from loader import load_single_data_for_test, get_CenFreq, seq2map
from evaluator import evaluate
# from keract import display_heatmaps
# from network.msnet import create_msnet_model as create_model
os.environ["CUDA_VISIBLE_DEVICES"] = "4"
def scale_minmax(X, min=0.0, max=1.0):
    """Linearly rescale X into [min, max].

    Works on any object with .min()/.max() and elementwise arithmetic
    (numpy arrays in this file). A constant input used to produce a 0/0
    division (an all-NaN array); it now maps to an all-`min` result.
    """
    lo, hi = X.min(), X.max()
    span = hi - lo
    if span == 0:
        # Degenerate range: every value identical — map everything to the lower bound.
        return X * 0 + min
    X_std = (X - lo) / span
    X_scaled = X_std * (max - min) + min
    return X_scaled
def display_heatmaps(activations, input_image, directory):
    """Save one heat-map PNG per filter for each layer activation.

    activations maps layer name -> activation tensor (batch axis first);
    only single-sample, 3-D/4-D activations are rendered. Each filter is
    min-max scaled, resized to input_image's pixel size, and overlaid on
    input_image with a 'jet' colormap into *directory*.
    """
    data_format = K.image_data_format()
    index = 0
    for layer_name, acts in activations.items():
        print(layer_name, acts.shape, end=' ')
        if acts.shape[0] != 1:
            print('-> Skipped. First dimension is not 1.')
            continue
        if len(acts.shape) <= 2:
            print('-> Skipped. 2D Activations.')
            continue
        print('')
        # computes values required to scale the activations (which will form our heat map) to be in range 0-1
        scaler = MinMaxScaler()
        # reshapes to be 2D with an automaticly calculated first dimension and second
        # dimension of 1 in order to keep scikitlearn happy
        scaler.fit(acts.reshape(-1, 1))
        # loops over each filter/neuron
        for i in range(acts.shape[-1]):
            dpi = 300
            # Size the figure so the saved image matches input_image's pixel dimensions.
            fig = plt.figure(figsize=(input_image.shape[1]/dpi, input_image.shape[0]/dpi), dpi=dpi)
            # fig = plt.figure(figsize=(input_image.shape[1], input_image.shape[0]))
            axes = fig.add_axes([0, 0, 1, 1])
            axes.set_axis_off()
            plt.gca().xaxis.set_major_locator(plt.NullLocator())
            plt.gca().yaxis.set_major_locator(plt.NullLocator())
            plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0)
            plt.margins(0,0)
            if len(acts.shape) == 3:
                # gets the activation of the ith layer
                if data_format == 'channels_last':
                    img = acts[0, :, i]
                elif data_format == 'channels_first':
                    img = acts[0, i, :]
                else:
                    raise Exception('Unknown data_format.')
            elif len(acts.shape) == 4:
                if data_format == 'channels_last':
                    img = acts[0, :, :, i]
                elif data_format == 'channels_first':
                    img = acts[0, i, :, :]
                else:
                    raise Exception('Unknown data_format.')
            else:
                raise Exception('Expect a tensor of 3 or 4 dimensions.')
            # scales the activation (which will form our heat map) to be in range 0-1 using
            # the previously calculated statistics
            if len(img.shape) == 1:
                img = scaler.transform(img.reshape(-1, 1))
            else:
                img = scaler.transform(img)
            # print(img.shape)
            img = Image.fromarray(img)
            # resizes the activation to be same dimensions of input_image
            img = img.resize((input_image.shape[1], input_image.shape[0]), Image.LANCZOS)
            img = np.array(img)
            # overlay the activation at 70% transparency onto the image with a heatmap colour scheme
            # Lowest activations are dark, highest are dark red, mid are yellow
            axes.imshow(input_image / 255.0)
            axes.imshow(img, alpha=1.0, cmap='jet', interpolation='bilinear')
            # save to png
            if not os.path.exists(directory):
                os.makedirs(directory)
            output_filename = os.path.join(directory, '{}-{}_{}.png'.format(index, layer_name.split('/')[0], i))
            plt.savefig(output_filename, bbox_inches='tight', dpi=dpi, pad_inches=0)
            plt.close(fig)
        # index distinguishes layers in the output filenames.
        index += 1
def display_heatmap(activation, input_image, fname, alpha):
    """Overlay a single activation map on *input_image* and save it to *fname*.

    The activation is min-max scaled to [0, 1], resized to input_image's
    pixel size, and blended over it with the 'jet' colormap at *alpha*.
    """
    dpi = 300
    # Size the figure so the saved image matches input_image's pixel dimensions.
    fig = plt.figure(figsize=(input_image.shape[1]/dpi, input_image.shape[0]/dpi), dpi=dpi)
    axes = fig.add_axes([0, 0, 1, 1])
    axes.set_axis_off()
    plt.gca().xaxis.set_major_locator(plt.NullLocator())
    plt.gca().yaxis.set_major_locator(plt.NullLocator())
    plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
    plt.margins(0,0)
    img = activation
    img = scale_minmax(img, min=0.0, max=1.0)
    img = Image.fromarray(img)
    img = img.resize((input_image.shape[1], input_image.shape[0]), Image.LANCZOS)
    img = np.array(img)
    # overlay the activation
    axes.imshow(input_image / 255.0)
    axes.imshow(img, alpha=alpha, cmap='jet', interpolation='bilinear')
    # save to png
    plt.savefig(fname, bbox_inches='tight', dpi=dpi, pad_inches=0)
    plt.close(fig)
def display_spec(activation, input_image, fname):
    """Render *activation* as a borderless spectrogram image saved to *fname*.

    input_image is used only for its pixel dimensions when sizing the figure.
    """
    dpi = 300
    fig = plt.figure(figsize=(input_image.shape[1]/dpi, input_image.shape[0]/dpi), dpi=dpi)
    axes = fig.add_axes([0, 0, 1, 1])
    axes.set_axis_off()
    plt.gca().xaxis.set_major_locator(plt.NullLocator())
    plt.gca().yaxis.set_major_locator(plt.NullLocator())
    plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0)
    plt.margins(0,0)
    librosa.display.specshow(activation)
    # remove the frame borders
    axes.spines['top'].set_visible(False)
    axes.spines['right'].set_visible(False)
    axes.spines['bottom'].set_visible(False)
    axes.spines['left'].set_visible(False)
    plt.savefig(fname, bbox_inches='tight', dpi=dpi, pad_inches=0)
    plt.close(fig)
def visualize(model, x, mapping, i_seg, layers):
    """Save the ground-truth map and mean activation of each named layer for one segment.

    x is the list of input segments; mapping is the (freq, time) label map.
    Outputs go to visualization/{i_seg}_*.png via display_spec.
    """
    ## choose one segment
    data = x[i_seg]
    # Slice the label map to this segment's time window (row 0 dropped —
    # presumably the "no melody" bin; confirm against seq2map).
    ground = mapping[1:, i_seg*SEG_LEN: (i_seg+1)*SEG_LEN]
    display_spec(ground, ground, 'visualization/{}_ground.png'.format(i_seg))
    ## generate image to overly
    image = data[:, :, 1] * data[:, :, 2]
    image = scale_minmax(image, 0, 255).astype(np.uint8)
    ## to input data
    x_in = np.expand_dims(data, axis=0)
    # visualization
    for layer_name in layers:
        # Evaluate the named intermediate layer on this single segment.
        outputs = K.function([model.get_input_at(0)], [model.get_layer(layer_name).output])([x_in])
        # Average over the channel axis to get one 2-D map per layer.
        output_mean = np.mean(outputs[0][0], axis=-1)
        # output_mean = outputs[0][0]
        print(output_mean.shape)
        display_spec(output_mean, image, 'visualization/{}_{}.png'.format(i_seg, layer_name))
def print_tf_weights(model, x, i_seg):
    """Print channel-averaged time- and frequency-attention activations for segment i_seg.

    NOTE(review): the conv1d layer indices below are hard-wired to one
    specific model build — confirm they still match the architecture.
    """
    data = x[i_seg]
    x_in = np.expand_dims(data, axis=0)
    print('Time Attn:')
    layers = ['conv1d_{}'.format(i) for i in range(1, 28, 4)]
    for layer in layers:
        outputs = K.function([model.get_input_at(0)], [model.get_layer(layer).output])([x_in])
        output_mean = np.mean(outputs[0][0], axis=-1)
        print(output_mean)
    print('Frequency Attn:')
    layers = ['conv1d_{}'.format(i) for i in range(3, 28, 4)]
    for layer in layers:
        outputs = K.function([model.get_input_at(0)], [model.get_layer(layer).output])([x_in])
        output_mean = np.mean(outputs[0][0], axis=-1)
        print(output_mean)
if __name__ == '__main__':
    # 1. load one audio segment
    """
    daisy1.npy
    daisy2.npy
    daisy3.npy
    daisy4.npy
    opera_fem2.npy
    opera_fem4.npy
    opera_male3.npy
    opera_male5.npy
    pop1.npy
    pop2.npy
    pop3.npy
    pop4.npy
    """
    f = 'daisy1.npy'
    xlist, ylist = load_single_data_for_test(f, seg_len=SEG_LEN)
    CenFreq = get_CenFreq(StartFreq=31, StopFreq=1250, NumPerOct=60) # (321) # these parameters were fixed at feature-extraction time
    mapping = seq2map(ylist[0][:, 1], CenFreq) # (321, T)
    # 2. load model
    from network.ftanet_2 import create_model
    model = create_model(input_shape=IN_SHAPE)
    model.load_weights('model/ftanet_2_1015.h5')
    model.compile(loss='binary_crossentropy', optimizer=(Adam(lr=LR)))
    # model.summary()
    # layers = ['multiply_33', 'multiply_34']
    # layers = ['multiply']
    # layers.extend(['multiply_{}'.format(i) for i in range(1, 35, 5)])
    # layers.extend(['multiply_{}'.format(i) for i in range(5, 35, 5)])
    # layers = ['softmax_21']
    layers = ['reshape_30', 'reshape_31']
    # 3. visualization
    # print_tf_weights(model, xlist[0], 3)
    # visualize(model, xlist[0], mapping, 1, layers)
    # Dump the three raw input channels of one segment as spectrogram images.
    data = xlist[0][5]
    for i in range(3):
        x = data[:, :, i]
        display_spec(x, x, 'visualization/input_{}.png'.format(i))
    # for seg in range(len(xlist[0])-1):
    #     visualize(model, xlist[0], mapping, seg, layers)
    # for seg in range(len(xlist[0])-1):
    #     visualize(model, xlist[0], mapping, seg, layers)
    #     ## choose one segment
    #     data = xlist[0][seg]
    #     ground = mapping[1:, seg*SEG_LEN: (seg+1)*SEG_LEN]
    #     print(ground.shape)
    #     display_spec(ground, ground, 'visualization/{}_ground.png'.format(seg))
    #     ## generate image to overly
    #     image = data[:, :, 1] * data[:, :, 2]
    #     # display_spec(image, image, 'visualization/{}_origin.png'.format(seg))
    #     # image = np.log(image + 1e-9)
    #     image = scale_minmax(image, 0, 255).astype(np.uint8)
    #     # image = 255 - image
    #     # img = Image.fromarray(image, mode='L')
    #     # img.save('visualization/origin.png', quality=95, subsampling=0)
    #     ## as input data
    #     x = np.expand_dims(data, axis=0)
    #     # 4. visualization
    #     for layer_name in layers:
    #         outputs = K.function([model.get_input_at(0)], [model.get_layer(layer_name).output])([x])
    #         # activations = {layer_name: outputs[0]}
    #         # display_heatmaps(activations, image, 'visualization/')
    #         output_mean = np.mean(outputs[0][0], axis=-1)
    #         print(output_mean.shape)
    #         # display_heatmap(output_mean, image, 'visualization/{}.png'.format(layer_name), 0.5)
    #         display_spec(output_mean, image, 'visualization/{}_{}.png'.format(seg, layer_name))
|
[
"yu@yu.local"
] |
yu@yu.local
|
3a81b8fbe1e65f9a526b2ed6c28352b2b5789211
|
50546ae1318dd72c6294c2fad4da652b2431ccd1
|
/Python/Python_code_challenges/traverseTree.py
|
5f880593ec50d0dc2ce71bb04a82de1f4f51994e
|
[] |
no_license
|
jamil-said/code-samples
|
a68550bf0b0acd340496504c07ddccc24a6aa467
|
bfec99e5f02c5fa116fe9875662fee2e1ebfff27
|
refs/heads/master
| 2021-06-06T06:41:50.143626
| 2021-02-16T03:30:41
| 2021-02-16T03:30:41
| 130,426,151
| 6
| 3
| null | 2020-04-25T04:14:42
| 2018-04-21T00:35:43
|
PHP
|
UTF-8
|
Python
| false
| false
| 1,804
|
py
|
""" traverseTree -- 30min
Note: Try to solve this task without using recursion, since this is what
you'll be asked to do during an interview.
Given a binary tree of integers t, return its node values in the following
format:
The first element should be the value of the tree root;
The next elements should be the values of the nodes at height 1 (i.e. the
root children), ordered from the leftmost to the rightmost one;
The elements after that should be the values of the nodes at height 2
(i.e. the children of the nodes at height 1) ordered in the same way; Etc.
Example
For
t = {
"value": 1,
"left": {
"value": 2,
"left": null,
"right": {
"value": 3,
"left": null,
"right": null
}
},
"right": {
"value": 4,
"left": {
"value": 5,
"left": null,
"right": null
},
"right": null
}
}
the output should be
traverseTree(t) = [1, 2, 4, 3, 5].
This t looks like this:
1
/ \
2 4
\ /
3 5
Input/Output
[execution time limit] 4 seconds (py3)
[input] tree.integer t
Guaranteed constraints:
0 ≤ tree size ≤ 104.
[output] array.integer
An array that contains the values at t's nodes, ordered as described above.
"""
# Definition for binary tree:
# class Tree(object):
# def __init__(self, x):
# self.value = x
# self.left = None
# self.right = None
from collections import deque
def traverseTree(t):
    """Return the node values of binary tree *t* in level (BFS) order.

    Iterative breadth-first traversal: the original named its deque
    `stack`, which was misleading — popleft/append make it a FIFO queue.
    Returns [] for an empty tree.
    """
    if not t:
        return []
    queue, result = deque([t]), []
    while queue:
        node = queue.popleft()          # FIFO: oldest discovered node first
        result.append(node.value)
        if node.left:
            queue.append(node.left)
        if node.right:
            queue.append(node.right)
    return result
|
[
"jamilsaid@hotmail.com"
] |
jamilsaid@hotmail.com
|
98ab298629ddc2844ed6eb061ee47565c5964220
|
d3b821edfcac5757b2511d58b62c391af9f48545
|
/testauto/CUartThread.py
|
277b04ba76659623970f7fff15a8d271640022de
|
[] |
no_license
|
sql7777/RFID4G-1
|
187fa48ab7ceb79e44fda4da7fb92ca04bad47de
|
f8a0ff5aeca3029068231bdcad4a71f44464529e
|
refs/heads/master
| 2020-03-07T13:52:16.527044
| 2018-06-18T03:43:23
| 2018-06-18T03:43:23
| 127,512,429
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,250
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 1 10:09:19 2018
@author: root
"""
import sys
import ctypes
from ctypes import *
import threading
import time
import queue
class CUartThread(object):
    """Threaded UART wrapper around a native shared library (CUart.so).

    One reader thread copies raw bytes from the port into a bounded queue;
    one worker thread drains the queue and either prints the data or hands
    it to a user callback. Thread-safety of queue access is enforced with
    threadLock; fd < 0 means the port is closed.
    """
    def __init__(self,Port='/dev/ttyAMA3',baudrate=115200,ReceiveCallBack=None,RCallarg=None,SoPath="./CUart.so"):
        # Load the native UART library and bind its entry points.
        self.sopath=SoPath
        self.ll = ctypes.cdll.LoadLibrary
        self.lib = self.ll(SoPath)
        self.fd=-1
        self.port = Port
        self.baudrate = baudrate
        # Fixed 10 KiB receive buffer shared by all reads.
        self.dd = c_ubyte * 10240
        self.dest = self.dd()
        self.threadalive = False
        self.thread_read=None
        self.thread_read_flg=False
        self.thread_process=None
        self.thread_process_flg=False
        self.threadLock=threading.Lock()
        # Bounded hand-off queue between reader and processor threads.
        self.qworkqueue=queue.Queue(10)
        self.receivecallback=ReceiveCallBack
        self.rcallarg=RCallarg
        ##call
        self.funUART_Open=self.lib.UART_OpenCom
        self.funUART_Open.restype = c_int
        self.funUART_Close=self.lib.UART_Close
        self.funUART_Recv=self.lib.UART_Recv
        self.funUART_Recv.restype = c_int
        self.funUART_Send=self.lib.UART_Send
        self.funUART_Send.restype = c_int
    def OpenUart(self):
        # (Re)open the port; returns the new fd (< 0 on failure).
        if self.fd>-1:
            self.CloseUart()
        self.fd = self.funUART_Open(self.fd, cast(self.port, POINTER(c_byte)), self.baudrate)
        return self.fd
    def CloseUart(self):
        # Close the port if open and mark fd invalid.
        if self.fd>-1:
            self.funUART_Close(self.fd)
            self.fd=-1
    def RecvUart(self):
        # One blocking-library read; returns (byte_count, bytes_read).
        ret=0
        retstr=b''
        if self.fd>-1:
            ret=self.funUART_Recv(self.fd,byref(self.dest),1024)
            if ret>0:
                retstr+=bytes(self.dest[:ret])
            return ret, retstr
        else:
            return ret, retstr
    def SendUart(self,data):#cast(data, POINTER(c_ubyte)),len(data)
        # Write raw bytes; returns the library's byte count (0 if port closed).
        ret=0
        if self.fd>-1:
            ret=self.funUART_Send(self.fd,cast(data, POINTER(c_byte)),len(data))
        return ret
    def ComThreadReceiveCallBack(self):
        # Invoke the user callback with the most recent payload (in rcallarg).
        if self.receivecallback!=None:
            return self.receivecallback(self.rcallarg)
    def RecvUartThread(self):
        # Reader loop: poll the port and push received chunks onto the queue.
        if self.fd < 0:
            return
        while self.threadalive:
            tout = b''
            revnums = self.funUART_Recv(self.fd,byref(self.dest),1024)
            if revnums > 0:
                tout += bytes(self.dest[:revnums])
            if len(tout)>0:
                self.threadLock.acquire()
                # NOTE(review): put() on a full bounded queue blocks while the
                # lock is held — the processor also takes the lock, so a full
                # queue can deadlock; confirm intended behavior.
                if self.qworkqueue.full():
                    print("qworkqueue00000000")
                self.qworkqueue.put(tout)
                #print ('qworkqueue size : {0},tounum: {1}'.format(self.qworkqueue.qsize(), len(tout)))
                self.threadLock.release()
                print('tout{0},{1}'.format(tout,len(tout)))
            else:
                time.sleep(0.05)
                continue
        print("ReadSerialCOM END")
    def ProcessDataThread(self):
        # Worker loop: drain the queue; dispatch to the callback or print.
        # Runs only if the port was already open when the thread started.
        if self.fd>-1:
            while self.threadalive:
                time.sleep(0.05)
                self.threadLock.acquire()
                if not self.qworkqueue.empty():
                    if self.receivecallback != None:
                        self.rcallarg = self.qworkqueue.get()
                    else:
                        data = self.qworkqueue.get()
                    self.threadLock.release()
                    if self.receivecallback != None:
                        self.ComThreadReceiveCallBack()
                        print('callback:{0}'.format(self.rcallarg, ))
                    else:
                        print(data)
                else:
                    self.threadLock.release()
    def StartReadThread(self):
        # Start the daemon reader thread, opening the port first if needed.
        if self.fd < 0:
            if self.OpenUart() < 0:
                return False
        if self.fd > -1 and self.thread_read == None:
            self.threadalive = True
            self.thread_read_flg = True
            self.thread_read = threading.Thread(target=self.RecvUartThread, name='RecvUartThread')
            self.thread_read.setDaemon(True)
            self.thread_read.start()
        return True
    def ProcessThread(self):
        # Start the daemon processing thread (idempotent).
        if self.thread_process == None:
            self.thread_process_flg = True
            self.threadalive = True
            self.thread_process = threading.Thread(target=self.ProcessDataThread, name='ProcessDataThread')
            self.thread_process.setDaemon(True)
            self.thread_process.start()
        return True
    def StopThread(self):
        # Signal both loops to exit and join whichever threads were started.
        self.threadalive = False
        if self.thread_read_flg:
            self.thread_read.join()
            self.thread_read = None
        else:
            pass
        if self.thread_process_flg:
            self.thread_process.join()
            self.thread_process = None
        else:
            pass
    def RunAllStart(self):
        # Convenience: open the port and start both threads; False on any failure.
        if self.OpenUart()>-1:
            try:
                if self.StartReadThread():
                    print ("StartReadThread")
                else:
                    print ("StartReadThread err")
                    return False
                if self.ProcessThread():
                    print ("ProcessThread")
                else:
                    self.StopThread()
                    print ("ProcessThread err")
                    return False
            except Exception as se:
                print(str(se))
                return False
        else:
            print ("OpenSerialCOM err")
            return False
        return True
return True
def maintest():
    """Manual smoke test: endless send loop if start-up works, else a raw
    open/send/recv fallback. Never returns on the success path."""
    senddata=b'12345678901234567890'
    com = CUartThread(Port=b'/dev/ttyAMA3')
    if com.RunAllStart():
        # Success path: send forever, one packet per second (blocks here).
        while True:
            t=com.SendUart(senddata)#(b'12345678901234567890')
            print('send:{0}:{1}'.format(senddata,t))
            #print(t)
            time.sleep(1)
    # Fallback (only reached if RunAllStart failed): exercise the raw API once.
    ret=com.OpenUart()
    print('open com : {0}'.format(ret))
    if ret>-1:
        ret=com.SendUart(b'1234567890')
        print(ret)
        if ret>0:
            while 1:
                ddd=com.RecvUart()
                print(ddd)
                # NOTE(review): RecvUart returns a (count, bytes) tuple, which is
                # always truthy — this loop exits after the first read; confirm intent.
                if ddd:
                    com.CloseUart()
                    break
if __name__ == '__main__':
    maintest()
|
[
"tong5188@163.com"
] |
tong5188@163.com
|
1f7324a86ce39d75b5b0bcdb74486a69fd5f8d37
|
5143a5f4ded3194ca5118bc1608504b19856b38c
|
/game_server/tests/test_board.py
|
c29176b430408bb2f60886387a1bf82b6c4e2342
|
[] |
no_license
|
Kieran7741/connect_5
|
cb7725a0a428a98b9f240b71e1042fc26b5bf200
|
0f79560b55fe305d0389add4a893de3eb71542c1
|
refs/heads/master
| 2022-12-12T10:44:23.576458
| 2020-01-10T09:25:04
| 2020-01-10T09:25:04
| 224,888,685
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,215
|
py
|
from game_server.game_session import Board
import unittest
from unittest.mock import patch
class TestBoard(unittest.TestCase):
    """Unit tests for game_session.Board: disc drops, win detection on
    rows, columns, and diagonals (connect-5 rules)."""
    def setUp(self):
        # Fresh two-player board before every test.
        self.board = Board(['X', 'O'])
    def test_drop_disk_successful(self):
        self.board.drop_disc(1, 'X')
        self.assertTrue('X' in self.board.board_matrix[1])
    def test_drop_disk__invalid_disk(self):
        with self.assertRaises(Exception) as e:
            self.board.drop_disc(1, 'Y')
        self.assertEqual('Invalid disc: Y', str(e.exception))
    def test_drop_disk__no_space_left_in_column(self):
        self.board.board_matrix[1] = [''] # This represents a full column
        with self.assertRaises(Exception) as e:
            self.board.drop_disc(1, 'X')
        self.assertEqual('No space left in column: 1', str(e.exception))
    def test_drop_disk__invalid_column(self):
        with self.assertRaises(Exception) as e:
            self.board.drop_disc(20, 'X')
        self.assertEqual('Invalid column: 20', str(e.exception))
    @patch('game_server.game_session.Board.check_rows_and_cols', return_value='X')
    def test_check_for_winner__winner_found(self, _):
        self.assertEqual('X', self.board.check_for_winner())
    @patch('game_server.game_session.Board.check_rows_and_cols', return_value=None)
    @patch('game_server.game_session.Board.check_diagonals', return_value=None)
    def test_check_for_winner__no_winner_found(self, *_):
        self.assertIsNone(self.board.check_for_winner())
    def test_check_diagonals__no_winner(self):
        self.assertIsNone(self.board.check_diagonals())
    @patch('builtins.print')
    def test_check_diagonals__winner(self, _):
        # Five X discs on the main diagonal.
        self.board.board_matrix[0][0] = 'X'
        self.board.board_matrix[1][1] = 'X'
        self.board.board_matrix[2][2] = 'X'
        self.board.board_matrix[3][3] = 'X'
        self.board.board_matrix[4][4] = 'X'
        self.assertEqual('X', self.board.check_diagonals())
    def test_check_row_and_col__no_winner(self):
        self.assertIsNone(self.board.check_rows_and_cols())
    @patch('builtins.print')
    def test_check_row_and_col__row_winner(self, mock_print):
        # Five X discs across row 0 of the first five columns.
        self.board.board_matrix[0][0] = 'X'
        self.board.board_matrix[1][0] = 'X'
        self.board.board_matrix[2][0] = 'X'
        self.board.board_matrix[3][0] = 'X'
        self.board.board_matrix[4][0] = 'X'
        self.assertEqual('X', self.board.check_rows_and_cols())
        mock_print.assert_called_once_with('Winner by connecting a row')
    @patch('builtins.print')
    def test_check_row_and_col__col_winner(self, mock_print):
        self.board.board_matrix[2] = ['X', 'X', 'X', 'X', 'X', '_']
        self.assertEqual('X', self.board.check_rows_and_cols())
        mock_print.assert_called_once_with('Winner by connecting a column')
    def test_check_if_line_has_five_in_a_row__winner(self):
        self.assertEqual('X', self.board.check_if_line_has_five_in_a_row(['X', 'X', 'X', 'X', 'X', '_']))
    def test_check_if_line_has_five_in_a_row__no_winner(self):
        self.assertIsNone(self.board.check_if_line_has_five_in_a_row(['X', 'X', '_', '_', '_', '_']))
if __name__ == '__main__':
    unittest.main()
|
[
"kieran7741@gmail.com"
] |
kieran7741@gmail.com
|
4f0a82705b4090c087f38ba7d91a314c6ac954a0
|
53a67e07e13441568d087537ee4fe95a79eb0d3a
|
/lib/loops.py
|
1695dc1c19d1b6365a4ba1847cfc462dfdecfcbc
|
[] |
no_license
|
ondigitalDesarrollo/curso-python
|
eeb5e6b2b980f64807e68aa0ffcbb7415c4e7dc2
|
00f8efaefab7b841997fa15d34f8322a5d43f4ee
|
refs/heads/master
| 2021-01-23T07:09:08.130849
| 2017-09-20T21:30:58
| 2017-09-20T21:30:58
| 102,498,715
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 98
|
py
|
# -*- coding: utf-8 -*-
# Count down from 10 to 1, printing a message on each pass.
for i in range(10, 0, -1):
    print('Ando en un loop ${}'.format(i))
|
[
"ondigital.desarrollo@gmail.com"
] |
ondigital.desarrollo@gmail.com
|
fa9c7503fbeacbfa7b221a7c6b4459b5273c1287
|
8a42e8ef22dd15a62cd407910de96b0873fe5252
|
/schedule/migrations/0012_auto_20190621_1620.py
|
1d90630aca8b34e57f4a50955096f5dfe33c4dbb
|
[] |
no_license
|
Vini-S/Fintek_Project
|
9293300c798cb5e9f9b84d34972392b411849320
|
406b939832f4a3f03ff8645500502a98c4d7ca75
|
refs/heads/master
| 2020-06-19T11:31:37.413669
| 2019-07-13T08:16:41
| 2019-07-13T08:16:41
| 196,693,104
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 611
|
py
|
# Generated by Django 2.2.2 on 2019-06-21 10:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration: drops Course.m_id and adds a
    Chapters.c_code foreign key pointing at schedule.Course."""
    dependencies = [
        ('schedule', '0011_remove_chapters_f_id'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='course',
            name='m_id',
        ),
        migrations.AddField(
            model_name='chapters',
            name='c_code',
            # default='' backfills existing Chapters rows during the migration.
            field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='schedule.Course'),
        ),
    ]
|
[
"noreply@github.com"
] |
Vini-S.noreply@github.com
|
21bb0d135f78036e6a436bd9570a5d12ad034bba
|
21f98d8bb31264c94e7a98fb8eb806d7f5bd396e
|
/408. Valid Word Abbreviation.py
|
a3e49945b9f4eea46fb3a81b6e38ddb114650e08
|
[] |
no_license
|
mcfair/Algo
|
e1500d862a685e598ab85e8ed5b68170632fdfd0
|
051e2a9f6c918907cc8b665353c46042e7674e66
|
refs/heads/master
| 2021-10-31T01:25:46.917428
| 2021-10-08T04:52:44
| 2021-10-08T04:52:44
| 141,460,584
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
#Easy
class Solution(object):
    def validWordAbbreviation(self, word, abbr):
        """Return True if *abbr* is a valid abbreviation of *word*.

        A run of digits in abbr skips that many characters of word; runs
        may not have leading zeros. Both strings must be fully consumed.
        :type word: str
        :type abbr: str
        :rtype: bool
        """
        wi, ai = 0, 0
        wlen, alen = len(word), len(abbr)
        while wi < wlen and ai < alen:
            if word[wi] == abbr[ai]:
                # Literal character match.
                wi += 1
                ai += 1
            elif abbr[ai] == "0":
                # A number may not start with (or be) a leading zero.
                return False
            elif abbr[ai].isnumeric():
                # Consume the whole digit run and skip that many word chars.
                run_end = ai
                while run_end < alen and abbr[run_end].isnumeric():
                    run_end += 1
                wi += int(abbr[ai:run_end])
                ai = run_end
            else:
                return False
        return wi == wlen and ai == alen
|
[
"noreply@github.com"
] |
mcfair.noreply@github.com
|
c2553f1bd1117197176b028bd068473aad43ce05
|
a0f0e97a2e666ad08aaf3be534f91941810da66e
|
/results_processing_filter_vector.py
|
188fbf2301c5cfc19d41ac033060b80755cf4a09
|
[] |
no_license
|
DinosaurInSpace/MS2_fragments_in_imaging_MS
|
26023a78e194826d58bba64c27493be6356ad56e
|
5b1d55c361b312e0e34738a3e1d468edb8aae214
|
refs/heads/master
| 2022-11-11T03:26:21.212865
| 2020-06-25T16:16:45
| 2020-06-25T16:16:45
| 256,277,609
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,558
|
py
|
#!/usr/bin/env python
"""
To write!
"""
import pandas as pd
import numpy as np
import argparse
__author__ = "Christopher M Baxter Rath"
__copyright__ = "Copyright 2020"
__credits__ = ["Christopher M Baxter Rath"]
__license__ = "GPL"
__version__ = "0.1"
__maintainer__ = "Christopher M Baxter Rath"
__email__ = "chrisrath@gmail.com"
__status__ = "Development"
def filter_me(df, f):
    """Subset *df* by the polarity/analyzer/group/expt_type selections in filter row *f*.

    'all' in f.analyzer means both instrument types; 'all' in f.group or
    f.expt_type means no constraint on that column.
    """
    if f.analyzer == 'all':
        instruments = ['FTICR', 'Orbitrap']
    else:
        instruments = [f.analyzer]
    # Build one boolean mask incrementally instead of enumerating branch
    # combinations; polarity and analyzer are always constrained.
    mask = (df.polarity == f.polarity) & (df.analyzer.isin(instruments))
    if f.group != 'all':
        # f.group may be a single name or already a list of names.
        if type(f.group) == str:
            wanted_groups = [f.group]
        else:
            wanted_groups = f.group
        mask = mask & (df.group.isin(wanted_groups))
    if f.expt_type != 'all':
        mask = mask & (df.expt_type == f.expt_type)
    return df[mask]
def frag_per_par(df, result_type):
    """Return the mean ('avg') or standard deviation (anything else) of the
    fragment count per (id_x, ds_id, par_formula) parent group."""
    counts = df[['id_x', 'ds_id', 'par_formula', 'cos']].copy(deep=True)
    # Turn the cos column into a row counter so the groupby sum counts fragments.
    counts['cos'] = 1
    counts = counts.groupby(['id_x', 'ds_id', 'par_formula']).sum().reset_index()
    return counts.cos.mean() if result_type == 'avg' else counts.cos.std()
def find_top_10_frags(df):
    """Return up to ten '{id_x}_{par_frag}_{formula}_{count}' strings for the
    most frequently observed fragments (those seen more than once)."""
    counts = df[['id_x', 'par_formula', 'formula', 'par_frag']].copy(deep=True)
    counts['n'] = 1
    counts = counts.groupby(['id_x', 'par_formula', 'formula', 'par_frag']).sum().reset_index()
    # Keep only repeated fragments, most frequent first, capped at ten.
    counts = counts[counts.n > 1].copy(deep=True)
    counts.sort_values(by=['n'], inplace=True, ascending=False)
    counts = counts.head(10).copy(deep=True)
    counts['out'] = counts['id_x'] + "_" + counts['par_frag'] + "_" + counts['formula'] + '_' + counts['n'].astype(str)
    return list(counts.out)
def score_filtered(f, df):
    """Aggregate per-filter statistics from scored results into dict *f*.

    Adds dataset counts, parent/fragment rates (all normalised by the
    number of distinct ds_id), cosine-score summary stats at thresholds
    0.00/0.50/0.75/0.90, unique- and one-isobar rates, and the top-10
    fragment list. Returns f (possibly with only n_ds_id when df is empty).
    """
    df1 = df[['id_x', 'ds_id', 'ion_mass', 'cos', 'db_n_isobar',
              'ds_n_isobar', 'par_frag', 'par_or_frag', 'formula',
              'par_formula']].copy(deep=True)
    df = df1.copy(deep=True)
    n_ds_id = float(len(df.ds_id.unique()))
    f['n_ds_id'] = n_ds_id
    # Divide by zero issues below with empty df
    if n_ds_id == 0:
        return f
    # Parent results
    f['n_par'] = df[df.par_or_frag == 'P'].shape[0] / n_ds_id
    # Fragment results
    df = df[df.par_or_frag == 'F']
    f['cos_avg'] = df.cos.mean()
    f['cos_std'] = df.cos.std()
    # Fragment counts per dataset at increasing cosine-score thresholds.
    f['n_frag_00'] = df[df.cos >= 0.00].shape[0] / n_ds_id
    f['n_frag_50'] = df[df.cos >= 0.50].shape[0] / n_ds_id
    f['n_frag_75'] = df[df.cos >= 0.75].shape[0] / n_ds_id
    f['n_frag_90'] = df[df.cos >= 0.90].shape[0] / n_ds_id
    # Fragments-per-parent mean/std at the same thresholds.
    f['f_per_p_avg_00'] = frag_per_par(df[df.cos >= 0.00], 'avg')
    f['f_per_p_std_00'] = frag_per_par(df[df.cos >= 0.00], 'stdev')
    f['f_per_p_avg_50'] = frag_per_par(df[df.cos >= 0.50], 'avg')
    f['f_per_p_std_50'] = frag_per_par(df[df.cos >= 0.50], 'stdev')
    f['f_per_p_avg_75'] = frag_per_par(df[df.cos >= 0.75], 'avg')
    f['f_per_p_std_75'] = frag_per_par(df[df.cos >= 0.75], 'stdev')
    f['f_per_p_avg_90'] = frag_per_par(df[df.cos >= 0.90], 'avg')
    f['f_per_p_std_90'] = frag_per_par(df[df.cos >= 0.90], 'stdev')
    # Unique and 1 isobar results
    # (db_n_isobar == 0: unique in the database; == 1: exactly one isobar.)
    df = df1[df1.par_or_frag == 'F'].copy(deep=True)
    df = df[df.db_n_isobar == 0]
    f['n_u_db_frag_00'] = df[df.cos >= 0.00].shape[0] / n_ds_id
    f['n_u_db_frag_50'] = df[df.cos >= 0.50].shape[0] / n_ds_id
    f['n_u_db_frag_75'] = df[df.cos >= 0.75].shape[0] / n_ds_id
    f['n_u_db_frag_90'] = df[df.cos >= 0.90].shape[0] / n_ds_id
    df = df1[df1.par_or_frag == 'F'].copy(deep=True)
    df = df[df.db_n_isobar == 1]
    f['n_1_db_frag_00'] = df[df.cos >= 0.00].shape[0] / n_ds_id
    f['n_1_db_frag_50'] = df[df.cos >= 0.50].shape[0] / n_ds_id
    f['n_1_db_frag_75'] = df[df.cos >= 0.75].shape[0] / n_ds_id
    f['n_1_db_frag_90'] = df[df.cos >= 0.90].shape[0] / n_ds_id
    # Same breakdown against dataset-level isobars.
    df = df1[df1.par_or_frag == 'F'].copy(deep=True)
    df = df[df.ds_n_isobar == 0]
    f['n_u_ds_frag_00'] = df[df.cos >= 0.00].shape[0] / n_ds_id
    f['n_u_ds_frag_50'] = df[df.cos >= 0.50].shape[0] / n_ds_id
    f['n_u_ds_frag_75'] = df[df.cos >= 0.75].shape[0] / n_ds_id
    f['n_u_ds_frag_90'] = df[df.cos >= 0.90].shape[0] / n_ds_id
    df = df1[df1.par_or_frag == 'F'].copy(deep=True)
    df = df[df.ds_n_isobar == 1]
    f['n_1_ds_frag_00'] = df[df.cos >= 0.00].shape[0] / n_ds_id
    f['n_1_ds_frag_50'] = df[df.cos >= 0.50].shape[0] / n_ds_id
    f['n_1_ds_frag_75'] = df[df.cos >= 0.75].shape[0] / n_ds_id
    f['n_1_ds_frag_90'] = df[df.cos >= 0.90].shape[0] / n_ds_id
    # Finds top-10 fragments by count present at least once
    df = df1[df1.par_or_frag == 'F'].copy(deep=True)
    f['top_10'] = find_top_10_frags(df)
    return f
def generate_vector(df, counter):
    # Generates a vector for analyzing result per Theo's request.
    # Splits parent ('P') and fragment ('F') rows, renames their isobar
    # columns apart, merges parent isobar info onto each fragment by id_x,
    # and tags every row with the filter index. Returns only the columns in
    # `cols`.
    vector = ['cos', 'db_n_isobar_par', 'db_n_isobar_frag',
              'ds_n_isobar_frag']
    metadata = ['ds_id', 'id_x', 'formula', 'par_frag', 'polarity',
                'analyzer', 'group', 'expt_type', 'filter']
    cols = vector + metadata
    p_df = df[df.par_or_frag == 'P'].copy(deep=True)
    # Assigning df's column works here because pandas aligns on p_df's
    # (parent-row) index — equivalent to p_df['db_n_isobar'].
    p_df['db_n_isobar_par'] = df['db_n_isobar']
    p_df = p_df[['id_x', 'db_n_isobar_par']]
    f_df = df[df.par_or_frag == 'F'].copy(deep=True)
    f_df['db_n_isobar_frag'] = f_df['db_n_isobar']
    f_df['ds_n_isobar_frag'] = f_df['ds_n_isobar']
    # Left merge keeps every fragment even when its parent row is missing.
    df = pd.merge(f_df, p_df, how='left', on='id_x')
    df['filter'] = counter
    return df[cols].copy(deep=True)
def generate_results(filter_df, scored_df):
    """Score *scored_df* under every filter row of *filter_df*.

    Returns a pair: (list of per-filter result dicts,
    list of per-filter annotated vector DataFrames).
    """
    filter_results = []
    vector_frames = []
    for row_idx in range(filter_df.shape[0]):
        print(row_idx)
        # Filter level results
        filt = filter_df.iloc[row_idx, :]
        subset = filter_me(scored_df, filt)
        filter_results.append(score_filtered(dict(filt), subset))
        # Annotated vectors
        vector_frames.append(generate_vector(subset, row_idx))
    return filter_results, vector_frames
def main():
    # Main captures input variables when called as command line script.
    # Not yet updated!
    # NOTE(review): generate_results(filter_df, scored_df) expects two
    # DataFrames, but this CLI path passes (limit_list, out_name) — the
    # command-line entry point looks broken; confirm intended arguments
    # before relying on it.
    parser = argparse.ArgumentParser(description='')
    parser.add_argument("--limit_list",
                        default=[],
                        type=list,
                        help="List of ids to limit results to, e.g. MS1 hits")
    parser.add_argument("--out_name",
                        default=None,
                        type=str,
                        help="Name of output METASPACE db")
    args = parser.parse_args()
    generate_results(args.limit_list,
                     args.out_name,
                     )
    print('Sirius spectra with formulas were converted to METASPACE MS1 and MS2 db')
    return 1
if __name__ == "__main__":
    main()
|
[
"chrisrath@gmail.com"
] |
chrisrath@gmail.com
|
23ce27436597600f2adac0dafef4003a390aea37
|
809a49e2bcc0870012d5e84fd9e8f454d3adbc41
|
/batchprocessing/models.py
|
c92f1eb71d3a3281e8ddd80251c991391559ab24
|
[] |
no_license
|
AYUSHOPSHARMA/wealthadvisory
|
c16ee92c40b1817ce62f4a8b8345a94f62d4d069
|
9356c5e433f6346e1028260dd337fd303fd6a9ca
|
refs/heads/master
| 2020-03-17T21:19:31.892921
| 2018-06-24T17:05:42
| 2018-06-24T17:05:42
| 133,953,758
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 44,318
|
py
|
from django.db import models
# Create your models here.
from mongoengine import *
import datetime
connect('wealth_management_indices')
class nifty_500_companies(Document):
    """MongoDB document: identity record for one Nifty-500 index constituent."""
    Company_Name = StringField(max_length=100,required=False)
    Industry = StringField(max_length=100,required=True)
    Symbol = StringField(max_length=100,required=True)
    Series = StringField(max_length=100,required=True)
    ISIN_Code = StringField(max_length=100,required=False)
class nifty_200_companies(Document):
    """MongoDB document: identity record for one Nifty-200 index constituent."""
    Company_Name = StringField(max_length=100,required=False)
    Industry = StringField(max_length=100,required=True)
    Symbol = StringField(max_length=100,required=True)
    Series = StringField(max_length=100,required=True)
    ISIN_Code = StringField(max_length=100,required=False)
class nifty_100_companies(Document):
    """MongoDB document: identity record for one Nifty-100 index constituent."""
    Company_Name = StringField(max_length=100,required=False)
    Industry = StringField(max_length=100,required=True)
    Symbol = StringField(max_length=100,required=True)
    Series = StringField(max_length=100,required=True)
    ISIN_Code = StringField(max_length=100,required=False)
class nifty_50_companies(Document):
    """MongoDB document: identity record for one Nifty-50 index constituent."""
    Company_Name = StringField(max_length=100,required=False)
    Industry = StringField(max_length=100,required=True)
    Symbol = StringField(max_length=100,required=True)
    Series = StringField(max_length=100,required=True)
    ISIN_Code = StringField(max_length=100,required=False)
class nifty_500_companies_fundamental_data(Document):
    """MongoDB document: one fundamentals snapshot for a Nifty-500 company.

    All metrics are stored as strings (scraped values). NOTE(review):
    several StringFields use default=datetime.datetime.utcnow — a callable
    default on a string field looks unintended; confirm these should not
    be DateTimeFields.
    """
    Date = DateTimeField(default=datetime.datetime.utcnow)
    Ticker=StringField(max_length=100,required=False)
    Company_Name = StringField(max_length=100,required=False)
    Industry = StringField(max_length=100,required=True)
    # Valuation measures
    Market_Cap= StringField(max_length=100,required=True)
    Enterprise_Value= StringField(max_length=100,required=True)
    Trailing_P_E= StringField(required=True)
    Forward_P_E= StringField(required=True)
    PEG_Ratio= StringField(required=True)
    Price_Sales= StringField(required=True)
    Price_Book= StringField(required=True)
    Enterprise_Value_Revenue= StringField(required=True)
    Enterprise_Value_EBITDA= StringField(required=True)
    Fiscal_Year_Ends= StringField(default=datetime.datetime.utcnow)
    Most_Recent_Quarter= StringField(default=datetime.datetime.utcnow)
    # Profitability and income statement
    Profit_Margin= StringField(required=True)
    Operating_Margin= StringField(required=True)
    Return_on_Assets= StringField(required=True)
    Return_on_Equity= StringField(required=True)
    Revenue= StringField(max_length=100,required=True)
    Revenue_Per_Share= StringField(required=True)
    Quarterly_Revenue_Growth= StringField(required=True)
    Gross_Profit= StringField(max_length=100,required=True)
    EBITDA= StringField(max_length=100,required=True)
    Net_Income_Avi_to_Common= StringField(max_length=100,required=True)
    Diluted_EPS= StringField(required=True)
    Quarterly_Earnings_Growth= StringField(required=True)
    # Balance sheet and cash flow
    Total_Cash= StringField(max_length=100,required=True)
    Total_Cash_Per_Share= StringField(required=True)
    Total_Debt= StringField(max_length=100,required=True)
    Total_Debt_Equity= StringField(required=True)
    Current_Ratio= StringField(required=True)
    Book_Value_Per_Share= StringField(required=True)
    Operating_Cash_Flow= StringField(max_length=100,required=True)
    Levered_Free_Cash_Flow= StringField(max_length=100,required=True)
    # Trading information
    Beta= StringField(required=True)
    Week_52_Change= StringField(required=True)
    SP500_52_Week_Change= StringField(required=True)
    Week_52_High= StringField(required=True)
    Week_52_Low= StringField(required=True)
    Day_50_Moving_Average= StringField(required=True)
    Day_200_Moving_Average= StringField(required=True)
    Avg_Vol_3_month= StringField(max_length=100,required=True)
    Avg_Vol_10_day= StringField(max_length=100,required=True)
    Shares_Outstanding= StringField(max_length=100,required=True)
    Float= StringField(max_length=100,required=True)
    Held_by_Insiders= StringField(required=True)
    Held_by_Institutions= StringField(required=True)
    Shares_Short= StringField(max_length=100,required=True)
    Short_Ratio= StringField(required=True)
    Short_of_Float= StringField(required=True)
    Shares_Short_prior_month= StringField(max_length=100,required=True)
    # Dividends and splits
    Forward_Annual_Dividend_Rate= StringField(required=True)
    Forward_Annual_Dividend_Yield= StringField(required=True)
    Trailing_Annual_Dividend_Yield= StringField(required=True)
    Trailing_Annual_Dividend_Rate= StringField(required=True)
    Year_5_Average_Dividend_Yield= StringField(required=True)
    Payout_Ratio= StringField(required=True)
    Dividend_Date= StringField(default=datetime.datetime.utcnow)
    Ex_Dividend_Date= StringField(default=datetime.datetime.utcnow)
    Last_Split_Factor_new_per_old= StringField(max_length=100,required=True)
    Last_Split_Date= StringField(default=datetime.datetime.utcnow)
class nifty_200_companies_fundamental_data(Document):
    """Fundamentals snapshot for one NIFTY-200 constituent (string-typed).

    Every statistic is kept as the raw scraped text (presumably values such
    as "1.2B" or "12.5%" from a statistics page — confirm against the
    scraper), so no numeric parsing or validation happens at schema level.
    """

    # --- Identification ---
    Date = DateTimeField(default=datetime.datetime.utcnow)  # snapshot timestamp
    Ticker= StringField(max_length=100,required=False)
    Company_Name = StringField(max_length=100,required=False)
    Industry = StringField(max_length=100,required=True)
    # --- Valuation measures ---
    Market_Cap= StringField(max_length=100,required=True)
    Enterprise_Value= StringField(max_length=100,required=True)
    Trailing_P_E= StringField(required=True)
    Forward_P_E= StringField(required=True)
    PEG_Ratio= StringField(required=True)
    Price_Sales= StringField(required=True)
    Price_Book= StringField(required=True)
    Enterprise_Value_Revenue= StringField(required=True)
    Enterprise_Value_EBITDA= StringField(required=True)
    # --- Fiscal period markers ---
    # NOTE(review): these are StringFields whose default is a datetime
    # callable; the generated default is a datetime object, which StringField
    # validation will reject on save. Probably meant DateTimeField (as in the
    # Decimal-typed *_fundamental_data classes) or a string default — confirm.
    Fiscal_Year_Ends= StringField(default=datetime.datetime.utcnow)
    Most_Recent_Quarter= StringField(default=datetime.datetime.utcnow)
    # --- Profitability & income statement ---
    Profit_Margin= StringField(required=True)
    Operating_Margin= StringField(required=True)
    Return_on_Assets= StringField(required=True)
    Return_on_Equity= StringField(required=True)
    Revenue= StringField(max_length=100,required=True)
    Revenue_Per_Share= StringField(required=True)
    Quarterly_Revenue_Growth= StringField(required=True)
    Gross_Profit= StringField(max_length=100,required=True)
    EBITDA= StringField(max_length=100,required=True)
    Net_Income_Avi_to_Common= StringField(max_length=100,required=True)
    Diluted_EPS= StringField(required=True)
    Quarterly_Earnings_Growth= StringField(required=True)
    # --- Balance sheet & cash flow ---
    Total_Cash= StringField(max_length=100,required=True)
    Total_Cash_Per_Share= StringField(required=True)
    Total_Debt= StringField(max_length=100,required=True)
    Total_Debt_Equity= StringField(required=True)
    Current_Ratio= StringField(required=True)
    Book_Value_Per_Share= StringField(required=True)
    Operating_Cash_Flow= StringField(max_length=100,required=True)
    Levered_Free_Cash_Flow= StringField(max_length=100,required=True)
    # --- Trading information ---
    Beta= StringField(required=True)
    Week_52_Change= StringField(required=True)
    SP500_52_Week_Change= StringField(required=True)
    Week_52_High= StringField(required=True)
    Week_52_Low= StringField(required=True)
    Day_50_Moving_Average= StringField(required=True)
    Day_200_Moving_Average= StringField(required=True)
    Avg_Vol_3_month= StringField(max_length=100,required=True)
    Avg_Vol_10_day= StringField(max_length=100,required=True)
    # --- Share statistics ---
    Shares_Outstanding= StringField(max_length=100,required=True)
    Float= StringField(max_length=100,required=True)
    Held_by_Insiders= StringField(required=True)
    Held_by_Institutions= StringField(required=True)
    Shares_Short= StringField(max_length=100,required=True)
    Short_Ratio= StringField(required=True)
    Short_of_Float= StringField(required=True)
    Shares_Short_prior_month= StringField(max_length=100,required=True)
    # --- Dividends & splits ---
    Forward_Annual_Dividend_Rate= StringField(required=True)
    Forward_Annual_Dividend_Yield= StringField(required=True)
    Trailing_Annual_Dividend_Yield= StringField(required=True)
    Trailing_Annual_Dividend_Rate= StringField(required=True)
    Year_5_Average_Dividend_Yield= StringField(required=True)
    Payout_Ratio= StringField(required=True)
    # NOTE(review): same StringField/datetime-default mismatch as the fiscal
    # period fields above — confirm intended type.
    Dividend_Date= StringField(default=datetime.datetime.utcnow)
    Ex_Dividend_Date= StringField(default=datetime.datetime.utcnow)
    Last_Split_Factor_new_per_old= StringField(max_length=100,required=True)
    Last_Split_Date= StringField(default=datetime.datetime.utcnow)
class nifty_100_companies_fundamental_data(Document):
    """Fundamentals snapshot for one NIFTY-100 constituent (string-typed).

    Statistics are stored as the raw scraped text (presumably values such as
    "1.2B" or "12.5%" — confirm against the scraper); no numeric parsing or
    validation happens at schema level.
    """

    # --- Identification ---
    Date = DateTimeField(default=datetime.datetime.utcnow)  # snapshot timestamp
    Ticker= StringField(max_length=100,required=False)
    Company_Name = StringField(max_length=100,required=False)
    Industry = StringField(max_length=100,required=True)
    # --- Valuation measures ---
    Market_Cap= StringField(max_length=100,required=True)
    Enterprise_Value= StringField(max_length=100,required=True)
    Trailing_P_E= StringField(required=True)
    Forward_P_E= StringField(required=True)
    PEG_Ratio= StringField(required=True)
    Price_Sales= StringField(required=True)
    Price_Book= StringField(required=True)
    Enterprise_Value_Revenue= StringField(required=True)
    Enterprise_Value_EBITDA= StringField(required=True)
    # --- Fiscal period markers ---
    # NOTE(review): StringField with a datetime-callable default — the
    # generated default is a datetime, which StringField validation rejects
    # on save; likely meant DateTimeField or a string default. Confirm.
    Fiscal_Year_Ends= StringField(default=datetime.datetime.utcnow)
    Most_Recent_Quarter= StringField(default=datetime.datetime.utcnow)
    # --- Profitability & income statement ---
    Profit_Margin= StringField(required=True)
    Operating_Margin= StringField(required=True)
    Return_on_Assets= StringField(required=True)
    Return_on_Equity= StringField(required=True)
    Revenue= StringField(max_length=100,required=True)
    Revenue_Per_Share= StringField(required=True)
    Quarterly_Revenue_Growth= StringField(required=True)
    Gross_Profit= StringField(max_length=100,required=True)
    EBITDA= StringField(max_length=100,required=True)
    Net_Income_Avi_to_Common= StringField(max_length=100,required=True)
    Diluted_EPS= StringField(required=True)
    Quarterly_Earnings_Growth= StringField(required=True)
    # --- Balance sheet & cash flow ---
    Total_Cash= StringField(max_length=100,required=True)
    Total_Cash_Per_Share= StringField(required=True)
    Total_Debt= StringField(max_length=100,required=True)
    Total_Debt_Equity= StringField(required=True)
    Current_Ratio= StringField(required=True)
    Book_Value_Per_Share= StringField(required=True)
    Operating_Cash_Flow= StringField(max_length=100,required=True)
    Levered_Free_Cash_Flow= StringField(max_length=100,required=True)
    # --- Trading information ---
    Beta= StringField(required=True)
    Week_52_Change= StringField(required=True)
    SP500_52_Week_Change= StringField(required=True)
    Week_52_High= StringField(required=True)
    Week_52_Low= StringField(required=True)
    Day_50_Moving_Average= StringField(required=True)
    Day_200_Moving_Average= StringField(required=True)
    Avg_Vol_3_month= StringField(max_length=100,required=True)
    Avg_Vol_10_day= StringField(max_length=100,required=True)
    # --- Share statistics ---
    Shares_Outstanding= StringField(max_length=100,required=True)
    Float= StringField(max_length=100,required=True)
    Held_by_Insiders= StringField(required=True)
    Held_by_Institutions= StringField(required=True)
    Shares_Short= StringField(max_length=100,required=True)
    Short_Ratio= StringField(required=True)
    Short_of_Float= StringField(required=True)
    Shares_Short_prior_month= StringField(max_length=100,required=True)
    # --- Dividends & splits ---
    Forward_Annual_Dividend_Rate= StringField(required=True)
    Forward_Annual_Dividend_Yield= StringField(required=True)
    Trailing_Annual_Dividend_Yield= StringField(required=True)
    Trailing_Annual_Dividend_Rate= StringField(required=True)
    Year_5_Average_Dividend_Yield= StringField(required=True)
    Payout_Ratio= StringField(required=True)
    # NOTE(review): same StringField/datetime-default mismatch as above.
    Dividend_Date= StringField(default=datetime.datetime.utcnow)
    Ex_Dividend_Date= StringField(default=datetime.datetime.utcnow)
    Last_Split_Factor_new_per_old= StringField(max_length=100,required=True)
    Last_Split_Date= StringField(default=datetime.datetime.utcnow)
class nifty_50_companies_fundamental_data(Document):
    """Fundamentals snapshot for one NIFTY-50 constituent (string-typed).

    Statistics are stored as the raw scraped text (presumably values such as
    "1.2B" or "12.5%" — confirm against the scraper); no numeric parsing or
    validation happens at schema level.
    """

    # --- Identification ---
    Date = DateTimeField(default=datetime.datetime.utcnow)  # snapshot timestamp
    Ticker= StringField(max_length=100,required=False)
    Company_Name = StringField(max_length=100,required=False)
    Industry = StringField(max_length=100,required=True)
    # --- Valuation measures ---
    Market_Cap= StringField(max_length=100,required=True)
    Enterprise_Value= StringField(max_length=100,required=True)
    Trailing_P_E= StringField(required=True)
    Forward_P_E= StringField(required=True)
    PEG_Ratio= StringField(required=True)
    Price_Sales= StringField(required=True)
    Price_Book= StringField(required=True)
    Enterprise_Value_Revenue= StringField(required=True)
    Enterprise_Value_EBITDA= StringField(required=True)
    # --- Fiscal period markers ---
    # NOTE(review): StringField with a datetime-callable default — the
    # generated default is a datetime, which StringField validation rejects
    # on save; likely meant DateTimeField or a string default. Confirm.
    Fiscal_Year_Ends= StringField(default=datetime.datetime.utcnow)
    Most_Recent_Quarter= StringField(default=datetime.datetime.utcnow)
    # --- Profitability & income statement ---
    Profit_Margin= StringField(required=True)
    Operating_Margin= StringField(required=True)
    Return_on_Assets= StringField(required=True)
    Return_on_Equity= StringField(required=True)
    Revenue= StringField(max_length=100,required=True)
    Revenue_Per_Share= StringField(required=True)
    Quarterly_Revenue_Growth= StringField(required=True)
    Gross_Profit= StringField(max_length=100,required=True)
    EBITDA= StringField(max_length=100,required=True)
    Net_Income_Avi_to_Common= StringField(max_length=100,required=True)
    Diluted_EPS= StringField(required=True)
    Quarterly_Earnings_Growth= StringField(required=True)
    # --- Balance sheet & cash flow ---
    Total_Cash= StringField(max_length=100,required=True)
    Total_Cash_Per_Share= StringField(required=True)
    Total_Debt= StringField(max_length=100,required=True)
    Total_Debt_Equity= StringField(required=True)
    Current_Ratio= StringField(required=True)
    Book_Value_Per_Share= StringField(required=True)
    Operating_Cash_Flow= StringField(max_length=100,required=True)
    Levered_Free_Cash_Flow= StringField(max_length=100,required=True)
    # --- Trading information ---
    Beta= StringField(required=True)
    Week_52_Change= StringField(required=True)
    SP500_52_Week_Change= StringField(required=True)
    Week_52_High= StringField(required=True)
    Week_52_Low= StringField(required=True)
    Day_50_Moving_Average= StringField(required=True)
    Day_200_Moving_Average= StringField(required=True)
    Avg_Vol_3_month= StringField(max_length=100,required=True)
    Avg_Vol_10_day= StringField(max_length=100,required=True)
    # --- Share statistics ---
    Shares_Outstanding= StringField(max_length=100,required=True)
    Float= StringField(max_length=100,required=True)
    Held_by_Insiders= StringField(required=True)
    Held_by_Institutions= StringField(required=True)
    Shares_Short= StringField(max_length=100,required=True)
    Short_Ratio= StringField(required=True)
    Short_of_Float= StringField(required=True)
    Shares_Short_prior_month= StringField(max_length=100,required=True)
    # --- Dividends & splits ---
    Forward_Annual_Dividend_Rate= StringField(required=True)
    Forward_Annual_Dividend_Yield= StringField(required=True)
    Trailing_Annual_Dividend_Yield= StringField(required=True)
    Trailing_Annual_Dividend_Rate= StringField(required=True)
    Year_5_Average_Dividend_Yield= StringField(required=True)
    Payout_Ratio= StringField(required=True)
    # NOTE(review): same StringField/datetime-default mismatch as above.
    Dividend_Date= StringField(default=datetime.datetime.utcnow)
    Ex_Dividend_Date= StringField(default=datetime.datetime.utcnow)
    Last_Split_Factor_new_per_old= StringField(max_length=100,required=True)
    Last_Split_Date= StringField(default=datetime.datetime.utcnow)
class nifty_50_fundamental_data_old(Document):
    """Legacy NIFTY-50 fundamentals schema (mixed String/Decimal types).

    Superseded naming suggests this is kept only for historical collections —
    confirm before dropping. Numeric statistics are DecimalFields and date
    markers are real DateTimeFields, unlike the string-typed
    *_companies_fundamental_data classes; a handful of ratio fields remain
    StringFields (Return_on_Assets, Current_Ratio, etc.).
    """

    # --- Identification ---
    Date = DateTimeField(default=datetime.datetime.utcnow)  # snapshot timestamp
    Ticker= StringField(max_length=100,required=True)
    Company_Name = StringField(max_length=100,required=False)
    Industry = StringField(max_length=100,required=False)
    # --- Valuation measures ---
    # NOTE(review): max_length is a StringField option; on DecimalField it is
    # at best ignored (min_value/max_value are the DecimalField bounds) —
    # confirm and drop where unused. Applies to every DecimalField below that
    # passes max_length.
    Market_Cap= DecimalField(max_length=100,required=False)
    Enterprise_Value= StringField(max_length=100,required=False)
    Trailing_P_E= DecimalField(required=False)
    Forward_P_E= DecimalField(required=False)
    PEG_Ratio= DecimalField(required=False)
    Price_Sales= DecimalField(required=False)
    Price_Book= DecimalField(required=False)
    Enterprise_Value_Revenue= DecimalField(required=False)
    Enterprise_Value_EBITDA= DecimalField(required=False)
    # --- Fiscal period markers ---
    Fiscal_Year_Ends= DateTimeField(default=datetime.datetime.utcnow)
    Most_Recent_Quarter= DateTimeField(default=datetime.datetime.utcnow)
    # --- Profitability & income statement ---
    Profit_Margin= DecimalField(required=False)
    Operating_Margin= DecimalField(required=False)
    Return_on_Assets= StringField(required=False)
    Return_on_Equity= StringField(required=False)
    Revenue= DecimalField(max_length=100,required=False)
    Revenue_Per_Share= DecimalField(required=False)
    Quarterly_Revenue_Growth= DecimalField(required=False)
    Gross_Profit= DecimalField(max_length=100,required=False)
    EBITDA= DecimalField(max_length=100,required=False)
    Net_Income_Avi_to_Common= DecimalField(max_length=100,required=False)
    Diluted_EPS= DecimalField(required=False)
    Quarterly_Earnings_Growth= DecimalField(required=False)
    # --- Balance sheet & cash flow ---
    Total_Cash= DecimalField(max_length=100,required=False)
    Total_Cash_Per_Share= DecimalField(required=False)
    Total_Debt= DecimalField(max_length=100,required=False)
    Total_Debt_Equity= DecimalField(required=False)
    Current_Ratio= StringField(required=False)
    Book_Value_Per_Share= DecimalField(required=False)
    Operating_Cash_Flow= StringField(max_length=100,required=False)
    Levered_Free_Cash_Flow= StringField(max_length=100,required=False)
    # --- Trading information ---
    Beta= DecimalField(required=False)
    Week_52_Change= DecimalField(required=False)
    SP500_52_Week_Change= DecimalField(required=False)
    Week_52_High= DecimalField(required=False)
    Week_52_Low= DecimalField(required=False)
    Day_50_Moving_Average= DecimalField(required=False)
    Day_200_Moving_Average= DecimalField(required=False)
    Avg_Vol_3_month= DecimalField(max_length=100,required=False)
    Avg_Vol_10_day= DecimalField(max_length=100,required=False)
    # --- Share statistics ---
    Shares_Outstanding= DecimalField(max_length=100,required=False)
    Float= DecimalField(max_length=100,required=False)
    Held_by_Insiders= StringField(required=False)
    Held_by_Institutions= StringField(required=False)
    Shares_Short= StringField(max_length=100,required=False)
    Short_Ratio= StringField(required=False)
    Short_of_Float= StringField(required=False)
    Shares_Short_prior_month= StringField(max_length=100,required=False)
    # --- Dividends & splits ---
    Forward_Annual_Dividend_Rate= DecimalField(required=False)
    Forward_Annual_Dividend_Yield= DecimalField(required=False)
    Trailing_Annual_Dividend_Yield= DecimalField(required=False)
    Trailing_Annual_Dividend_Rate= DecimalField(required=False)
    Year_5_Average_Dividend_Yield= DecimalField(required=False)
    Payout_Ratio= DecimalField(required=False)
    Dividend_Date= DateTimeField(default=datetime.datetime.utcnow)
    Ex_Dividend_Date= DateTimeField(default=datetime.datetime.utcnow)
    Last_Split_Factor_new_per_old= StringField(max_length=100,required=False)
    Last_Split_Date= DateTimeField(default=datetime.datetime.utcnow)
class nift50Indices(Document):
    """One daily OHLCV price bar for a NIFTY-50 constituent.

    NOTE(review): max_length is a StringField option; on DecimalField it is
    at best ignored — confirm and drop if unused.
    """

    Date = DateTimeField(default=datetime.datetime.utcnow)  # bar date
    Ticker= StringField(max_length=100,required=False)
    Company_Name = StringField(max_length=100,required=False)
    Industry = StringField(max_length=100,required=True)
    Open = DecimalField(max_length=100,required=True)
    High = DecimalField(max_length=100,required=True)
    Low = DecimalField(max_length=100,required=True)
    Close =DecimalField(max_length=100,required=True)
    Adj_Close =DecimalField(required=False)   # split/dividend-adjusted close
    Volume = LongField(required=False)
class nift100Indices(Document):
    """One daily OHLCV price bar for a NIFTY-100 constituent.

    NOTE(review): max_length is a StringField option; on DecimalField it is
    at best ignored — confirm and drop if unused.
    """

    Date = DateTimeField(default=datetime.datetime.utcnow)  # bar date
    Ticker= StringField(max_length=100,required=False)
    Company_Name = StringField(max_length=100,required=False)
    Industry = StringField(max_length=100,required=True)
    Open = DecimalField(max_length=100,required=True)
    High = DecimalField(max_length=100,required=True)
    Low = DecimalField(max_length=100,required=True)
    Close =DecimalField(max_length=100,required=True)
    Adj_Close =DecimalField(required=False)   # split/dividend-adjusted close
    Volume = LongField(required=False)
class nift200Indices(Document):
    """One daily OHLCV price bar for a NIFTY-200 constituent.

    NOTE(review): max_length is a StringField option; on DecimalField it is
    at best ignored — confirm and drop if unused.
    """

    Date = DateTimeField(default=datetime.datetime.utcnow)  # bar date
    Ticker= StringField(max_length=100,required=False)
    Company_Name = StringField(max_length=100,required=False)
    Industry = StringField(max_length=100,required=True)
    Open = DecimalField(max_length=100,required=True)
    High = DecimalField(max_length=100,required=True)
    Low = DecimalField(max_length=100,required=True)
    Close =DecimalField(max_length=100,required=True)
    Adj_Close =DecimalField(required=False)   # split/dividend-adjusted close
    Volume = LongField(required=False)
class nift500Indices(Document):
    """One daily OHLCV price bar for a NIFTY-500 constituent.

    NOTE(review): max_length is a StringField option; on DecimalField it is
    at best ignored — confirm and drop if unused.
    """

    Date = DateTimeField(default=datetime.datetime.utcnow)  # bar date
    Ticker= StringField(max_length=100,required=False)
    Company_Name = StringField(max_length=100,required=False)
    Industry = StringField(max_length=100,required=True)
    Open = DecimalField(max_length=100,required=True)
    High = DecimalField(max_length=100,required=True)
    Low = DecimalField(max_length=100,required=True)
    Close =DecimalField(max_length=100,required=True)
    Adj_Close =DecimalField(required=False)   # split/dividend-adjusted close
    Volume = LongField(required=False)
class nift500Indicesdata(Document):
    """One daily OHLCV price bar for a NIFTY-500 constituent.

    NOTE(review): schema is identical to nift500Indices — it maps to a
    different MongoDB collection by class name only; confirm both
    collections are intentionally separate.
    """

    Date = DateTimeField(default=datetime.datetime.utcnow)  # bar date
    Ticker= StringField(max_length=100,required=False)
    Company_Name = StringField(max_length=100,required=False)
    Industry = StringField(max_length=100,required=True)
    Open = DecimalField(max_length=100,required=True)
    High = DecimalField(max_length=100,required=True)
    Low = DecimalField(max_length=100,required=True)
    Close =DecimalField(max_length=100,required=True)
    Adj_Close =DecimalField(required=False)   # split/dividend-adjusted close
    Volume = LongField(required=False)
class portfolio(Document):
    """A named user portfolio plus the screener criteria used to build it.

    Ticker_List holds the selected symbols; the remaining fields appear to be
    free-form screener filter values (P/E, PEG, margins, ownership, …) kept
    as strings — presumably as entered/scraped, not parsed. Confirm against
    the screener code.
    """

    Portfolio_Name=StringField(max_length=100,required=True)
    Company_Type=StringField(max_length=100,required=False)
    Ticker_List =ListField(StringField())   # selected ticker symbols
    # --- Screener criteria (raw string values) ---
    Trailing_P_E=StringField(max_length=100,required=False)
    Forward_P_E=StringField(max_length=100,required=False)
    Beta=StringField(max_length=100,required=False)
    PEG=StringField(max_length=100,required=False)
    PS=StringField(max_length=100,required=False)
    PB=StringField(max_length=100,required=False)
    Price_Cash=StringField(max_length=100,required=False)
    Ticker=StringField(max_length=100,required=False)
    Price_Free_Cash_Flow=StringField(max_length=100,required=False)
    EPS_growth_this_year=StringField(max_length=100,required=False)
    Return_on_Assets=StringField(max_length=100,required=False)
    Return_on_Equity=StringField(max_length=100,required=False)
    Current_Ratio=StringField(max_length=100,required=False)
    Quick_Ratio=StringField(max_length=100,required=False)
    Lt_Debt_Equity=StringField(max_length=100,required=False)
    Debt_Equity=StringField(max_length=100,required=False)
    Gross_Margin=StringField(max_length=100,required=False)
    Net_Profit_Margin=StringField(max_length=100,required=False)
    Payout_Ratio=StringField(max_length=100,required=False)
    Insider_Ownership=StringField(max_length=100,required=False)
    Institutional_Ownership=StringField(max_length=100,required=False)
class portfolioDetail(Document):
    """Per-portfolio cached analytics payloads.

    The *Data fields are unconstrained StringFields — presumably serialized
    chart/analysis blobs (JSON or image references) produced by the analytics
    layer; confirm the exact format against the code that writes them.
    """

    Portfolio_Name= StringField(max_length=100,required=True)
    Company_Type= StringField(max_length=100,required=False)
    Ticker_List = ListField(StringField())   # portfolio ticker symbols
    # --- Cached analytics blobs ---
    correlationData= StringField(required=False)
    heatMapData= StringField(required=False)
    riskandreturnData= StringField(required=False)
    violationData= StringField(required=False)
    minvariance= StringField(required=False)
    callmapData= StringField(required=False)
    banchmarkData= StringField(required=False)   # (sic) "benchmark" data
    weightplotData= StringField(required=False)
    # --- Latest price bar snapshot ---
    Open = DecimalField(max_length=100,required=False)
    High = DecimalField(max_length=100,required=False)
    Low = DecimalField(max_length=100,required=False)
    Close =DecimalField(max_length=100,required=False)
    Adj_Close =DecimalField(required=False)
    Volume = LongField(required=False)
    Company_Name = StringField(max_length=100,required=False)
    # Untyped list — contents set by the fundamentals loader; confirm shape.
    fundamentalDataList=ListField()
# NOTE(review): module-level instantiation of an unsaved `portfolio`
# document, sitting between two class definitions (indentation was lost in
# this dump, so it could also have been a class attribute) — confirm it is
# intended at module scope and is actually used by callers.
portfolioobj = portfolio()
class niftBanchMarkIndices(Document):
    """One daily OHLCV bar for the benchmark index (class name sic:
    "BanchMark" = benchmark).

    NOTE(review): max_length is a StringField option; on DecimalField it is
    at best ignored — confirm and drop if unused.
    """

    Date = DateTimeField(default=datetime.datetime.utcnow)  # bar date
    Ticker= StringField(max_length=100,required=False)
    Company_Name = StringField(max_length=100,required=False)
    Industry = StringField(max_length=100,required=True)
    Open = DecimalField(max_length=100,required=True)
    High = DecimalField(max_length=100,required=True)
    Low = DecimalField(max_length=100,required=True)
    Close =DecimalField(max_length=100,required=True)
    Adj_Close =DecimalField(required=False)   # split/dividend-adjusted close
    Volume = LongField(required=False)
class portfolioAsset(Document):
    """Aggregate asset value for a portfolio.

    TotalAsset / UnrealizedPS are LongFields, so any fractional currency
    amounts would be truncated — presumably whole-unit values; confirm.
    """

    Portfolio_Name= StringField(max_length=100,required=True)
    Company_Type= StringField(max_length=100,required=False)
    Ticker_List = ListField(StringField())   # portfolio ticker symbols
    TotalAsset = LongField(required=False)     # total portfolio value
    UnrealizedPS = LongField(required=False)   # unrealized profit/loss (P&L?) — confirm meaning
class nifty_500_fundamental_data(Document):
    """Fundamentals snapshot for one NIFTY-500 constituent (numeric-typed).

    Unlike the string-typed *_companies_fundamental_data variants, statistics
    here are DecimalFields and date markers are real DateTimeFields.

    NOTE(review): max_length is a StringField option; on DecimalField it is
    at best ignored (min_value/max_value are the DecimalField bounds) —
    confirm and drop where unused. Applies to every DecimalField below that
    passes max_length.
    """

    # --- Identification ---
    Date = DateTimeField(default=datetime.datetime.utcnow)  # snapshot timestamp
    Ticker= StringField(max_length=100,required=True)
    Company_Name = StringField(max_length=100,required=False)
    Industry = StringField(max_length=100,required=False)
    # --- Valuation measures ---
    Market_Cap= DecimalField(max_length=100,required=False)
    Enterprise_Value= StringField(max_length=100,required=False)
    Trailing_P_E= DecimalField(required=False)
    Forward_P_E= DecimalField(required=False)
    PEG_Ratio= DecimalField(required=False)
    Price_Sales= DecimalField(required=False)
    Price_Book= DecimalField(required=False)
    Enterprise_Value_Revenue= DecimalField(required=False)
    Enterprise_Value_EBITDA= DecimalField(required=False)
    # --- Fiscal period markers ---
    Fiscal_Year_Ends= DateTimeField(default=datetime.datetime.utcnow)
    Most_Recent_Quarter= DateTimeField(default=datetime.datetime.utcnow)
    # --- Profitability & income statement ---
    Profit_Margin= DecimalField(required=False)
    Operating_Margin= DecimalField(required=False)
    Return_on_Assets= DecimalField(required=False)
    Return_on_Equity= DecimalField(required=False)
    Revenue= DecimalField(max_length=100,required=False)
    Revenue_Per_Share= DecimalField(required=False)
    Quarterly_Revenue_Growth= DecimalField(required=False)
    Gross_Profit= DecimalField(max_length=100,required=False)
    EBITDA= DecimalField(max_length=100,required=False)
    Net_Income_Avi_to_Common= DecimalField(max_length=100,required=False)
    Diluted_EPS= DecimalField(required=False)
    Quarterly_Earnings_Growth= DecimalField(required=False)
    # --- Balance sheet & cash flow ---
    Total_Cash= DecimalField(max_length=100,required=False)
    Total_Cash_Per_Share= DecimalField(required=False)
    Total_Debt= DecimalField(max_length=100,required=False)
    Total_Debt_Equity= DecimalField(required=False)
    Current_Ratio= DecimalField(required=False)
    Book_Value_Per_Share= DecimalField(required=False)
    Operating_Cash_Flow= DecimalField(max_length=100,required=False)
    Levered_Free_Cash_Flow= DecimalField(max_length=100,required=False)
    # --- Trading information ---
    Beta= DecimalField(required=False)
    Week_52_Change= DecimalField(required=False)
    SP500_52_Week_Change= DecimalField(required=False)
    Week_52_High= DecimalField(required=False)
    Week_52_Low= DecimalField(required=False)
    Day_50_Moving_Average= DecimalField(required=False)
    Day_200_Moving_Average= DecimalField(required=False)
    Avg_Vol_3_month= DecimalField(max_length=100,required=False)
    Avg_Vol_10_day= DecimalField(max_length=100,required=False)
    # --- Share statistics ---
    Shares_Outstanding= DecimalField(max_length=100,required=False)
    Float= DecimalField(max_length=100,required=False)
    Held_by_Insiders= DecimalField(required=False)
    Held_by_Institutions= DecimalField(required=False)
    Shares_Short= DecimalField(max_length=100,required=False)
    Short_Ratio= DecimalField(required=False)
    Short_of_Float= DecimalField(required=False)
    Shares_Short_prior_month= DecimalField(max_length=100,required=False)
    # --- Dividends & splits ---
    Forward_Annual_Dividend_Rate= DecimalField(required=False)
    Forward_Annual_Dividend_Yield= DecimalField(required=False)
    Trailing_Annual_Dividend_Yield= DecimalField(required=False)
    Trailing_Annual_Dividend_Rate= DecimalField(required=False)
    Year_5_Average_Dividend_Yield= DecimalField(required=False)
    Payout_Ratio= DecimalField(required=False)
    Dividend_Date= DateTimeField(default=datetime.datetime.utcnow)
    Ex_Dividend_Date= DateTimeField(default=datetime.datetime.utcnow)
    Last_Split_Factor_new_per_old= StringField(max_length=100,required=False)
    Last_Split_Date= DateTimeField(default=datetime.datetime.utcnow)
class nifty_200_fundamental_data(Document):
    """Fundamentals snapshot for one NIFTY-200 constituent (numeric-typed).

    Statistics are DecimalFields and date markers are real DateTimeFields,
    unlike the string-typed *_companies_fundamental_data variants.

    NOTE(review): max_length is a StringField option; on DecimalField it is
    at best ignored — confirm and drop where unused (applies to every
    DecimalField below that passes max_length).
    """

    # --- Identification ---
    Date = DateTimeField(default=datetime.datetime.utcnow)  # snapshot timestamp
    Ticker= StringField(max_length=100,required=True)
    Company_Name = StringField(max_length=100,required=False)
    Industry = StringField(max_length=100,required=False)
    # --- Valuation measures ---
    Market_Cap= DecimalField(max_length=100,required=False)
    Enterprise_Value= StringField(max_length=100,required=False)
    Trailing_P_E= DecimalField(required=False)
    Forward_P_E= DecimalField(required=False)
    PEG_Ratio= DecimalField(required=False)
    Price_Sales= DecimalField(required=False)
    Price_Book= DecimalField(required=False)
    Enterprise_Value_Revenue= DecimalField(required=False)
    Enterprise_Value_EBITDA= DecimalField(required=False)
    # --- Fiscal period markers ---
    Fiscal_Year_Ends= DateTimeField(default=datetime.datetime.utcnow)
    Most_Recent_Quarter= DateTimeField(default=datetime.datetime.utcnow)
    # --- Profitability & income statement ---
    Profit_Margin= DecimalField(required=False)
    Operating_Margin= DecimalField(required=False)
    Return_on_Assets= DecimalField(required=False)
    Return_on_Equity= DecimalField(required=False)
    Revenue= DecimalField(max_length=100,required=False)
    Revenue_Per_Share= DecimalField(required=False)
    Quarterly_Revenue_Growth= DecimalField(required=False)
    Gross_Profit= DecimalField(max_length=100,required=False)
    EBITDA= DecimalField(max_length=100,required=False)
    Net_Income_Avi_to_Common= DecimalField(max_length=100,required=False)
    Diluted_EPS= DecimalField(required=False)
    Quarterly_Earnings_Growth= DecimalField(required=False)
    # --- Balance sheet & cash flow ---
    Total_Cash= DecimalField(max_length=100,required=False)
    Total_Cash_Per_Share= DecimalField(required=False)
    Total_Debt= DecimalField(max_length=100,required=False)
    Total_Debt_Equity= DecimalField(required=False)
    Current_Ratio= DecimalField(required=False)
    Book_Value_Per_Share= DecimalField(required=False)
    Operating_Cash_Flow= DecimalField(max_length=100,required=False)
    Levered_Free_Cash_Flow= DecimalField(max_length=100,required=False)
    # --- Trading information ---
    Beta= DecimalField(required=False)
    Week_52_Change= DecimalField(required=False)
    SP500_52_Week_Change= DecimalField(required=False)
    Week_52_High= DecimalField(required=False)
    Week_52_Low= DecimalField(required=False)
    Day_50_Moving_Average= DecimalField(required=False)
    Day_200_Moving_Average= DecimalField(required=False)
    Avg_Vol_3_month= DecimalField(max_length=100,required=False)
    Avg_Vol_10_day= DecimalField(max_length=100,required=False)
    # --- Share statistics ---
    Shares_Outstanding= DecimalField(max_length=100,required=False)
    Float= DecimalField(max_length=100,required=False)
    Held_by_Insiders= DecimalField(required=False)
    Held_by_Institutions= DecimalField(required=False)
    Shares_Short= DecimalField(max_length=100,required=False)
    Short_Ratio= DecimalField(required=False)
    Short_of_Float= DecimalField(required=False)
    Shares_Short_prior_month= DecimalField(max_length=100,required=False)
    # --- Dividends & splits ---
    Forward_Annual_Dividend_Rate= DecimalField(required=False)
    Forward_Annual_Dividend_Yield= DecimalField(required=False)
    Trailing_Annual_Dividend_Yield= DecimalField(required=False)
    Trailing_Annual_Dividend_Rate= DecimalField(required=False)
    Year_5_Average_Dividend_Yield= DecimalField(required=False)
    Payout_Ratio= DecimalField(required=False)
    Dividend_Date= DateTimeField(default=datetime.datetime.utcnow)
    Ex_Dividend_Date= DateTimeField(default=datetime.datetime.utcnow)
    Last_Split_Factor_new_per_old= StringField(max_length=100,required=False)
    Last_Split_Date= DateTimeField(default=datetime.datetime.utcnow)
class nifty_100_fundamental_data(Document):
    """Fundamentals snapshot for one NIFTY-100 constituent (numeric-typed).

    Statistics are DecimalFields and date markers are real DateTimeFields,
    unlike the string-typed *_companies_fundamental_data variants.

    NOTE(review): max_length is a StringField option; on DecimalField it is
    at best ignored — confirm and drop where unused (applies to every
    DecimalField below that passes max_length).
    """

    # --- Identification ---
    Date = DateTimeField(default=datetime.datetime.utcnow)  # snapshot timestamp
    Ticker= StringField(max_length=100,required=True)
    Company_Name = StringField(max_length=100,required=False)
    Industry = StringField(max_length=100,required=False)
    # --- Valuation measures ---
    Market_Cap= DecimalField(max_length=100,required=False)
    Enterprise_Value= StringField(max_length=100,required=False)
    Trailing_P_E= DecimalField(required=False)
    Forward_P_E= DecimalField(required=False)
    PEG_Ratio= DecimalField(required=False)
    Price_Sales= DecimalField(required=False)
    Price_Book= DecimalField(required=False)
    Enterprise_Value_Revenue= DecimalField(required=False)
    Enterprise_Value_EBITDA= DecimalField(required=False)
    # --- Fiscal period markers ---
    Fiscal_Year_Ends= DateTimeField(default=datetime.datetime.utcnow)
    Most_Recent_Quarter= DateTimeField(default=datetime.datetime.utcnow)
    # --- Profitability & income statement ---
    Profit_Margin= DecimalField(required=False)
    Operating_Margin= DecimalField(required=False)
    Return_on_Assets= DecimalField(required=False)
    Return_on_Equity= DecimalField(required=False)
    Revenue= DecimalField(max_length=100,required=False)
    Revenue_Per_Share= DecimalField(required=False)
    Quarterly_Revenue_Growth= DecimalField(required=False)
    Gross_Profit= DecimalField(max_length=100,required=False)
    EBITDA= DecimalField(max_length=100,required=False)
    Net_Income_Avi_to_Common= DecimalField(max_length=100,required=False)
    Diluted_EPS= DecimalField(required=False)
    Quarterly_Earnings_Growth= DecimalField(required=False)
    # --- Balance sheet & cash flow ---
    Total_Cash= DecimalField(max_length=100,required=False)
    Total_Cash_Per_Share= DecimalField(required=False)
    Total_Debt= DecimalField(max_length=100,required=False)
    Total_Debt_Equity= DecimalField(required=False)
    Current_Ratio= DecimalField(required=False)
    Book_Value_Per_Share= DecimalField(required=False)
    Operating_Cash_Flow= DecimalField(max_length=100,required=False)
    Levered_Free_Cash_Flow= DecimalField(max_length=100,required=False)
    # --- Trading information ---
    Beta= DecimalField(required=False)
    Week_52_Change= DecimalField(required=False)
    SP500_52_Week_Change= DecimalField(required=False)
    Week_52_High= DecimalField(required=False)
    Week_52_Low= DecimalField(required=False)
    Day_50_Moving_Average= DecimalField(required=False)
    Day_200_Moving_Average= DecimalField(required=False)
    Avg_Vol_3_month= DecimalField(max_length=100,required=False)
    Avg_Vol_10_day= DecimalField(max_length=100,required=False)
    # --- Share statistics ---
    Shares_Outstanding= DecimalField(max_length=100,required=False)
    Float= DecimalField(max_length=100,required=False)
    Held_by_Insiders= DecimalField(required=False)
    Held_by_Institutions= DecimalField(required=False)
    Shares_Short= DecimalField(max_length=100,required=False)
    Short_Ratio= DecimalField(required=False)
    Short_of_Float= DecimalField(required=False)
    Shares_Short_prior_month= DecimalField(max_length=100,required=False)
    # --- Dividends & splits ---
    Forward_Annual_Dividend_Rate= DecimalField(required=False)
    Forward_Annual_Dividend_Yield= DecimalField(required=False)
    Trailing_Annual_Dividend_Yield= DecimalField(required=False)
    Trailing_Annual_Dividend_Rate= DecimalField(required=False)
    Year_5_Average_Dividend_Yield= DecimalField(required=False)
    Payout_Ratio= DecimalField(required=False)
    Dividend_Date= DateTimeField(default=datetime.datetime.utcnow)
    Ex_Dividend_Date= DateTimeField(default=datetime.datetime.utcnow)
    Last_Split_Factor_new_per_old= StringField(max_length=100,required=False)
    Last_Split_Date= DateTimeField(default=datetime.datetime.utcnow)
class nifty_50_fundamental_data(Document):
    """Fundamentals snapshot for one NIFTY-50 constituent (numeric-typed).

    Statistics are DecimalFields and date markers are real DateTimeFields,
    unlike the string-typed *_companies_fundamental_data variants.

    NOTE(review): max_length is a StringField option; on DecimalField it is
    at best ignored — confirm and drop where unused (applies to every
    DecimalField below that passes max_length).
    """

    # --- Identification ---
    Date = DateTimeField(default=datetime.datetime.utcnow)  # snapshot timestamp
    Ticker= StringField(max_length=100,required=True)
    Company_Name = StringField(max_length=100,required=False)
    Industry = StringField(max_length=100,required=False)
    # --- Valuation measures ---
    Market_Cap= DecimalField(max_length=100,required=False)
    Enterprise_Value= StringField(max_length=100,required=False)
    Trailing_P_E= DecimalField(required=False)
    Forward_P_E= DecimalField(required=False)
    PEG_Ratio= DecimalField(required=False)
    Price_Sales= DecimalField(required=False)
    Price_Book= DecimalField(required=False)
    Enterprise_Value_Revenue= DecimalField(required=False)
    Enterprise_Value_EBITDA= DecimalField(required=False)
    # --- Fiscal period markers ---
    Fiscal_Year_Ends= DateTimeField(default=datetime.datetime.utcnow)
    Most_Recent_Quarter= DateTimeField(default=datetime.datetime.utcnow)
    # --- Profitability & income statement ---
    Profit_Margin= DecimalField(required=False)
    Operating_Margin= DecimalField(required=False)
    Return_on_Assets= DecimalField(required=False)
    Return_on_Equity= DecimalField(required=False)
    Revenue= DecimalField(max_length=100,required=False)
    Revenue_Per_Share= DecimalField(required=False)
    Quarterly_Revenue_Growth= DecimalField(required=False)
    Gross_Profit= DecimalField(max_length=100,required=False)
    EBITDA= DecimalField(max_length=100,required=False)
    Net_Income_Avi_to_Common= DecimalField(max_length=100,required=False)
    Diluted_EPS= DecimalField(required=False)
    Quarterly_Earnings_Growth= DecimalField(required=False)
    # --- Balance sheet & cash flow ---
    Total_Cash= DecimalField(max_length=100,required=False)
    Total_Cash_Per_Share= DecimalField(required=False)
    Total_Debt= DecimalField(max_length=100,required=False)
    Total_Debt_Equity= DecimalField(required=False)
    Current_Ratio= DecimalField(required=False)
    Book_Value_Per_Share= DecimalField(required=False)
    Operating_Cash_Flow= DecimalField(max_length=100,required=False)
    Levered_Free_Cash_Flow= DecimalField(max_length=100,required=False)
    # --- Trading information ---
    Beta= DecimalField(required=False)
    Week_52_Change= DecimalField(required=False)
    SP500_52_Week_Change= DecimalField(required=False)
    Week_52_High= DecimalField(required=False)
    Week_52_Low= DecimalField(required=False)
    Day_50_Moving_Average= DecimalField(required=False)
    Day_200_Moving_Average= DecimalField(required=False)
    Avg_Vol_3_month= DecimalField(max_length=100,required=False)
    Avg_Vol_10_day= DecimalField(max_length=100,required=False)
    # --- Share statistics ---
    Shares_Outstanding= DecimalField(max_length=100,required=False)
    Float= DecimalField(max_length=100,required=False)
    Held_by_Insiders= DecimalField(required=False)
    Held_by_Institutions= DecimalField(required=False)
    Shares_Short= DecimalField(max_length=100,required=False)
    Short_Ratio= DecimalField(required=False)
    Short_of_Float= DecimalField(required=False)
    Shares_Short_prior_month= DecimalField(max_length=100,required=False)
    # --- Dividends & splits ---
    Forward_Annual_Dividend_Rate= DecimalField(required=False)
    Forward_Annual_Dividend_Yield= DecimalField(required=False)
    Trailing_Annual_Dividend_Yield= DecimalField(required=False)
    Trailing_Annual_Dividend_Rate= DecimalField(required=False)
    Year_5_Average_Dividend_Yield= DecimalField(required=False)
    Payout_Ratio= DecimalField(required=False)
    Dividend_Date= DateTimeField(default=datetime.datetime.utcnow)
    Ex_Dividend_Date= DateTimeField(default=datetime.datetime.utcnow)
    Last_Split_Factor_new_per_old= StringField(max_length=100,required=False)
    Last_Split_Date= DateTimeField(default=datetime.datetime.utcnow)
class strategy(Document):
    # One stored screening result: which strategy produced the pick, plus a
    # snapshot of the valuation metrics at that time.
    Date = DateTimeField(default=datetime.datetime.utcnow)
    strategy= StringField(max_length=100,required=True)
    Returns = StringField(max_length=100,required=False)
    Industry = StringField(max_length=100,required=False)
    # NOTE(review): DecimalField does not document a max_length option the way
    # StringField does — presumably ignored; confirm against the installed
    # mongoengine version.
    Market_Cap= DecimalField(max_length=100,required=False)
    Enterprise_Value= StringField(max_length=100,required=False)
    Trailing_P_E= DecimalField(required=False)
    Forward_P_E= DecimalField(required=False)
    PEG_Ratio= DecimalField(required=False)
    Price_Sales= DecimalField(required=False)
    Price_Book= DecimalField(required=False)
class mutualfund_company(Document):
    # Catalog entry linking a fund house to one of its schemes.
    Company_Name = StringField(max_length=100,required=False)
    Scheme_Name = StringField(max_length=100,required=False)
    Scheme_Code= StringField(max_length=100,required=True)
class mutualfund(Document):
    # One NAV observation for a scheme on a given date.
    Company_Name = StringField(max_length=100,required=False)
    Scheme_Name = StringField(max_length=100,required=False)
    Scheme_Code= StringField(max_length=100,required=True)
    NAV= DecimalField(max_length=100,required=False)
    # Date is stored as a string here (unlike the DateTimeField used elsewhere
    # in this module) — presumably the upstream feed supplies formatted text.
    Date= StringField(max_length=100,required=True)
class scheme(Document):
    # Minimal scheme lookup record: human-readable name keyed by scheme code.
    Scheme_Name = StringField(max_length=100,required=False)
    Scheme_Code= StringField(max_length=100,required=True)
class mutualfunddetail(Document):
    # Enriched scheme record: latest NAV, moving averages, and a buy signal.
    Company_Name = StringField(max_length=100,required=False)
    Scheme_Name = StringField(max_length=100,required=False)
    Scheme_Code= StringField(max_length=100,required=True)
    NAV= DecimalField(max_length=100,required=False)
    Day_50_Moving_Average= DecimalField(required=False)
    Day_200_Moving_Average= DecimalField(required=False)
    # Buy is a string flag rather than a BooleanField — confirm the values the
    # writer stores before changing the type.
    Buy= StringField(max_length=100,required=True)
    # NOTE(review): max_length=100 may be too short for real image URLs.
    ImageUrl= StringField(max_length=100,required=True)
|
[
"ayush.op.sharma@gmail.com"
] |
ayush.op.sharma@gmail.com
|
9e52bf65e324fa883eb5835af1d5480b35003237
|
fc1c547e7ebf74b90ba53a991dafd8f11e4294a7
|
/july/urls.py
|
4bdcbcc73cf9bea58f2956666e5fc25f0de396aa
|
[] |
no_license
|
davydova20/tourism-system
|
3f187d69337fa05f7e3f4f8051985b14ed5b2739
|
ebcb8731ece72ecfe40a54a288e09031c98dcac7
|
refs/heads/master
| 2020-12-07T04:39:29.074190
| 2020-01-08T19:16:55
| 2020-01-08T19:16:55
| 232,634,537
| 0
| 0
| null | 2020-01-08T18:47:36
| 2020-01-08T18:47:35
| null |
UTF-8
|
Python
| false
| false
| 111
|
py
|
from django.urls import path
from . import views
# URL routes for this app: the root path ('') is served by the `julia` view.
urlpatterns = [
    path('', views.julia, name='julia'),
]
|
[
"julykopysova97@gmail.com"
] |
julykopysova97@gmail.com
|
2ab2ae183b66faad95089726d243c4cd96777b0e
|
2f6032d7f191b71f93f4bc178a2e753870e54ffd
|
/chapter4-流程控制/demo13-continue.py
|
d74b732ec4fafb4e2ca3aa1838b4beae22022253
|
[] |
no_license
|
mapleinsss/python-basic
|
35606cbf4acbe49cbba8cf84caab2213c53da8a1
|
4d264694e3eda1d000626f4a47805cacbea12086
|
refs/heads/master
| 2023-01-14T01:36:38.673143
| 2020-11-24T15:36:58
| 2020-11-24T15:36:58
| 309,705,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
# Demonstrates `continue`: the second print is skipped for the iteration
# where the loop variable equals 1.
for numero in range(3):
    print(numero)
    if numero == 1:
        # `continue` abandons the rest of this iteration, so the print
        # below never runs when numero == 1.
        continue
    print('continue 后的输出语句')
|
[
"mapleins@aliyun.com"
] |
mapleins@aliyun.com
|
5adaee347242dca5dda438a926c84ca28de4630d
|
04b71b3b8d94ed98e1ec91a8f1ebde18a4e2726e
|
/bin/Helloworld.py
|
f9c3f3e57cf1d9e7b0822753ef9100bffe0b0e85
|
[] |
no_license
|
taborda11/IntroductionGithub
|
4fb234b7426c8018c634168449f1c901323b8014
|
3c85782db7f54e270acb303f5d1cf0f40cb0b505
|
refs/heads/master
| 2020-07-20T09:05:17.790670
| 2019-10-03T14:19:26
| 2019-10-03T14:19:26
| 206,614,073
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22
|
py
|
# Print a greeting to standard output.
print("Hello world.")
|
[
"bmtta@iscte-iul.pt"
] |
bmtta@iscte-iul.pt
|
b15d5da8d3f41a7408acdc942325aac56136b485
|
ccec675a0a4a42981a8879a58f2219d99dc7ac7a
|
/tmuxipy/__init__.py
|
356c47eff514fda92477aab535486f97477a4af9
|
[
"BSD-3-Clause"
] |
permissive
|
lhamu/tmuxipy
|
92425da027c0832dcb0302d19157845755a8dd6a
|
1854ea8e0559f564f6941eba4d6ea2e600e8bf0d
|
refs/heads/master
| 2021-01-21T05:36:55.466152
| 2015-04-10T06:34:06
| 2015-04-10T06:38:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 110
|
py
|
# -*- coding: utf-8 -*-
# Package metadata for tmuxipy, read by setup tooling and introspection.
__author__ = 'Milind Shakya'
__email__ = 'sh.milind@gmail.com'
__version__ = '0.1.8'
|
[
"milind.shakya@dealertrack.com"
] |
milind.shakya@dealertrack.com
|
40e79bf69bcab8f1c172648e2483b06d596ee72a
|
d82eddaed09c170b9920c29fa52afb2188e817eb
|
/layers/qrnn.py
|
1a3a1660cddb6767f1eac5fec947c653c81ba957
|
[] |
no_license
|
liuweiping2020/QuasiRNN-DReLU
|
ceac60ebaccf8d56e48ce1b1181fc4ad8a24de02
|
58720cd662dc12ed10c834f6314517768c89d438
|
refs/heads/master
| 2020-03-23T12:52:09.089009
| 2017-11-10T14:14:00
| 2017-11-10T14:14:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,197
|
py
|
__author__ = 'frederic'
import theano
import theano.tensor as T
import lasagne
from lasagne import nonlinearities
from lasagne import init
from lasagne.utils import unroll_scan
from lasagne.layers.base import MergeLayer, Layer
import initializers
import custom_layers
def neg_rectify(x):
    """Negated ReLU: 0 where x <= 0, -x where x > 0 (-1.0 * relu(x))."""
    positive_part = T.nnet.relu(x)
    return -1.0 * positive_part
def rectify(x):
    """Standard ReLU nonlinearity, delegated to Theano's implementation."""
    activated = T.nnet.relu(x)
    return activated
class ELU(object):
    """Exponential linear unit, optionally negated.

    For x > 0 returns x, otherwise alpha * (exp(x) - 1); when ``neg`` is
    True the whole output is multiplied by -1.
    """

    def __init__(self, alpha=1.0, neg=False):
        self.alpha = alpha
        self.neg = neg

    def __call__(self, x):
        sign = 1.0
        if self.neg:
            sign = -1.0
        elu_value = theano.tensor.switch(x > 0, x, self.alpha * theano.tensor.expm1(x))
        return sign * elu_value
class QRNNLayer(MergeLayer):
    """Quasi-recurrent pooling layer.

    Combines a pre-computed (already gated) input sequence with a
    pre-computed gate sequence through the element-wise recurrence
    ``h_t = x_t + gate_t * h_{t-1}`` (see ``step`` below).  Inherits from
    MergeLayer because it can take several inputs: the main input, the
    gate layer, and optionally a cell-init layer and a mask layer.
    """

    def __init__(self, incoming, gate_pos, num_units,
                 seq_len=0,
                 cell_init=init.Constant(0.),
                 backwards=False,
                 learn_init=False,
                 gradient_steps=-1,
                 grad_clipping=0,
                 unroll_scan=False,
                 only_return_final=False,
                 mask_input=None,
                 **kwargs):
        # This layer inherits from a MergeLayer, because it can have four
        # inputs - the layer input, the mask, the initial hidden state and the
        # inital cell state. We will just provide the layer input as incomings,
        # unless a mask input, inital hidden state or initial cell state was
        # provided.
        incomings = [incoming, gate_pos]
        self.cell_init_incoming_index = -1
        self.mask_incoming_index = -1
        if isinstance(cell_init, Layer):
            incomings.append(cell_init)
            self.cell_init_incoming_index = len(incomings) - 1
        if mask_input is not None:
            incomings.append(mask_input)
            self.mask_incoming_index = len(incomings) - 1
        # Initialize parent layer
        super(QRNNLayer, self).__init__(incomings, **kwargs)
        self.learn_init = learn_init
        self.num_units = num_units
        self.backwards = backwards
        self.gradient_steps = gradient_steps
        self.grad_clipping = grad_clipping
        # NOTE(review): the parameter `unroll_scan` shadows the imported
        # lasagne.utils.unroll_scan function within this method's scope.
        self.unroll_scan = unroll_scan
        self.only_return_final = only_return_final
        self.seq_len = seq_len
        if unroll_scan and gradient_steps != -1:
            raise ValueError(
                "Gradient steps must be -1 when unroll_scan is true.")
        # Retrieve the dimensionality of the incoming layer
        input_shape = self.input_shapes[0]
        if unroll_scan and input_shape[1] is None:
            raise ValueError("Input sequence length cannot be specified as "
                             "None when unroll_scan is True")
        # Setup initial values for the cell and the hidden units
        if isinstance(cell_init, Layer):
            self.cell_init = cell_init
        else:
            self.cell_init = self.add_param(
                cell_init, (1, num_units), name="cell_init",
                trainable=learn_init, regularizable=False)

    def get_output_shape_for(self, input_shapes):
        # The shape of the input to this layer will be the first element
        # of input_shapes, whether or not a mask input is being used.
        input_shape = input_shapes[0]
        # When only_return_final is true, the second (sequence step) dimension
        # will be flattened
        if self.only_return_final:
            return input_shape[0], self.num_units
        else:
            # Two outputs: the cell sequence and the gated-increment sequence,
            # both of shape (batch, time, units).
            return (input_shape[0], input_shape[2], self.num_units), (input_shape[0], input_shape[2], self.num_units)

    def get_output_for(self, inputs, **kwargs):
        """
        Compute this layer's output function given a symbolic input variable
        Parameters
        ----------
        inputs : list of theano.TensorType
            `inputs[0]` should always be the symbolic input variable. When
            this layer has a mask input (i.e. was instantiated with
            `mask_input != None`, indicating that the lengths of sequences in
            each batch vary), `inputs` should have length 2, where `inputs[1]`
            is the `mask`. The `mask` should be supplied as a Theano variable
            denoting whether each time step in each sequence in the batch is
            part of the sequence or not. `mask` should be a matrix of shape
            ``(n_batch, n_time_steps)`` where ``mask[i, j] = 1`` when ``j <=
            (length of sequence i)`` and ``mask[i, j] = 0`` when ``j > (length
            of sequence i)``. When the hidden state of this layer is to be
            pre-filled (i.e. was set to a :class:`Layer` instance) `inputs`
            should have length at least 2, and `inputs[-1]` is the hidden state
            to prefill with. When the cell state of this layer is to be
            pre-filled (i.e. was set to a :class:`Layer` instance) `inputs`
            should have length at least 2, and `inputs[-1]` is the hidden state
            to prefill with. When both the cell state and the hidden state are
            being pre-filled `inputs[-2]` is the hidden state, while
            `inputs[-1]` is the cell state.
        Returns
        -------
        layer_output : theano.TensorType
            Symbolic output variable.
        """
        # Retrieve the layer input
        input = inputs[0]
        gate = inputs[1]
        cell_init = None
        if self.cell_init_incoming_index > 0:
            cell_init = inputs[self.cell_init_incoming_index]
        mask = None
        if self.mask_incoming_index > 0:
            mask = inputs[self.mask_incoming_index]
        # Because scan iterates over the first dimension we dimshuffle to
        # (n_time_steps, n_batch, n_features)
        input = input[:, :, :, 0].dimshuffle(2, 0, 1)
        gate = gate[:, :, :, 0].dimshuffle(2, 0, 1)
        seq_len, num_batch, _ = input.shape
        # Stack input weight matrices into a (num_inputs, 4*num_units)
        # matrix, which speeds up computation
        # We define a slicing function that extract the input to each LSTM gate
        # NOTE(review): slice_w is defined but never used in this method.
        def slice_w(x, n):
            return x[:, n * self.num_units:(n + 1) * self.num_units]
        # Create single recurrent computation step function
        # input_n is the n'th vector of the input
        def step(input_n, gate_n, hid_previous, *args):
            hid = input_n
            # temp=rectify( gate_pos_n*rectify(hid_previous) )
            # temp+=neg_rectify_neg( gate_neg_n*neg_rectify_neg(hid_previous) )
            # temp = T.nnet.hard_sigmoid(gate_n)*hid_previous
            temp = gate_n * hid_previous
            hid += temp
            return hid, temp
        def step_masked(input_n, gate_n, mask_n, hid_previous, *args):
            hid, temp = step(input_n, gate_n, hid_previous, *args)
            # Skip over any input with mask 0 by copying the previous
            # hidden state; proceed normally for any input with mask 1.
            hid = T.switch(mask_n, hid, hid_previous)
            return hid, temp
        if mask is not None:
            # mask is given as (batch_size, seq_len). Because scan iterates
            # over first dimension, we dimshuffle to (seq_len, batch_size) and
            # add a broadcastable dimension
            mask = mask.dimshuffle(1, 0, 'x')
            sequences = [input, gate, mask]
            step_fun = step_masked
        else:
            sequences = [input, gate]
            step_fun = step
        ones = T.ones((num_batch, 1))
        if not isinstance(self.cell_init, Layer):
            # Dot against a 1s vector to repeat to shape (num_batch, num_units)
            cell_init = T.dot(ones, self.cell_init)
        outputs_info = [cell_init, None]
        if self.unroll_scan:
            # Retrieve the dimensionality of the incoming layer
            input_shape = self.input_shapes[0]
            # Explicitly unroll the recurrence instead of using scan
            outs = unroll_scan(
                fn=step_fun,
                sequences=sequences,
                non_sequences=[],
                outputs_info=outputs_info,
                go_backwards=self.backwards,
                n_steps=self.seq_len)
        else:
            # Scan op iterates over first dimension of input and repeatedly
            # applies the step function
            # NOTE(review): strict=True with an empty non_sequences list —
            # confirm scan can resolve all referenced variables in strict mode.
            outs = theano.scan(
                fn=step_fun,
                sequences=sequences,
                outputs_info=outputs_info,
                go_backwards=self.backwards,
                # truncate_gradient=self.gradient_steps,
                strict=True)[0]
        if self.only_return_final:
            # NOTE(review): outs[-1] is the last of the two scan outputs, not
            # the last time step — verify this is intentional.
            return outs[-1]
        # dimshuffle back to (n_batch, n_time_steps, n_features))
        cell_out = outs[0].dimshuffle(1, 0, 2)
        temp_out = outs[1].dimshuffle(1, 0, 2)
        # if scan is backward reverse the output
        if self.backwards:
            cell_out = cell_out[:, ::-1]
        return cell_out, temp_out
def QRNNBlock(l_in, paras, i, mask, hids):
    """Build one quasi-recurrent block: convolutional gates feeding QRNNLayer.

    Parameters:
        l_in  - incoming lasagne layer (embeddings or previous block output)
        paras - configuration dict (k, rec_num_units, rnn_type, pooling, ...)
        i     - block index (0 = first block, fed by embeddings)
        mask  - optional mask layer for variable-length sequences
        hids  - previous hidden/cell states, or None for the single-sentence
                case (then the input is padded here instead)
    Returns (pooled_output_layer, raw_cell_layer).

    NOTE(review): W/b initializers and activations are obtained via eval() on
    config strings — never feed this function an untrusted config.
    """
    if i == 0:
        input_size = paras["embedding_size"]
    else:
        if "dense" in paras and paras["dense"]:
            # Dense connectivity: concatenated embeddings + all previous blocks.
            input_size = paras["embedding_size"] + i * paras["rec_num_units"]
        else:
            input_size = paras["rec_num_units"]
    if hids is None:
        # if no hids are passed, we are in the single sentence case and need to pad the input ourself
        l_in = lasagne.layers.PadLayer(l_in, ((paras["k"][i] - 1, 0), (0, 0)), batch_ndim=1)
    l_emb_reshaped = lasagne.layers.ReshapeLayer(l_in, (
        paras["batch_size"], 1, -1, input_size))
    # Forget-gate convolution over a window of k timesteps.
    l_conv_gates_rec = lasagne.layers.Conv2DLayer(l_emb_reshaped, paras["rec_num_units"],
                                                  (paras["k"][i], input_size), pad="valid", W=eval(paras["init_W"]),
                                                  b=eval(paras["init_b"]), untie_biases=paras["untie_biases"],
                                                  nonlinearity=nonlinearities.identity, name="forget_gate")
    if paras["batch_norm"] == 1:
        l_conv_gates_rec = lasagne.layers.batch_norm(l_conv_gates_rec, gamma=lasagne.init.Constant(0.1))
    l_conv_gates_rec_hidden = lasagne.layers.NonlinearityLayer(l_conv_gates_rec, nonlinearity=eval(paras["gate_act"]))
    # Input gate is the complement (1 - f) of the forget gate.
    l_conv_gates_rec_input = lasagne.layers.NonlinearityLayer(l_conv_gates_rec_hidden, nonlinearity=(lambda x: 1.0 - x))
    if paras["rnn_type"] == "qrnn":
        l_conv_input = lasagne.layers.Conv2DLayer(l_emb_reshaped, paras["rec_num_units"], (paras["k"][i], input_size),
                                                  pad="valid", W=eval(paras["init_W"]), b=eval(paras["init_b"]),
                                                  untie_biases=paras["untie_biases"],
                                                  nonlinearity=eval(paras["input_act"]), name="input_1")
        if paras["batch_norm"] > 0:
            l_conv_input = lasagne.layers.batch_norm(l_conv_input, gamma=lasagne.init.Constant(0.1))
    elif paras["rnn_type"] == "drelu" or paras["rnn_type"] == "delu":
        # Dual-activation variants: positive and negative branches summed.
        if paras["rnn_type"] == "delu":
            act1 = ELU(paras["elu_alpha"])
            act2 = ELU(paras["elu_alpha"], neg=True)
        else:
            act1 = rectify
            act2 = neg_rectify
        l_conv_input1 = lasagne.layers.Conv2DLayer(l_emb_reshaped, paras["rec_num_units"], (paras["k"][i], input_size),
                                                   pad="valid", W=eval(paras["init_W"]), b=eval(paras["init_b"]),
                                                   untie_biases=paras["untie_biases"], nonlinearity=act1,
                                                   name="input_1")
        l_conv_input2 = lasagne.layers.Conv2DLayer(l_emb_reshaped, paras["rec_num_units"], (paras["k"][i], input_size),
                                                   pad="valid", W=eval(paras["init_W"]), b=eval(paras["init_b"]),
                                                   untie_biases=paras["untie_biases"], nonlinearity=act2,
                                                   name="input_2")
        if paras["batch_norm"] > 0:
            l_conv_input1 = lasagne.layers.batch_norm(l_conv_input1, gamma=lasagne.init.Constant(0.5))
            l_conv_input2 = lasagne.layers.batch_norm(l_conv_input2, gamma=lasagne.init.Constant(0.5))
        # Z
        l_conv_input = lasagne.layers.ElemwiseSumLayer([l_conv_input1, l_conv_input2], name="sum_input")
    # Apply the input gate to the candidate input.
    l_conv_input_gated = lasagne.layers.ElemwiseMergeLayer([l_conv_input, l_conv_gates_rec_input], T.mul)
    if hids is None:
        cell_init = lasagne.init.Constant(0)
    else:
        # Continue from the cell state of the matching block in the carry-over.
        cell_init = lasagne.layers.InputLayer((paras["batch_size"], paras["rec_num_units"]),
                                              input_var=hids[2 * i + 1])
    l_rec1_cells = QRNNLayer(
        l_conv_input_gated, l_conv_gates_rec_hidden,
        num_units=paras["rec_num_units"],
        learn_init=False,
        mask_input=mask,
        cell_init=cell_init
    )
    l_rec_1 = custom_layers.SelectOutputLayer(l_rec1_cells, 0)
    if paras["pooling"] == "fo":
        # fo-pooling: an extra output gate modulates the cell sequence.
        init_f = eval(paras["init_W"])
        l_conv_gates_out = lasagne.layers.Conv2DLayer(l_emb_reshaped, paras["rec_num_units"],
                                                      (paras["k"][i], input_size), pad="valid", W=init_f,
                                                      b=eval(paras["init_b"]), untie_biases=paras["untie_biases"],
                                                      nonlinearity=nonlinearities.identity, name="out_gate")
        if paras["batch_norm"] == 1:
            l_conv_gates_out = lasagne.layers.batch_norm(l_conv_gates_out, gamma=lasagne.init.Constant(0.1))
        l_conv_gates_out = lasagne.layers.NonlinearityLayer(l_conv_gates_out, nonlinearity=eval(paras["gate_act"]))
        l_conv_gates_out = lasagne.layers.SliceLayer(l_conv_gates_out, 0, axis=3)
        l_conv_gates_out = lasagne.layers.DimshuffleLayer(l_conv_gates_out, (0, 2, 1), name="out_neg_gate")
        l_rec1_hids = lasagne.layers.ElemwiseMergeLayer([l_rec_1, l_conv_gates_out], T.mul)
    else:
        l_rec1_hids = l_rec_1
    return l_rec1_hids, l_rec_1
|
[
"frederic.godin@ugent.be"
] |
frederic.godin@ugent.be
|
0f810d768b3594c5556ea275d92b79c351f5a74d
|
0aa66814c5bb5bd480d3661f3312e565b108e697
|
/tests/base.py
|
65040b15073360a6df54f7ca466900d12d62b1a8
|
[
"MIT"
] |
permissive
|
thestd/schedule-DB
|
c9a51415ced9a05a30592ede731105409ab99e9f
|
2c3cb36f700a38083f01a3d819856ac8363ca96e
|
refs/heads/master
| 2022-12-11T04:21:11.996493
| 2019-08-16T08:10:53
| 2019-08-16T08:10:53
| 190,074,479
| 0
| 2
|
MIT
| 2022-12-08T05:50:24
| 2019-06-03T20:07:13
|
Python
|
UTF-8
|
Python
| false
| false
| 562
|
py
|
import uuid
from tornado.ioloop import IOLoop
from tornado.options import options
from tornado.testing import AsyncHTTPTestCase
from app.main import make_app
class TornadoMotorAsyncTest(AsyncHTTPTestCase):
    """Base class for async HTTP tests backed by a throwaway Motor database.

    Every test gets a uniquely named database (uuid4) so tests cannot see
    each other's state; the database is dropped again on teardown.
    """

    def setUp(self) -> None:
        # Fresh database name per test so no state leaks between tests.
        options.db_name = str(uuid.uuid4())
        super().setUp()

    def tearDown(self) -> None:
        self.db.client.drop_database(options.db_name)
        # BUG FIX: the original never called super().tearDown(), so the HTTP
        # server and IOLoop set up by AsyncHTTPTestCase.setUp() were leaked
        # across tests.
        super().tearDown()

    def get_new_ioloop(self):
        # Reuse the running IOLoop instead of creating one per test.
        return IOLoop.current()

    def get_app(self):
        app = make_app()
        # Keep a handle on the app's database so tearDown can drop it.
        self.db = app.settings['db']
        return app
|
[
"bbwgd77@gmail.com"
] |
bbwgd77@gmail.com
|
6d09201c553cbb1850ef991ae9363a133d3e0dc8
|
2aace9bb170363e181eb7520e93def25f38dbe5c
|
/build/idea-sandbox/system/python_stubs/cache/01a71dbfca71dcbc1da38f12642ba06caa9ede39cd80962b40b44cf036a6ba31/_cython_0_29_16.py
|
f01dc288889a49d898470e417ec29b2e41220673
|
[] |
no_license
|
qkpqkp/PlagCheck
|
13cb66fd2b2caa2451690bb72a2634bdaa07f1e6
|
d229904674a5a6e46738179c7494488ca930045e
|
refs/heads/master
| 2023-05-28T15:06:08.723143
| 2021-06-09T05:36:34
| 2021-06-09T05:36:34
| 375,235,940
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,267
|
py
|
# encoding: utf-8
# module _cython_0_29_16
# from C:\Users\Doly\Anaconda3\lib\site-packages\numpy\random\_pcg64.cp37-win_amd64.pyd
# by generator 1.147
# no doc
# no imports
# Variables with simple values
__loader__ = None
__spec__ = None
# no functions
# classes
class cython_function_or_method(object):
    # Auto-generated IDE stub mirroring Cython's compiled function type; the
    # bodies are placeholders — the real implementations live in the binary
    # extension module.
    def __call__(self, *args, **kwargs): # real signature unknown
        """ Call self as a function. """
        pass

    def __get__(self, *args, **kwargs): # real signature unknown
        """ Return an attribute of instance, which is of type owner. """
        pass

    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    def __reduce__(self, *args, **kwargs): # real signature unknown
        pass

    def __repr__(self, *args, **kwargs): # real signature unknown
        """ Return repr(self). """
        pass

    # Descriptor placeholders for the attributes the real C type exposes.
    func_closure = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    func_code = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    func_defaults = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    func_dict = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    func_doc = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    func_globals = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    func_name = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    __annotations__ = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    __closure__ = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    __code__ = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    __defaults__ = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    __globals__ = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    __kwdefaults__ = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    __self__ = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    # The generator records the real mappingproxy value in the comment below.
    __dict__ = None  # (!) real value is "mappingproxy({'__repr__': <slot wrapper '__repr__' of 'cython_function_or_method' objects>, '__call__': <slot wrapper '__call__' of 'cython_function_or_method' objects>, '__get__': <slot wrapper '__get__' of 'cython_function_or_method' objects>, '__reduce__': <method '__reduce__' of 'cython_function_or_method' objects>, '__module__': <member '__module__' of 'cython_function_or_method' objects>, 'func_doc': <attribute 'func_doc' of 'cython_function_or_method' objects>, '__doc__': <attribute '__doc__' of 'cython_function_or_method' objects>, 'func_name': <attribute 'func_name' of 'cython_function_or_method' objects>, '__name__': <attribute '__name__' of 'cython_function_or_method' objects>, '__qualname__': <attribute '__qualname__' of 'cython_function_or_method' objects>, '__self__': <attribute '__self__' of 'cython_function_or_method' objects>, 'func_dict': <attribute 'func_dict' of 'cython_function_or_method' objects>, '__dict__': <attribute '__dict__' of 'cython_function_or_method' objects>, 'func_globals': <attribute 'func_globals' of 'cython_function_or_method' objects>, '__globals__': <attribute '__globals__' of 'cython_function_or_method' objects>, 'func_closure': <attribute 'func_closure' of 'cython_function_or_method' objects>, '__closure__': <attribute '__closure__' of 'cython_function_or_method' objects>, 'func_code': <attribute 'func_code' of 'cython_function_or_method' objects>, '__code__': <attribute '__code__' of 'cython_function_or_method' objects>, 'func_defaults': <attribute 'func_defaults' of 'cython_function_or_method' objects>, '__defaults__': <attribute '__defaults__' of 'cython_function_or_method' objects>, '__kwdefaults__': <attribute '__kwdefaults__' of 'cython_function_or_method' objects>, '__annotations__': <attribute '__annotations__' of 'cython_function_or_method' objects>})"
    __name__ = 'cython_function_or_method'
    __qualname__ = 'cython_function_or_method'
|
[
"qinkunpeng2015@163.com"
] |
qinkunpeng2015@163.com
|
117ffea1bc56a4ac8e267359c59cc87a546975bb
|
4f122df5a41d8525190b780e88b899fbe75c0609
|
/Weapon.py
|
f616bfe97fb3eb5863f8b4cc488bd1693639baaf
|
[] |
no_license
|
BlackMagi018/Zork
|
990cda14ed87e1fc278b9fac6d0716953cda5491
|
baa9847dad0a8776c81c191cc48038100058312b
|
refs/heads/master
| 2021-08-22T07:58:31.782008
| 2017-11-29T18:03:30
| 2017-11-29T18:03:30
| 111,501,315
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 420
|
py
|
class Weapon:
    """A candy weapon — the player's last line of defense against monsters."""

    def __init__(self, text, modifier, ammo):
        """Create a weapon.

        Parameters:
            text - weapon name
            modifier - attack modifier applied when the weapon is used
            ammo - number of uses remaining in the candy
        """
        self.name, self.mod, self.ammo = text, modifier, ammo
|
[
"loganjer@mail.gvsu.edu"
] |
loganjer@mail.gvsu.edu
|
8d1a355f58b83ac99d784569eed0d1537f200317
|
46fc4a4e293242f2d16bd7cb52c533d0e808870b
|
/SCRIPTS_ANALISIS/Script_Multi_Eliminate/Eliminate_Polygons_MultiExtent_Main_v3.py
|
46dafb1e225b30cc052a2b9d9d36a2671e99ac44
|
[] |
no_license
|
UpraAnalisis/Herramientas_Optimizadas
|
3ffb2316f41191382a960cf9b9cac20cb49c0ff0
|
36e24d4b23184cd75187421dc454ffdc8cc141d4
|
refs/heads/master
| 2022-11-27T15:22:13.594767
| 2022-11-18T13:03:11
| 2022-11-18T13:03:11
| 148,654,618
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,608
|
py
|
# -*- coding: utf-8 -*-
import os
import arcpy
from arcpy import env
import subprocess
import inspect
import random ,string, shutil
# Environment and global variables.
dic={" ":"__","=":"igual", "<":"menor", ">": "mayor"} # mapping that encodes special query characters as words so they survive the command line
comandos=[] # list of command strings to be executed by the auxiliary script
# Mapping that encodes accented characters (UTF-8 byte sequences) with ASCII
# placeholders so paths/queries survive being passed through the shell.
dic_acentos={" ":"---","\xc3\xa1":"***a***","\xc3\xa9":"***e***", "\xc3\xad":"***i***",
"\xc3\xb3": "***o***","\xc3\xba": "***u***","\xc3\xb1": "***n***","\xc3\x81":"***A***","\xc3\x89":"***E***",
"\xc3\x8d":"***I***", "\xc3\x93": "***O***","***\xc3\x9a***":"Ú","\xc3\x91": "***N***"}
#========= Tool parameters (arcpy GetParameterAsText) =========#
fcEntrada =arcpy.Describe(arcpy.GetParameterAsText(0)).catalogpath.decode('utf-8') # feature class to process
grilla = arcpy.Describe(arcpy.GetParameterAsText(1)).catalogpath.decode('utf-8') # grid whose cells partition the work
consulta = arcpy.GetParameterAsText(2).decode('utf-8') # selection expression: Eliminate is applied to features matching it
for i in dic: # encode special characters so the query survives the command line
    consulta = consulta.replace(i,dic[i])
selection = arcpy.GetParameterAsText(3)
# Checkbox: eliminate by longest shared border (LENGTH) vs largest neighbour (AREA).
if selection == "true":
    selection = "LENGTH"
else:
    selection = "AREA"
FolderEntrada = r"%s"%arcpy.GetParameterAsText(4).decode('utf-8') # folder where results are stored
capa_de_salida = arcpy.GetParameterAsText(5).decode('utf-8') # output feature class of the whole process
capa_exclusion = arcpy.GetParameterAsText(6).decode('utf-8') # optional exclusion layer ("----" sentinel when absent)
if capa_exclusion == "":
    capa_exclusion="----"
else:
    capa_exclusion = arcpy.Describe(capa_exclusion).catalogpath.decode('utf-8')
expresion_de_exclusion = arcpy.GetParameterAsText(7).decode('utf-8') # optional exclusion expression ("----" sentinel when absent)
if expresion_de_exclusion == "":
    expresion_de_exclusion = "----"
for i in dic: # encode special characters
    expresion_de_exclusion = expresion_de_exclusion.replace(i,dic[i])
procesossimultaneos = int(arcpy.GetParameterAsText(8)) # number of simultaneous worker processes over the grid sections
numeroprocesos = int(arcpy.GetCount_management(grilla)[0]) # number of grid sections
datos_intermedios = arcpy.GetParameterAsText(9) # keep intermediate data? ("true"/"false")
#=========Funciones Auxiliares=====================#
def cambia_caracteres(infea):
    """Encode accented characters in *infea* using the dic_acentos mapping."""
    for original, reemplazo in dic_acentos.items():
        infea = infea.replace(original, reemplazo)
    return infea
def getPythonPath():
    """Return the path of python.exe for the current interpreter prefix.

    Raises RuntimeError when the executable does not exist there.
    """
    import sys  # BUG FIX: 'sys' was used here but never imported at module level
    pydir = sys.exec_prefix
    pyexe = os.path.join(pydir, "python.exe")
    if os.path.exists(pyexe):
        return pyexe
    else:
        raise RuntimeError("python.exe no se encuentra instalado en {0}".format(pydir))
def listaanidada(lista, separador):
    """Join the elements of *lista* into a single string using *separador*."""
    return separador.join(tuple(lista))
def creadirs(): # creates the output directory of the program
    """Ensure FolderEntrada\\unificado exists and return its path."""
    nombre="unificado"
    if not os.path.exists(FolderEntrada+"\\%s"%(nombre)):
        os.makedirs(FolderEntrada+"\\%s"%(nombre))
    return FolderEntrada+"\\%s"%nombre
def crearFGDB(ruta): # creates the file geodatabase that stores the final result
    """Create bd_unificado.gdb inside *ruta* and return its full path."""
    arcpy.CreateFileGDB_management(ruta, "bd_unificado.gdb")
    return ruta+"\\"+"bd_unificado.gdb"
def chunkIt(seq, num):
    """Split *seq* into *num* consecutive, roughly equal slices."""
    tam = len(seq) / float(num)
    partes, pos = [], 0.0
    while pos < len(seq):
        partes.append(seq[int(pos):int(pos + tam)])
        pos += tam
    return partes
def pasarlista(lista):
    """Serialize *lista* via str() with ', ' collapsed to '_' so the range
    can be passed to the auxiliary script on the command line."""
    return str(lista).replace(", ", "_")
# NOTE(review): exact duplicate of the listaanidada() defined earlier in this
# file; this second definition silently shadows the first — remove one copy.
def listaanidada(lista,separador): # joins an array into a single delimited string
    seq = tuple(lista)
    texto_anidado=separador.join( seq )
    return texto_anidado
def directorioyArchivo():
    """Return (script file name, absolute script directory) for this module."""
    frame_actual = inspect.currentframe()
    archivo = inspect.getfile(frame_actual)  # script filename
    directorio = os.path.dirname(os.path.abspath(archivo))  # script directory
    return archivo, directorio
#========= Requirements validation =========#
pyexe = getPythonPath()
# Prefer the 64-bit background-geoprocessing interpreter when available.
if not "x64" in r"%s"%(pyexe):
    pyexe=pyexe.replace("ArcGIS","ArcGISx64")
if not arcpy.Exists(pyexe):
    arcpy.AddError("Usted no tiene instalado el Geoprocesamiento en segundo plano (64 bits)")
    raise RuntimeError("Usted no tiene instalado el Geoprocesamiento en segundo plano (64 bits) {0}".format(pyexe))
else:
    verPython64=pyexe
scriptAuxiliar="Eliminate_Polygons_MultiExtent_Aux_v3.py" # auxiliary script that runs the Eliminate process
verPythonfinal=verPython64
# ------------------------------------------------------------
# ------------------------------------------------------------
if __name__ == '__main__':
    ### encode parameters so they survive the command line #####
    fcEntrada = cambia_caracteres(fcEntrada)
    grilla = cambia_caracteres(grilla)
    consulta = cambia_caracteres(consulta)
    FolderEntrada = cambia_caracteres(FolderEntrada)
    if capa_exclusion != "----":
        capa_exclusion = cambia_caracteres(capa_exclusion)
    if expresion_de_exclusion != "----":
        # BUG FIX: the original encoded capa_exclusion here, overwriting the
        # exclusion expression with the exclusion layer's path.
        expresion_de_exclusion = cambia_caracteres(expresion_de_exclusion)
    verPython = verPythonfinal  # interpreter to use (64-bit when available)
    verPythonDir = verPython.replace("\\python.exe", "")  # directory holding python.exe
    script = directorioyArchivo()
    script = script[1] + "\\" + scriptAuxiliar  # full path of the auxiliary script
    dirSalida = FolderEntrada
    # Grid sections to process, shuffled so the workload spreads evenly.
    cuadros = [num for num in xrange(1, numeroprocesos + 1)]
    cuadros_ram = cuadros
    random.shuffle(cuadros_ram)
    partes = chunkIt(cuadros_ram, procesossimultaneos)
    if procesossimultaneos != len(partes):  # merge the spill-over chunk so parts == processes
        partes1 = partes[:]
        partes1.pop(-1)
        partes1[-1].extend(partes[-1])
        del partes
        partes = partes1[:]
    for a in partes:  # build one worker command per chunk of grid sections
        comandos.append(r"start %s %s %s %s %s %s %s %s %s %s"%(verPython, script, fcEntrada , grilla , consulta, selection, FolderEntrada,
                                                 capa_exclusion , expresion_de_exclusion, pasarlista(a)))
    letras = string.ascii_letters  # one variable name per subprocess handle
    instrucciones = ""  # generated code that launches the worker subprocesses
    instrucciones_espera = ""  # generated code that waits for them to finish
    # Build launch instructions, one Popen per simultaneous process.
    for x in xrange(0, procesossimultaneos):
        if x == procesossimultaneos - 1:
            instrucciones += '%s = subprocess.Popen(comandos[%s],stdin=None,stdout=subprocess.PIPE,shell=True,env=dict(os.environ, PYTHONHOME=verPythonDir))'%(letras[x],str(x))
        else:
            instrucciones += '%s = subprocess.Popen(comandos[%s],stdin=None,stdout=subprocess.PIPE,shell=True,env=dict(os.environ, PYTHONHOME=verPythonDir));'%(letras[x],str(x))
    # Build wait instructions (communicate() blocks until each worker exits).
    for x in xrange(0, procesossimultaneos):
        if x == procesossimultaneos - 1:
            instrucciones_espera += 'astdout, astderr = %s.communicate()'%(letras[x])
        else:
            instrucciones_espera += 'astdout, astderr = %s.communicate();'%(letras[x])
    instrucciones = compile(instrucciones, '<string>', 'exec')
    instrucciones_espera = compile(instrucciones_espera, '<string>', 'exec')
    exec(instrucciones)  # launch all workers
    exec(instrucciones_espera)  # wait for all workers
    # Collect the per-section outputs produced by the workers.
    arreglo_features = [r"%s"%FolderEntrada+"\\Partes\\"+str(numx)+"\\bd"+str(numx)+".gdb\\cuadrox_"+str(numx) for numx in xrange(1,numeroprocesos+1)]
    output = capa_de_salida
    capa_fuente = r"%s"%FolderEntrada+"\\Partes\\"+str(1)+"\\bd"+str(1)+".gdb\\cuadrox_1"  # template for the unified feature class
    no_existen, existen, i = [], [], 1
    for capa in arreglo_features:
        if arcpy.Exists(capa):
            existen.append(i)
        else:
            no_existen.append(i)
        i += 1
    if len(no_existen) == 0:
        # All sections succeeded: create the unified output and append them.
        arreglo_features = listaanidada(arreglo_features, ";")
        ruta_unificado, nombre_salida = os.path.split(capa_de_salida)
        arcpy.CreateFeatureclass_management(ruta_unificado, nombre_salida,
                                            "POLYGON", capa_fuente, "SAME_AS_TEMPLATE", "SAME_AS_TEMPLATE", capa_fuente)
        arcpy.AddMessage(arreglo_features)
        arcpy.AddMessage(output)
        arcpy.Append_management(inputs=arreglo_features, target=output, schema_type="NO_TEST")
        if datos_intermedios == "false":
            # Drop the per-section intermediate data unless the user kept it.
            shutil.rmtree(r"%s"%FolderEntrada+"\\Partes")
    else:
        arcpy.AddError("no se pudieron procesar las secciones: "+str(no_existen))
|
[
"carlos.cano@UPRA.LOCAL"
] |
carlos.cano@UPRA.LOCAL
|
5d617f19cb142a9a65b6ad74b5ed92e3d83942ee
|
44bf33f297fb117b95db471a88f77fbac46c5672
|
/config.py
|
aaa586690e207dc334886e0c2964a30384c46477
|
[] |
no_license
|
Yettie-Xu/GrossProfitDash
|
f49653af72a1b7520d644f7ce424da213b052ab7
|
2b2c8e174a96cf4f6118693e00be9875b2d58324
|
refs/heads/main
| 2023-06-17T03:11:53.945320
| 2021-07-07T13:08:36
| 2021-07-07T13:08:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 594
|
py
|
"""Flask config."""
from os import environ, path
basedir = path.abspath(path.dirname(__file__))
class Config:
    """Base Flask configuration shared by every environment."""

    # Static assets and Jinja templates live in the conventional folders.
    STATIC_FOLDER = 'static'
    TEMPLATES_FOLDER = 'templates'

    # Secrets come from the process environment so they stay out of source
    # control; environ.get returns None when a variable is unset.
    SECRET_KEY = environ.get('SECRET_KEY')
    SESSION_COOKIE_NAME = environ.get('SESSION_COOKIE_NAME')


class ProdConfig(Config):
    """Production settings: debugging and testing switched off."""

    FLASK_ENV = 'production'
    DEBUG = False
    TESTING = False
    DATABASE_URI = environ.get('PROD_DATABASE_URI')


class DevConfig(Config):
    """Development settings: debugging and testing switched on."""

    FLASK_ENV = 'development'
    DEBUG = True
    TESTING = True
    DATABASE_URI = environ.get('DEV_DATABASE_URI')
|
[
"69041079+tracywst0123@users.noreply.github.com"
] |
69041079+tracywst0123@users.noreply.github.com
|
7babe357ab2437a195264dfc711197baf66ebbc2
|
1450a86baa83473788c8a3af074ba719168f540f
|
/hand_up/settings.py
|
e68622d740fcad202145463659ba8567bd00812f
|
[] |
no_license
|
nickleus27/hand_up_scraper
|
692bb6c11515e26ca0122670a27d29b64b3c0fe3
|
075bc8e8090da2f0127cbcae5eab23fdc24bae9c
|
refs/heads/main
| 2023-08-05T07:58:36.595237
| 2021-09-11T05:59:46
| 2021-09-11T05:59:46
| 394,876,028
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,404
|
py
|
# Scrapy settings for hand_up project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# Project identity and where Scrapy discovers spider classes.
BOT_NAME = 'hand_up'

SPIDER_MODULES = ['hand_up.spiders']
NEWSPIDER_MODULE = 'hand_up.spiders'

# NOTE(review): hard-coded docker-machine IP for the Splash rendering
# service; consider reading this from an environment variable.
SPLASH_URL = 'http://192.168.59.103:8050'
# Splash-aware dupefilter: deduplicates on the full Splash request
# arguments, not just the URL.
DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'hand_up (+http://www.yourdomain.com)'

# Obey robots.txt rules (disabled deliberately for this project).
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
    # 'hand_up.middlewares.HandUpSpiderMiddleware': 543,
    # Required by scrapy-splash so duplicated Splash args are not re-sent.
    'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# Priorities below follow the scrapy-splash setup instructions.
DOWNLOADER_MIDDLEWARES = {
    # 'hand_up.middlewares.HandUpDownloaderMiddleware': 543,
    'scrapy_splash.SplashCookiesMiddleware': 723,
    'scrapy_splash.SplashMiddleware': 725,
    'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
#    'hand_up.pipelines.HandUpPipeline': 300,
#}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
# Splash-aware cache storage so cached entries key on Splash arguments.
HTTPCACHE_STORAGE = 'scrapy_splash.SplashAwareFSCacheStorage'
|
[
"nickleus27@gmail.com"
] |
nickleus27@gmail.com
|
b0b3c7d1172418161cca5c2b92e65d7073845693
|
08878bfe3a2eba38cd40844d4624e0c6c378b32c
|
/template_c4_ta1.py
|
31206c7b01936363858f593e354a62823a7f91ba
|
[] |
no_license
|
WHJR-G12-Github/Template_C4_TA1
|
d8a498dea313a0863b40b7600e7cad6c9f681263
|
248e6c3c738bd762fe3694b80129e75f9813bfa4
|
refs/heads/main
| 2023-08-22T23:13:30.407823
| 2021-10-12T06:23:11
| 2021-10-12T06:23:11
| 415,817,433
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,883
|
py
|
# Shooting Spaceship -- student template: the TODO comments below mark the
# pieces the learner is expected to fill in (newxy(), the UP-key handling,
# and the 'forward' movement branch).
import pygame,math
pygame.init()
screen = pygame.display.set_mode((400,600))
pygame.display.set_caption("Shooting Spaceship")
background_image = pygame.image.load("bg2.jpg").convert()
player_image = pygame.image.load("s4.png").convert_alpha()
player=pygame.Rect(200,200,30,30)
WHITE=(255,255,255)
enemy=pygame.Rect(100,100,30,30)
# Enemy velocity (pixels/frame) and the player's rotation state.
xvel=2
yvel=3
angle=0
change=0
# Creating a variable 'distance' and assigning value '5' to it
distance = 5
# Creating a variable 'forward' and assigning 'False' to it
# NOTE(review): 'forward' is never set to True in this template; the key
# handlers below are the intended place to toggle it.
forward = False
# Define a function 'newxy()' to calculate new x,y coordinates
# New x,y coordinates are based on old x,y coordinates, angle, distance
while True:
    screen.blit(background_image,[0,0])
    for event in pygame.event.get():
        # NOTE(review): pygame.quit() alone does not exit the loop; a
        # sys.exit()/break usually follows it.
        if event.type == pygame.QUIT:
            pygame.quit()
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_LEFT:
                change = 6
            if event.key ==pygame.K_RIGHT:
                change = -6
            # Checking if UP arrow key is pressed and make 'forward' to True
        if event.type == pygame.KEYUP:
            if event.key ==pygame.K_LEFT or event.key == pygame.K_RIGHT:
                change = 0
            # Check if UP arrow key is released and make 'forward' to False
    # Move the enemy and bounce it off a generous off-screen boundary.
    enemy.x=enemy.x + xvel
    enemy.y=enemy.y + yvel
    if enemy.x < -250 or enemy.x > 650 :
        xvel = -1*xvel
    if enemy.y < -250 or enemy.y > 850:
        yvel = -1*yvel
    # Check if 'forward' is 'True'
    if forward:
        # Finding new x-coordinate by calling the 'newxy()' function
        # Pass 'player.x','player.y','distance','angle' as arguments inside brackets
        pass
    angle = angle + change
    newimage=pygame.transform.rotate(player_image,angle)
    screen.blit(newimage ,player)
    pygame.draw.rect(screen,WHITE,enemy)
    pygame.display.update()
    # NOTE(review): a new Clock is created every frame; tick(30) only paces
    # correctly when one Clock is reused -- hoist Clock() above the loop.
    pygame.time.Clock().tick(30)
|
[
"noreply@github.com"
] |
WHJR-G12-Github.noreply@github.com
|
2f1df0f8c3d7e3d92bb502fea46e2572d1be5e24
|
5c4d4a2c44eebf8fd9dd790da2a9ba4ededcfb70
|
/django_kala/projects/forms/documents/new_document.py
|
8d3d95897e7e6fa228f3be9c99be9012593b1a5a
|
[
"LicenseRef-scancode-other-permissive",
"MIT"
] |
permissive
|
brahimmade/kala-app
|
bc2602c6034203f83ced448ba54db7606a1234fe
|
6ac816e7c2711568cd7bcb1d996ba74c09513b3f
|
refs/heads/master
| 2023-03-21T05:15:52.436228
| 2020-03-10T15:50:29
| 2020-03-10T15:50:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,500
|
py
|
from django import forms
from django.conf import settings
from documents.models import Document, DocumentVersion
class NewDocumentForm(forms.ModelForm):
    """Create a :class:`Document` scoped to a single project.

    The form requires a ``project`` keyword argument; the category choices
    are restricted to that project's own categories.
    """

    class Meta:
        model = Document
        fields = ['category', 'tags']

    def __init__(self, *args, **kwargs):
        self.project = kwargs.pop('project')
        super(NewDocumentForm, self).__init__(*args, **kwargs)
        # Only offer categories that belong to this project.
        self.fields['category'] = forms.ModelChoiceField(
            queryset=self.project.category_set.all(),
            widget=forms.Select(attrs={'class': 'ui search dropdown'}),
            required=False
        )

    def save(self, commit=True):
        # Stamp the owning project on the instance before the normal
        # ModelForm save path runs.
        self.instance.project = self.project
        return super(NewDocumentForm, self).save(commit)
class NewDocumentVersionForm(forms.ModelForm):
    """Attach a new version (file + description) to an existing Document.

    Requires ``project`` and ``user`` keyword arguments; ``save`` pushes the
    uploaded bytes to the configured platform storage backend.
    """

    class Meta:
        model = DocumentVersion
        fields = ['description', 'file']

    def __init__(self, *args, **kwargs):
        self.project = kwargs.pop('project')
        self.user = kwargs.pop('user')
        super(NewDocumentVersionForm, self).__init__(*args, **kwargs)

    def save(self, document, commit=True):
        self.instance.document = document
        self.instance.user = self.user
        # Upload the raw file contents keyed by the version's UUID.
        manager = settings.PLATFORM_MANAGER()
        manager.upload_document(self.cleaned_data['file'].read(), str(self.instance.uuid))
        # TODO: Remove this when file uploading becomes more civilized.
        # NOTE(review): this clears the form attribute, not
        # self.instance.file -- presumably intentional per the TODO; verify.
        self.file = None
        return super(NewDocumentVersionForm, self).save(commit)
|
[
"bgroff@hawaii.edu"
] |
bgroff@hawaii.edu
|
d8e30e73d2417ad16b103c6d325dff495a2afaa5
|
71d2a0f0539546f030044f4c9d23b370d6d503f0
|
/pm/harvest.py
|
c31db15becec1e951d427b4efc2ea5599993d19a
|
[] |
no_license
|
jmoswalt/ProMan
|
94767edddee796ed78757b037851064ee4fac281
|
04f833d5365f5060ada181f4f1e32e0a17716622
|
refs/heads/master
| 2021-01-18T21:32:56.046759
| 2012-07-25T01:42:40
| 2012-07-25T01:42:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,391
|
py
|
from urllib2 import Request, urlopen
import base64
from django.conf import settings
from django.utils import simplejson as json
class Harvest(object):
    """Thin JSON client for the Harvest time-tracking REST API.

    The base URL and credentials are read from Django settings (HV_URL,
    HV_USER, HV_PASS) and sent with every request via HTTP Basic auth.
    (Python 2 module: uses urllib2.)
    """

    def __init__(self):
        self.base_url = getattr(settings, 'HV_URL', None)
        credentials = '%s:%s' % (getattr(settings, 'HV_USER', None),
                                 getattr(settings, 'HV_PASS', None))
        self.headers = {
            'Authorization': 'Basic %s' % base64.b64encode(credentials),
            'Accept': 'application/json',
            'Content-Type': 'application/json',
        }

    def _request(self, url):
        """GET ``base_url + url`` and return parsed JSON, or None if empty."""
        request = Request(url=self.base_url + url, headers=self.headers)
        json_data = json.loads(urlopen(request).read())
        if json_data:
            return json_data
        return None

    def projects(self, id=None):
        """List all projects, or fetch a single one by id."""
        return self._request('/projects/%s' % id if id else '/projects')

    def project_entries(self, id=None, start=None, end=None, billable=None):
        """Time entries for a project between ``start`` and ``end``."""
        url = '/projects/%s/entries?from=%s&to=%s' % (id, start, end)
        if billable:
            url += "&billable=%s" % billable
        return self._request(url)

    def clients(self, id=None):
        """List all clients, or fetch a single one by id."""
        return self._request('/clients/%s' % id if id else '/clients')

    def client_contacts(self, id=None):
        """List all contacts, or fetch a single one by id."""
        return self._request('/contacts/%s' % id if id else '/contacts')

    def users(self, id=None):
        """List all people, or fetch a single one by id."""
        return self._request('/people/%s' % id if id else '/people')
def get_harvest_json(api_url):
    """Fetch ``api_url`` from the Harvest API and return the parsed JSON.

    Standalone twin of ``Harvest._request`` (the logic is duplicated and
    could be consolidated). Returns None when any of the HV_URL / HV_USER /
    HV_PASS settings is missing.
    """
    base_url = getattr(settings, 'HV_URL', None)
    api_user = getattr(settings, 'HV_USER', None)
    api_pass = getattr(settings, 'HV_PASS', None)
    if base_url and api_user and api_pass:
        url = base_url + api_url
        req = Request(url)
        req.add_header('Accept', 'application/json')
        req.add_header("Content-type", "application/json")
        # Python 2-only idiom: str.encode("base64") appends a trailing
        # newline, hence the rstrip(). (base64.b64encode is the portable form.)
        req.add_header('Authorization', "Basic " + (api_user +":"+ api_pass).encode("base64").rstrip())
        res = urlopen(req)
        json_data = json.loads(res.read())
        return json_data
    return None
|
[
"jmoswalt@gmail.com"
] |
jmoswalt@gmail.com
|
c8095deda42e8905e4cd1034162bcd72beb21223
|
a5dca75cd1f64a166390f070a0a916f423b41daf
|
/vizApp/models.py
|
1faff759bf62cf3d48803a0ead9a9f2a9383fc48
|
[] |
no_license
|
kenyabarnes/medicare-providers
|
1d9b9da3472d0b186e0afe2e909d5817df6a6995
|
6f85c18451fb7e60c3ca6f9fa016f0b5aee15012
|
refs/heads/master
| 2022-12-08T01:12:29.150642
| 2020-06-20T04:05:02
| 2020-06-20T04:05:02
| 224,080,845
| 0
| 0
| null | 2022-12-04T22:03:14
| 2019-11-26T02:03:57
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 356
|
py
|
from django.db import models
# Create your models here.
class Provider(models.Model):
    """A Medicare provider record (one row per provider per year)."""

    # NOTE: 'id' and 'type' shadow Python builtins, but they are field
    # names that map to the DB schema, so they stay.
    id = models.IntegerField(primary_key = True)
    # assumes a US state name/abbreviation -- TODO confirm against data load
    state = models.CharField(max_length = 20)
    name = models.CharField(max_length = 200)
    type = models.CharField(max_length = 100)
    year = models.IntegerField()

    def __str__(self):
        # Human-readable label used by the Django admin and shell.
        return self.name
|
[
"Barneskenya@gmail.com"
] |
Barneskenya@gmail.com
|
e5ec365177347f179b249b9dd612eec1f5032966
|
d62863d049c0206bfa744ca4c9e886030bfce1ab
|
/apps/sw_shop/sw_catalog/admin/item_inlines.py
|
04cd23a6f7a69af3873bad04dfcc7dff4f79efc6
|
[] |
no_license
|
jurgeon018/box
|
51738b99e640202936ed72357d3c67d2517e589b
|
50b84a0afa73fab85a00eef54194f3c126d15397
|
refs/heads/master
| 2021-07-17T13:37:08.665292
| 2020-10-15T09:50:33
| 2020-10-15T09:50:33
| 232,013,297
| 0
| 1
| null | 2020-03-27T02:16:44
| 2020-01-06T03:01:34
|
Python
|
UTF-8
|
Python
| false
| false
| 3,388
|
py
|
from django.conf import settings
from django.forms import TextInput, Textarea, NumberInput
from django.contrib import admin
from django.shortcuts import reverse
from django.utils.safestring import mark_safe
from django.urls import path
from django.contrib import admin
from django.conf import settings
from django.forms import TextInput, Textarea, NumberInput
from django.shortcuts import reverse
from django.utils.safestring import mark_safe
from django.urls import path
from django.conf import settings
from django.forms import TextInput, Textarea, NumberInput
from django.utils.translation import gettext_lazy as _
from box.core.utils import (
AdminImageWidget, show_admin_link, move_to, BaseAdmin,
seo, base_main_info
)
from box.apps.sw_shop.sw_catalog.models import *
from box.apps.sw_shop.sw_cart.models import *
from box.apps.sw_shop.sw_catalog.models import *
from box.apps.sw_shop.sw_cart.models import *
from adminsortable2.admin import SortableAdminMixin, SortableInlineAdminMixin
from mptt.admin import MPTTModelAdmin, DraggableMPTTAdmin, TreeRelatedFieldListFilter
from modeltranslation.admin import *
from dal import autocomplete
from import_export.admin import ImportExportActionModelAdmin, ImportExportModelAdmin
from .filters import *
from .views import *
from .item_inlines import *
from ..resources import *
class ItemImageInline(
    SortableInlineAdminMixin,
    TranslationTabularInline,
):
    """Drag-sortable, translatable tabular inline of an item's images."""

    model = ItemImage
    extra = 0
    classes = ['collapse']
    # def get_fields(self, request, obj):
    #   fields = [
    #     'image',
    #     # 'order',
    #     'alt',
    #   ]
    #   return fields
    fields = [
        'image',
        'alt',
    ]
    readonly_fields = [
        # 'order',
    ]
    # Render image fields with a thumbnail preview instead of a bare input.
    formfield_overrides = {models.ImageField: {'widget': AdminImageWidget}}
class ItemReviewInline(admin.TabularInline):
    """Collapsible tabular inline listing the reviews attached to an item."""

    model = ItemReview
    extra = 0
    classes = ['collapse']
    exclude = []
class ItemInline(TranslationTabularInline):
    """Read-only inline of items showing pricing, with an admin edit link."""

    model = Item
    extra = 0
    classes = ['collapse']
    fields = [
        'show_title',
        'old_price',
        'price',
        'currency',
    ]
    readonly_fields = [
        'show_title',
        'old_price',
        'price',
        'currency',
    ]

    def show_title(self, obj):
        """Render the item's title as a link to its admin change page."""
        meta = obj._meta
        change_url = reverse(
            'admin:%s_%s_change' % (meta.app_label, meta.model_name),
            args=(obj.pk,),
        )
        return mark_safe('<a href=%s>%s</a>' % (change_url, obj.title))
    show_title.short_description = 'Товар'
class ItemCategoryInline(TranslationStackedInline):
    """Stacked, translatable inline for editing a category's subcategories."""

    model = ItemCategory
    extra = 0
    classes = ['collapse']
    verbose_name = _("підкатегорія")
    verbose_name_plural = _("підкатегорії")
    fields = [
        'title',
        'is_active',
        'image',
        'slug',
    ]
    # Auto-fill the slug from the title while typing in the admin.
    prepopulated_fields = {
        "slug": ("title",),
    }
    # Thumbnail preview for the image field.
    formfield_overrides = {
        models.ImageField: {'widget': AdminImageWidget}
    }
|
[
"jurgeon018@gmail.com"
] |
jurgeon018@gmail.com
|
b1710a3385cb76f16c2952316ccab9aa63a71f09
|
b5d2ed53375edd9fa8ac234072a3b180ea451186
|
/BSMTOexample.py
|
690f722ebf6a02f89a6489b4231a298f8d0a481e
|
[] |
no_license
|
phoenixwilliams/MultiTaskLibrary
|
79a50921636123529e79160d59b60325f29d5e27
|
70907f3b7d8110c99b976064d7a2d23e0f0d2ffa
|
refs/heads/main
| 2023-02-26T07:51:30.516129
| 2021-01-31T10:28:35
| 2021-01-31T10:28:35
| 327,120,783
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,748
|
py
|
import BSMTO
import BenchmarkProblems
import time
import AlgorithmOperators
import matplotlib.pyplot as plt
if __name__ == "__main__":
    # Two tasks, each with a 50-dimensional decision vector.
    dimensions = [50, 50]
    # SBX crossover configuration (pc = crossover probability,
    # nc = distribution index).
    crossover_params = {
        "pc": 1.0,
        "nc": 20,
        "length": max(dimensions)
    }
    # NOTE(review): defined but never used below -- only the polynomial
    # mutation parameters are wired into the design dict.
    gaussmutation_params = {
        "mean":0,
        "sigma":1,
        "pm": 1/max(dimensions),
        "length": max(dimensions)
    }
    polymutation_params = {
        "nm": 5,
        "pm": 1/max(dimensions),
        "length": max(dimensions),
        "diffmut": 1
    }
    selection_params = {
        "tournament_size": 1,
        "fitness_name": "fitness"
    }
    # Full experiment design handed to BSMTO: two benchmark problems
    # optimised jointly. p2/p3 presumably control knowledge-transfer
    # probabilities -- verify against the BSMTO implementation.
    design = {
        "problems": [BenchmarkProblems.rastrigin, BenchmarkProblems.schwefel],
        "K": 2,
        "dimensions": dimensions,
        "N": 100,
        "crossover": AlgorithmOperators.sbx,
        "crossover_params": crossover_params,
        "mutations": [AlgorithmOperators.mutpoly],
        "mutations_params": [polymutation_params],
        "p3": 0.85,
        "bounds": [[-100, 100], [-50, 50]],
        "penalty": 0,
        "function_evaluations": 100000,
        "nk": 50,
        "p2": 0.8,
        "selection": AlgorithmOperators.Single_BinaryTournament,
        "selection_params": selection_params
    }
    bsmto = BSMTO.BSMTO(design)
    start = time.time()
    final_population, process = bsmto.optimize(True)
    # Wall-clock time of the optimisation run.
    print(time.time()-start)
    # Average fitness of each task's final population.
    avgs = [0,0]
    for c in range(len(final_population)):
        avg = 0
        for p in final_population[c].population:
            avg += p.fitness
        avg = avg/len(final_population[c].population)
        avgs[c] = avg
    print(avgs)
    # Convergence curves, one figure per task.
    plt.plot(process[0])
    plt.show()
    plt.plot(process[1])
    plt.show()
|
[
"32957699+phoenixwilliams@users.noreply.github.com"
] |
32957699+phoenixwilliams@users.noreply.github.com
|
aaa7967d0696133250c9bd8cc83a8d4402fa94ca
|
5a095d0dbd7368c6c71bc4a89ba95a67745f4b52
|
/atcoder_workspace/ABC/121/B/B.py
|
9cbe840fa22e6ddf45092b172e71f551db2a8820
|
[] |
no_license
|
Otsdarva1/today_i_learned
|
8a162681dc92487899e937d8ae9a7b3f94e92eb9
|
6383ba37e88fe586582a6edf25d5baf2654c5a6a
|
refs/heads/master
| 2022-12-15T03:45:53.427641
| 2019-09-12T14:44:51
| 2019-09-12T14:44:51
| 199,311,387
| 0
| 0
| null | 2022-12-06T20:21:53
| 2019-07-28T16:30:23
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 266
|
py
|
# ABC121 B: count source codes whose weighted feature sum plus the bias C
# is strictly positive.
n, m, c = map(int, input().split())
b = list(map(int, input().split()))
a = [list(map(int, input().split())) for _ in range(n)]
# A code ai solves the problem iff sum(ai[j] * b[j] for j < m) + c > 0.
ans = sum(1 for ai in a if sum(x * y for x, y in zip(ai, b)) + c > 0)
print(ans)
|
[
"yamazaki192748@gmail.com"
] |
yamazaki192748@gmail.com
|
4d3c568497ff4a6d8f34084d2456c9e637bebe58
|
9c4a52570d978486f800a83232217cb5c8bad2b6
|
/src/ros_lidar_visualizer_node.py
|
dda75c6a31a671ebaa325344050714869227fd5b
|
[] |
no_license
|
iscumd/Ros-Lidar-Visualizer
|
9097082f77042f9c145b8030d31394bde62cdc10
|
78608b20e5f5187e7371ce52ea3145ca9f9269f2
|
refs/heads/master
| 2021-09-04T20:06:48.300539
| 2018-01-17T03:26:36
| 2018-01-17T03:26:36
| 115,165,057
| 1
| 0
| null | 2018-01-17T03:26:38
| 2017-12-23T02:34:36
|
CMake
|
UTF-8
|
Python
| false
| false
| 3,802
|
py
|
#!/usr/bin/env python
import pygame
import rospy
import math
import sensor_msgs.msg as sensors
import geometry_msgs.msg as geometries
# Basic RGB colour palette.
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
BLUE = (0, 0, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
DEEP_RED = (155,0,0)
YELLOW = (255,255,0)
PURPLE = (59,11,48)

# Window geometry and the per-axis world->pixel scale factors.
screen_width = 900
screen_height = 1000
X_SCALE = 1.5
Y_SCALE = 0.6

pygame.init()
size = (screen_width, screen_height)
# Course dimensions -- presumably centimetres (width, length); verify.
course_size = [400,1500]
screen = pygame.display.set_mode(size)

# Shared mutable state updated by the ROS callbacks below.
obstacles = [[0, 0], [0, 0]]
robot_pose = [0,0,0]
count = 0.01
# Shift the world origin so the robot starts near the bottom of the course.
origin_offset = [200,course_size[1] - 250]
robot_pose[0] = robot_pose[0] + origin_offset[0]
robot_pose[1] = robot_pose[1] + origin_offset[1]
def draw_yeti(x,y,rot): #input scaled x and y
    """Draw the robot footprint as a blue polygon rotated by ``rot`` radians.

    ``x``/``y`` are already screen-scaled; the 0.45 x 0.75 rectangle is the
    robot outline (metres before the *100 scaling), rotated about a fixed
    pivot inside it.
    """
    outer_dim = [[0,0],[0.45*100*X_SCALE,0],[0.45*100*X_SCALE,0.75*100*Y_SCALE],[0,0.75*100*Y_SCALE]]
    rotated_outer_dim = []
    for i in range(0,len(outer_dim)):
        rotated_outer_dim.append(rotate_around_point(0.225*100*X_SCALE,0.7*100*Y_SCALE,rot,outer_dim[i][0] + x,outer_dim[i][1] + y ))
    # Python 2 debug print of the rotated corner list.
    print rotated_outer_dim
    pygame.draw.polygon(screen,BLUE,rotated_outer_dim,0)
def rotate_around_point(center_x, center_y, angle, x, y):
    """Rotate point (x, y) by ``angle`` radians about (center_x, center_y).

    Returns the rotated point as an ``[x, y]`` list.
    """
    sin_a = math.sin(angle)
    cos_a = math.cos(angle)
    # Translate so the pivot sits at the origin, rotate, translate back.
    dx = x - center_x
    dy = y - center_y
    return [dx * cos_a - dy * sin_a + center_x,
            dx * sin_a + dy * cos_a + center_y]
def midpoint(p1, p2):
    """Return the midpoint of two 2-D points given as indexable pairs."""
    mid_x = (p1[0] + p2[0]) / 2
    mid_y = (p1[1] + p2[1]) / 2
    return [mid_x, mid_y]
def pscale(x, in_min, in_max, out_min, out_max):
    """Linearly map ``x`` from [in_min, in_max] onto [out_min, out_max].

    Uses floor division, mirroring Arduino's integer ``map()``.
    """
    span_in = in_max - in_min
    span_out = out_max - out_min
    return (x - in_min) * span_out // span_in + out_min
def distance(x1, y1, x2, y2):
    """Euclidean distance between (x1, y1) and (x2, y2)."""
    dx = x1 - x2
    dy = y1 - y2
    return math.sqrt(dx ** 2 + dy ** 2)
def polar_to_cartesian(theta, rot):
    """Convert polar coordinates (``theta`` radians, radius ``rot``) to [x, y]."""
    return [math.cos(theta) * rot, math.sin(theta) * rot]
def text_to_screen(screen, text, x, y, size = 20, color = (000, 000, 000)):
    """Render ``text`` onto ``screen`` at (x, y) using a system font.

    Python 2 syntax (``except Exception, e``); any font/render error is
    logged and re-raised.
    """
    font_type = pygame.font.match_font("roboto-black", "Arial", "sansserif")#'./Roboto-Black.ttf'
    try:
        text = str(text)
        font = pygame.font.Font(font_type, size)
        text = font.render(text, True, color)
        screen.blit(text, (x, y))
    except Exception, e:
        print 'Font Error, saw it coming'
        raise e
def refresh_screen():
    """Present the current frame, then clear and redraw the static course.

    Note the order: flip() shows whatever the callbacks drew since the last
    call, and the fill/rect calls below prepare the background for the
    *next* frame.
    """
    pygame.display.flip()
    screen.fill(DEEP_RED)
    # Course boundary outline.
    pygame.draw.rect(screen, BLUE, [0,0,X_SCALE*course_size[0],Y_SCALE*course_size[1]], 2)
    # Filled white strip -- presumably the snow field / target lane; verify.
    pygame.draw.rect(screen, WHITE, [X_SCALE*150, Y_SCALE*200, X_SCALE * 100, Y_SCALE * 1000], 0)
def laserCallback(scan):
    """ROS callback: plot one LaserScan as green dots around the robot.

    Ranges are converted from polar (robot frame, metres) to screen pixels:
    *100 converts m->cm, then the per-axis scale factors apply. The y axis
    is inverted (screen y grows downward).
    """
    #print "got laser"
    #lidar_max_range = data.range_max
    global count
    start = scan.angle_min
    for i in range(0, len(scan.ranges)):
        point = polar_to_cartesian(start + scan.angle_increment * i, scan.ranges[i])
        final_x = ((point[0]*100) + robot_pose[0])*X_SCALE
        final_y = (robot_pose[1] - (point[1]*100))*Y_SCALE
        pygame.draw.circle(screen, GREEN, (int(final_x), int(final_y)), 3)
    # Spin the robot sprite a little each scan (count is a frame counter,
    # not real heading -- the pose callback is a stub).
    draw_yeti(robot_pose[0] * X_SCALE, robot_pose[1]*Y_SCALE, count*math.pi/20)
    count = count + 1
    refresh_screen()
def obstacleCallback(obstacles):
    """ROS callback: draw each pose in a PoseArray as a yellow dot.

    Positions arrive in metres; *100 converts to centimetres and the
    per-axis scale factors map them to screen pixels (no robot-pose offset
    is applied, matching the original behaviour).
    """
    for pose in obstacles.poses:
        obs_x = (pose.position.x * 100) * X_SCALE
        obs_y = (pose.position.y * 100) * Y_SCALE
        # BUG FIX: the original drew undefined names final_x/final_y,
        # raising NameError on the first obstacle message; use the
        # coordinates computed above.
        pygame.draw.circle(screen, YELLOW, (int(obs_x), int(obs_y)), 3)
    refresh_screen()
def poseCallback(pose):
    """ROS callback stub for the robot pose -- only logs receipt (Python 2 print)."""
    print "got pose"

def main():
    """Start the ROS node, draw the initial frame, and block in rospy.spin().

    All rendering happens inside the subscriber callbacks.
    """
    rospy.init_node('visualizer', anonymous=True)
    pygame.display.set_caption("Localization visualizer")
    refresh_screen()
    rospy.Subscriber("scan", sensors.LaserScan, laserCallback)
    rospy.Subscriber("obstacles", geometries.PoseArray, obstacleCallback)
    rospy.Subscriber("yeti/pose",geometries.Pose2D,poseCallback)
    rospy.spin()

if __name__ == '__main__':
    main()
|
[
"afcofiel@umich.edu"
] |
afcofiel@umich.edu
|
f18ffb67534bb145f542caa98102e608568c6969
|
cca6bcec6528417842ce4cc9aee2b891c37fa421
|
/pogo/proto/Data/Player/DailyBonus_pb2.py
|
e05be9f47a36ed1982bcdea5e3a8dd4f6d3ebbc4
|
[] |
no_license
|
p0psicles/pokemongo-api
|
2c1b219dcc6441399a787280e3df9446761d2230
|
c1e20ae5892b045ac0b035b0f50254d94a6ac077
|
refs/heads/master
| 2021-01-16T23:08:42.501756
| 2016-07-20T20:51:54
| 2016-07-20T20:51:54
| 63,850,559
| 2
| 0
| null | 2016-07-21T08:17:44
| 2016-07-21T08:17:44
| null |
UTF-8
|
Python
| false
| true
| 2,530
|
py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: Data/Player/DailyBonus.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# Generated module -- regenerate from Data/Player/DailyBonus.proto rather
# than editing by hand. DailyBonus carries two int64 millisecond
# timestamps: the next daily-bonus collection time and the next
# defender-bonus collection time (field names below).
DESCRIPTOR = _descriptor.FileDescriptor(
  name='Data/Player/DailyBonus.proto',
  package='POGOProtos.Data.Player',
  syntax='proto3',
  serialized_pb=_b('\n\x1c\x44\x61ta/Player/DailyBonus.proto\x12\x16POGOProtos.Data.Player\"c\n\nDailyBonus\x12#\n\x1bnext_collected_timestamp_ms\x18\x01 \x01(\x03\x12\x30\n(next_defender_bonus_collect_timestamp_ms\x18\x02 \x01(\x03\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

_DAILYBONUS = _descriptor.Descriptor(
  name='DailyBonus',
  full_name='POGOProtos.Data.Player.DailyBonus',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='next_collected_timestamp_ms', full_name='POGOProtos.Data.Player.DailyBonus.next_collected_timestamp_ms', index=0,
      number=1, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='next_defender_bonus_collect_timestamp_ms', full_name='POGOProtos.Data.Player.DailyBonus.next_defender_bonus_collect_timestamp_ms', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=56,
  serialized_end=155,
)

DESCRIPTOR.message_types_by_name['DailyBonus'] = _DAILYBONUS

# Build the concrete message class from the descriptor and register it.
DailyBonus = _reflection.GeneratedProtocolMessageType('DailyBonus', (_message.Message,), dict(
  DESCRIPTOR = _DAILYBONUS,
  __module__ = 'Data.Player.DailyBonus_pb2'
  # @@protoc_insertion_point(class_scope:POGOProtos.Data.Player.DailyBonus)
  ))
_sym_db.RegisterMessage(DailyBonus)

# @@protoc_insertion_point(module_scope)
|
[
"contact@dylanmadisetti.com"
] |
contact@dylanmadisetti.com
|
380ec927060ea6cdb2c70170326d30a0f152a396
|
cb560f64ba1b931778ffb7dd6435bca5614c24d6
|
/zorkwithclasses/main_game.py
|
8faae1a9897987816ab4f1be1e0821a32c10f8f0
|
[] |
no_license
|
kazi-2020/Clue-Based-Adventure-Game
|
1ed69015499e2ecfe5c6eb4d79716d909ba91613
|
78573f28f1d54c08e75c1e50f1161a0fa7febb30
|
refs/heads/main
| 2023-03-29T22:33:11.741966
| 2021-04-22T03:22:53
| 2021-04-22T03:22:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,419
|
py
|
from player import *
from board import *
from time import sleep
from PIL import Image
import sys

# Game setup: create the player from console input and attach them to a
# fresh board.
user_name=input("To play, please enter your name :- ")
p1=Player(user_name)
new_board = Board()
new_board.addPlayer(p1)
print("\n----ZORK-CLUE----")
# Intro text shown with a typewriter effect below. (Runtime string --
# including the 'heping' typo -- kept exactly as authored.)
intro="""Welcome to Manor Mansions, the heart of this town. It's the residence of the mayor of this town. \n
Unfortunately the towns mental asylum had a security breach, one of the lunatics got out and killed the mayor's dog with a candle stick.\n
The killer has been caught but cannot be convicted until the murder weapon is found, according to interpol it's still in one of the rooms inside the mansion.\n
Help the mayor's dog get his justice by heping them find the weapon.\n"""
print("\n-------------x---------------------x------------------x----")
# Typewriter effect: one character every 50 ms.
for char in intro:
    sleep(0.05)
    sys.stdout.write(char)
print("-------------x---------------------x------------------x----")
print("You are standing in j3\n")
print("Use the map for reference.")
sleep(0.05)
print("To move up type in 'mu'\n")
sleep(0.05)
print("To move down type in 'md'\n")
sleep(0.05)
print("To move left type in 'ml'\n")
sleep(0.05)
print("To move right type in 'mr'\n")
sleep(0.05)
print("-------------x---------------------x------------------x----")
# Pop up the reference map in the OS image viewer.
im = Image.open('map.jpg')
im.show()
def movement(player):
    """Prompt for one move command and apply it to the shared board.

    Recognised commands: 'mu', 'md', 'ml', 'mr' (up/down/left/right);
    anything else prints "invalid move". After every attempt the player's
    current room is reported, exactly as before -- the dispatch table just
    removes the repeated if/elif branches and duplicated current_room()
    calls of the original.
    """
    moves = {
        "mu": new_board.moveup,
        "md": new_board.movedown,
        "ml": new_board.moveleft,
        "mr": new_board.moveright,
    }
    moveto = input("Which way do you want to move:- ")
    action = moves.get(moveto)
    if action is None:
        print("invalid move")
    else:
        action(player)
    new_board.current_room(player)
def objective_loop(player):
    """Main game loop: keep moving until the player reaches the weapon room.

    NOTE(review): ``weapon_position`` is not defined in this file --
    presumably star-imported from ``board``; verify. Also note the body
    calls ``movement(p1)`` (the module global) rather than ``player``.
    """
    Board.clues()
    found=0
    # count tallies the number of moves taken; reported as "points" below.
    count=0
    while found==0:
        if str(Board.board[player.x][player.y]) != str(weapon_position):
            movement(p1)
            found=0
            count = count+1
        else:
            found=1
            print("You have found the weapon. Now the lunatic can be convicted.")
            print("You caught the killer with",count,"points.")

# Kick off the game immediately at import/run time.
objective_loop(p1)
|
[
"noreply@github.com"
] |
kazi-2020.noreply@github.com
|
4c0ca8fc2bdcc9fe30d65aff66bb876728f439c6
|
0b15f0143e4affb5b7e08eb3ab6cc608397082d9
|
/FrameworkExe_VBFHbb/scripts/form_HSG5_out.py
|
7911707e83db1175cdcbfd0568bd762e3184eb22
|
[] |
no_license
|
yqqxyy/VBFHbb
|
20d1ecbbb771df910ef4c878112ee4a40bef1d8f
|
395b45b417ba04bc31e980b1870050067ee84ef6
|
refs/heads/master
| 2023-07-15T18:09:56.826638
| 2021-08-31T10:54:07
| 2021-08-31T10:54:07
| 401,651,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,086
|
py
|
#!/usr/bin/python
# Python 2 script: reads a list of dataset names (one per line) and writes
# shortened grid output-container names to out_sample.txt, checking the
# result stays under the grid's name-length limit.
# assumes dot-separated rucio-style dataset names (scope.ID.generator....)
# -- TODO confirm against actual input samples.
import os, math, sys

#### job version
job_version = "HIGG2D4.11-3"

####
#user = "user.XXXX."
user = "group.phys-higgs."

#### input file -> take care not to put / in the end, it will be handled later
file_in = sys.argv[1]
sample_file = open('./'+file_in, 'r')

#### output file, to take back jobs
out_file = open('./out_sample.txt', 'w')

for i_sample in sample_file :
    # protect from commented lines
    if i_sample.find("#")>=0 : continue
    # protect from blank lines
    if i_sample == "\n" : continue
    #
    # Character-walk state: count = dots seen, count_s = underscores seen in
    # the reco-tag section, is_offset = name starts with a scope prefix
    # ("g"roup / "u"ser) that shifts the field positions by two dots.
    count = 0
    sample_out = user
    sample_out_s = ""
    count_s = 0
    is_offset = False
    count_total = 0
    for char in i_sample :
        # check offset
        if count_total==0 :
            if char=="g" or char=="u" : is_offset = True
        # protect from offset
        val_offset = 0
        dot_offset=False
        if is_offset :
            val_offset = 2
            if count+1==val_offset and char==".": dot_offset=True
        # form string from generator name
        if char == "." : count += 1
        if count >= val_offset and count < val_offset+3 and dot_offset==False : sample_out += char
        # form string from s reco name
        if count >= val_offset+5 and dot_offset==False :
            if char=="_" : count_s += 1
            elif count_s==1: sample_out_s += char
        count_total += 1
    # Data samples carry no reco tag; MC samples get the extracted tag.
    if sample_out.find("data") < 0 : sample_out += "."+sample_out_s
    sample_out += "."+job_version+"\n"
    # extra pruning: abbreviate generator names to keep the name short.
    sample_out=sample_out.replace("ParticleGenerator","PG")
    sample_out=sample_out.replace("Pythia","Py")
    sample_out=sample_out.replace("Powheg","Pw")
    sample_out=sample_out.replace("MadGraph","MG")
    sample_out=sample_out.replace("EvtGen","EG")
    sample_out=sample_out.replace("Sherpa","Sh")
    sample_out=sample_out.replace("_CT10","")
    out_file.write(sample_out)
    # check length against the grid's output-name limit.
    full_name = sample_out.strip()
    #full_name += ".CxAOD.root"
    max_str = 115
    if len(full_name) >= max_str : print "WARNING : "+full_name+" output name is "+str(len(full_name))+" chars, it will crash the job (should be <"+str(max_str)+"). Re-visit the name structure."

sample_file.close()
out_file.close()
|
[
"18329007325@163.com"
] |
18329007325@163.com
|
183f56a0ddd259fd9504e2954818dce3803861cc
|
beefd0c709fbf1a0429f8ebe475192e9d1d24289
|
/Exception.py
|
5c34abca489008be6c4389c5b127f6182d5e154b
|
[
"MIT"
] |
permissive
|
iraycd/automated-trading-and-alert-system-
|
47e2427a71bbde0071629be295b1270b9c485db9
|
c5050766f8cc321df17526f824246a35252e8bdf
|
refs/heads/master
| 2022-11-16T03:31:48.059163
| 2020-07-06T09:44:20
| 2020-07-06T09:44:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 382
|
py
|
import linecache
import sys
import init
def PrintException():
    """Format the currently-handled exception as a one-line description.

    Must be called from inside an ``except`` block. Despite the name it
    does not print: it returns a string containing the file, line number,
    the offending source line, and the exception object.
    """
    exc_type, exc_obj, tb = sys.exc_info()
    frame = tb.tb_frame
    lineno = tb.tb_lineno
    filename = frame.f_code.co_filename
    # Refresh linecache in case the file changed since it was first read.
    linecache.checkcache(filename)
    line = linecache.getline(filename, lineno, frame.f_globals)
    return 'EXCEPTION IN ({}, LINE {} "{}"): {}'.format(
        filename, lineno, line.strip(), exc_obj)
|
[
"54738408+pradeep-p-1999@users.noreply.github.com"
] |
54738408+pradeep-p-1999@users.noreply.github.com
|
2418a6251f7845a23f6ba681a498498303746bfd
|
639fad37e3b0be9e6cd7d2f0f68bbfb5529558cf
|
/EchoesJava2/EchoesJava/echoes-runtime/bin/rendering-engine/objects/Plants.py
|
c324a47cfb8c8ba77a4585965ae75051d010f413
|
[] |
no_license
|
keithsaccount/ECHOES2
|
768df0e3967ae5b16179a3279167f6fab76d302e
|
289cfc5112b0c242c05f82332872274d6f573660
|
refs/heads/master
| 2021-01-17T01:11:21.852047
| 2012-05-16T15:49:15
| 2012-05-16T15:49:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 40,075
|
py
|
'''
Created on 8 Sep 2009
@author: cfabric
'''
from EchoesObject import *
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLE import *
from OpenGL.GLUT import *
import PIL.Image
import random
import echoes
import math, numpy
import objects.Environment
import Ice
import Logger
import Bubbles, PlayObjects
import Motions
class EchoesFlower(EchoesObject):
'''
classdocs
'''
def __init__(self, app, autoAdd=True, props={"type": "Flower"}, fadeIn = False, fadingFrames = 100, callback=None):
'''
Constructor
'''
super(EchoesFlower, self).__init__(app, autoAdd, props, fadeIn, fadingFrames, callback)
self.size = 0.4
self.maxSize = 0.6
self.pos = (0,0,0)
self.rotate = [0,0,0]
self.publishRegion = True
self.underCloud = False
self.amplitude = 0
self.swing = 0
if "colour" in self.props:
self.colour = self.props["colour"]
else:
self.colour = "red"
self.patterntex = self.setImage("visual/images/Circles.png")
self.shape = [(-1, -1), (1, -1), (1, 1), (-1, 1)]
self.texshape = [(0, 0), (1, 0), (1, 1), (0, 1)]
self.targetPos = None
self.targetBasket = None
self.pot = None
self.basket = None
self.inCollision = None
self.canGrow = True
self.isGrowing = 0
self.growToSize = None
self.avatarTCB = None
self.canTurnIntoBall = True
self.canTurnIntoBubble = True
self.childCanTurnIntoBubble = True
self.childCanTurnIntoBall = True
def __setattr__(self, item, value):
if item == "size":
self.stemLength = value * 4
self.calcStemPoints()
self.stemWidth = int(min(self.app.canvas.lineWidthRange[1] * 2 * value, 10))
elif item == "growToSize":
value = min(self.maxSize, value)
elif item == "colour":
if value == "green":
self.texture = self.setImage('visual/images/FlowerHead-01.png')
elif value == "blue":
self.texture = self.setImage('visual/images/FlowerHead-03.png')
elif value == "yellow":
self.texture = self.setImage('visual/images/FlowerHead-04.png')
else: # red is the default
self.texture = self.setImage('visual/images/FlowerHead-02.png')
elif item == "pos" and hasattr(self, "pos") and hasattr(self, "underCloud"):
for oid, o in self.app.canvas.objects.items():
if isinstance(o, objects.Environment.Cloud):
if o.isUnder(self):
if not self.underCloud: self.underCloud = True
else:
if self.underCloud: self.underCloud = False
elif item == "pot":
if value == None:
self.app.canvas.rlPublisher.objectPropertyChanged(str(self.id), "flower_pot", "None")
else:
self.app.canvas.rlPublisher.objectPropertyChanged(str(self.id), "flower_pot", str(value.id))
elif item == "basket":
if value == None:
self.app.canvas.rlPublisher.objectPropertyChanged(str(self.id), "flower_basket", "None")
else:
self.app.canvas.rlPublisher.objectPropertyChanged(str(self.id), "flower_basket", str(value.id))
elif item == "underCloud":
self.app.canvas.rlPublisher.objectPropertyChanged(str(self.id), "under_cloud", str(value))
elif hasattr(self, "isGrowing") and item == "isGrowing":
if self.isGrowing > 0 and value == 0:
self.app.canvas.rlPublisher.objectPropertyChanged(str(self.id), "is_growing", "False")
if self.isGrowing <= 0 and value > 0:
self.app.canvas.rlPublisher.objectPropertyChanged(str(self.id), "is_growing", "True")
object.__setattr__(self, item, value)
def findTargetBasket(self):
for id, se in self.app.canvas.objects.items():
if isinstance(se, objects.Environment.Basket):
self.targetBasket = se
break
def calcStemPoints(self):
self.stemPoints = []
for i in range(4):
if i > 0 and i < 4:
x = random.uniform(-0.2,0.2)
else:
x = 0
self.stemPoints.append([x, -1*self.stemLength*float(i)/3.0, 0])
def renderObj(self):
'''
overwriting the render method to draw the flower
'''
if not (hasattr(self, "swing")): return
if (not (self.basket and self.basket.avatarTCB) and
not (self.pot and self.pot.avatarTCB)):
if not self.inCollision:
if self.basket:
self.basket.removeFlower(self)
self.basket = None
if self.pot:
self.pot.flower = None
self.pot = None
self.inCollision = None
if self.isGrowing > 0:
self.isGrowing -= 1
if self.growToSize and self.canGrow:
if self.size < self.growToSize:
self.grow()
else:
self.growToSize = None
if self.targetPos:
d = [0,0,0]
for i in range(3):
d[i] = self.targetPos[i] - self.pos[i]
self.pos = [self.pos[0] + d[0] / 20, self.pos[1] + d[1] / 20, self.pos[2] + d[2] / 20]
if abs(d[0]+d[1]+d[2]) < 0.05:
self.pos = self.targetPos
self.targetPos = None
if self.targetBasket:
self.targetBasket.addFlower(self)
self.interactive = True
self.targetBasket = None
if not self.beingDragged:
self.swing = (self.swing + 0.1) % (2*math.pi) # animate the swinging stem
self.amplitude = self.amplitude - 0.005
if self.amplitude < 0: self.amplitude = 0
dx= -1.5*self.size * self.amplitude * math.sin(self.swing)
dy= self.stemLength - math.sqrt(math.pow(self.stemLength, 2) - math.pow(dx, 2))
self.stemPoints[0]=(-1*dx,-1*dy,self.pos[2])
glPushMatrix()
# centre position
glTranslate(self.pos[0], self.pos[1], self.pos[2]) #make sure the head is in front of the stem
glRotatef(self.rotate[2],0,0,1)
# Stem
if not (hasattr(self, "stemWidth")) or self.stemWidth == 0:
self.stemWidth = 1
glLineWidth(self.stemWidth)
glColor4f(0.229, 0.259, 0.326, self.transperancy)
self.app.canvas.drawBezier(self.stemPoints, False)
glLineWidth(1.0)
# touch area for better dragging
glDisable(GL_DEPTH_TEST)
glColor4f(1, 1, 1, 0.0)
glBegin(GL_QUADS)
glVertex3f(-self.size*0.7, 0, -0.1)
glVertex3f(self.size*0.7, 0, -0.1)
glVertex3f(self.size*0.7, -self.stemLength, -0.1)
glVertex3f(-self.size*0.7, -self.stemLength, -0.1)
glEnd()
glEnable(GL_DEPTH_TEST)
# Head
glEnable( GL_ALPHA_TEST )
glAlphaFunc( GL_GREATER, 0.1 )
glEnable( GL_TEXTURE_2D )
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glBindTexture(GL_TEXTURE_2D, self.texture)
glTranslate(self.stemPoints[0][0], self.stemPoints[0][1], self.stemPoints[0][2]+0.05)
glScalef(self.size, self.size, self.size)
glColor4f(1, 1, 1, self.transperancy)
glBegin(GL_QUADS)
ti = 0
for v in self.shape:
glTexCoord2d(self.texshape[ti][0], self.texshape[ti][1])
glVertex3f(v[0], v[1], self.pos[2])
ti += 1
glEnd()
glDisable( GL_TEXTURE_2D )
if not self.childCanTurnIntoBall or not self.childCanTurnIntoBubble:
glEnable( GL_TEXTURE_2D )
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glBindTexture(GL_TEXTURE_2D, self.patterntex)
glColor4f(1, 1, 1, self.transperancy*0.5)
glTranslate(0,0,0.05)
glBegin(GL_QUADS)
ti = 0
for v in self.shape:
glTexCoord2d(self.texshape[ti][0], self.texshape[ti][1])
glVertex3f(v[0], v[1], self.pos[2])
ti += 1
glEnd()
glDisable( GL_TEXTURE_2D )
glDisable( GL_ALPHA_TEST )
glPopMatrix()
def shake(self, force):
'''
Shake the whole plant, stem rooted in the soil
'''
pass
def grow(self):
'''
Grow the plant bigger to the set maximum
'''
if self.size < self.maxSize:
self.size += 0.001
self.pos = (self.pos[0], self.pos[1]+0.004, self.pos[2])
self.isGrowing = 5 # number of frames that it will report growing
else:
self.canGrow=False
def moveToBasket(self, id):
if id:
self.targetBasket = self.app.canvas.objects[id]
else:
self.findTargetBasket()
if self.targetBasket:
self.interactive = False
if self.basket == self.targetBasket:
Logger.warning("Flower " + str(self.id) + " is already in basket " + str(self.targetBasket.id))
else:
Logger.trace("info", "moving flower " + str(self.id) + " to basket " + str(self.targetBasket.id))
self.targetPos = [self.targetBasket.pos[0]+(0.4*random.random()-0.2), self.targetBasket.pos[1]+self.stemLength-self.targetBasket.size/2, self.targetBasket.pos[2]-0.5]
else:
Logger.warning("Cannot move flower " + str(self.id) + " to basket, no basket found in scene")
def attachToJoint(self, jpos, jori, avatarTCB):
self.avatarTCB = avatarTCB
self.objectCollisionTest = False
rotz_r = math.pi - jori[2]
if jori[0] < 0:
self.rotate[2] = math.degrees(rotz_r)
self.pos = [jpos[0]-self.stemLength/2*math.sin(rotz_r), jpos[1]+self.stemLength/2*math.cos(rotz_r), self.pos[2]]
else:
self.rotate[2] = math.degrees(rotz_r) + 180
self.pos = [jpos[0]+self.stemLength/2*math.sin(rotz_r), jpos[1]-self.stemLength/2*math.cos(rotz_r), self.pos[2]]
self.old_jpos = jpos
def detachFromJoint(self):
self.avatarTCB = None
self.objectCollisionTest = True
self.pos = [self.old_jpos[0], self.old_jpos[1] + self.stemLength/2, self.old_jpos[2]]
self.rotate = [0,0,0]
def click(self, agentName):
'''
pick
'''
self.app.canvas.agentPublisher.agentActionCompleted('User', 'flower_pick', [str(self.id)])
pass
def startDrag(self, newXY):
self.beingDragged = True
# Based on http://web.iiit.ac.in/~vkrishna/data/unproj.html
projection = glGetDoublev(GL_PROJECTION_MATRIX)
modelview = glGetDoublev(GL_MODELVIEW_MATRIX)
viewport = glGetIntegerv(GL_VIEWPORT)
windowZ = glReadPixels(newXY[0], viewport[3]-newXY[1], 1, 1, GL_DEPTH_COMPONENT, GL_FLOAT)
worldCoords = gluUnProject(newXY[0], viewport[3] - newXY[1], windowZ[0][0], modelview, projection, viewport)
self.worldDragOffset = [self.pos[0]-worldCoords[0], self.pos[1]-worldCoords[1], 0]
def stopDrag(self):
self.beingDragged = False
def drag(self, newXY):
if not self.interactive: return
# Based on http://web.iiit.ac.in/~vkrishna/data/unproj.html
projection = glGetDoublev(GL_PROJECTION_MATRIX)
modelview = glGetDoublev(GL_MODELVIEW_MATRIX)
viewport = glGetIntegerv(GL_VIEWPORT)
windowZ = glReadPixels(newXY[0], viewport[3]-newXY[1], 1, 1, GL_DEPTH_COMPONENT, GL_FLOAT)
worldCoords = gluUnProject(newXY[0], viewport[3] - newXY[1], windowZ[0][0], modelview, projection, viewport)
# started drag outside the flower head
if self.worldDragOffset[1] > self.size:
# drag
self.pos = [worldCoords[0]+self.worldDragOffset[0], worldCoords[1]+self.worldDragOffset[1], self.pos[2]]
self.locationChanged = True
if self.avatarTCB:
self.avatarTCB.detachObject(self)
# started drag in within the flowerhead
else:
# into Bubble
if self.magic and self.childCanTurnIntoBubble and worldCoords[1] > (self.pos[1] + self.size/2):
if self.avatarTCB:
self.avatarTCB.detachObject(self)
self.intoBubble(True)
# into Ball
elif self.magic and self.childCanTurnIntoBall and worldCoords[1] < (self.pos[1] - self.size/2):
if self.avatarTCB:
self.avatarTCB.detachObject(self)
self.intoBall(True)
# swing
else:
self.swing = max(min((worldCoords[0] - self.pos[0]) / self.size, 1), -1)
self.amplitude = math.fabs(self.swing)
self.swing = self.swing * math.pi / 2 # for max amplitude
def intoBubble(self, byUser=False):
if self.canTurnIntoBubble:
bubble = Bubbles.EchoesBubble(self.app, True, fadeIn=True, fadingFrames=10)
bubble.setStartPos(self.pos)
bubble.size = self.size
bubble.willBeReplaced = False
if self.pot:
self.pot.flower = None
if self.basket:
self.basket.removeFlower(self)
self.remove()
if byUser:
self.app.canvas.agentPublisher.agentActionCompleted('User', 'flower_bubble', [str(self.id), str(bubble.id)])
self.app.canvas.rlPublisher.objectPropertyChanged(str(self.id), "flower_bubble", str(bubble.id))
def intoBall(self, byUser=False):
if self.canTurnIntoBall:
ball = PlayObjects.Ball(self.app, True, fadeIn=True, fadingFrames=10)
ball.pos = self.pos
ball.size = self.size
ball.colour = self.colour
if self.pot:
self.pot.flower = None
if self.basket:
self.basket.removeFlower(self)
self.remove()
if byUser:
self.app.canvas.agentPublisher.agentActionCompleted('User', 'flower_ball', [str(self.id), str(ball.id)])
self.app.canvas.rlPublisher.objectPropertyChanged(str(self.id), "flower_ball", str(ball.id))
def remove(self, fadeOut = False, fadingFrames = 100):
if self.avatarTCB:
self.detachFromJoint()
super(EchoesFlower, self).remove(fadeOut, fadingFrames)
class Pot(EchoesObject):
'''
classdocs
'''
def __init__(self, app, autoAdd=True, props={"type": "Pot"}, fadeIn = False, fadingFrames = 100, callback=None):
'''
Constructor
'''
super(Pot, self).__init__(app, autoAdd, props, fadeIn, fadingFrames, callback)
self.size = 0.3 + random.random()*0.2
self.pos = [-1,-2.5,0.1]
self.publishRegion = True
self.underCloud = False
self.canBeDraged = True
self.publishGrowStarted = False
self.defaultHeight = self.app.canvas.getRegionCoords("ground")[0][1]
self.fallToDefaultHeight = True
self.falling = False
# basic shape in two strips [x,y, colour shade value]
self.shape = [[[-1, 0.5, 1], [1, 0.5, 0.8], [1, 0.7, 0.8], [-1, 0.7, 1]],
[[-0.8, 0.5, 1], [-0.6, -0.7, 0.6], [0.6, -0.7, 0.6], [0.8, 0.5, 1]]]
# a random neutral shade
self.neutralshade = ["neutral-1", "neutral-2", "neutral-3", "neutral-4", "neutral-5"][random.randint(0,4)]
# the flower growing out of the pot
self.flower = None
self.stack = None
if "colour" in self.props:
self.colour = self.props["colour"]
self.neutralshade = self.props["colour"]
else:
self.colour = self.neutralshade
self.avatarTCB = None
def __setattr__(self, item, value):
if item == "colour":
if value == "dark":
self.basecolour = [0.770, 0.371, 0.082, 1.0]
self.linecolour = [0.3,0.1,0.1,1]
elif value == "neutral-1":
self.basecolour = [1.000, 0.609, 0.277, 1.000]
self.linecolour = [0.3,0.1,0.1,1]
elif value == "neutral-2":
self.basecolour = [0.955, 0.878, 0.471, 1.000]
self.linecolour = [0.3,0.1,0.1,1]
elif value == "neutral-3":
self.basecolour = [1.000, 0.796, 0.634, 1.000]
self.linecolour = [0.3,0.1,0.1,1]
elif value == "neutral-4":
self.basecolour = [0.872, 0.655, 0.133, 1.000]
self.linecolour = [0.3,0.1,0.1,1]
else: # neutral is the default
self.basecolour = [0.970, 0.571, 0.282, 1.0]
self.linecolour = [1,0,0,1]
elif item == "flower" and isinstance(value, EchoesFlower):
if hasattr(self, "hasOnTop") and self.hasOnTop:
Logger.warning("Pot: can't have flower in pot that has other pots on top of it")
return
self.app.canvas.rlPublisher.objectPropertyChanged(str(self.id), "pot_flower", str(value.id))
value.pos = [self.pos[0], self.pos[1]+value.stemLength+self.size/2, self.pos[2]-0.01]
value.inCollision = self.id
value.pot = self
Logger.trace("info", "Flower put into pot" + str(self.id) )
if value.beingDragged:
self.app.canvas.agentPublisher.agentActionCompleted('User', 'flower_placeInPot', [str(self.id), str(value.id)])
elif item == "flower" and value == None:
self.app.canvas.rlPublisher.objectPropertyChanged(str(self.id), "pot_flower", "None")
elif item == "pos" and hasattr(self, "pos"):
if hasattr(self, "stack") and self.stack and ((hasattr(self, "beingDragged") and self.beingDragged) or (hasattr(self, "avatarTCB") and self.avatarTCB)):
# If the user did it, notify the rest of the system
split = self.stack.split(self)
if split and hasattr(self, "beingDragged") and self.beingDragged:
self.app.canvas.agentPublisher.agentActionCompleted('User', 'unstack_pot', [str(self.id)])
if self.stack: # the stack might be removed if its the only pot left
for pot in self.stack.pots:
if pot != self:
dx = self.pos[0]-pot.pos[0]
dy = self.pos[1]-pot.pos[1]
pot.pos = [value[0]-dx, value[1]-dy, pot.pos[2]]
if hasattr(self, "flower") and self.flower:
self.flower.pos = [value[0], value[1]+self.flower.stemLength+self.size/2, value[2]-0.01]
if hasattr(self, "underCloud"):
for oid, o in self.app.canvas.objects.items():
if isinstance(o, objects.Environment.Cloud):
if o.isUnder(self):
if not self.underCloud: self.underCloud = True
else:
if self.underCloud: self.underCloud = False
elif item == "stack":
if value == None:
self.hasOnTop = None
self.isOnTopOf = None
self.colour = self.neutralshade
self.app.canvas.rlPublisher.objectPropertyChanged(str(self.id), "pot_stack", "False")
else:
self.colour = "dark"
self.app.canvas.rlPublisher.objectPropertyChanged(str(self.id), "pot_stack", "True")
elif item == "underCloud":
self.app.canvas.rlPublisher.objectPropertyChanged(str(self.id), "under_cloud", str(value))
elif item == "hasOnTop":
self.app.canvas.rlPublisher.objectPropertyChanged(str(self.id), "has_on_top", str(value))
elif item == "isOnTopOf":
self.app.canvas.rlPublisher.objectPropertyChanged(str(self.id), "is_on_top_of", str(value))
object.__setattr__(self, item, value)
def renderObj(self):
'''
overwriting the render method to draw the pot
'''
if not hasattr(self, "stack"): return # in case rendering is called before the object is fully built
if self.stack:
if self.stack.pots[len(self.stack.pots)-1] == self:
if self.hasOnTop:self.hasOnTop = None
else:
if not self.hasOnTop:
i = self.stack.pots.index(self)
self.hasOnTop = self.stack.pots[i+1].id
if self.stack.pots[0] == self:
if self.isOnTopOf:self.isOnTopOf = None
else:
if not self.isOnTopOf:
i = self.stack.pots.index(self)
self.isOnTopOf = self.stack.pots[i-1].id
if self.fallToDefaultHeight and not self.beingDragged and not self.avatarTCB:
hdiff = self.pos[1] - self.defaultHeight
if abs(hdiff) > 0.05:
if not self.stack: # no stack
self.pos = [self.pos[0], self.pos[1]-hdiff/10, self.pos[2]]
self.falling = True
elif self==self.stack.pots[0]: # lowest of stack
for pot in self.stack.pots:
pot.pos = [pot.pos[0], pot.pos[1]-hdiff/10, pot.pos[2]]
pot.falling = True
else:
self.falling = False
else:
self.falling = False
glPushMatrix()
glTranslate(self.pos[0], self.pos[1], self.pos[2])
glScalef(self.size, self.size, self.size)
c = self.basecolour
for rectangle in self.shape:
glBegin( GL_QUADS )
for v in rectangle:
glColor4f(c[0]*v[2], c[1]*v[2], c[2]*v[2], c[3]*self.transperancy)
glVertex(v[0],v[1], self.pos[2])
glEnd()
glLineWidth(3.0)
glBegin( GL_LINE_STRIP )
glColor4f(self.linecolour[0], self.linecolour[1], self.linecolour[2], self.linecolour[3]*self.transperancy)
for v in rectangle:
glVertex(v[0],v[1], self.pos[2])
glEnd()
glLineWidth(1.0)
glPopMatrix()
def growFlower(self):
if not self.hasOnTop:
if not self.flower:
self.flower = EchoesFlower(self.app, True, fadeIn=True)
self.flower.size = 0.1
self.flower.pos = [self.pos[0], self.pos[1]+self.flower.stemLength+self.size/2, self.pos[2]-0.01]
else:
self.flower.grow()
def click(self, agentName):
'''
pick
'''
pass
def startDrag(self, newXY):
if self.avatarTCB:
self.avatarTCB.detachObject(self)
self.beingDragged = True
# Based on http://web.iiit.ac.in/~vkrishna/data/unproj.html
projection = glGetDoublev(GL_PROJECTION_MATRIX)
modelview = glGetDoublev(GL_MODELVIEW_MATRIX)
viewport = glGetIntegerv(GL_VIEWPORT)
windowZ = glReadPixels(newXY[0], viewport[3]-newXY[1], 1, 1, GL_DEPTH_COMPONENT, GL_FLOAT)
worldCoords = gluUnProject(newXY[0], viewport[3] - newXY[1], windowZ[0][0], modelview, projection, viewport)
self.worldDragOffset = [self.pos[0]-worldCoords[0], self.pos[1]-worldCoords[1], 0]
def stopDrag(self):
self.beingDragged = False
if self.publishGrowStarted:
self.publishGrowStarted = False
self.app.canvas.agentPublisher.agentActionCompleted('User', 'flower_grow', [str(self.id), str(self.flower.id), str(self.growPond)])
def drag(self, newXY):
if self.interactive and self.canBeDraged:
# Based on http://web.iiit.ac.in/~vkrishna/data/unproj.html
projection = glGetDoublev(GL_PROJECTION_MATRIX)
modelview = glGetDoublev(GL_MODELVIEW_MATRIX)
viewport = glGetIntegerv(GL_VIEWPORT)
windowZ = glReadPixels(newXY[0], viewport[3]-newXY[1], 1, 1, GL_DEPTH_COMPONENT, GL_FLOAT)
worldCoords = gluUnProject(newXY[0], viewport[3] - newXY[1], windowZ[0][0], modelview, projection, viewport)
if self.beingDragged:
if self.fallToDefaultHeight:
self.pos = [worldCoords[0]+self.worldDragOffset[0], max(self.defaultHeight, worldCoords[1]+self.worldDragOffset[1]), self.pos[2]]
else:
self.pos = [worldCoords[0]+self.worldDragOffset[0], worldCoords[1]+self.worldDragOffset[1], self.pos[2]]
self.locationChanged = True
def attachToJoint(self, jpos, jori, avatarTCB):
self.avatarTCB = avatarTCB
self.objectCollisionTest = False
if self.fallToDefaultHeight:
y = max(jpos[1], self.defaultHeight)
else:
y = jpos[1]
self.pos = [jpos[0], y, self.pos[2]]
def detachFromJoint(self):
self.avatarTCB = None
self.objectCollisionTest = True
def stackUp(self, pot):
if not self.stack and not pot.stack:
self.stack = pot.stack = Stack(self.app)
self.stack.pots = [self, pot]
elif self.stack and pot.stack:
newstack = Stack(self.app)
newstack.pots = self.stack.pots + pot.stack.pots
for pot in newstack.pots:
pot.stack = newstack
elif self.stack or pot.stack:
if pot.stack:
pot.stack.pots = [self] + pot.stack.pots
self.stack = pot.stack
else:
self.stack.pots = self.stack.pots + [pot]
pot.stack = self.stack
self.stack.checkAlignment()
def remove(self, fadeOut = False, fadingFrames = 100):
if not fadeOut and self.stack and self in self.stack.pots:
self.objectCollisionTest = False
del self.stack.pots[self.stack.pots.index(self)]
self.stack = None
super(Pot, self).remove(fadeOut, fadingFrames)
class Stack():
'''
classdocs
'''
def __init__(self, app):
'''
Constructor
'''
self.app = app
self.pots = []
self.objectCollisionTest = False
self.agentCollisionTest = False
def top(self):
l = len(self.pots)
if l > 0:
return self.pots[l-1]
else:
return None
def bottom(self):
if len(self.pots) > 0:
return self.pots[0]
else:
return None
def split(self, pot):
# if pot is the lowest anyway
if self.pots[0] == pot: return False
#if there are only two pots in the stack
if len(self.pots) == 2:
self.pots[0].stack = self.pots[1].stack = None
self.pots = []
return True
# if pot splits stack with one pot left
if self.pots[1] == pot:
self.pots[0].stack = None
del self.pots[0]
return True
if self.pots[len(self.pots)-1] == pot:
pot.stack = None
del self.pots[len(self.pots)-1]
return True
# split stack into two stacks
newStack = Stack(self.app)
while self.pots[0] != pot:
newStack.pots.append(self.pots[0])
self.pots[0].stack = newStack
del self.pots[0]
return True
def checkAlignment(self):
prevPot = None
for pot in self.pots:
if prevPot:
x, y, z = pot.pos
if abs(x - prevPot.pos[0]) > prevPot.size / 1.5:
x = prevPot.pos[0] + random.uniform(-0.1,0.1)
if isinstance(pot, objects.Plants.Pot) and isinstance(prevPot, objects.Plants.Pot):
y = prevPot.pos[1] + prevPot.size + pot.size * 0.37
else: # the upper pot is really a basket
y = prevPot.pos[1] + prevPot.size + pot.size * 0.9
z = prevPot.pos[2]-0.01
pot.pos = [x,y,z]
prevPot = pot
def intoTree(self):
Logger.trace("info", "replacing stack with tree")
tree = LifeTree(self.app, True, fadeIn=True)
size = 0
for pot in self.pots:
size += pot.size
size += 2.5
tree.size = size
lowest = self.pots[0]
tree.pos = [lowest.pos[0], lowest.pos[1] + size/2, lowest.pos[2]]
class LifeTree(EchoesObject):
'''
classdocs
'''
def __init__(self, app, autoAdd=True, props={"type": "LifeTree"}, fadeIn = False, fadingFrames = 100, callback=None):
'''
Constructor
'''
super(LifeTree, self).__init__(app, autoAdd, props, fadeIn, fadingFrames, callback)
self.size = 3.5
self.pos = (-2.5,-0.5,-1)
self.texture = self.setImage("visual/images/LifeTree.png")
self.shape = [(-0.5, -0.5), (0.5, -0.5), (0.5, 0.5), (-0.5, 0.5)]
self.texshape = [(0, 0), (1, 0), (1, 1), (0, 1)]
self.leaves = [None, None, None, None]
def __setattr__(self, item, value):
if item == "pos":
pass
object.__setattr__(self, item, value)
def getFreeBranch(self):
branch = 0
for leaf in self.leaves:
if not leaf:
return branch
branch += 1
return -1
def renderObj(self):
'''
overwriting the render method to draw the flower
'''
glPushMatrix()
glEnable( GL_ALPHA_TEST )
glAlphaFunc( GL_GREATER, 0.1 )
glEnable( GL_TEXTURE_2D )
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glBindTexture(GL_TEXTURE_2D, self.texture)
glTranslate(self.pos[0], self.pos[1], self.pos[2])
glScalef(self.size, self.size, self.size)
glColor4f(1, 1, 1, self.transperancy)
glBegin(GL_QUADS)
ti = 0
for v in self.shape:
glTexCoord2d(self.texshape[ti][0], self.texshape[ti][1])
glVertex3f(v[0], v[1], self.pos[2])
ti += 1
glEnd()
glDisable( GL_TEXTURE_2D )
glDisable( GL_ALPHA_TEST )
glPopMatrix()
class MagicLeaves(EchoesObject, Motions.BezierMotion):
'''
classdocs
'''
def __init__(self, app, autoAdd=True, props={"type": "MagicLeaves"}, fadeIn = False, fadingFrames = 100, callback=None):
'''
Constructor
'''
super(MagicLeaves, self).__init__(app, autoAdd, props, fadeIn, fadingFrames, callback)
super(MagicLeaves, self).initBezierVars()
self.size = 0.5
self.pos = [0,0,0]
self.orientation = 0
self.speed = 0.04
self.flying = True
self.flyingXY = True
self.newctrlpoints()
self.drawCtrlPoints = False
self.removeAtTargetPos = False
self.flapamplitude = 45 # max opening angle when flapping in degrees
self.flap = 0
self.energy = 1.0
self.setImage()
self.shape = [(0, 0), (1, 0), (1, 1), (0, 1)]
self.texshape = [(0, 0), (1, 0), (1, 1), (0, 1)]
self.tree = None
self.putOnTree()
def __setattr__(self, item, value):
if item == "energy":
self.flapamplitude = 45 * value
if value > 0.8:
self.boundingBox = self.app.canvas.getRegionCoords("v-top")
elif value > 0.6:
self.boundingBox = self.app.canvas.getRegionCoords("v-middle")
elif value > 0.3:
self.boundingBox = self.app.canvas.getRegionCoords("v-bottom")
else:
self.boundingBox = self.app.canvas.getRegionCoords("ground")
self.speed = 0.01 * value
if item == "flying":
self.app.canvas.rlPublisher.objectPropertyChanged(str(self.id), "leaves_flying", str(value))
object.__setattr__(self, item, value)
def setImage(self):
images = ['Leaf1.png', 'Leaf2.png']
self.textures = glGenTextures(len(images))
i = 0
for image in images:
im = PIL.Image.open("visual/images/" + image)
try:
ix, iy, idata = im.size[0], im.size[1], im.tostring("raw", "RGBA", 0, -1)
except SystemError:
ix, iy, idata = im.size[0], im.size[1], im.tostring("raw", "RGBX", 0, -1)
glPixelStorei(GL_UNPACK_ALIGNMENT,1)
glBindTexture(GL_TEXTURE_2D, self.textures[i])
glTexImage2D(GL_TEXTURE_2D, 0, 4, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, idata)
i += 1
def renderObj(self):
'''
overwriting the render method to draw the flower
'''
glPushMatrix()
glEnable( GL_ALPHA_TEST )
glAlphaFunc( GL_GREATER, 0.1 )
if self.energy > 0:
self.energy -= 0.0005
else:
self.energy = 0
if self.flying and self.interactive:
oldpos = self.pos
self.pos = self.nextBezierPos(self.flyingXY)
if self.pos[0]!=oldpos[0] or self.pos[1]!=oldpos[1] or self.pos[2]!=oldpos[2]:
self.orientation = math.atan2(self.pos[1]-oldpos[1], self.pos[0]-oldpos[0])
if self.removeAtTargetPos and self.bezierIndex > 0.95:
self.remove()
self.flap = (self.flap + 0.4) % (2*math.pi)
glTranslate(self.pos[0], self.pos[1], self.pos[2])
glScalef(self.size, self.size, self.size)
glRotate(math.degrees(self.orientation), 0,0,1)
angle = self.flapamplitude * (1+math.sin(self.flap))
if self.flying or self.beingDragged:
glColor4f(0.584, 0.060, 0.025, self.transperancy)
glBegin(GL_QUADS)
glVertex3f(0.5*self.size, 0.05*self.size, self.pos[2])
glVertex3f(0.5*self.size, -0.05*self.size, self.pos[2])
glVertex3f(-0.5*self.size, -0.05*self.size, self.pos[2])
glVertex3f(-0.5*self.size, 0.05*self.size, self.pos[2])
glEnd()
i = 0
olda = 0
for texture in self.textures:
glEnable( GL_TEXTURE_2D )
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glBindTexture(GL_TEXTURE_2D, texture)
a = math.pow(-1, i) * angle - olda
olda = a
glRotate(a, 1,0,0.25)
glColor4f(1, 1, 1, self.transperancy)
glBegin(GL_QUADS)
ti = 0
for v in self.shape:
glTexCoord2d(self.texshape[ti][0], self.texshape[ti][1])
glVertex3f(v[0], v[1], 0)
ti += 1
glEnd()
glDisable( GL_TEXTURE_2D )
i += 1
glDisable( GL_ALPHA_TEST )
glPopMatrix()
def startDrag(self, pos=(0,0)):
self.app.canvas.agentPublisher.agentActionCompleted('User', 'touch_leaves', [str(self.id)])
self.beingDragged = True
self.energy = 0
self.flying = False
if self.tree:
branch = 0
for leaf in self.tree.leaves:
if leaf == self: self.tree.leaves[branch] = None
branch += 1
def stopDrag(self):
self.beingDragged = False
h = float(self.app.canvas.orthoCoordWidth / self.app.canvas.aspectRatio)
self.energy = (self.pos[1] + h/2)/h
self.newctrlpoints()
self.flying = True
def drag(self, newXY):
if self.interactive:
# Based on http://web.iiit.ac.in/~vkrishna/data/unproj.html
projection = glGetDoublev(GL_PROJECTION_MATRIX)
modelview = glGetDoublev(GL_MODELVIEW_MATRIX)
viewport = glGetIntegerv(GL_VIEWPORT)
windowZ = glReadPixels(newXY[0], viewport[3]-newXY[1], 1, 1, GL_DEPTH_COMPONENT, GL_FLOAT)
worldCoords = gluUnProject(newXY[0], viewport[3] - newXY[1], windowZ[0][0], modelview, projection, viewport)
self.pos = (worldCoords[0], worldCoords[1], self.pos[2])
def touchLeaves(self, agent_id=None):
# if agent_id:
# self.app.canvas.agentPublisher.agentActionCompleted('Agent', 'touch_leaves', [str(self.id), str(agent_id)])
if self.tree:
branch = 0
for leaf in self.tree.leaves:
if leaf == self: self.tree.leaves[branch] = None
branch += 1
h = float(self.app.canvas.orthoCoordWidth / self.app.canvas.aspectRatio)
self.energy = (self.pos[1] + h/2)/h
self.newctrlpoints()
self.flying = True
def putOnTree(self, id=None, branch=-1):
if not id:
for oid, se in self.app.canvas.objects.items():
if isinstance(se, LifeTree):
id = oid
break
if not id:
Logger.warning("No tree found to put magic leaves on")
return
tree = self.app.canvas.objects[id]
if branch==-1:
branch = tree.getFreeBranch()
if branch==-1:
Logger.warning("No free tree branch found to put magic leaves on")
return
self.energy = 0.0
self.flying = False
tree.leaves[branch] = self
self.tree = tree
if branch == 0:
dx = -0.47
dy = 0.35
self.orientation = 1.5
elif branch == 1:
dx = -0.15
dy = 0.49
self.orientation = 0.2
elif branch == 2:
dx = 0.19
dy = 0.47
self.orientation = -0.2
else:
dx = 0.47
dy = 0.26
self.orientation = -0.5
self.pos = (tree.pos[0]+tree.size*dx, tree.pos[1]+tree.size*dy, tree.pos[2])
def remove(self, fadeOut=False, fadingFrames=100):
super(MagicLeaves, self).remove(fadeOut, fadingFrames)
|
[
"gr8sunny@gmail.com"
] |
gr8sunny@gmail.com
|
0fa800818ea9c55747f6ea1cac54b6d99f042b1a
|
457af6ce768a725bfbb4037691355bb3872b755c
|
/ResolutionStudy/mkARManalysis.py
|
839dce4ab81b2083249a6b1623621f3e680ee52c
|
[] |
no_license
|
reginacaputo/AstroPix
|
fe5340623944810246d812aafddc04ea935b901b
|
5cd2d08c308f2cdc972529e8c78436140c09b5df
|
refs/heads/master
| 2021-06-06T18:58:29.632297
| 2021-05-14T02:20:19
| 2021-05-14T02:20:19
| 130,399,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,133
|
py
|
#*******************************************************************#
#-------------------------------------------------------------------#
#---------------------------- AstroPix -----------------------------#
#-------------------------------------------------------------------#
#------------- Michela Negro - GSFC/UMBC - 2019/2020 ---------------#
#-------------------------------------------------------------------#
#*******************************************************************#
import os
import sys
import argparse
import numpy as np
from importlib.machinery import SourceFileLoader
import scipy.integrate as integrate
import scipy.signal as signal
import matplotlib.pyplot as plt
__description__ = 'Creates the AstroPix geometry configurations'
formatter = argparse.ArgumentDefaultsHelpFormatter
PARSER = argparse.ArgumentParser(description=__description__, formatter_class=formatter)
PARSER.add_argument('-lf', '--logfiles', type=str, required=True, nargs='*',
help='the log files saved from mkARM.py run')
PARSER.add_argument('-lab', '--labels', type=str, required=True, nargs='*',
help='the lables for the plot legend')
PARSER.add_argument('-t', '--title', type=str, required=True,
help='title of the plot')
PARSER.add_argument('-ofl', '--outflabel', type=str, required=True,
help='label for fig file name')
PARSER.add_argument('--show', type=bool, required=False, default=True,
help='if true the images will be shown')
def get_var_from_file(filename):
f = open(filename)
global data
data = SourceFileLoader('data', filename).load_module()
f.close()
def log_file_parsing(log_file):
passive, thickness, energy = 0, 0, 0
pixsize = []
ARM_fwhm = []
ARM_rms = []
ARM_centroid = []
ARM_integral = []
analyzed_events = []
f = open(log_file)
for line in f:
if 'Analyzed Compton and pair events' in line:
analyzed_events.append(float(line.split(' ')[-1]))
if 'FWHM' in line:
ARM_fwhm.append(float(line.split(' ')[-2]))
if 'Maximum of fit (x position)' in line:
ARM_centroid.append(float(line.split(' ')[17]))
std = ARM_fwhm[-1]*0.6
m = ARM_centroid[-1]
int, interr = integrate.quad(lambda x: 1/(std*(2*np.pi)**0.5)*np.exp((x-m)**2/(-2*std**2)),
-ARM_fwhm[-1]/2, ARM_fwhm[-1]/2)
ARM_integral.append(int)
if 'PASSIVE' in line:
passive = float(line.split(' ')[-1].replace('%', ''))
if 'ENERGY' in line:
energy = float(line.split(' ')[-2])
if 'THICKNESS' in line:
thickness = float(line.split(' ')[-2])
if 'VOXEL SIZE' in line:
pixsize.append(float(line.split(' ')[-2]))
label_params = (passive, thickness, energy)
value_labels = (pixsize, ARM_fwhm, ARM_integral, ARM_centroid, analyzed_events)
return label_params, value_labels
def run_mkARManalysis(**kwargs):
    """Create the ARM centroid and FWHM summary plots from mkARM log files.

    kwargs are the parsed command-line options: ``logfiles`` (mkARM log
    files), ``labels`` (one legend label per file), ``title``, ``outflabel``
    (suffix for the saved figure names) and ``show``.  Figures are written to
    the figs/ directory as both .png and .pdf.
    """
    # Fixed palette: one color per input log file (supports up to 9 files).
    c = ['firebrick', 'peru', 'teal', 'darkolivegreen', 'rebeccapurple', 'orange', '0.4',
         'saddlebrown', 'lightcoral' ]
    print('---> Centroid plot')
    plt.figure(figsize=(6,8))
    for i, log_f in enumerate(kwargs['logfiles']):
        assert(log_f.endswith('.txt'))
        lab = kwargs['labels'][i]
        # Multiplicative y-offset (1.0, 1.1, 1.2, ...) so curves from
        # different files do not sit exactly on top of each other.
        offset = i*0.1+1
        lparams, vlists = log_file_parsing(log_f)
        print('---> Simulations parameters: (passive, thickness, energy)=', lparams)
        # x: ARM centroid with xerr = FWHM/2; y: (offset) pixel size.
        plt.errorbar(vlists[3], np.array(vlists[0])*offset, xerr=np.array(vlists[1])/2, yerr=None,
                     fmt='.', color=c[i], mew=0, alpha=0.3, linewidth=3, label=lab)
        plt.plot(vlists[3], np.array(vlists[0])*offset, '.', color='0.3')
    plt.title (kwargs['title'], size=16)
    # Vertical guide line at centroid = 0 spanning the visible y-range.
    plt.plot([0, 0],[0.001,50], '--', color='silver', linewidth=0.5)
    plt.xlabel('ARM Centroid [deg]', size=15)
    plt.ylabel('Pixel Size [mm]', size=15)
    plt.xlim(-8, 8)
    plt.ylim(1e-3, 20)
    plt.yscale('log')
    plt.legend(loc=3, fontsize=15)
    plt.savefig('figs/ARMcntr_%s.png'%kwargs['outflabel'], format='png')
    plt.savefig('figs/ARMcntr_%s.pdf'%kwargs['outflabel'], format='pdf')
    print('\n')
    print('---> FWHM plot')
    plt.figure(figsize=(6,5))
    # NOTE(review): each log file is parsed again here (and once more below);
    # the results could be cached from the first loop.
    for i, log_f in enumerate(kwargs['logfiles']):
        assert(log_f.endswith('.txt'))
        lab = kwargs['labels'][i]
        offset = i*0.1+1  # computed but unused in this plot
        lparams, vlists = log_file_parsing(log_f)
        print('---> Simulations parameters: (passive, thickness, energy)=', lparams)
        plt.plot(vlists[0], vlists[1], 'o--', label=lab, color=c[i])
    plt.title (kwargs['title'], size=16)
    plt.ylabel('ARM FWHM [deg]', size=15)
    plt.xlabel('Pixel Size [mm]', size=15)
    plt.ylim(0, 8)
    plt.xscale('log')
    plt.legend(loc=2, fontsize=15)
    plt.savefig('figs/ARMfwhm_%s.png'%kwargs['outflabel'], format='png')
    plt.savefig('figs/ARMfwhm_%s.pdf'%kwargs['outflabel'], format='pdf')
    # Print the analyzed-event counts per input file.
    for i, log_f in enumerate(kwargs['logfiles']):
        lparams, vlists = log_file_parsing(log_f)
        print('Analyzed events (p %i%%)): '%lparams[0], vlists[-1])
    if kwargs['show']:
        plt.show()
# Script entry point: parse the command line and run the ARM analysis.
if __name__ == '__main__':
    args = PARSER.parse_args()
    print("--------------------")
    print("---- Start Run ----")
    print("--------------------")
    print('\n')
    # Forward every parsed option as a keyword argument.
    run_mkARManalysis(**args.__dict__)
    print("--------------------")
    print("----- End Run -----")
    print("--------------------")
|
[
"michela.negro@to.infn.it"
] |
michela.negro@to.infn.it
|
956d560754d141f24482c69fadb44676272210d6
|
4c4a4b6e94e45ad9e3c4405b7cf056280d161ac6
|
/Exam01/Ex1_2.py
|
889574c09ba7e06f93c286e391eb5a924a6856fc
|
[] |
no_license
|
a3539a/Python
|
63fc4193deeee6cf4251c990c779d96aa6104ce1
|
a07ad2a7781173be51670026acc2cef0784565f8
|
refs/heads/master
| 2023-04-12T02:39:38.681024
| 2021-05-17T00:33:20
| 2021-05-17T00:33:20
| 361,670,482
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
"""
날찌 : 2021/04/30
이름 : 김승용
내용 : 1 ~ 10 까지의 정수에서 2의 배수와 3의 배수 정수의 합을 구하시오.
"""
sum = 0
for k in range(1, 11):
if k % 2 == 0 or k % 3 == 0:
sum += k
print('2와 3 배수의 정수의 합 : ', sum)
|
[
"a3539a@gmail.com"
] |
a3539a@gmail.com
|
9ec526e4e9ddd510a050e0d0db9d665bfb810d47
|
8882d876fb82b687756453a7ccdc985385d2ac3b
|
/utils/splash.py
|
f1d43e096b4cdebaf53838e70b546aee6367559a
|
[] |
no_license
|
Jack-a-Lent/RepO
|
825a0855f43c64d22f6327cd592a5b4c445da157
|
14abd4ff1e415fd71b14b7722666351f21613163
|
refs/heads/master
| 2021-05-19T16:36:15.605520
| 2019-10-09T13:14:54
| 2019-10-09T13:14:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,934
|
py
|
from typing import Tuple
from PyQt5 import QtGui, QtWidgets
from PyQt5.QtCore import Qt
from config import Config
from game import Game
from rrrocket_parser import parse_replay
def get_splash_and_progress_bar() -> Tuple[QtWidgets.QSplashScreen, QtWidgets.QProgressBar]:
    """Build the start-up splash screen and the progress bar embedded in it.

    Returns the (splash, progress_bar) pair; the caller is responsible for
    showing the splash and driving the progress bar's value.
    """
    # Splash
    splash_pix = QtGui.QPixmap('calculated_logo_flair_resized.png')
    # Borderless, always-on-top window while the application loads.
    splash = QtWidgets.QSplashScreen(splash_pix, Qt.WindowStaysOnTopHint)
    splash.setWindowFlags(Qt.WindowStaysOnTopHint | Qt.FramelessWindowHint)
    # Disabled so stray clicks cannot interact with (or dismiss) the splash.
    splash.setEnabled(False)
    # Progress bar anchored 50 px above the bottom edge of the splash image.
    progress_bar = QtWidgets.QProgressBar(splash)
    progress_bar.setGeometry(0, splash_pix.height() - 50, splash_pix.width(), 25)
    progress_bar.setStyleSheet(
        """
        QProgressBar {
            border: 1px solid black;
            height: 500px;
            text-align: center;
            padding: 1px;
            border-radius: 7px;
            background: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1,
            stop: 0 #fff,
            stop: 0.4999 #eee,
            stop: 0.5 #ddd,
            stop: 1 #eee
            );
            width: 15px;
        }
        QProgressBar::chunk {
            background: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1,
            stop: 0 #7df,
            stop: 0.4999 #7bd,
            stop: 0.5 #49b,
            stop: 1 #48a
            );
            border-radius: 7px;
            border: 1px solid black;
        }
        """
    )
    # Force black percentage text regardless of the platform theme.
    progress_bar_palette = QtGui.QPalette()
    progress_bar_palette.setColor(QtGui.QPalette.Text, QtGui.QColor("black"))
    progress_bar.setPalette(progress_bar_palette)
    return splash, progress_bar
def parse_config_with_progress_bar(config: Config, progress_bar, app: QtWidgets.QApplication):
    """Parse every replay referenced by *config*, advancing *progress_bar*.

    Both the unstashed and the stashed replay folders are scanned.  For each
    replay a ``(Game, replay_path, is_stashed)`` tuple is produced;
    ``app.processEvents()`` is pumped after every file so the splash screen
    stays responsive.  Returns the tuples sorted by game date, newest first.
    """
    unstashed_replay_paths = list(config.replay_folder_path.glob("*.replay"))
    stashed_replay_paths = list(config.stash_folder_path.glob("*.replay"))
    unstashed_replay_paths_count = len(unstashed_replay_paths)
    stashed_replay_paths_count = len(stashed_replay_paths)
    print(f"Found {unstashed_replay_paths_count} unstashed replays, {stashed_replay_paths_count} stashed replays.")
    progress_bar.setMaximum(unstashed_replay_paths_count + stashed_replay_paths_count)
    parsed_count = 0
    game_datas = []
    # The original had two copy-pasted loops differing only in the stashed
    # flag; fold them into a single loop over (paths, flag) pairs.
    for replay_paths, is_stashed in ((unstashed_replay_paths, False),
                                     (stashed_replay_paths, True)):
        for replay_path in replay_paths:
            _game_data = Game(parse_replay(replay_path)['properties'])
            game_datas.append((_game_data, replay_path, is_stashed))
            parsed_count += 1
            progress_bar.setValue(parsed_count)
            app.processEvents()  # keep the UI responsive during parsing
    game_datas.sort(key=lambda _game: _game[0].date, reverse=True)
    return game_datas
|
[
"harry1996@gmail.com"
] |
harry1996@gmail.com
|
a524fc1d6afcf71fb644f4bd9e4558faa4960148
|
0725d2a93c9058113d0127501fa7bbea28b4f8b1
|
/venv/Lib/site-packages/tencentcloud/dbbrain/v20191016/models.py
|
ede9f9b0348ccc31711b60e96fa8c5720b14f03c
|
[] |
no_license
|
liugngg/liug-001
|
8c9d39fab49b1895c213814dfec4b1bff649671b
|
9ae093a3f7e042b29e756620311f0a57ad083f5c
|
refs/heads/master
| 2023-06-06T00:14:52.548147
| 2021-06-25T07:30:26
| 2021-06-25T07:30:26
| 380,157,121
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 119,602
|
py
|
# -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class AddUserContactRequest(AbstractModel):
    """Request payload for the AddUserContact API.

    """

    def __init__(self):
        """
        :param Name: Contact name: letters, digits and underscores, 2 to 60
        characters, must not start with "_" and must be unique.
        :type Name: str
        :param ContactInfo: E-mail address made of letters, digits and
        underscores; must not start with "_".
        :type ContactInfo: str
        :param Product: Service product type; fixed value "mysql".
        :type Product: str
        """
        self.Name = None
        self.ContactInfo = None
        self.Product = None

    def _deserialize(self, params):
        # Copy the known fields, then warn about any unrecognised keys.
        for attr in ("Name", "ContactInfo", "Product"):
            setattr(self, attr, params.get(attr))
        leftover = set(params.keys()) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class AddUserContactResponse(AbstractModel):
    """Response payload for the AddUserContact API.

    """

    def __init__(self):
        """
        :param Id: Id of the successfully added contact.
        :type Id: int
        :param RequestId: Unique request id, returned with every request;
        supply it when reporting an issue with this call.
        :type RequestId: str
        """
        self.Id = None
        self.RequestId = None

    def _deserialize(self, params):
        # Copy the known fields, then warn about any unrecognised keys.
        for attr in ("Id", "RequestId"):
            setattr(self, attr, params.get(attr))
        leftover = set(params.keys()) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class ContactItem(AbstractModel):
"""联系人contact描述。
"""
def __init__(self):
"""
:param Id: 联系人id。
:type Id: int
:param Name: 联系人姓名。
:type Name: str
:param Mail: 联系人绑定的邮箱。
:type Mail: str
"""
self.Id = None
self.Name = None
self.Mail = None
def _deserialize(self, params):
self.Id = params.get("Id")
self.Name = params.get("Name")
self.Mail = params.get("Mail")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateDBDiagReportTaskRequest(AbstractModel):
"""CreateDBDiagReportTask请求参数结构体
"""
def __init__(self):
"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param StartTime: 开始时间,如“2020-11-08T14:00:00+08:00”。
:type StartTime: str
:param EndTime: 结束时间,如“2020-11-09T14:00:00+08:00”。
:type EndTime: str
:param SendMailFlag: 是否发送邮件: 0 - 否,1 - 是。
:type SendMailFlag: int
:param ContactPerson: 接收邮件的联系人ID数组。
:type ContactPerson: list of int
:param ContactGroup: 接收邮件的联系组ID数组。
:type ContactGroup: list of int
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认值为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.StartTime = None
self.EndTime = None
self.SendMailFlag = None
self.ContactPerson = None
self.ContactGroup = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.SendMailFlag = params.get("SendMailFlag")
self.ContactPerson = params.get("ContactPerson")
self.ContactGroup = params.get("ContactGroup")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateDBDiagReportTaskResponse(AbstractModel):
"""CreateDBDiagReportTask返回参数结构体
"""
def __init__(self):
"""
:param AsyncRequestId: 异步任务的请求 ID,可使用此 ID 查询异步任务的执行结果。
注意:此字段可能返回 null,表示取不到有效值。
:type AsyncRequestId: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.AsyncRequestId = None
self.RequestId = None
def _deserialize(self, params):
self.AsyncRequestId = params.get("AsyncRequestId")
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateDBDiagReportUrlRequest(AbstractModel):
"""CreateDBDiagReportUrl请求参数结构体
"""
def __init__(self):
"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param AsyncRequestId: 健康报告相应的任务ID,可通过DescribeDBDiagReportTasks查询。
:type AsyncRequestId: int
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL;"cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.AsyncRequestId = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.AsyncRequestId = params.get("AsyncRequestId")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateDBDiagReportUrlResponse(AbstractModel):
"""CreateDBDiagReportUrl返回参数结构体
"""
def __init__(self):
"""
:param ReportUrl: 健康报告浏览地址。
:type ReportUrl: str
:param ExpireTime: 健康报告浏览地址到期时间戳(秒)。
:type ExpireTime: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.ReportUrl = None
self.ExpireTime = None
self.RequestId = None
def _deserialize(self, params):
self.ReportUrl = params.get("ReportUrl")
self.ExpireTime = params.get("ExpireTime")
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateMailProfileRequest(AbstractModel):
"""CreateMailProfile请求参数结构体
"""
def __init__(self):
"""
:param ProfileInfo: 邮件配置内容。
:type ProfileInfo: :class:`tencentcloud.dbbrain.v20191016.models.ProfileInfo`
:param ProfileLevel: 配置级别,支持值包括:"User" - 用户级别,"Instance" - 实例级别,其中数据库巡检邮件配置为用户级别,定期生成邮件配置为实例级别。
:type ProfileLevel: str
:param ProfileName: 配置名称,需要保持唯一性,数据库巡检邮件配置名称自拟;定期生成邮件配置命名格式:"scheduler_" + {instanceId},如"schduler_cdb-test"。
:type ProfileName: str
:param ProfileType: 配置类型,支持值包括:"dbScan_mail_configuration" - 数据库巡检邮件配置,"scheduler_mail_configuration" - 定期生成邮件配置。
:type ProfileType: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL。
:type Product: str
:param BindInstanceIds: 配置绑定的实例ID,当配置级别为"Instance"时需要传入且只能为一个实例;当配置级别为“User”时,此参数不填。
:type BindInstanceIds: list of str
"""
self.ProfileInfo = None
self.ProfileLevel = None
self.ProfileName = None
self.ProfileType = None
self.Product = None
self.BindInstanceIds = None
def _deserialize(self, params):
if params.get("ProfileInfo") is not None:
self.ProfileInfo = ProfileInfo()
self.ProfileInfo._deserialize(params.get("ProfileInfo"))
self.ProfileLevel = params.get("ProfileLevel")
self.ProfileName = params.get("ProfileName")
self.ProfileType = params.get("ProfileType")
self.Product = params.get("Product")
self.BindInstanceIds = params.get("BindInstanceIds")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateMailProfileResponse(AbstractModel):
"""CreateMailProfile返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateSchedulerMailProfileRequest(AbstractModel):
"""CreateSchedulerMailProfile请求参数结构体
"""
def __init__(self):
"""
:param WeekConfiguration: 取值范围1-7,分别代表周一至周日。
:type WeekConfiguration: list of int
:param ProfileInfo: 邮件配置内容。
:type ProfileInfo: :class:`tencentcloud.dbbrain.v20191016.models.ProfileInfo`
:param ProfileName: 配置名称,需要保持唯一性,定期生成邮件配置命名格式:"scheduler_" + {instanceId},如"schduler_cdb-test"。
:type ProfileName: str
:param BindInstanceId: 配置订阅的实例ID。
:type BindInstanceId: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
"""
self.WeekConfiguration = None
self.ProfileInfo = None
self.ProfileName = None
self.BindInstanceId = None
self.Product = None
def _deserialize(self, params):
self.WeekConfiguration = params.get("WeekConfiguration")
if params.get("ProfileInfo") is not None:
self.ProfileInfo = ProfileInfo()
self.ProfileInfo._deserialize(params.get("ProfileInfo"))
self.ProfileName = params.get("ProfileName")
self.BindInstanceId = params.get("BindInstanceId")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateSchedulerMailProfileResponse(AbstractModel):
"""CreateSchedulerMailProfile返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateSecurityAuditLogExportTaskRequest(AbstractModel):
"""CreateSecurityAuditLogExportTask请求参数结构体
"""
def __init__(self):
"""
:param SecAuditGroupId: 安全审计组Id。
:type SecAuditGroupId: str
:param StartTime: 导出日志开始时间,例如2020-12-28 00:00:00。
:type StartTime: str
:param EndTime: 导出日志结束时间,例如2020-12-28 01:00:00。
:type EndTime: str
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL。
:type Product: str
:param DangerLevels: 日志风险等级列表,支持值包括:0 无风险;1 低风险;2 中风险;3 高风险。
:type DangerLevels: list of int
"""
self.SecAuditGroupId = None
self.StartTime = None
self.EndTime = None
self.Product = None
self.DangerLevels = None
def _deserialize(self, params):
self.SecAuditGroupId = params.get("SecAuditGroupId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.Product = params.get("Product")
self.DangerLevels = params.get("DangerLevels")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateSecurityAuditLogExportTaskResponse(AbstractModel):
"""CreateSecurityAuditLogExportTask返回参数结构体
"""
def __init__(self):
"""
:param AsyncRequestId: 日志导出任务Id。
:type AsyncRequestId: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.AsyncRequestId = None
self.RequestId = None
def _deserialize(self, params):
self.AsyncRequestId = params.get("AsyncRequestId")
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DeleteSecurityAuditLogExportTasksRequest(AbstractModel):
"""DeleteSecurityAuditLogExportTasks请求参数结构体
"""
def __init__(self):
"""
:param SecAuditGroupId: 安全审计组Id。
:type SecAuditGroupId: str
:param AsyncRequestIds: 日志导出任务Id列表,接口会忽略不存在或已删除的任务Id。
:type AsyncRequestIds: list of int non-negative
:param Product: 服务产品类型,支持值: "mysql" - 云数据库 MySQL。
:type Product: str
"""
self.SecAuditGroupId = None
self.AsyncRequestIds = None
self.Product = None
def _deserialize(self, params):
self.SecAuditGroupId = params.get("SecAuditGroupId")
self.AsyncRequestIds = params.get("AsyncRequestIds")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DeleteSecurityAuditLogExportTasksResponse(AbstractModel):
"""DeleteSecurityAuditLogExportTasks返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DescribeAllUserContactRequest(AbstractModel):
"""DescribeAllUserContact请求参数结构体
"""
def __init__(self):
"""
:param Product: 服务产品类型,固定值:mysql。
:type Product: str
:param Names: 联系人名数组,支持模糊搜索。
:type Names: list of str
"""
self.Product = None
self.Names = None
def _deserialize(self, params):
self.Product = params.get("Product")
self.Names = params.get("Names")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DescribeAllUserContactResponse(AbstractModel):
"""DescribeAllUserContact返回参数结构体
"""
def __init__(self):
"""
:param TotalCount: 联系人的总数量。
:type TotalCount: int
:param Contacts: 联系人的信息。
注意:此字段可能返回 null,表示取不到有效值。
:type Contacts: list of ContactItem
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.Contacts = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("Contacts") is not None:
self.Contacts = []
for item in params.get("Contacts"):
obj = ContactItem()
obj._deserialize(item)
self.Contacts.append(obj)
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DescribeAllUserGroupRequest(AbstractModel):
"""DescribeAllUserGroup请求参数结构体
"""
def __init__(self):
"""
:param Product: 服务产品类型,固定值:mysql。
:type Product: str
:param Names: 联系组名称数组,支持模糊搜索。
:type Names: list of str
"""
self.Product = None
self.Names = None
def _deserialize(self, params):
self.Product = params.get("Product")
self.Names = params.get("Names")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DescribeAllUserGroupResponse(AbstractModel):
"""DescribeAllUserGroup返回参数结构体
"""
def __init__(self):
"""
:param TotalCount: 组总数。
:type TotalCount: int
:param Groups: 组信息。
注意:此字段可能返回 null,表示取不到有效值。
:type Groups: list of GroupItem
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.Groups = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("Groups") is not None:
self.Groups = []
for item in params.get("Groups"):
obj = GroupItem()
obj._deserialize(item)
self.Groups.append(obj)
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DescribeDBDiagEventRequest(AbstractModel):
"""DescribeDBDiagEvent请求参数结构体
"""
def __init__(self):
"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param EventId: 事件 ID 。通过“获取实例诊断历史DescribeDBDiagHistory”获取。
:type EventId: int
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.EventId = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.EventId = params.get("EventId")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DescribeDBDiagEventResponse(AbstractModel):
"""DescribeDBDiagEvent返回参数结构体
"""
def __init__(self):
"""
:param DiagItem: 诊断项。
:type DiagItem: str
:param DiagType: 诊断类型。
:type DiagType: str
:param EventId: 事件 ID 。
:type EventId: int
:param Explanation: 事件详情。
:type Explanation: str
:param Outline: 概要。
:type Outline: str
:param Problem: 诊断出的问题。
:type Problem: str
:param Severity: 严重程度。严重程度分为5级,按影响程度从高至低分别为:1:致命,2:严重,3:告警,4:提示,5:健康。
:type Severity: int
:param StartTime: 开始时间
:type StartTime: str
:param Suggestions: 建议。
:type Suggestions: str
:param Metric: 保留字段。
注意:此字段可能返回 null,表示取不到有效值。
:type Metric: str
:param EndTime: 结束时间。
:type EndTime: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.DiagItem = None
self.DiagType = None
self.EventId = None
self.Explanation = None
self.Outline = None
self.Problem = None
self.Severity = None
self.StartTime = None
self.Suggestions = None
self.Metric = None
self.EndTime = None
self.RequestId = None
def _deserialize(self, params):
self.DiagItem = params.get("DiagItem")
self.DiagType = params.get("DiagType")
self.EventId = params.get("EventId")
self.Explanation = params.get("Explanation")
self.Outline = params.get("Outline")
self.Problem = params.get("Problem")
self.Severity = params.get("Severity")
self.StartTime = params.get("StartTime")
self.Suggestions = params.get("Suggestions")
self.Metric = params.get("Metric")
self.EndTime = params.get("EndTime")
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DescribeDBDiagHistoryRequest(AbstractModel):
"""DescribeDBDiagHistory请求参数结构体
"""
def __init__(self):
"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param StartTime: 开始时间,如“2019-09-10 12:13:14”。
:type StartTime: str
:param EndTime: 结束时间,如“2019-09-11 12:13:14”,结束时间与开始时间的间隔最大可为2天。
:type EndTime: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.StartTime = None
self.EndTime = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DescribeDBDiagHistoryResponse(AbstractModel):
"""DescribeDBDiagHistory返回参数结构体
"""
def __init__(self):
"""
:param Events: 事件描述。
:type Events: list of DiagHistoryEventItem
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Events = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Events") is not None:
self.Events = []
for item in params.get("Events"):
obj = DiagHistoryEventItem()
obj._deserialize(item)
self.Events.append(obj)
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DescribeDBDiagReportTasksRequest(AbstractModel):
"""DescribeDBDiagReportTasks请求参数结构体
"""
def __init__(self):
"""
:param StartTime: 第一个任务的开始时间,用于范围查询,时间格式如:2019-09-10 12:13:14。
:type StartTime: str
:param EndTime: 最后一个任务的开始时间,用于范围查询,时间格式如:2019-09-10 12:13:14。
:type EndTime: str
:param InstanceIds: 实例ID数组,用于筛选指定实例的任务列表。
:type InstanceIds: list of str
:param Sources: 任务的触发来源,支持的取值包括:"DAILY_INSPECTION" - 实例巡检;"SCHEDULED" - 定时生成;"MANUAL" - 手动触发。
:type Sources: list of str
:param HealthLevels: 报告的健康等级,支持的取值包括:"HEALTH" - 健康;"SUB_HEALTH" - 亚健康;"RISK" - 危险;"HIGH_RISK" - 高危。
:type HealthLevels: str
:param TaskStatuses: 任务的状态,支持的取值包括:"created" - 新建;"chosen" - 待执行; "running" - 执行中;"failed" - 失败;"finished" - 已完成。
:type TaskStatuses: str
:param Offset: 偏移量,默认0。
:type Offset: int
:param Limit: 返回数量,默认20。
:type Limit: int
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL;"cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
"""
self.StartTime = None
self.EndTime = None
self.InstanceIds = None
self.Sources = None
self.HealthLevels = None
self.TaskStatuses = None
self.Offset = None
self.Limit = None
self.Product = None
def _deserialize(self, params):
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.InstanceIds = params.get("InstanceIds")
self.Sources = params.get("Sources")
self.HealthLevels = params.get("HealthLevels")
self.TaskStatuses = params.get("TaskStatuses")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DescribeDBDiagReportTasksResponse(AbstractModel):
"""DescribeDBDiagReportTasks返回参数结构体
"""
def __init__(self):
"""
:param TotalCount: 任务总数目。
:type TotalCount: int
:param Tasks: 任务列表。
:type Tasks: list of HealthReportTask
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.Tasks = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("Tasks") is not None:
self.Tasks = []
for item in params.get("Tasks"):
obj = HealthReportTask()
obj._deserialize(item)
self.Tasks.append(obj)
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DescribeDBSpaceStatusRequest(AbstractModel):
"""DescribeDBSpaceStatus请求参数结构体
"""
def __init__(self):
"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param RangeDays: 时间段天数,截止日期为当日,默认为7天。
:type RangeDays: int
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.RangeDays = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.RangeDays = params.get("RangeDays")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DescribeDBSpaceStatusResponse(AbstractModel):
"""DescribeDBSpaceStatus返回参数结构体
"""
def __init__(self):
"""
:param Growth: 磁盘增长量(MB)。
:type Growth: int
:param Remain: 磁盘剩余(MB)。
:type Remain: int
:param Total: 磁盘总量(MB)。
:type Total: int
:param AvailableDays: 预计可用天数。
:type AvailableDays: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Growth = None
self.Remain = None
self.Total = None
self.AvailableDays = None
self.RequestId = None
def _deserialize(self, params):
self.Growth = params.get("Growth")
self.Remain = params.get("Remain")
self.Total = params.get("Total")
self.AvailableDays = params.get("AvailableDays")
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DescribeDiagDBInstancesRequest(AbstractModel):
    """DescribeDiagDBInstances request structure."""

    def __init__(self):
        self.IsSupported = None    # Whether the instance supports DBbrain; always pass true. bool
        self.Product = None        # Product type: "mysql" or "cynosdb" (TDSQL-C for MySQL); default "mysql". str
        self.Offset = None         # Pagination offset. int
        self.Limit = None          # Pagination page size. int
        self.InstanceNames = None  # Filter by instance names. list of str
        self.InstanceIds = None    # Filter by instance IDs. list of str
        self.Regions = None        # Filter by regions. list of str

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        self.IsSupported = params.get("IsSupported")
        self.Product = params.get("Product")
        self.Offset = params.get("Offset")
        self.Limit = params.get("Limit")
        self.InstanceNames = params.get("InstanceNames")
        self.InstanceIds = params.get("InstanceIds")
        self.Regions = params.get("Regions")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeDiagDBInstancesResponse(AbstractModel):
    """DescribeDiagDBInstances response structure."""

    def __init__(self):
        self.TotalCount = None    # Total number of instances. int
        self.DbScanStatus = None  # All-instance inspection status: 0 enabled, 1 not enabled. int
        self.Items = None         # Instance details. list of InstanceInfo
        self.RequestId = None     # Unique request ID, returned with every request. str

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        self.TotalCount = params.get("TotalCount")
        self.DbScanStatus = params.get("DbScanStatus")
        if params.get("Items") is not None:
            self.Items = []
            for item in params.get("Items"):
                obj = InstanceInfo()
                obj._deserialize(item)
                self.Items.append(obj)
        self.RequestId = params.get("RequestId")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeHealthScoreRequest(AbstractModel):
    """DescribeHealthScore request structure."""

    def __init__(self):
        self.InstanceId = None  # ID of the instance whose health score is requested. str
        self.Time = None        # Time for which to fetch the health score. str
        self.Product = None     # Product type: "mysql" or "cynosdb"; default "mysql". str

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        self.InstanceId = params.get("InstanceId")
        self.Time = params.get("Time")
        self.Product = params.get("Product")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeHealthScoreResponse(AbstractModel):
    """DescribeHealthScore response structure."""

    def __init__(self):
        self.Data = None       # Health score and abnormal deduction items. HealthScoreInfo
        self.RequestId = None  # Unique request ID, returned with every request. str

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        if params.get("Data") is not None:
            self.Data = HealthScoreInfo()
            self.Data._deserialize(params.get("Data"))
        self.RequestId = params.get("RequestId")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeMailProfileRequest(AbstractModel):
    """DescribeMailProfile request structure."""

    def __init__(self):
        self.ProfileType = None  # Config type: "dbScan_mail_configuration" (inspection mail) or "scheduler_mail_configuration" (scheduled mail). str
        self.Product = None      # Product type: "mysql" or "cynosdb"; default "mysql". str
        self.Offset = None       # Pagination offset. int
        self.Limit = None        # Page size; maximum 50. int
        self.ProfileName = None  # Filter by profile name; scheduled profiles follow "scheduler_"+{instanceId}. str

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        self.ProfileType = params.get("ProfileType")
        self.Product = params.get("Product")
        self.Offset = params.get("Offset")
        self.Limit = params.get("Limit")
        self.ProfileName = params.get("ProfileName")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeMailProfileResponse(AbstractModel):
    """DescribeMailProfile response structure."""

    def __init__(self):
        self.ProfileList = None  # Mail configuration details; may be None. list of UserProfile
        self.TotalCount = None   # Total number of mail templates; may be None. int
        self.RequestId = None    # Unique request ID, returned with every request. str

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        if params.get("ProfileList") is not None:
            self.ProfileList = []
            for item in params.get("ProfileList"):
                obj = UserProfile()
                obj._deserialize(item)
                self.ProfileList.append(obj)
        self.TotalCount = params.get("TotalCount")
        self.RequestId = params.get("RequestId")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeSecurityAuditLogDownloadUrlsRequest(AbstractModel):
    """DescribeSecurityAuditLogDownloadUrls request structure."""

    def __init__(self):
        self.SecAuditGroupId = None  # Security audit group ID. str
        self.AsyncRequestId = None   # Async task ID. int
        self.Product = None          # Product type; supports "mysql". str

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        self.SecAuditGroupId = params.get("SecAuditGroupId")
        self.AsyncRequestId = params.get("AsyncRequestId")
        self.Product = params.get("Product")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeSecurityAuditLogDownloadUrlsResponse(AbstractModel):
    """DescribeSecurityAuditLogDownloadUrls response structure."""

    def __init__(self):
        self.Urls = None       # COS download URLs; large result sets may be split across several URLs. list of str
        self.RequestId = None  # Unique request ID, returned with every request. str

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        self.Urls = params.get("Urls")
        self.RequestId = params.get("RequestId")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeSecurityAuditLogExportTasksRequest(AbstractModel):
    """DescribeSecurityAuditLogExportTasks request structure."""

    def __init__(self):
        self.SecAuditGroupId = None  # Security audit group ID. str
        self.Product = None          # Product type; supports "mysql". str
        self.AsyncRequestIds = None  # Log export task IDs. list of non-negative int
        self.Offset = None           # Offset; default 0. int
        self.Limit = None            # Number of results to return; default 20. int

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        self.SecAuditGroupId = params.get("SecAuditGroupId")
        self.Product = params.get("Product")
        self.AsyncRequestIds = params.get("AsyncRequestIds")
        self.Offset = params.get("Offset")
        self.Limit = params.get("Limit")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeSecurityAuditLogExportTasksResponse(AbstractModel):
    """DescribeSecurityAuditLogExportTasks response structure."""

    def __init__(self):
        self.Tasks = None       # Security audit log export tasks. list of SecLogExportTaskInfo
        self.TotalCount = None  # Total number of export tasks. int
        self.RequestId = None   # Unique request ID, returned with every request. str

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        if params.get("Tasks") is not None:
            self.Tasks = []
            for item in params.get("Tasks"):
                obj = SecLogExportTaskInfo()
                obj._deserialize(item)
                self.Tasks.append(obj)
        self.TotalCount = params.get("TotalCount")
        self.RequestId = params.get("RequestId")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeSlowLogTimeSeriesStatsRequest(AbstractModel):
    """DescribeSlowLogTimeSeriesStats request structure."""

    def __init__(self):
        self.InstanceId = None  # Instance ID. str
        self.StartTime = None   # Start time, e.g. "2019-09-10 12:13:14". str
        self.EndTime = None     # End time, e.g. "2019-09-10 12:13:14"; at most 7 days after StartTime. str
        self.Product = None     # Product type: "mysql" or "cynosdb"; default "mysql". str

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        self.InstanceId = params.get("InstanceId")
        self.StartTime = params.get("StartTime")
        self.EndTime = params.get("EndTime")
        self.Product = params.get("Product")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeSlowLogTimeSeriesStatsResponse(AbstractModel):
    """DescribeSlowLogTimeSeriesStats response structure."""

    def __init__(self):
        self.Period = None      # Time interval per bar, in seconds. int
        self.TimeSeries = None  # Slow-log counts per interval. list of TimeSlice
        self.SeriesData = None  # Instance CPU utilization monitoring data per interval. MonitorMetricSeriesData
        self.RequestId = None   # Unique request ID, returned with every request. str

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        self.Period = params.get("Period")
        if params.get("TimeSeries") is not None:
            self.TimeSeries = []
            for item in params.get("TimeSeries"):
                obj = TimeSlice()
                obj._deserialize(item)
                self.TimeSeries.append(obj)
        if params.get("SeriesData") is not None:
            self.SeriesData = MonitorMetricSeriesData()
            self.SeriesData._deserialize(params.get("SeriesData"))
        self.RequestId = params.get("RequestId")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeSlowLogTopSqlsRequest(AbstractModel):
    """DescribeSlowLogTopSqls request structure."""

    def __init__(self):
        self.InstanceId = None  # Instance ID. str
        self.StartTime = None   # Start time, e.g. "2019-09-10 12:13:14". str
        self.EndTime = None     # End time; at most 7 days after StartTime. str
        self.SortBy = None      # Sort key: QueryTime, ExecTimes, RowsSent, LockTime or RowsExamined. str
        self.OrderBy = None     # Sort order: ASC or DESC. str
        self.Limit = None       # Number of results; default 20, maximum 100. int
        self.Offset = None      # Offset; default 0. int
        self.SchemaList = None  # Database name filters. list of SchemaItem
        self.Product = None     # Product type: "mysql" or "cynosdb"; default "mysql". str

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        self.InstanceId = params.get("InstanceId")
        self.StartTime = params.get("StartTime")
        self.EndTime = params.get("EndTime")
        self.SortBy = params.get("SortBy")
        self.OrderBy = params.get("OrderBy")
        self.Limit = params.get("Limit")
        self.Offset = params.get("Offset")
        if params.get("SchemaList") is not None:
            self.SchemaList = []
            for item in params.get("SchemaList"):
                obj = SchemaItem()
                obj._deserialize(item)
                self.SchemaList.append(obj)
        self.Product = params.get("Product")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeSlowLogTopSqlsResponse(AbstractModel):
    """DescribeSlowLogTopSqls response structure."""

    def __init__(self):
        self.TotalCount = None  # Number of matching records. int
        self.Rows = None        # Slow-log top SQL list. list of SlowLogTopSqlItem
        self.RequestId = None   # Unique request ID, returned with every request. str

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        self.TotalCount = params.get("TotalCount")
        if params.get("Rows") is not None:
            self.Rows = []
            for item in params.get("Rows"):
                obj = SlowLogTopSqlItem()
                obj._deserialize(item)
                self.Rows.append(obj)
        self.RequestId = params.get("RequestId")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeSlowLogUserHostStatsRequest(AbstractModel):
    """DescribeSlowLogUserHostStats request structure."""

    def __init__(self):
        self.InstanceId = None  # Instance ID. str
        self.StartTime = None   # Query range start time, e.g. "2019-09-10 12:13:14". str
        self.EndTime = None     # Query range end time, e.g. "2019-09-10 12:13:14". str
        self.Product = None     # Product type: "mysql" or "cynosdb"; default "mysql". str

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        self.InstanceId = params.get("InstanceId")
        self.StartTime = params.get("StartTime")
        self.EndTime = params.get("EndTime")
        self.Product = params.get("Product")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeSlowLogUserHostStatsResponse(AbstractModel):
    """DescribeSlowLogUserHostStats response structure."""

    def __init__(self):
        self.TotalCount = None  # Number of source addresses. int
        self.Items = None       # Slow-log share per source address. list of SlowLogHost
        self.RequestId = None   # Unique request ID, returned with every request. str

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        self.TotalCount = params.get("TotalCount")
        if params.get("Items") is not None:
            self.Items = []
            for item in params.get("Items"):
                obj = SlowLogHost()
                obj._deserialize(item)
                self.Items.append(obj)
        self.RequestId = params.get("RequestId")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeTopSpaceSchemaTimeSeriesRequest(AbstractModel):
    """DescribeTopSpaceSchemaTimeSeries request structure."""

    def __init__(self):
        self.InstanceId = None  # Instance ID. str
        self.Limit = None       # Number of top databases to return; maximum 100, default 20. int
        self.SortBy = None      # Sort field: DataLength, IndexLength, TotalLength, DataFree, FragRatio, TableRows or PhysicalFileSize (MySQL only; MySQL default PhysicalFileSize, others TotalLength). str
        self.StartDate = None   # Start date, e.g. "2021-01-01"; at most 29 days ago, default 6 days before EndDate. str
        self.EndDate = None     # End date, e.g. "2021-01-01"; at most 29 days ago, default today. str
        self.Product = None     # Product type: "mysql" or "cynosdb"; default "mysql". str

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        self.InstanceId = params.get("InstanceId")
        self.Limit = params.get("Limit")
        self.SortBy = params.get("SortBy")
        self.StartDate = params.get("StartDate")
        self.EndDate = params.get("EndDate")
        self.Product = params.get("Product")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeTopSpaceSchemaTimeSeriesResponse(AbstractModel):
    """DescribeTopSpaceSchemaTimeSeries response structure."""

    def __init__(self):
        self.TopSpaceSchemaTimeSeries = None  # Time series of space stats for the top databases. list of SchemaSpaceTimeSeries
        self.RequestId = None                 # Unique request ID, returned with every request. str

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        if params.get("TopSpaceSchemaTimeSeries") is not None:
            self.TopSpaceSchemaTimeSeries = []
            for item in params.get("TopSpaceSchemaTimeSeries"):
                obj = SchemaSpaceTimeSeries()
                obj._deserialize(item)
                self.TopSpaceSchemaTimeSeries.append(obj)
        self.RequestId = params.get("RequestId")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeTopSpaceSchemasRequest(AbstractModel):
    """DescribeTopSpaceSchemas request structure."""

    def __init__(self):
        self.InstanceId = None  # Instance ID. str
        self.Limit = None       # Number of top databases to return; maximum 100, default 20. int
        self.SortBy = None      # Sort field: DataLength, IndexLength, TotalLength, DataFree, FragRatio, TableRows or PhysicalFileSize (MySQL only; MySQL default PhysicalFileSize, others TotalLength). str
        self.Product = None     # Product type: "mysql" or "cynosdb"; default "mysql". str

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        self.InstanceId = params.get("InstanceId")
        self.Limit = params.get("Limit")
        self.SortBy = params.get("SortBy")
        self.Product = params.get("Product")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeTopSpaceSchemasResponse(AbstractModel):
    """DescribeTopSpaceSchemas response structure."""

    def __init__(self):
        self.TopSpaceSchemas = None  # Space stats for the top databases. list of SchemaSpaceData
        self.Timestamp = None        # Timestamp (seconds) when the space data was collected. int
        self.RequestId = None        # Unique request ID, returned with every request. str

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        if params.get("TopSpaceSchemas") is not None:
            self.TopSpaceSchemas = []
            for item in params.get("TopSpaceSchemas"):
                obj = SchemaSpaceData()
                obj._deserialize(item)
                self.TopSpaceSchemas.append(obj)
        self.Timestamp = params.get("Timestamp")
        self.RequestId = params.get("RequestId")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeTopSpaceTableTimeSeriesRequest(AbstractModel):
    """DescribeTopSpaceTableTimeSeries request structure."""

    def __init__(self):
        self.InstanceId = None  # Instance ID. str
        self.Limit = None       # Number of top tables to return; maximum 100, default 20. int
        self.SortBy = None      # Sort field: DataLength, IndexLength, TotalLength, DataFree, FragRatio, TableRows or PhysicalFileSize; default PhysicalFileSize. str
        self.StartDate = None   # Start date, e.g. "2021-01-01"; at most 29 days ago, default 6 days before EndDate. str
        self.EndDate = None     # End date, e.g. "2021-01-01"; at most 29 days ago, default today. str
        self.Product = None     # Product type: "mysql" or "cynosdb"; default "mysql". str

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        self.InstanceId = params.get("InstanceId")
        self.Limit = params.get("Limit")
        self.SortBy = params.get("SortBy")
        self.StartDate = params.get("StartDate")
        self.EndDate = params.get("EndDate")
        self.Product = params.get("Product")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeTopSpaceTableTimeSeriesResponse(AbstractModel):
    """DescribeTopSpaceTableTimeSeries response structure."""

    def __init__(self):
        self.TopSpaceTableTimeSeries = None  # Time series of space stats for the top tables. list of TableSpaceTimeSeries
        self.RequestId = None                # Unique request ID, returned with every request. str

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        if params.get("TopSpaceTableTimeSeries") is not None:
            self.TopSpaceTableTimeSeries = []
            for item in params.get("TopSpaceTableTimeSeries"):
                obj = TableSpaceTimeSeries()
                obj._deserialize(item)
                self.TopSpaceTableTimeSeries.append(obj)
        self.RequestId = params.get("RequestId")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeTopSpaceTablesRequest(AbstractModel):
    """DescribeTopSpaceTables request structure."""

    def __init__(self):
        self.InstanceId = None  # Instance ID. str
        self.Limit = None       # Number of top tables to return; maximum 100, default 20. int
        self.SortBy = None      # Sort field: DataLength, IndexLength, TotalLength, DataFree, FragRatio, TableRows or PhysicalFileSize (MySQL only; MySQL default PhysicalFileSize, others TotalLength). str
        self.Product = None     # Product type: "mysql" or "cynosdb"; default "mysql". str

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        self.InstanceId = params.get("InstanceId")
        self.Limit = params.get("Limit")
        self.SortBy = params.get("SortBy")
        self.Product = params.get("Product")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeTopSpaceTablesResponse(AbstractModel):
    """DescribeTopSpaceTables response structure."""

    def __init__(self):
        self.TopSpaceTables = None  # Space stats for the top tables. list of TableSpaceData
        self.Timestamp = None       # Timestamp (seconds) when the space data was collected. int
        self.RequestId = None       # Unique request ID, returned with every request. str

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        if params.get("TopSpaceTables") is not None:
            self.TopSpaceTables = []
            for item in params.get("TopSpaceTables"):
                obj = TableSpaceData()
                obj._deserialize(item)
                self.TopSpaceTables.append(obj)
        self.Timestamp = params.get("Timestamp")
        self.RequestId = params.get("RequestId")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeUserSqlAdviceRequest(AbstractModel):
    """DescribeUserSqlAdvice request structure."""

    def __init__(self):
        self.InstanceId = None  # Instance ID. str
        self.SqlText = None     # SQL statement. str
        self.Schema = None      # Database name. str

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        self.InstanceId = params.get("InstanceId")
        self.SqlText = params.get("SqlText")
        self.Schema = params.get("Schema")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DescribeUserSqlAdviceResponse(AbstractModel):
    """DescribeUserSqlAdvice response structure."""

    def __init__(self):
        self.Advices = None    # SQL optimization advice; parsable as a JSON array. str
        self.Comments = None   # Notes on the advice; parsable as a String array. str
        self.SqlText = None    # SQL statement. str
        self.Schema = None     # Database name. str
        self.Tables = None     # DDL of related tables; parsable as a JSON array. str
        self.SqlPlan = None    # SQL execution plan; parsable as JSON. str
        self.Cost = None       # Cost savings after optimization; parsable as JSON. str
        self.RequestId = None  # Unique request ID, returned with every request. str

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        self.Advices = params.get("Advices")
        self.Comments = params.get("Comments")
        self.SqlText = params.get("SqlText")
        self.Schema = params.get("Schema")
        self.Tables = params.get("Tables")
        self.SqlPlan = params.get("SqlPlan")
        self.Cost = params.get("Cost")
        self.RequestId = params.get("RequestId")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class DiagHistoryEventItem(AbstractModel):
    """Historical diagnosis event for an instance."""

    def __init__(self):
        self.DiagType = None    # Diagnosis type. str
        self.EndTime = None     # End time. str
        self.StartTime = None   # Start time. str
        self.EventId = None     # Event ID. int
        self.Severity = None    # Severity, 1-5 from highest impact: 1 fatal, 2 severe, 3 alarm, 4 notice, 5 healthy. int
        self.Outline = None     # Summary. str
        self.DiagItem = None    # Diagnosis item. str
        self.InstanceId = None  # Instance ID; may be None. str
        self.Metric = None      # Reserved field; may be None. str
        self.Region = None      # Region; may be None. str

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        self.DiagType = params.get("DiagType")
        self.EndTime = params.get("EndTime")
        self.StartTime = params.get("StartTime")
        self.EventId = params.get("EventId")
        self.Severity = params.get("Severity")
        self.Outline = params.get("Outline")
        self.DiagItem = params.get("DiagItem")
        self.InstanceId = params.get("InstanceId")
        self.Metric = params.get("Metric")
        self.Region = params.get("Region")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class EventInfo(AbstractModel):
    """Abnormal event information."""

    def __init__(self):
        self.EventId = None    # Event ID. int
        self.DiagType = None   # Diagnosis type. str
        self.StartTime = None  # Start time. str
        self.EndTime = None    # End time. str
        self.Outline = None    # Summary. str
        self.Severity = None   # Severity, 1-5 from highest impact: 1 fatal, 2 severe, 3 alarm, 4 notice, 5 healthy. int
        self.ScoreLost = None  # Points deducted. int
        self.Metric = None     # Reserved field. str
        self.Count = None      # Number of alarms. int

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        self.EventId = params.get("EventId")
        self.DiagType = params.get("DiagType")
        self.StartTime = params.get("StartTime")
        self.EndTime = params.get("EndTime")
        self.Outline = params.get("Outline")
        self.Severity = params.get("Severity")
        self.ScoreLost = params.get("ScoreLost")
        self.Metric = params.get("Metric")
        self.Count = params.get("Count")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class GroupItem(AbstractModel):
    """Group information."""

    def __init__(self):
        self.Id = None           # Group ID. int
        self.Name = None         # Group name. str
        self.MemberCount = None  # Number of group members. int

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        self.Id = params.get("Id")
        self.Name = params.get("Name")
        self.MemberCount = params.get("MemberCount")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class HealthReportTask(AbstractModel):
    """Health report task details."""

    def __init__(self):
        self.AsyncRequestId = None  # Async task request ID. int
        self.Source = None          # Trigger source: "DAILY_INSPECTION", "SCHEDULED" or "MANUAL". str
        self.Progress = None        # Completion progress, in %. int
        self.CreateTime = None      # Task creation time. str
        self.StartTime = None       # Task execution start time. str
        self.EndTime = None         # Task execution end time. str
        self.InstanceInfo = None    # Basic info of the task's instance. InstanceBasicInfo
        self.HealthStatus = None    # Health info in the health report. HealthStatus

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        self.AsyncRequestId = params.get("AsyncRequestId")
        self.Source = params.get("Source")
        self.Progress = params.get("Progress")
        self.CreateTime = params.get("CreateTime")
        self.StartTime = params.get("StartTime")
        self.EndTime = params.get("EndTime")
        if params.get("InstanceInfo") is not None:
            self.InstanceInfo = InstanceBasicInfo()
            self.InstanceInfo._deserialize(params.get("InstanceInfo"))
        if params.get("HealthStatus") is not None:
            self.HealthStatus = HealthStatus()
            self.HealthStatus._deserialize(params.get("HealthStatus"))
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class HealthScoreInfo(AbstractModel):
    """Details returned when fetching the health score."""

    def __init__(self):
        self.IssueTypes = None        # Abnormality details. list of IssueTypeInfo
        self.EventsTotalCount = None  # Total number of abnormal events. int
        self.HealthScore = None       # Health score. int
        self.HealthLevel = None       # Health level: "HEALTH", "SUB_HEALTH", "RISK" or "HIGH_RISK". str

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        if params.get("IssueTypes") is not None:
            self.IssueTypes = []
            for item in params.get("IssueTypes"):
                obj = IssueTypeInfo()
                obj._deserialize(item)
                self.IssueTypes.append(obj)
        self.EventsTotalCount = params.get("EventsTotalCount")
        self.HealthScore = params.get("HealthScore")
        self.HealthLevel = params.get("HealthLevel")
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class HealthStatus(AbstractModel):
    """Instance health details."""

    def __init__(self):
        self.HealthScore = None   # Health score, out of 100. int
        self.HealthLevel = None   # Health level: "HEALTH", "SUB_HEALTH", "RISK" or "HIGH_RISK". str
        self.ScoreLost = None     # Total points deducted. int
        self.ScoreDetails = None  # Deduction details; may be None. list of ScoreDetail

    def _deserialize(self, params):
        """Populate fields from *params*; warn about unrecognized keys."""
        self.HealthScore = params.get("HealthScore")
        self.HealthLevel = params.get("HealthLevel")
        self.ScoreLost = params.get("ScoreLost")
        if params.get("ScoreDetails") is not None:
            self.ScoreDetails = []
            for item in params.get("ScoreDetails"):
                obj = ScoreDetail()
                obj._deserialize(item)
                self.ScoreDetails.append(obj)
        extra = set(params.keys()) - set(vars(self))
        if extra:
            warnings.warn("%s fields are useless." % ",".join(extra), Warning)
class InstanceBasicInfo(AbstractModel):
    """Basic instance information.
    """

    def __init__(self):
        """
        :param InstanceId: Instance ID.
        :type InstanceId: str
        :param InstanceName: Instance name.
        :type InstanceName: str
        :param Vip: Private IP of the instance.
        :type Vip: str
        :param Vport: Private port of the instance.
        :type Vport: int
        :param Product: Instance product.
        :type Product: str
        :param EngineVersion: Instance engine version.
        :type EngineVersion: str
        """
        self.InstanceId = None
        self.InstanceName = None
        self.Vip = None
        self.Vport = None
        self.Product = None
        self.EngineVersion = None

    def _deserialize(self, params):
        """Populate this model from the API response dict ``params``."""
        self.InstanceId = params.get("InstanceId")
        self.InstanceName = params.get("InstanceName")
        self.Vip = params.get("Vip")
        self.Vport = params.get("Vport")
        self.Product = params.get("Product")
        self.EngineVersion = params.get("EngineVersion")
        # Warn about response keys with no matching model attribute
        # ("fields" typo fixed; manual removal loop replaced by a set difference).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)), Warning)
class InstanceConfs(AbstractModel):
    """Instance configuration.
    """

    def __init__(self):
        """
        :param DailyInspection: Daily database inspection switch, "Yes"/"No".
        :type DailyInspection: str
        :param OverviewDisplay: Instance overview switch, "Yes"/"No".
        :type OverviewDisplay: str
        """
        self.DailyInspection = None
        self.OverviewDisplay = None

    def _deserialize(self, params):
        """Populate this model from the API response dict ``params``."""
        self.DailyInspection = params.get("DailyInspection")
        self.OverviewDisplay = params.get("OverviewDisplay")
        # Warn about response keys with no matching model attribute
        # ("fields" typo fixed; manual removal loop replaced by a set difference).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)), Warning)
class InstanceInfo(AbstractModel):
    """Per-instance record returned by the instance-list query.
    """

    def __init__(self):
        """
        :param InstanceId: Instance ID.
        :type InstanceId: str
        :param InstanceName: Instance name.
        :type InstanceName: str
        :param Region: Region the instance belongs to.
        :type Region: str
        :param HealthScore: Health score.
        :type HealthScore: int
        :param Product: Product the instance belongs to.
        :type Product: str
        :param EventCount: Number of exception events.
        :type EventCount: int
        :param InstanceType: Instance type: 1 - MASTER; 2 - DR; 3 - RO; 4 - SDR.
        :type InstanceType: int
        :param Cpu: Number of cores.
        :type Cpu: int
        :param Memory: Memory, in MB.
        :type Memory: int
        :param Volume: Disk storage, in GB.
        :type Volume: int
        :param EngineVersion: Database version.
        :type EngineVersion: str
        :param Vip: Private address.
        :type Vip: str
        :param Vport: Private port.
        :type Vport: int
        :param Source: Access source.
        :type Source: str
        :param GroupId: Group ID.
        :type GroupId: str
        :param GroupName: Group name.
        :type GroupName: str
        :param Status: Instance status: 0 - shipping; 1 - running normally; 4 - terminating; 5 - isolating.
        :type Status: int
        :param UniqSubnetId: Unified subnet ID.
        :type UniqSubnetId: str
        :param DeployMode: CDB type.
        :type DeployMode: str
        :param InitFlag: CDB instance initialization flag: 0 - not initialized; 1 - initialized.
        :type InitFlag: int
        :param TaskStatus: Task status.
        :type TaskStatus: int
        :param UniqVpcId: Unified VPC ID.
        :type UniqVpcId: str
        :param InstanceConf: Inspection/overview status of the instance.
        :type InstanceConf: :class:`tencentcloud.dbbrain.v20191016.models.InstanceConfs`
        :param DeadlineTime: Resource expiry time.
        :type DeadlineTime: str
        :param IsSupported: Whether the instance is supported by DBbrain.
        :type IsSupported: bool
        :param SecAuditStatus: Security-audit log status: ON - enabled; OFF - not enabled.
        :type SecAuditStatus: str
        :param AuditPolicyStatus: Audit-log status: ALL_AUDIT - full audit; RULE_AUDIT - rule audit; UNBOUND - audit not enabled.
        :type AuditPolicyStatus: str
        :param AuditRunningStatus: Audit-log running status: normal - running; paused - suspended for arrears.
        :type AuditRunningStatus: str
        """
        self.InstanceId = None
        self.InstanceName = None
        self.Region = None
        self.HealthScore = None
        self.Product = None
        self.EventCount = None
        self.InstanceType = None
        self.Cpu = None
        self.Memory = None
        self.Volume = None
        self.EngineVersion = None
        self.Vip = None
        self.Vport = None
        self.Source = None
        self.GroupId = None
        self.GroupName = None
        self.Status = None
        self.UniqSubnetId = None
        self.DeployMode = None
        self.InitFlag = None
        self.TaskStatus = None
        self.UniqVpcId = None
        self.InstanceConf = None
        self.DeadlineTime = None
        self.IsSupported = None
        self.SecAuditStatus = None
        self.AuditPolicyStatus = None
        self.AuditRunningStatus = None

    def _deserialize(self, params):
        """Populate this model from the API response dict ``params``."""
        self.InstanceId = params.get("InstanceId")
        self.InstanceName = params.get("InstanceName")
        self.Region = params.get("Region")
        self.HealthScore = params.get("HealthScore")
        self.Product = params.get("Product")
        self.EventCount = params.get("EventCount")
        self.InstanceType = params.get("InstanceType")
        self.Cpu = params.get("Cpu")
        self.Memory = params.get("Memory")
        self.Volume = params.get("Volume")
        self.EngineVersion = params.get("EngineVersion")
        self.Vip = params.get("Vip")
        self.Vport = params.get("Vport")
        self.Source = params.get("Source")
        self.GroupId = params.get("GroupId")
        self.GroupName = params.get("GroupName")
        self.Status = params.get("Status")
        self.UniqSubnetId = params.get("UniqSubnetId")
        self.DeployMode = params.get("DeployMode")
        self.InitFlag = params.get("InitFlag")
        self.TaskStatus = params.get("TaskStatus")
        self.UniqVpcId = params.get("UniqVpcId")
        if params.get("InstanceConf") is not None:
            self.InstanceConf = InstanceConfs()
            self.InstanceConf._deserialize(params.get("InstanceConf"))
        self.DeadlineTime = params.get("DeadlineTime")
        self.IsSupported = params.get("IsSupported")
        self.SecAuditStatus = params.get("SecAuditStatus")
        self.AuditPolicyStatus = params.get("AuditPolicyStatus")
        self.AuditRunningStatus = params.get("AuditRunningStatus")
        # Warn about response keys with no matching model attribute
        # ("fields" typo fixed; manual removal loop replaced by a set difference).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)), Warning)
class IssueTypeInfo(AbstractModel):
    """Metric information.
    """

    def __init__(self):
        """
        :param IssueType: Metric category: AVAILABILITY, MAINTAINABILITY, PERFORMANCE, RELIABILITY.
        :type IssueType: str
        :param Events: Exception events.
        :type Events: list of EventInfo
        :param TotalCount: Total number of exception events.
        :type TotalCount: int
        """
        self.IssueType = None
        self.Events = None
        self.TotalCount = None

    def _deserialize(self, params):
        """Populate this model from the API response dict ``params``."""
        self.IssueType = params.get("IssueType")
        if params.get("Events") is not None:
            self.Events = []
            for item in params.get("Events"):
                obj = EventInfo()
                obj._deserialize(item)
                self.Events.append(obj)
        self.TotalCount = params.get("TotalCount")
        # Warn about response keys with no matching model attribute
        # ("fields" typo fixed; manual removal loop replaced by a set difference).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)), Warning)
class MailConfiguration(AbstractModel):
    """Mail delivery configuration.
    """

    def __init__(self):
        """
        :param SendMail: Whether mail delivery is enabled: 0 - no; 1 - yes.
        :type SendMail: int
        :param Region: Region configuration, e.g. ["ap-guangzhou", "ap-shanghai"]. For the inspection mail template this lists the regions whose inspection mail is sent; for the subscription template it is the region of the subscribed instance.
        :type Region: list of str
        :param HealthStatus: Health levels whose reports are sent, e.g. ["HEALTH", "SUB_HEALTH", "RISK", "HIGH_RISK"].
        :type HealthStatus: list of str
        :param ContactPerson: Contact IDs; contacts and contact groups cannot both be empty.
        :type ContactPerson: list of int
        :param ContactGroup: Contact-group IDs; contacts and contact groups cannot both be empty.
        :type ContactGroup: list of int
        """
        self.SendMail = None
        self.Region = None
        self.HealthStatus = None
        self.ContactPerson = None
        self.ContactGroup = None

    def _deserialize(self, params):
        """Populate this model from the API response dict ``params``."""
        self.SendMail = params.get("SendMail")
        self.Region = params.get("Region")
        self.HealthStatus = params.get("HealthStatus")
        self.ContactPerson = params.get("ContactPerson")
        self.ContactGroup = params.get("ContactGroup")
        # Warn about response keys with no matching model attribute
        # ("fields" typo fixed; manual removal loop replaced by a set difference).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)), Warning)
class ModifyDiagDBInstanceConfRequest(AbstractModel):
    """ModifyDiagDBInstanceConf request structure.
    """

    def __init__(self):
        """
        :param InstanceConfs: Inspection switch.
        :type InstanceConfs: :class:`tencentcloud.dbbrain.v20191016.models.InstanceConfs`
        :param Regions: Effective instance region; "All" means all regions.
        :type Regions: str
        :param Product: Service product type: "mysql" - TencentDB for MySQL, "cynosdb" - CynosDB for MySQL.
        :type Product: str
        :param InstanceIds: IDs of the instances whose inspection status is to be changed.
        :type InstanceIds: list of str
        """
        self.InstanceConfs = None
        self.Regions = None
        self.Product = None
        self.InstanceIds = None

    def _deserialize(self, params):
        """Populate this model from the API response dict ``params``."""
        if params.get("InstanceConfs") is not None:
            self.InstanceConfs = InstanceConfs()
            self.InstanceConfs._deserialize(params.get("InstanceConfs"))
        self.Regions = params.get("Regions")
        self.Product = params.get("Product")
        self.InstanceIds = params.get("InstanceIds")
        # Warn about response keys with no matching model attribute
        # ("fields" typo fixed; manual removal loop replaced by a set difference).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)), Warning)
class ModifyDiagDBInstanceConfResponse(AbstractModel):
    """ModifyDiagDBInstanceConf response structure.
    """

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every request. Provide it when reporting an issue with this request.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        """Populate this model from the API response dict ``params``."""
        self.RequestId = params.get("RequestId")
        # Warn about response keys with no matching model attribute
        # ("fields" typo fixed; manual removal loop replaced by a set difference).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)), Warning)
class MonitorFloatMetric(AbstractModel):
    """Monitoring data (floating point).
    """

    def __init__(self):
        """
        :param Metric: Metric name.
        :type Metric: str
        :param Unit: Metric unit.
        :type Unit: str
        :param Values: Metric values. May return null if no valid value can be obtained.
        :type Values: list of float
        """
        self.Metric = None
        self.Unit = None
        self.Values = None

    def _deserialize(self, params):
        """Populate this model from the API response dict ``params``."""
        self.Metric = params.get("Metric")
        self.Unit = params.get("Unit")
        self.Values = params.get("Values")
        # Warn about response keys with no matching model attribute
        # ("fields" typo fixed; manual removal loop replaced by a set difference).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)), Warning)
class MonitorFloatMetricSeriesData(AbstractModel):
    """Monitoring metric data over unit time intervals (floating point).
    """

    def __init__(self):
        """
        :param Series: Monitoring metrics.
        :type Series: list of MonitorFloatMetric
        :param Timestamp: Timestamps corresponding to the metrics.
        :type Timestamp: list of int
        """
        self.Series = None
        self.Timestamp = None

    def _deserialize(self, params):
        """Populate this model from the API response dict ``params``."""
        if params.get("Series") is not None:
            self.Series = []
            for item in params.get("Series"):
                obj = MonitorFloatMetric()
                obj._deserialize(item)
                self.Series.append(obj)
        self.Timestamp = params.get("Timestamp")
        # Warn about response keys with no matching model attribute
        # ("fields" typo fixed; manual removal loop replaced by a set difference).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)), Warning)
class MonitorMetric(AbstractModel):
    """Monitoring data.
    """

    def __init__(self):
        """
        :param Metric: Metric name.
        :type Metric: str
        :param Unit: Metric unit.
        :type Unit: str
        :param Values: Metric values. May return null if no valid value can be obtained.
        :type Values: list of int
        """
        self.Metric = None
        self.Unit = None
        self.Values = None

    def _deserialize(self, params):
        """Populate this model from the API response dict ``params``."""
        self.Metric = params.get("Metric")
        self.Unit = params.get("Unit")
        self.Values = params.get("Values")
        # Warn about response keys with no matching model attribute
        # ("fields" typo fixed; manual removal loop replaced by a set difference).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)), Warning)
class MonitorMetricSeriesData(AbstractModel):
    """Monitoring metric data over unit time intervals.
    """

    def __init__(self):
        """
        :param Series: Monitoring metrics.
        :type Series: list of MonitorMetric
        :param Timestamp: Timestamps corresponding to the metrics.
        :type Timestamp: list of int
        """
        self.Series = None
        self.Timestamp = None

    def _deserialize(self, params):
        """Populate this model from the API response dict ``params``."""
        if params.get("Series") is not None:
            self.Series = []
            for item in params.get("Series"):
                obj = MonitorMetric()
                obj._deserialize(item)
                self.Series.append(obj)
        self.Timestamp = params.get("Timestamp")
        # Warn about response keys with no matching model attribute
        # ("fields" typo fixed; manual removal loop replaced by a set difference).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)), Warning)
class ProfileInfo(AbstractModel):
    """User-configured information.
    """

    def __init__(self):
        """
        :param Language: Language, e.g. "zh".
        :type Language: str
        :param MailConfiguration: Content of the mail template.
        :type MailConfiguration: :class:`tencentcloud.dbbrain.v20191016.models.MailConfiguration`
        """
        self.Language = None
        self.MailConfiguration = None

    def _deserialize(self, params):
        """Populate this model from the API response dict ``params``."""
        self.Language = params.get("Language")
        if params.get("MailConfiguration") is not None:
            self.MailConfiguration = MailConfiguration()
            self.MailConfiguration._deserialize(params.get("MailConfiguration"))
        # Warn about response keys with no matching model attribute
        # ("fields" typo fixed; manual removal loop replaced by a set difference).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)), Warning)
class SchemaItem(AbstractModel):
    """SchemaItem array element.
    """

    def __init__(self):
        """
        :param Schema: Database name.
        :type Schema: str
        """
        self.Schema = None

    def _deserialize(self, params):
        """Populate this model from the API response dict ``params``."""
        self.Schema = params.get("Schema")
        # Warn about response keys with no matching model attribute
        # ("fields" typo fixed; manual removal loop replaced by a set difference).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)), Warning)
class SchemaSpaceData(AbstractModel):
    """Database space usage statistics.
    """

    def __init__(self):
        """
        :param TableSchema: Database name.
        :type TableSchema: str
        :param DataLength: Data space (MB).
        :type DataLength: float
        :param IndexLength: Index space (MB).
        :type IndexLength: float
        :param DataFree: Fragmented space (MB).
        :type DataFree: float
        :param TotalLength: Total used space (MB).
        :type TotalLength: float
        :param FragRatio: Fragmentation ratio (%).
        :type FragRatio: float
        :param TableRows: Number of rows.
        :type TableRows: int
        :param PhysicalFileSize: Sum of the independent physical file sizes of all tables in the database (MB). May return null if no valid value can be obtained.
        :type PhysicalFileSize: float
        """
        self.TableSchema = None
        self.DataLength = None
        self.IndexLength = None
        self.DataFree = None
        self.TotalLength = None
        self.FragRatio = None
        self.TableRows = None
        self.PhysicalFileSize = None

    def _deserialize(self, params):
        """Populate this model from the API response dict ``params``."""
        self.TableSchema = params.get("TableSchema")
        self.DataLength = params.get("DataLength")
        self.IndexLength = params.get("IndexLength")
        self.DataFree = params.get("DataFree")
        self.TotalLength = params.get("TotalLength")
        self.FragRatio = params.get("FragRatio")
        self.TableRows = params.get("TableRows")
        self.PhysicalFileSize = params.get("PhysicalFileSize")
        # Warn about response keys with no matching model attribute
        # ("fields" typo fixed; manual removal loop replaced by a set difference).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)), Warning)
class SchemaSpaceTimeSeries(AbstractModel):
    """Database space time-series data.
    """

    def __init__(self):
        """
        :param TableSchema: Database name.
        :type TableSchema: str
        :param SeriesData: Space metric data over unit time intervals.
        :type SeriesData: :class:`tencentcloud.dbbrain.v20191016.models.MonitorMetricSeriesData`
        """
        self.TableSchema = None
        self.SeriesData = None

    def _deserialize(self, params):
        """Populate this model from the API response dict ``params``."""
        self.TableSchema = params.get("TableSchema")
        if params.get("SeriesData") is not None:
            self.SeriesData = MonitorMetricSeriesData()
            self.SeriesData._deserialize(params.get("SeriesData"))
        # Warn about response keys with no matching model attribute
        # ("fields" typo fixed; manual removal loop replaced by a set difference).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)), Warning)
class ScoreDetail(AbstractModel):
    """Deduction details.
    """

    def __init__(self):
        """
        :param IssueType: Deduction category: availability, maintainability, performance, or reliability.
        :type IssueType: str
        :param ScoreLost: Total deducted points.
        :type ScoreLost: int
        :param ScoreLostMax: Upper limit of deducted points.
        :type ScoreLostMax: int
        :param Items: Deduction items. May return null if no valid value can be obtained.
        :type Items: list of ScoreItem
        """
        self.IssueType = None
        self.ScoreLost = None
        self.ScoreLostMax = None
        self.Items = None

    def _deserialize(self, params):
        """Populate this model from the API response dict ``params``."""
        self.IssueType = params.get("IssueType")
        self.ScoreLost = params.get("ScoreLost")
        self.ScoreLostMax = params.get("ScoreLostMax")
        if params.get("Items") is not None:
            self.Items = []
            for item in params.get("Items"):
                obj = ScoreItem()
                obj._deserialize(item)
                self.Items.append(obj)
        # Warn about response keys with no matching model attribute
        # ("fields" typo fixed; manual removal loop replaced by a set difference).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)), Warning)
class ScoreItem(AbstractModel):
    """Diagnostic deduction item.
    """

    def __init__(self):
        """
        :param DiagItem: Name of the abnormal diagnosis item.
        :type DiagItem: str
        :param IssueType: Diagnosis category: availability, maintainability, performance, or reliability.
        :type IssueType: str
        :param TopSeverity: Health level: information, notice, alarm, serious, or fatal.
        :type TopSeverity: str
        :param Count: Number of occurrences of this diagnosis item.
        :type Count: int
        :param ScoreLost: Deducted points.
        :type ScoreLost: int
        """
        self.DiagItem = None
        self.IssueType = None
        self.TopSeverity = None
        self.Count = None
        self.ScoreLost = None

    def _deserialize(self, params):
        """Populate this model from the API response dict ``params``."""
        self.DiagItem = params.get("DiagItem")
        self.IssueType = params.get("IssueType")
        self.TopSeverity = params.get("TopSeverity")
        self.Count = params.get("Count")
        self.ScoreLost = params.get("ScoreLost")
        # Warn about response keys with no matching model attribute
        # ("fields" typo fixed; manual removal loop replaced by a set difference).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)), Warning)
class SecLogExportTaskInfo(AbstractModel):
    """Security-audit log export task information.
    """

    def __init__(self):
        """
        :param AsyncRequestId: Async task ID.
        :type AsyncRequestId: int
        :param StartTime: Task start time. May return null if no valid value can be obtained.
        :type StartTime: str
        :param EndTime: Task end time. May return null if no valid value can be obtained.
        :type EndTime: str
        :param CreateTime: Task creation time.
        :type CreateTime: str
        :param Status: Task status.
        :type Status: str
        :param Progress: Task execution progress.
        :type Progress: int
        :param LogStartTime: Start time of the exported logs. May return null if no valid value can be obtained.
        :type LogStartTime: str
        :param LogEndTime: End time of the exported logs. May return null if no valid value can be obtained.
        :type LogEndTime: str
        :param TotalSize: Total log file size, in KB. May return null if no valid value can be obtained.
        :type TotalSize: int
        :param DangerLevels: Risk levels: 0 - none; 1 - low; 2 - medium; 3 - high. May return null if no valid value can be obtained.
        :type DangerLevels: list of int non-negative
        """
        self.AsyncRequestId = None
        self.StartTime = None
        self.EndTime = None
        self.CreateTime = None
        self.Status = None
        self.Progress = None
        self.LogStartTime = None
        self.LogEndTime = None
        self.TotalSize = None
        self.DangerLevels = None

    def _deserialize(self, params):
        """Populate this model from the API response dict ``params``."""
        self.AsyncRequestId = params.get("AsyncRequestId")
        self.StartTime = params.get("StartTime")
        self.EndTime = params.get("EndTime")
        self.CreateTime = params.get("CreateTime")
        self.Status = params.get("Status")
        self.Progress = params.get("Progress")
        self.LogStartTime = params.get("LogStartTime")
        self.LogEndTime = params.get("LogEndTime")
        self.TotalSize = params.get("TotalSize")
        self.DangerLevels = params.get("DangerLevels")
        # Warn about response keys with no matching model attribute
        # ("fields" typo fixed; manual removal loop replaced by a set difference).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)), Warning)
class SlowLogHost(AbstractModel):
    """Details of a slow-log source address.
    """

    def __init__(self):
        """
        :param UserHost: Source address.
        :type UserHost: str
        :param Ratio: Share of slow logs from this source address over the total, in %.
        :type Ratio: float
        :param Count: Number of slow logs from this source address.
        :type Count: int
        """
        self.UserHost = None
        self.Ratio = None
        self.Count = None

    def _deserialize(self, params):
        """Populate this model from the API response dict ``params``."""
        self.UserHost = params.get("UserHost")
        self.Ratio = params.get("Ratio")
        self.Count = params.get("Count")
        # Warn about response keys with no matching model attribute
        # ("fields" typo fixed; manual removal loop replaced by a set difference).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)), Warning)
class SlowLogTopSqlItem(AbstractModel):
    """Slow-log top SQL item.
    """

    def __init__(self):
        """
        :param LockTime: Total SQL lock-wait time.
        :type LockTime: float
        :param LockTimeMax: Maximum lock-wait time.
        :type LockTimeMax: float
        :param LockTimeMin: Minimum lock-wait time.
        :type LockTimeMin: float
        :param RowsExamined: Total scanned rows.
        :type RowsExamined: int
        :param RowsExaminedMax: Maximum scanned rows.
        :type RowsExaminedMax: int
        :param RowsExaminedMin: Minimum scanned rows.
        :type RowsExaminedMin: int
        :param QueryTime: Total elapsed time.
        :type QueryTime: float
        :param QueryTimeMax: Maximum execution time.
        :type QueryTimeMax: float
        :param QueryTimeMin: Minimum execution time.
        :type QueryTimeMin: float
        :param RowsSent: Total returned rows.
        :type RowsSent: int
        :param RowsSentMax: Maximum returned rows.
        :type RowsSentMax: int
        :param RowsSentMin: Minimum returned rows.
        :type RowsSentMin: int
        :param ExecTimes: Number of executions.
        :type ExecTimes: int
        :param SqlTemplate: SQL template.
        :type SqlTemplate: str
        :param SqlText: A sampled SQL statement with parameters.
        :type SqlText: str
        :param Schema: Database name.
        :type Schema: str
        :param QueryTimeRatio: Share of total elapsed time.
        :type QueryTimeRatio: float
        :param LockTimeRatio: Share of total SQL lock-wait time.
        :type LockTimeRatio: float
        :param RowsExaminedRatio: Share of total scanned rows.
        :type RowsExaminedRatio: float
        :param RowsSentRatio: Share of total returned rows.
        :type RowsSentRatio: float
        :param QueryTimeAvg: Average execution time.
        :type QueryTimeAvg: float
        :param RowsSentAvg: Average returned rows.
        :type RowsSentAvg: float
        :param LockTimeAvg: Average lock-wait time.
        :type LockTimeAvg: float
        :param RowsExaminedAvg: Average scanned rows.
        :type RowsExaminedAvg: float
        """
        self.LockTime = None
        self.LockTimeMax = None
        self.LockTimeMin = None
        self.RowsExamined = None
        self.RowsExaminedMax = None
        self.RowsExaminedMin = None
        self.QueryTime = None
        self.QueryTimeMax = None
        self.QueryTimeMin = None
        self.RowsSent = None
        self.RowsSentMax = None
        self.RowsSentMin = None
        self.ExecTimes = None
        self.SqlTemplate = None
        self.SqlText = None
        self.Schema = None
        self.QueryTimeRatio = None
        self.LockTimeRatio = None
        self.RowsExaminedRatio = None
        self.RowsSentRatio = None
        self.QueryTimeAvg = None
        self.RowsSentAvg = None
        self.LockTimeAvg = None
        self.RowsExaminedAvg = None

    def _deserialize(self, params):
        """Populate this model from the API response dict ``params``."""
        self.LockTime = params.get("LockTime")
        self.LockTimeMax = params.get("LockTimeMax")
        self.LockTimeMin = params.get("LockTimeMin")
        self.RowsExamined = params.get("RowsExamined")
        self.RowsExaminedMax = params.get("RowsExaminedMax")
        self.RowsExaminedMin = params.get("RowsExaminedMin")
        self.QueryTime = params.get("QueryTime")
        self.QueryTimeMax = params.get("QueryTimeMax")
        self.QueryTimeMin = params.get("QueryTimeMin")
        self.RowsSent = params.get("RowsSent")
        self.RowsSentMax = params.get("RowsSentMax")
        self.RowsSentMin = params.get("RowsSentMin")
        self.ExecTimes = params.get("ExecTimes")
        self.SqlTemplate = params.get("SqlTemplate")
        self.SqlText = params.get("SqlText")
        self.Schema = params.get("Schema")
        self.QueryTimeRatio = params.get("QueryTimeRatio")
        self.LockTimeRatio = params.get("LockTimeRatio")
        self.RowsExaminedRatio = params.get("RowsExaminedRatio")
        self.RowsSentRatio = params.get("RowsSentRatio")
        self.QueryTimeAvg = params.get("QueryTimeAvg")
        self.RowsSentAvg = params.get("RowsSentAvg")
        self.LockTimeAvg = params.get("LockTimeAvg")
        self.RowsExaminedAvg = params.get("RowsExaminedAvg")
        # Warn about response keys with no matching model attribute
        # ("fields" typo fixed; manual removal loop replaced by a set difference).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)), Warning)
class TableSpaceData(AbstractModel):
    """Table space usage statistics.
    """

    def __init__(self):
        """
        :param TableName: Table name.
        :type TableName: str
        :param TableSchema: Database name.
        :type TableSchema: str
        :param Engine: Storage engine of the table.
        :type Engine: str
        :param DataLength: Data space (MB).
        :type DataLength: float
        :param IndexLength: Index space (MB).
        :type IndexLength: float
        :param DataFree: Fragmented space (MB).
        :type DataFree: float
        :param TotalLength: Total used space (MB).
        :type TotalLength: float
        :param FragRatio: Fragmentation ratio (%).
        :type FragRatio: float
        :param TableRows: Number of rows.
        :type TableRows: int
        :param PhysicalFileSize: Size of the table's independent physical file (MB).
        :type PhysicalFileSize: float
        """
        self.TableName = None
        self.TableSchema = None
        self.Engine = None
        self.DataLength = None
        self.IndexLength = None
        self.DataFree = None
        self.TotalLength = None
        self.FragRatio = None
        self.TableRows = None
        self.PhysicalFileSize = None

    def _deserialize(self, params):
        """Populate this model from the API response dict ``params``."""
        self.TableName = params.get("TableName")
        self.TableSchema = params.get("TableSchema")
        self.Engine = params.get("Engine")
        self.DataLength = params.get("DataLength")
        self.IndexLength = params.get("IndexLength")
        self.DataFree = params.get("DataFree")
        self.TotalLength = params.get("TotalLength")
        self.FragRatio = params.get("FragRatio")
        self.TableRows = params.get("TableRows")
        self.PhysicalFileSize = params.get("PhysicalFileSize")
        # Warn about response keys with no matching model attribute
        # ("fields" typo fixed; manual removal loop replaced by a set difference).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)), Warning)
class TableSpaceTimeSeries(AbstractModel):
    """Table space time-series data.
    """

    def __init__(self):
        """
        :param TableName: Table name.
        :type TableName: str
        :param TableSchema: Database name.
        :type TableSchema: str
        :param Engine: Storage engine of the table.
        :type Engine: str
        :param SeriesData: Space metric data over unit time intervals.
        :type SeriesData: :class:`tencentcloud.dbbrain.v20191016.models.MonitorFloatMetricSeriesData`
        """
        self.TableName = None
        self.TableSchema = None
        self.Engine = None
        self.SeriesData = None

    def _deserialize(self, params):
        """Populate this model from the API response dict ``params``."""
        self.TableName = params.get("TableName")
        self.TableSchema = params.get("TableSchema")
        self.Engine = params.get("Engine")
        if params.get("SeriesData") is not None:
            self.SeriesData = MonitorFloatMetricSeriesData()
            self.SeriesData._deserialize(params.get("SeriesData"))
        # Warn about response keys with no matching model attribute
        # ("fields" typo fixed; manual removal loop replaced by a set difference).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)), Warning)
class TimeSlice(AbstractModel):
    """Slow-log statistics within a unit time interval.
    """

    def __init__(self):
        """
        :param Count: Total count.
        :type Count: int
        :param Timestamp: Statistics start time.
        :type Timestamp: int
        """
        self.Count = None
        self.Timestamp = None

    def _deserialize(self, params):
        """Populate this model from the API response dict ``params``."""
        self.Count = params.get("Count")
        self.Timestamp = params.get("Timestamp")
        # Warn about response keys with no matching model attribute
        # ("fields" typo fixed; manual removal loop replaced by a set difference).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)), Warning)
class UserProfile(AbstractModel):
    """User configuration, including the mail configuration.
    """

    def __init__(self):
        """
        :param ProfileId: Configuration ID. May return null if no valid value can be obtained.
        :type ProfileId: str
        :param ProfileType: Configuration type. May return null if no valid value can be obtained.
        :type ProfileType: str
        :param ProfileLevel: Configuration level, "User" or "Instance". May return null if no valid value can be obtained.
        :type ProfileLevel: str
        :param ProfileName: Configuration name. May return null if no valid value can be obtained.
        :type ProfileName: str
        :param ProfileInfo: Configuration details.
        :type ProfileInfo: :class:`tencentcloud.dbbrain.v20191016.models.ProfileInfo`
        """
        self.ProfileId = None
        self.ProfileType = None
        self.ProfileLevel = None
        self.ProfileName = None
        self.ProfileInfo = None

    def _deserialize(self, params):
        """Populate this model from the API response dict ``params``."""
        self.ProfileId = params.get("ProfileId")
        self.ProfileType = params.get("ProfileType")
        self.ProfileLevel = params.get("ProfileLevel")
        self.ProfileName = params.get("ProfileName")
        if params.get("ProfileInfo") is not None:
            self.ProfileInfo = ProfileInfo()
            self.ProfileInfo._deserialize(params.get("ProfileInfo"))
        # Warn about response keys with no matching model attribute
        # ("fields" typo fixed; manual removal loop replaced by a set difference).
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(sorted(unknown)), Warning)
|
[
"liugngg.gmail.com"
] |
liugngg.gmail.com
|
672a6a72f6183c8b5c819e74c251aab20e351aeb
|
e7742a9656684071bd6ffcc4b0c479c1a4580409
|
/backend/src/articles/migrations/0001_initial.py
|
cd404ded9ce81304357bbb07199002b640e23a48
|
[] |
no_license
|
vincey101/reactdj
|
81d106f24b86b3aa4e5286cdeae8a87b6c050f89
|
44f1005d8627657f2673b80c6c006c26e730e58c
|
refs/heads/master
| 2023-06-27T17:31:15.433071
| 2021-08-05T16:03:20
| 2021-08-05T16:03:20
| 383,918,512
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 562
|
py
|
# Generated by Django 3.2.4 on 2021-06-03 16:43
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration: creates the Article table with an auto-generated
    # big-integer primary key, a 120-char title and a free-form content body.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=120)),
                ('content', models.TextField()),
            ],
        ),
    ]
|
[
"60340075+vincey101@users.noreply.github.com"
] |
60340075+vincey101@users.noreply.github.com
|
0d2e8eb82fcf56dc2eaae7992fd0c6c9d8934816
|
5bba265a60e22f89356ff9b807a2356eb96d5791
|
/app/core/migrations/0004_recipe.py
|
8d279ad5f3371461a6831b5876b7672f3a7f37be
|
[
"MIT"
] |
permissive
|
HutahCode/recipe-app-api
|
9d89ee02e702c1d26bacc4891ce66fec914a0df8
|
4a272795630f9ceade1d6acbb9fa31ac147b1a9a
|
refs/heads/master
| 2020-12-05T06:26:25.162812
| 2020-02-15T14:16:32
| 2020-02-15T14:16:32
| 232,034,793
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,043
|
py
|
# Generated by Django 2.1.15 on 2020-02-04 05:50
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds the Recipe model: owned by a user (deleted with the user via
    # CASCADE), with a title, preparation time, price, an optional external
    # link, and many-to-many links to ingredients and tags.

    dependencies = [
        ('core', '0003_ingredient'),
    ]

    operations = [
        migrations.CreateModel(
            name='Recipe',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('time_minutes', models.IntegerField()),
                ('price', models.DecimalField(decimal_places=2, max_digits=5)),
                ('link', models.CharField(blank=True, max_length=255)),
                ('ingredients', models.ManyToManyField(to='core.Ingredient')),
                ('tags', models.ManyToManyField(to='core.Tag')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"hutahcode@gmail.com"
] |
hutahcode@gmail.com
|
671429cdff5179f3516134da1acafcff5fa37232
|
5dbfb45a2818850e5bb721e860fba823cb5464f7
|
/deluge-client/experiment/action/generate_all_start.py
|
974369d2234041f22c79c140af89632b01b5fdb7
|
[] |
no_license
|
john35452/sdn-p2p
|
fd3d8af4735558b5ff57f397c6729df19e37f037
|
e5171b5cc6e2aeb0129de9cd35938e565d467cea
|
refs/heads/master
| 2020-04-06T06:56:21.518291
| 2016-08-25T08:26:36
| 2016-08-25T08:26:36
| 60,574,920
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
import sys
import os
import random

# Generates per-user start data: 32 "left" (user1xx) and 32 "right" (user2xx)
# users are split into 4 groups; each user gets a rate drawn uniformly from
# the group's [rate[i], rate[i+1]] band and is passed, together with the
# output directory from sys.argv[1], to make_data_all_start.py.
# NOTE(review): Python 2 syntax (print statement; left/4 relies on integer
# division) -- run under Python 2 only.
left = 32
right = 32
rate = [0,1,2,5,1000]
for i in range(4):
    print 'group:',(i+1)
    for j in range(left/4):
        new_rate = random.uniform(rate[i],rate[i+1])
        os.system('python make_data_all_start.py '+sys.argv[1]+'/user1'+str(i*(left/4)+j+1).zfill(2)+' '+str(new_rate))
    for j in range(right/4):
        new_rate = random.uniform(rate[i],rate[i+1])
        os.system('python make_data_all_start.py '+sys.argv[1]+'/user2'+str(i*(right/4)+j+1).zfill(2)+' '+str(new_rate))
|
[
"john354525@gmail.com"
] |
john354525@gmail.com
|
66bfd0645c3e75bf9d0b96e5f8d6c98b715bf162
|
f4ed88eb42788cc57d43758adcdb863d7d159bcc
|
/Marketba/MBA/settings.py
|
8b50506310bbd469736e8add4288f2430d2e1f1b
|
[] |
no_license
|
surya-95/Mbarep
|
8ec56f1c1f61d10ec66f0b8bc50bf6d3a134293e
|
fb7aa359e14535e31f9548d411b94dceed278409
|
refs/heads/master
| 2020-05-05T13:53:46.049028
| 2019-04-23T17:08:38
| 2019-04-23T17:08:38
| 180,097,938
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,333
|
py
|
"""
Django settings for MBA project.
Generated by 'django-admin startproject' using Django 1.11.15.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '(cp5%v5g#c)iw=r)tlw=(#$w+7)#86+x7tirt+)=+j9-yy&*f='

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty list: only localhost-style hosts are accepted while DEBUG is True.
ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'adminp.apps.AdminpConfig',  # project-local app
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'MBA.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level templates live in <repo>/templates, in addition to
        # per-app template directories (APP_DIRS=True).
        'DIRS': [os.path.join(BASE_DIR,'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'MBA.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# NOTE(review): MySQL root account with an empty password — development only.

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'mba',
        'USER': 'root',
        'PASSWORD': '',
        'HOST': 'localhost', # Or an IP Address that your DB is hosted on
        'PORT': '3306',
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/

STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "static"),
]

STATIC_URL = '/static/'
|
[
"knsurya54@gmail.com"
] |
knsurya54@gmail.com
|
54ab17f1968006213596d13d4eb8fa7652332414
|
af9e29a701b1e1a9aefd2f01f21e5d0798ea83a4
|
/venv/Lib/site-packages/PyQt5/QtHelp.pyi
|
27f1831ee364bdf8fa1e4bb5cd2921cee33bfb8d
|
[] |
no_license
|
pjared/312TSP
|
fe31fa1156ca4bf6f006b4863bd48e13ec716079
|
5594a9da2d5189767fbe310fd87615833a4a0b45
|
refs/heads/main
| 2023-01-30T22:39:10.603466
| 2020-12-11T02:26:42
| 2020-12-11T02:26:42
| 316,882,131
| 1
| 1
| null | 2020-12-09T22:03:21
| 2020-11-29T05:21:59
|
Python
|
UTF-8
|
Python
| false
| false
| 13,324
|
pyi
|
# The PEP 484 type hints stub file for the QtHelp module.
#
# Generated by SIP 5.5.0
#
# Copyright (c) 2020 Riverbank Computing Limited <info@riverbankcomputing.com>
#
# This file is part of PyQt5.
#
# This file may be used under the terms of the GNU General Public License
# version 3.0 as published by the Free Software Foundation and appearing in
# the file LICENSE included in the packaging of this file. Please review the
# following information to ensure the GNU General Public License version 3.0
# requirements will be met: http://www.gnu.org/copyleft/gpl.html.
#
# If you do not wish to use this file under the terms of the GPL version 3.0
# then you may purchase a commercial license. For more information contact
# info@riverbankcomputing.com.
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
import typing
import sip
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5 import QtCore
# Support for QDate, QDateTime and QTime.
import datetime
# Convenient type aliases.
PYQT_SIGNAL = typing.Union[QtCore.pyqtSignal, QtCore.pyqtBoundSignal]
PYQT_SLOT = typing.Union[typing.Callable[..., None], QtCore.pyqtBoundSignal]
# Convenient aliases for complicated OpenGL types.
PYQT_OPENGL_ARRAY = typing.Union[typing.Sequence[int], typing.Sequence[float],
sip.Buffer, None]
PYQT_OPENGL_BOUND_ARRAY = typing.Union[typing.Sequence[int],
typing.Sequence[float], sip.Buffer, int, None]
class QCompressedHelpInfo(sip.simplewrapper):
    """Generated type-hint stub for QCompressedHelpInfo; signatures only."""

    @typing.overload
    def __init__(self) -> None: ...
    @typing.overload
    def __init__(self, other: 'QCompressedHelpInfo') -> None: ...
    def isNull(self) -> bool: ...
    @staticmethod
    def fromCompressedHelpFile(documentationFileName: str) -> 'QCompressedHelpInfo': ...
    def version(self) -> QtCore.QVersionNumber: ...
    def component(self) -> str: ...
    def namespaceName(self) -> str: ...
    def swap(self, other: 'QCompressedHelpInfo') -> None: ...
class QHelpContentItem(sip.simplewrapper):
    """Generated type-hint stub for QHelpContentItem; signatures only."""

    def childPosition(self, child: 'QHelpContentItem') -> int: ...
    def parent(self) -> 'QHelpContentItem': ...
    def row(self) -> int: ...
    def url(self) -> QtCore.QUrl: ...
    def title(self) -> str: ...
    def childCount(self) -> int: ...
    def child(self, row: int) -> 'QHelpContentItem': ...
class QHelpContentModel(QtCore.QAbstractItemModel):
    """Generated type-hint stub for QHelpContentModel; signatures only."""

    def contentsCreated(self) -> None: ...
    def contentsCreationStarted(self) -> None: ...
    def isCreatingContents(self) -> bool: ...
    def columnCount(self, parent: QtCore.QModelIndex = ...) -> int: ...
    def rowCount(self, parent: QtCore.QModelIndex = ...) -> int: ...
    def parent(self, index: QtCore.QModelIndex) -> QtCore.QModelIndex: ...
    def index(self, row: int, column: int, parent: QtCore.QModelIndex = ...) -> QtCore.QModelIndex: ...
    def data(self, index: QtCore.QModelIndex, role: int) -> typing.Any: ...
    def contentItemAt(self, index: QtCore.QModelIndex) -> QHelpContentItem: ...
    def createContents(self, customFilterName: str) -> None: ...
class QHelpContentWidget(QtWidgets.QTreeView):
    """Generated type-hint stub for QHelpContentWidget; signatures only."""

    def linkActivated(self, link: QtCore.QUrl) -> None: ...
    def indexOf(self, link: QtCore.QUrl) -> QtCore.QModelIndex: ...
class QHelpEngineCore(QtCore.QObject):
    """Generated type-hint stub for QHelpEngineCore; signatures only."""

    def __init__(self, collectionFile: str, parent: typing.Optional[QtCore.QObject] = ...) -> None: ...
    @typing.overload
    def documentsForKeyword(self, keyword: str) -> typing.List['QHelpLink']: ...
    @typing.overload
    def documentsForKeyword(self, keyword: str, filterName: str) -> typing.List['QHelpLink']: ...
    @typing.overload
    def documentsForIdentifier(self, id: str) -> typing.List['QHelpLink']: ...
    @typing.overload
    def documentsForIdentifier(self, id: str, filterName: str) -> typing.List['QHelpLink']: ...
    def usesFilterEngine(self) -> bool: ...
    def setUsesFilterEngine(self, uses: bool) -> None: ...
    def filterEngine(self) -> 'QHelpFilterEngine': ...
    def readersAboutToBeInvalidated(self) -> None: ...
    def warning(self, msg: str) -> None: ...
    def currentFilterChanged(self, newFilter: str) -> None: ...
    def setupFinished(self) -> None: ...
    def setupStarted(self) -> None: ...
    def setAutoSaveFilter(self, save: bool) -> None: ...
    def autoSaveFilter(self) -> bool: ...
    def error(self) -> str: ...
    @staticmethod
    def metaData(documentationFileName: str, name: str) -> typing.Any: ...
    def setCustomValue(self, key: str, value: typing.Any) -> bool: ...
    def customValue(self, key: str, defaultValue: typing.Any = ...) -> typing.Any: ...
    def removeCustomValue(self, key: str) -> bool: ...
    def linksForKeyword(self, keyword: str) -> typing.Dict[str, QtCore.QUrl]: ...
    def linksForIdentifier(self, id: str) -> typing.Dict[str, QtCore.QUrl]: ...
    def fileData(self, url: QtCore.QUrl) -> QtCore.QByteArray: ...
    def findFile(self, url: QtCore.QUrl) -> QtCore.QUrl: ...
    @typing.overload
    def files(self, namespaceName: str, filterAttributes: typing.Iterable[str], extensionFilter: str = ...) -> typing.List[QtCore.QUrl]: ...
    @typing.overload
    def files(self, namespaceName: str, filterName: str, extensionFilter: str = ...) -> typing.List[QtCore.QUrl]: ...
    def filterAttributeSets(self, namespaceName: str) -> typing.List[typing.List[str]]: ...
    def registeredDocumentations(self) -> typing.List[str]: ...
    def setCurrentFilter(self, filterName: str) -> None: ...
    def currentFilter(self) -> str: ...
    @typing.overload
    def filterAttributes(self) -> typing.List[str]: ...
    @typing.overload
    def filterAttributes(self, filterName: str) -> typing.List[str]: ...
    def addCustomFilter(self, filterName: str, attributes: typing.Iterable[str]) -> bool: ...
    def removeCustomFilter(self, filterName: str) -> bool: ...
    def customFilters(self) -> typing.List[str]: ...
    def documentationFileName(self, namespaceName: str) -> str: ...
    def unregisterDocumentation(self, namespaceName: str) -> bool: ...
    def registerDocumentation(self, documentationFileName: str) -> bool: ...
    @staticmethod
    def namespaceName(documentationFileName: str) -> str: ...
    def copyCollectionFile(self, fileName: str) -> bool: ...
    def setCollectionFile(self, fileName: str) -> None: ...
    def collectionFile(self) -> str: ...
    def setupData(self) -> bool: ...
class QHelpEngine(QHelpEngineCore):
    """Generated type-hint stub for QHelpEngine; signatures only."""

    def __init__(self, collectionFile: str, parent: typing.Optional[QtCore.QObject] = ...) -> None: ...
    def searchEngine(self) -> 'QHelpSearchEngine': ...
    def indexWidget(self) -> 'QHelpIndexWidget': ...
    def contentWidget(self) -> QHelpContentWidget: ...
    def indexModel(self) -> 'QHelpIndexModel': ...
    def contentModel(self) -> QHelpContentModel: ...
class QHelpFilterData(sip.simplewrapper):
    """Generated type-hint stub for QHelpFilterData; signatures only."""

    @typing.overload
    def __init__(self) -> None: ...
    @typing.overload
    def __init__(self, other: 'QHelpFilterData') -> None: ...
    def versions(self) -> typing.List[QtCore.QVersionNumber]: ...
    def components(self) -> typing.List[str]: ...
    def setVersions(self, versions: typing.Iterable[QtCore.QVersionNumber]) -> None: ...
    def setComponents(self, components: typing.Iterable[str]) -> None: ...
    def swap(self, other: 'QHelpFilterData') -> None: ...
class QHelpFilterEngine(QtCore.QObject):
    """Generated type-hint stub for QHelpFilterEngine; signatures only."""

    @typing.overload
    def indices(self) -> typing.List[str]: ...
    @typing.overload
    def indices(self, filterName: str) -> typing.List[str]: ...
    def availableVersions(self) -> typing.List[QtCore.QVersionNumber]: ...
    def filterActivated(self, newFilter: str) -> None: ...
    def namespacesForFilter(self, filterName: str) -> typing.List[str]: ...
    def removeFilter(self, filterName: str) -> bool: ...
    def setFilterData(self, filterName: str, filterData: QHelpFilterData) -> bool: ...
    def filterData(self, filterName: str) -> QHelpFilterData: ...
    def availableComponents(self) -> typing.List[str]: ...
    def setActiveFilter(self, filterName: str) -> bool: ...
    def activeFilter(self) -> str: ...
    def filters(self) -> typing.List[str]: ...
    def namespaceToVersion(self) -> typing.Dict[str, QtCore.QVersionNumber]: ...
    def namespaceToComponent(self) -> typing.Dict[str, str]: ...
class QHelpFilterSettingsWidget(QtWidgets.QWidget):
    """Generated type-hint stub for QHelpFilterSettingsWidget; signatures only."""

    def __init__(self, parent: typing.Optional[QtWidgets.QWidget] = ...) -> None: ...
    def applySettings(self, filterEngine: QHelpFilterEngine) -> bool: ...
    def readSettings(self, filterEngine: QHelpFilterEngine) -> None: ...
    def setAvailableVersions(self, versions: typing.Iterable[QtCore.QVersionNumber]) -> None: ...
    def setAvailableComponents(self, components: typing.Iterable[str]) -> None: ...
class QHelpIndexModel(QtCore.QStringListModel):
    """Generated type-hint stub for QHelpIndexModel; signatures only."""

    def indexCreated(self) -> None: ...
    def indexCreationStarted(self) -> None: ...
    def isCreatingIndex(self) -> bool: ...
    def linksForKeyword(self, keyword: str) -> typing.Dict[str, QtCore.QUrl]: ...
    def filter(self, filter: str, wildcard: str = ...) -> QtCore.QModelIndex: ...
    def createIndex(self, customFilterName: str) -> None: ...
    def helpEngine(self) -> QHelpEngineCore: ...
class QHelpIndexWidget(QtWidgets.QListView):
    """Generated type-hint stub for QHelpIndexWidget; signatures only."""

    def documentsActivated(self, documents: typing.Iterable['QHelpLink'], keyword: str) -> None: ...
    def documentActivated(self, document: 'QHelpLink', keyword: str) -> None: ...
    def activateCurrentItem(self) -> None: ...
    def filterIndices(self, filter: str, wildcard: str = ...) -> None: ...
    def linksActivated(self, links: typing.Dict[str, QtCore.QUrl], keyword: str) -> None: ...
    def linkActivated(self, link: QtCore.QUrl, keyword: str) -> None: ...
class QHelpLink(sip.simplewrapper):
    """Generated type-hint stub for QHelpLink; signatures only."""

    title = ... # type: str
    url = ... # type: QtCore.QUrl

    @typing.overload
    def __init__(self) -> None: ...
    @typing.overload
    def __init__(self, a0: 'QHelpLink') -> None: ...
class QHelpSearchQuery(sip.simplewrapper):
    """Generated type-hint stub for QHelpSearchQuery; signatures only."""

    class FieldName(int): ...
    DEFAULT = ... # type: 'QHelpSearchQuery.FieldName'
    FUZZY = ... # type: 'QHelpSearchQuery.FieldName'
    WITHOUT = ... # type: 'QHelpSearchQuery.FieldName'
    PHRASE = ... # type: 'QHelpSearchQuery.FieldName'
    ALL = ... # type: 'QHelpSearchQuery.FieldName'
    ATLEAST = ... # type: 'QHelpSearchQuery.FieldName'

    @typing.overload
    def __init__(self) -> None: ...
    @typing.overload
    def __init__(self, field: 'QHelpSearchQuery.FieldName', wordList: typing.Iterable[str]) -> None: ...
    @typing.overload
    def __init__(self, a0: 'QHelpSearchQuery') -> None: ...
class QHelpSearchEngine(QtCore.QObject):
    """Generated type-hint stub for QHelpSearchEngine; signatures only."""

    def __init__(self, helpEngine: QHelpEngineCore, parent: typing.Optional[QtCore.QObject] = ...) -> None: ...
    def searchInput(self) -> str: ...
    def searchResults(self, start: int, end: int) -> typing.List['QHelpSearchResult']: ...
    def searchResultCount(self) -> int: ...
    def searchingFinished(self, hits: int) -> None: ...
    def searchingStarted(self) -> None: ...
    def indexingFinished(self) -> None: ...
    def indexingStarted(self) -> None: ...
    def cancelSearching(self) -> None: ...
    @typing.overload
    def search(self, queryList: typing.Iterable[QHelpSearchQuery]) -> None: ...
    @typing.overload
    def search(self, searchInput: str) -> None: ...
    def cancelIndexing(self) -> None: ...
    def reindexDocumentation(self) -> None: ...
    def hits(self, start: int, end: int) -> typing.List[typing.Tuple[str, str]]: ...
    def hitCount(self) -> int: ...
    def resultWidget(self) -> 'QHelpSearchResultWidget': ...
    def queryWidget(self) -> 'QHelpSearchQueryWidget': ...
    def query(self) -> typing.List[QHelpSearchQuery]: ...
class QHelpSearchResult(sip.simplewrapper):
    """Generated type-hint stub for QHelpSearchResult; signatures only."""

    @typing.overload
    def __init__(self) -> None: ...
    @typing.overload
    def __init__(self, other: 'QHelpSearchResult') -> None: ...
    @typing.overload
    def __init__(self, url: QtCore.QUrl, title: str, snippet: str) -> None: ...
    def snippet(self) -> str: ...
    def url(self) -> QtCore.QUrl: ...
    def title(self) -> str: ...
class QHelpSearchQueryWidget(QtWidgets.QWidget):
    """Generated type-hint stub for QHelpSearchQueryWidget; signatures only."""

    def __init__(self, parent: typing.Optional[QtWidgets.QWidget] = ...) -> None: ...
    def setSearchInput(self, searchInput: str) -> None: ...
    def searchInput(self) -> str: ...
    def setCompactMode(self, on: bool) -> None: ...
    def isCompactMode(self) -> bool: ...
    def search(self) -> None: ...
    def collapseExtendedSearch(self) -> None: ...
    def expandExtendedSearch(self) -> None: ...
    def setQuery(self, queryList: typing.Iterable[QHelpSearchQuery]) -> None: ...
    def query(self) -> typing.List[QHelpSearchQuery]: ...
class QHelpSearchResultWidget(QtWidgets.QWidget):
    """Generated type-hint stub for QHelpSearchResultWidget; signatures only."""

    def requestShowLink(self, url: QtCore.QUrl) -> None: ...
    def linkAt(self, point: QtCore.QPoint) -> QtCore.QUrl: ...
|
[
"pjared870@gmail.com"
] |
pjared870@gmail.com
|
2e989cb9128a91d5462b6ece809b0caa1970ec39
|
bd69c871c5a3a41100df65b74fd8889225c12604
|
/msrptw/handler.py
|
cb2b55fc3df8327ece1acb377f57e3afcd99a1a8
|
[
"MIT"
] |
permissive
|
travishen/msrptw
|
1335de32d1be9005e8d5360abf59c7b9d19945c2
|
fbc9c631dddcba262098c067a34f615ba7d300a0
|
refs/heads/master
| 2021-10-08T10:18:02.739909
| 2018-12-11T05:43:20
| 2018-12-11T05:43:20
| 110,566,346
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 508
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
import logging
from .database.config import session_scope
from .database.model import Log
class SQLAlchemyHandler(logging.Handler):
    """Logging handler that persists each record as a ``Log`` database row.

    Every emitted record is written in its own ``session_scope`` transaction.
    """

    def emit(self, record):
        """Store *record*'s logger name, level name and raw message.

        Uses the LogRecord's public attributes instead of poking at
        ``record.__dict__``; ``record.msg`` (the unformatted message) is kept
        deliberately, matching the original behaviour.
        """
        log = Log(
            logger=record.name,
            level=record.levelname,
            msg=record.msg,
        )
        with session_scope() as session:
            session.add(log)
|
[
"travishen.tw@gmail.com"
] |
travishen.tw@gmail.com
|
d512f381f87df814af88cee9a31206120310697e
|
3a003fbe23963615f96a5742c51ce51f6fc9406a
|
/src/day15/main.py
|
e097d51b79f6b7030eeff97d7797cca0faa6b72a
|
[] |
no_license
|
pypeaday/aoc-2020
|
c1bfb5b4d994a9a02dd742d22d5c07344147fc12
|
cefafc25b927dbd2d837c986093e42474074161f
|
refs/heads/master
| 2023-02-04T16:13:55.224966
| 2020-12-17T22:49:54
| 2020-12-17T22:49:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,623
|
py
|
from tqdm import tqdm
from collections import defaultdict
def get_data(filepath: str = "./data/raw/day15_sample.txt"):
    """Read the comma-separated starting numbers from *filepath*.

    Args:
        filepath (str, optional): path to the input file; its first line is
            a comma-separated list of integers.

    Returns:
        list: the starting numbers, in order.
    """
    with open(filepath, "r") as handle:
        first_line = handle.readline()
    return [int(token) for token in first_line.split(",")]
def instantiate_maps(data: list):
    """Build the two lookup tables for the memory game.

    Args:
        data (list): the starting numbers.

    Returns:
        tuple: ``(turn_map, value_map)`` where ``turn_map`` maps a 1-based
        turn id to the number spoken on that turn, and ``value_map`` maps a
        spoken number to the list of turn ids on which it was spoken.
    """
    turn_map = dict(enumerate(data, start=1))
    value_map = defaultdict(list)
    for turn_id, spoken in enumerate(data, start=1):
        value_map[spoken].append(turn_id)
    return turn_map, value_map
def take_turn(turn_map: dict, value_map: dict, turn_id: int):
    """Play one turn of the memory game, mutating both maps in place.

    Args:
        turn_map (dict): turn id -> spoken number, complete up to turn_id - 1.
        value_map (dict): spoken number -> list of turn ids it was said on.
        turn_id (int): the turn being played.

    Returns:
        tuple: the (mutated) ``turn_map`` and ``value_map``.
    """
    last_spoken = turn_map[turn_id - 1]
    if last_spoken in value_map and len(value_map[last_spoken]) == 1:
        # The previous number had never been said before: say 0.
        turn_map[turn_id] = 0
        value_map[0].append(turn_id)
    else:
        # Otherwise say the gap between its two most recent occurrences.
        occurrences = value_map[last_spoken]
        turn_map[turn_id] = occurrences[-1] - occurrences[-2]
        value_map[turn_map[turn_id]].append(turn_id)
    return turn_map, value_map
def calculate_solution_1(data: list):
    """Return the 2020th number spoken when starting from *data* (part 1)."""
    turn_map, value_map = instantiate_maps(data)
    first_open_turn = len(data) + 1
    for turn_id in range(first_open_turn, 2021):
        turn_map, value_map = take_turn(turn_map, value_map, turn_id)
    return turn_map[2020]
def calculate_solution_2(data: list):
    """Return the 30,000,000th number spoken from *data* (part 2, slow)."""
    turn_map, value_map = instantiate_maps(data)
    first_open_turn = len(data) + 1
    # tqdm only adds a progress bar around the 30M-iteration loop.
    for turn_id in tqdm(range(first_open_turn, 30000001)):
        turn_map, value_map = take_turn(turn_map, value_map, turn_id)
    return turn_map[30000000]
def main(filepath: str = "./data/raw/day15_sample.txt"):
    """Solve both parts of day 15 for *filepath* and print the answers.

    Bug fix: the default path previously ended in ``".txt."`` (trailing
    dot), which named a nonexistent file.
    """
    data = get_data(filepath)
    sol_1 = calculate_solution_1(data)
    print(f"Solution 1: {sol_1}")
    sol_2 = calculate_solution_2(data)
    print(f"Solution 2: {sol_2}")
# Script entry point: run both parts against the real puzzle input.
if __name__ == "__main__":
    main("./data/raw/day15_input.txt")
|
[
"nicpayne713@gmail.com"
] |
nicpayne713@gmail.com
|
40bfe1a9793f2e71914b3fbca2e4a328e6611917
|
182caa9f90ac3f4e42e437c51195f18c98486d46
|
/engine/schedular.py
|
d735e7714093cbb6fb17d02f2535ce27ef95fe3c
|
[
"MIT"
] |
permissive
|
crazymuse/Krama
|
b6a5f96e592cc19889bff57006d275c52f489535
|
5f29096828fe659eccf8525ff31bc4d7a273c049
|
refs/heads/master
| 2021-06-07T10:01:17.950735
| 2016-10-07T10:07:31
| 2016-10-07T10:07:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,791
|
py
|
from __future__ import absolute_import
from ..utils import graph_parser
import os
from time import sleep
import multiprocessing
import sys
from multiprocessing import Process
from . import process_util
def execute(project_path, krama_root):
    """Build the project's DAG, snapshot it to main.prototxt, and run it.

    Args:
        project_path: root directory of the Krama project.
        krama_root: Krama installation root, forwarded to the graph parser.
    """
    graph_proto = graph_parser.create_dag(project_path=project_path, krama_root=krama_root)
    # Use a context manager so the file handle is closed promptly; the
    # original open(...).write(...) leaked the handle.
    with open(os.path.join(project_path, 'main.prototxt'), 'w') as proto_file:
        proto_file.write(str(graph_proto))
    current_exec_path = initalize_execution(graph_proto=graph_proto, project_path=project_path)
    execute_graph(graph_proto=graph_proto, current_exec_path=current_exec_path)
def get_next(execution_path):
    """Return the next unused execution index under *execution_path*.

    Scans for sub-directories named ``exec_<n>`` and returns ``max(n) + 1``,
    or 0 when none exist yet.
    """
    used_indices = [
        int(entry[len('exec_'):])
        for entry in os.listdir(execution_path)
        if entry.startswith('exec_') and os.path.isdir(os.path.join(execution_path, entry))
    ]
    # max(..., default=-1) + 1 yields 0 for an empty directory.
    return max(used_indices, default=-1) + 1
def initalize_execution(graph_proto, project_path):
    """Create a fresh numbered execution directory and snapshot the graph.

    Args:
        graph_proto: the DAG protobuf (anything with a meaningful str()).
        project_path: project root; executions live under <root>/.executions.

    Returns:
        Path of the newly created ``exec_<n>`` directory.

    Note: the function name's spelling ("initalize") is kept because callers
    in this module use it.
    """
    execution_path = os.path.join(project_path, '.executions')
    if not os.path.exists(execution_path):
        os.mkdir(execution_path)
    current_exec_path = os.path.join(
        execution_path, 'exec_' + str(get_next(execution_path=execution_path)))
    os.mkdir(current_exec_path)
    # Context manager fixes the leaked handle from open(...).write(...).
    with open(os.path.join(current_exec_path, 'main.prototxt'), 'w') as proto_file:
        proto_file.write(str(graph_proto))
    return current_exec_path
def get_independent_jobs(graph_proto):
    """Return the schedule jobs with no dependencies (roots of the DAG)."""
    return [job for job in graph_proto.schedule_job if len(job.depends_on) == 0]
def execute_graph(graph_proto, current_exec_path):
    # Delegate actual job execution to process_util: runs every schedule_job
    # of the graph inside the given execution directory.
    process_util.process(schedule_jobs=graph_proto.schedule_job,current_exec_path=current_exec_path)
# Ad-hoc manual test entry point.
# NOTE(review): hard-coded developer-specific paths — not portable.
if __name__=="__main__":
    execute(project_path='/home/jaley/Projects/project1',krama_root='/usr/share/krama')
|
[
"jaley.dholakiya@gmail.com"
] |
jaley.dholakiya@gmail.com
|
de0ca62f4371b88d9a14acc04945e6008d17ad26
|
c176bcccbe0e22c6a8a485a02ffd8f5b164f0c49
|
/data/parse.py
|
48a8547fb6f04020c7e04b5041078892ca4c7140
|
[
"MIT"
] |
permissive
|
yiannisy/behop-misc
|
59ee78b8cf98010657beed7a9045ba6add74917c
|
75290bafaa1acc312a0d9eea06ddeb1d94cf48da
|
refs/heads/master
| 2021-01-10T01:16:30.052578
| 2016-04-05T00:39:20
| 2016-04-05T00:39:20
| 55,451,769
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,834
|
py
|
#!/usr/bin/env python
import string
import sys
import matplotlib
matplotlib.use('Agg')
import pylab
#f1 = open('_snr.log','r')
#f2 = open('_times.log','r')
#f1_lines = f1.readlines()
#f2_lines = f2.readlines()
#for l1,l2 in zip(f1_lines, f2_lines):
# l1 = l1.strip()
# l2 = l2.strip()
# print l2[10:19], l1[-3:]
#f1.close()
#f2.close()
# Command-line arguments: controller log, laptop (station) log, output name.
ctrl_log = sys.argv[1]
laptop_log = sys.argv[2]
output = sys.argv[3]
def time_str_to_sec(timestr, base, offset=0):
    """Convert an "HH:MM:SS" string to seconds relative to *base*.

    Args:
        timestr: wall-clock time formatted as "HH:MM:SS".
        base: reference point in seconds, subtracted from the result.
        offset: extra seconds to subtract (default 0).

    Returns:
        Seconds elapsed since *base*, minus *offset*.
    """
    # str.split works on both Python 2 and 3; the original string.split()
    # module function was removed in Python 3.
    time_vals = timestr.split(':')
    time = 3600 * int(time_vals[0]) + 60 * int(time_vals[1]) + int(time_vals[2])
    time = time - base - offset
    return time
# --- Parse the controller log (Python 2 code: string.split) ---
f = open(ctrl_log,'r')
cur_snrs = []   # (t, snr) samples reported for the current AP
max_snrs = []   # (t, best snr) across all APs reporting on that line
sta_snrs = []   # filled later from the laptop log
ap_snrs = {}    # dpid -> [(t, snr), ...]
pings = []      # filled later from the laptop log
time_base = None
for line in f.readlines():
    vals = string.split(line,' ')
    #print vals
    #print vals[0],vals[6], vals[7], vals[11], vals[12]
    # vals[0] is a bracketed "[HH:MM:SS]" timestamp; strip the brackets and
    # convert to seconds relative to the first line seen.
    time = vals[0][1:-1]
    time_vals = string.split(time,':')
    time = 3600*int(time_vals[0])+60*int(time_vals[1])+int(time_vals[2])
    if time_base == None:
        time_base = time
    time = time-time_base
    cur_snr = int(vals[6])
    cur_snrs.append((time,cur_snr))
    # Longer lines also carry per-AP "dpid->snr" entries from field 15 on.
    if (len(vals) > 15):
        _snrs = []
        for item in vals[15:]:
            dpid,snr = string.split(item,'->')
            snr = int(snr)
            if dpid not in ap_snrs.keys():
                ap_snrs[dpid] = []
            ap_snrs[dpid].append((time,snr))
            _snrs.append(snr)
        #cur_dpid = vals[7][1:-1]
        #max_snr = int(vals[11])
        #max_dpid = vals[12][1:-2]
        #cur_snrs.append(cur_snr)
        #max_snrs.append(max_snr)
        max_snrs.append((time, max(_snrs)))
f.close()
#sys.exit(0)
# --- Parse the laptop (station) log: STA-side SNR and ping times ---
f = open(laptop_log,'r')
l_time_base = None
for line in f.readlines():
    vals = string.split(line,' ')
    time_str = vals[3]
    if l_time_base == None:
        # First line fixes the time base; the -7 looks like a clock-offset
        # fudge between the two logs — TODO confirm.
        l_time_base = time_str_to_sec(time_str, 0) - 7
        time = 0
    else:
        time = time_str_to_sec(time_str, l_time_base)
    # vals[-5] is presumably a negative dBm reading; +90 rebases it onto the
    # same positive dB scale as the controller SNRs — confirm against driver.
    sta_snr = int(vals[-5])
    sta_snr = 90 + sta_snr
    sta_snrs.append((time,sta_snr))
    # Ping replies contain "icmp_seq"; anything else is plotted as 1000 ms.
    if (string.find(line,'icmp_seq') != -1):
        ping_time = float(vals[12][5:])
    else:
        ping_time = 1000
    pings.append((time, ping_time))
    print time, sta_snr, ping_time
f.close()
#f = open('tracking_ping.log','r')
#for line in f.readlines():
# if line.startswith("none") or line.startswith("Request"):
# pings.append(0)
# else:
# vals = string.split(line,' ')
# pings.append(float(vals[6][5:]))
# --- Plot SNR traces (left axis) and ping times (right axis) to a PDF ---
fig = pylab.figure(figsize=(16,12))
ax = fig.add_subplot(111)
lns = ax.plot([s[0] for s in cur_snrs],[s[1] for s in cur_snrs], 'r-', label='cur_snr (AP)')
#for dpid in ap_snrs.keys():
#    lns += ax.plot([s[0] for s in ap_snrs[dpid]], [s[1] for s in ap_snrs[dpid]], '--', label='%s' % dpid)
lns += ax.plot([s[0] for s in max_snrs], [s[1] for s in max_snrs], '--',label='max_snr (AP)')
lns += ax.plot([s[0] for s in sta_snrs], [s[1] for s in sta_snrs], 'c-', label='sta_snr (STA)')
# Second y-axis shares the time axis for the ping-latency stem plot.
ax2 = ax.twinx()
#lns += ax2.stem([p[0] for p in pings], [p[1] for p in pings], 'mx',label='ping (STA)')
ax2.stem([p[0] for p in pings], [p[1] for p in pings], 'm-.',label='ping (STA)')
labels = [l.get_label() for l in lns]
ax.set_xlabel('time (s)')
ax.set_ylabel('SNR (dB)')
ax2.set_ylabel('ping time (ms)')
ax.set_ylim([0,60])
ax2.set_ylim([0,1000])
#ax.annotate('A->B', xy=(74, 27), xycoords='data',
#            xytext=(50, -100), textcoords='offset points',
#            arrowprops=dict(arrowstyle="->")
#            )
#ax.annotate('B->A', xy=(104, 26), xycoords='data',
#            xytext=(50, -100), textcoords='offset points',
#            arrowprops=dict(arrowstyle="->")
#            )
pylab.legend(lns, labels)
pylab.grid()
pylab.title('WiFi Handover (%s)' % output)
pylab.savefig("%s.pdf" % output)
|
[
"yiannisy@stanford.edu"
] |
yiannisy@stanford.edu
|
803a9f864a82b2ba8fa4239340736abf9ed6599d
|
02c2fb3f5dc960f69432bfebad083ca09820a3e1
|
/hm_04_完整的异常捕获.py
|
724edc3127a78c4de29e5e01575cfb1b8dfe5280
|
[] |
no_license
|
theothershore2019/basic_10_abnormal
|
3770d83afc9ec783a1f4fb8bec3de3aa9160f5d2
|
7f0afba3634a361ff8f7dadfb54aac73bcc2a5c7
|
refs/heads/master
| 2020-09-02T20:29:56.452682
| 2019-11-03T12:37:14
| 2019-11-03T12:37:14
| 219,298,255
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
# Demo of a full try/except/else/finally chain (user-facing strings are
# Chinese and kept verbatim).
try:
    # Prompt the user for an integer; int() raises ValueError on bad input.
    num = int(input("请输入一个整数:"))
    # Divide 8 by the entered number; raises ZeroDivisionError when num == 0,
    # which is caught by the generic Exception handler below.
    result = 8/num
    print(result)
except ValueError:
    # Input was not a valid integer.
    print("请输入正确的整数")
except Exception as result:
    # Any other error (e.g. division by zero) — print it as "unknown error".
    print("未知错误 %s" % result)
else:
    # Runs only when no exception was raised.
    print("尝试成功")
finally:
    # Always runs, error or not.
    print("无论是否出现错误,代码都会被执行")

print("*" * 20)
|
[
"1239869110@qq.com"
] |
1239869110@qq.com
|
33ba26acaf75ad7edb0786afc89f4856df1b1795
|
cf7079b0a54dde8ba70eea4e3c9749e47955f015
|
/setup.py
|
163fb53f0699a92c428925e2e459affb002574c9
|
[] |
no_license
|
jvarlamova/xmlparser
|
5a96b420428b6eedfd85284634d2583307a4279b
|
b5fe199abe995c3e07cab1cc1b21c19d31e68690
|
refs/heads/master
| 2021-01-01T20:03:25.927839
| 2017-07-29T19:49:11
| 2017-07-30T19:55:32
| 98,754,970
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 531
|
py
|
from setuptools import setup, find_packages
from os.path import join, dirname
# Package metadata for the xmlparser demo; installs an `xmlparser` console
# script that dispatches to xmlparser.cli:main.
setup(
    name='xmlparser',
    version='1.0',
    description='Demo project with simple xml parser.',
    # Reuse the README as the long description shown on package indexes.
    long_description=open(join(dirname(__file__), 'README.rst')).read(),
    author='Julia Odruzova',
    author_email='varlamova.ju@yandex.ru',
    install_requires=['futures==2.2.0', 'lxml', 'six'],
    packages=find_packages(),
    entry_points={
        'console_scripts': [
            'xmlparser = xmlparser.cli:main',
        ]
    }
)
|
[
"varlamova.ju@yandex.ru"
] |
varlamova.ju@yandex.ru
|
c27db4eb7f5b25f965f80383c267af34cce18f47
|
1537c0dd56ee4dad8fb614a801a70bb1c768aa36
|
/Hello_world_app.py
|
77179fd2c0d81a1ffa39f759485867e49763b7f6
|
[] |
no_license
|
AlJoCo/Flask_apps
|
29322f7db7993c656259708f0a0cf95f5768731e
|
20bd594a0993c4cec4217877d42f89623b8d6476
|
refs/heads/master
| 2023-04-26T18:05:31.873904
| 2021-05-13T23:09:16
| 2021-05-13T23:09:16
| 366,326,387
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
py
|
from flask import Flask
app = Flask(__name__)


@app.route('/')
@app.route('/home')
def home():
    """Landing page, served at both / and /home."""
    return 'Hello, World!'


# Fix: the route was written as f'/<int:number>' — an f-string with no
# placeholders; the plain literal is identical at runtime.
@app.route('/<int:number>')
def square(number):
    """Return the square of the integer path segment, as a string."""
    return str(number * number)


if __name__ == "__main__":
    # Development server only; debug=True must not be used in production.
    app.run(debug=True)
|
[
"alexanderjcoates@gmail.com"
] |
alexanderjcoates@gmail.com
|
a55bd93360d1325dfbfe93c7ca0602d4c1b9cfa2
|
cd861386b38610e543f7a44c5909f605c4b3bb2f
|
/users_module/constants.py
|
c33311707c84f53bfec7ff7198cea3514fa616ea
|
[] |
no_license
|
timsag3/coffeeforme
|
ac0c3d515ad7cd90273afad32b622e3acb53549b
|
65da6240f4d848db2eab484c70b319f433feffdf
|
refs/heads/master
| 2021-03-30T22:26:48.161747
| 2018-03-09T22:23:53
| 2018-03-09T22:23:53
| 124,443,063
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 492
|
py
|
class Const(object):
    # Role/command strings — presumably user roles and a cancel action;
    # confirm against the modules that read them.
    ADMIN = 'admin'
    MANAGER = 'manager'
    SELLER = 'seller'
    CANCEL = 'cancel'
    # Bill rendering templates (str.format placeholders).
    BILL_HEAD = 'seller: {}'
    BILL_APPEND = '{} * {} = {}$'
    BILL_TOTAL = 'Total: {}'
class Commands(object):
    # Textual commands and single-digit menu choices (all compared as
    # strings, presumably against raw user input — confirm at call sites).
    EXIT = 'exit'
    ZERO = '0'
    RETURN = ''
    COMMANDS = 'commands'
    ONE = '1'
    TWO = '2'
    THREE = '3'
    FOUR = '4'
    FIVE = '5'
    SIX = '6'
    SEVEN = '7'
    EIGHT = '8'
    NINE = '9'
class Spaces(object):
    # Fixed 12-space padding; presumably for aligning usernames in output —
    # confirm at call sites.
    USERNAME_SPACES = ' ' * 12
|
[
"timsagepy@gmail.com"
] |
timsagepy@gmail.com
|
5f7616a32ecb9cd3a9b48cc50f4f08002c2d2cdd
|
48d3ca5ebb17e9ee137cf45e1d8010d8eff9c65f
|
/Algorithm/투포인터.py
|
bcdb05f937e62e28402198bc1e48e7f8a196942c
|
[] |
no_license
|
dooking/CodingTest
|
81d18e711a806ee14946b2aa006eda9c235f0b04
|
1467700f3461c5fe61bf1b4e668e0c9016d8c860
|
refs/heads/master
| 2023-02-23T15:59:15.274908
| 2023-02-10T06:35:12
| 2023-02-10T06:35:12
| 237,138,420
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 434
|
py
|
# Element count N and target sum M for a contiguous subsequence.
n, m = 5, 5
data = [1, 2, 3, 2, 5]

result = 0
summary = 0
end = 0

# Advance `start` one step per iteration (two-pointer technique).
for start in range(n):
    # Extend `end` as far as possible while the window sum stays below m.
    while summary < m and end < n:
        summary += data[end]
        end +=1
    # Count the window when its sum is exactly m.
    if summary == m:
        result += 1
    summary -= data[start]

print(result)
|
[
"123456ghghgh@naver.com"
] |
123456ghghgh@naver.com
|
8bd8a497009153344b929829022a88adbedda0ea
|
c1fb3786f26e62b563dd992869c4942359e17c4b
|
/mscreen/analysis/old/vina_result_analysis.py
|
e02de1eb94561febbdd652252e74e304ae961133
|
[
"MIT"
] |
permissive
|
giribio/mscreen
|
8fc875ea6d4efc6956120725dac240e36448705c
|
d4c707387b4853e603b0245113b3c9a302fe8a4f
|
refs/heads/main
| 2023-06-05T21:21:49.926381
| 2021-07-05T07:32:25
| 2021-07-05T07:32:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,831
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 06 02:00:33 2020
@author: eduardo
"""
from pathlib import Path
import numpy as np
from engine import *
#%%
class VinaAnalysis():
    # NOTE(review): this class reads self.screening_path and calls
    # self.molecules_modification, but defines neither and has no __init__ —
    # it looks like an abandoned draft of VinaResults below; confirm before
    # using.
    def run_analysis(self):
        """
        Search into each receptor folder
        and read all ligands-out.pdbqt, convert them to sdf,
        then perform a qt clustering of the poses using the rms
        """
        for rec in self.screening_path.iterdir():
            if rec.is_dir():
                print(rec.name)
                self.molecules_modification(rec.name)
class VinaResults:
    """
    Stores the configuration and results of an mscreen virtual-screening run.
    """

    def __init__(self, screening_out_folder, threashold=2, site=None, radius=5, sdf_propierties=None):
        """
        Initialize the VinaResults object.

        Args:
            screening_out_folder: root of the screening output; one
                sub-directory per receptor is expected by run_analysis().
            threashold: clustering threshold (misspelled name kept for
                backward compatibility).
            site: binding-site centre coordinates; defaults to the PD-L1 site.
            radius: site radius (same units as `site`).
            sdf_propierties: SDF property names to keep (misspelled name kept
                for backward compatibility).
        """
        # Bug fix: screening_out_folder was accepted but never stored, so
        # run_analysis() crashed with AttributeError on self.screening_path.
        self.screening_path = Path(screening_out_folder)
        if not sdf_propierties:
            self.sdf_propierties = ['vina_pose',
                                    'vina_score', 'cluster_id', 'clust_lenght']
        else:
            self.sdf_propierties = sdf_propierties
        if not site:
            self.site = np.array([10.2, 42.7, 40.8])  # this is just for pdl1
        else:
            self.site = site
        self.radius = radius
        self.threashold = threashold

    def run_analysis(self):
        """
        Walk each receptor sub-directory of the screening output and process
        its docked ligands.

        NOTE(review): molecules_modification() is not defined in this file —
        presumably supplied elsewhere; confirm before running.
        """
        for rec in self.screening_path.iterdir():
            if rec.is_dir():
                print(rec.name)
                self.molecules_modification(rec.name)
if __name__ == '__main__':
print('done')
vr = VinaResults('test\\vina_out')
vr.run_analysis()
f00_benchmark_crystal_exclusives_6R3K = 'C://Users//lizet//Documents//tesis//working_on//docking//vina//pdl1//02_benchmark_crystal_exclusives_6r3k//vina_result_2'
f01_benchmark_non_exclusives_6R3K = 'C://Users//lizet//Documents//tesis//working_on//docking//vina//pdl1//02_benchmark_non_exclusives_6r3k//vina_result'
f11_LSOS1k = 'C://Users//lizet//Documents//tesis//working_on//docking//vina//pdl1//11_LSOS1K_6r3k//vina_result'
f11_LSOS1k_blind = 'C://Users//lizet//Documents//tesis//working_on//docking//vina//pdl1//11_LSOS1K_6r3k//vina_result_blind_docking'
f11_JMLO_LIB_6r3k = 'C://Users//lizet//Documents//tesis//working_on//docking//vina//pdl1//11_JMLO_LIB_6r3k//vina_result'
f11_LSO25_LIB_6r3k = 'C://Users//lizet//Documents//tesis//working_on//docking//vina//pdl1//11_LSO25_LIB_6r3k//vina_result'
f22__lso_evolved_library_1500_6r3k = 'C://Users//lizet//Documents//tesis//working_on//docking//vina//pdl1//22__lso_evolved_library_1500_6r3k//vina_result'
f33_em_lib_6r3k = 'C://Users//lizet//Documents//tesis//working_on//docking//vina//pdl1//33_em_lib_6r3k//vina_result'
f34_em_169_6r3k = 'C://Users//lizet//Documents//tesis//working_on//docking//vina//pdl1//34_em_169_6r3k//vina_result'
vr = VinaResults('test//vina_out//')
vr.run_analysis()
# #
# f00_benchmark_crystal_exclusives = Path('/media/edd/OS/Users/lizet/Documents/tesis/working_on/docking/vina/pdl1/02_benchmark_crystal_exclusives_6r3k/vina_result')
# f01_benchmark_non_exclusives = Path(
# '/media/edd/OS/Users/lizet/Documents/tesis/working_on/docking/vina/pdl1/02_benchmark_non_exclusives_6r3k/vina_result')
# f02_benchmark_crystal_exclusives_6r3k = Path(
# '/media/edd/OS/Users/lizet/Documents/tesis/working_on/docking/vina/pdl1/02_benchmark_crystal_exclusives_6r3k/vina_result')
# f02_benchmark_non_exclusives_6r3k = Path(
# '/media/edd/OS/Users/lizet/Documents/tesis/working_on/docking/vina/pdl1/02_benchmark_non_exclusives_6r3k/vina_result')
# f22__lso_evolved_library_1500_6r3k = Path(
# '/media/edd/OS/Users/lizet/Documents/tesis/working_on/docking/vina/pdl1/22__lso_evolved_library_1500_6r3k/LSOG_LIB-6R3K-out/vina_results')
# f32_em_169 = Path(
# '/media/edd/OS/Users/lizet/Documents/tesis/working_on/docking/vina/pdl1/32_em_169/em-169-out')
# f33_em_lib_6r3k = Path(
# '/media/edd/OS/Users/lizet/Documents/tesis/working_on/docking/vina/pdl1/33_em_lib_6r3k')
# f34_em_169_6r3k = Path(
# '/media/edd/OS/Users/lizet/Documents/tesis/working_on/docking/vina/pdl1/34_em_169_6r3k/E169_LIB-6R3K-out')
paths = [f00_benchmark_crystal_exclusives_6R3K, # 0
f01_benchmark_non_exclusives_6R3K, # 1
f11_LSOS1k_blind, # 2
f11_LSOS1k, # 3
f11_JMLO_LIB_6r3k, # 4
f11_LSO25_LIB_6r3k, # 5
f22__lso_evolved_library_1500_6r3k, # 6
f33_em_lib_6r3k, # 7
f34_em_169_6r3k] # 8
# test = Path('/media/edd/OS/Users/lizet/Documents/tesis/working_on/docking/vina/pdl1/02_benchmark_crystal_exclusives_6r3k/vina_result_2')
# vr = VinaResults(test)
# vr.run_analysis()
from time import time
for n, i in enumerate(paths):
i = Path(i)
try:
f = open('log.txt','r+')
f.read()
t0 = time()
f.write('working on {}\n'.format(i.parent.name))
print('working on {}'.format(i.parent.name))
vr = VinaResults(i,radius=3)
vr.run_analysis()
f.write('dt {}\n'.format(time()-t0))
f.close()
except:
f = open('log.txt','r+')
f.read()
t0 = time()
f.write('error at {}\n'.format(i.parent.name))
print('error at {}'.format(i.parent.name))
f.write('dt {}\n'.format(time()-t0))
f.close()
|
[
"eduardomayoyanes@gmail.com"
] |
eduardomayoyanes@gmail.com
|
62f2a142d5d2e5abe08693185f6bdcb902f9e3ef
|
6cf6cebd13e9793dee9ad6c3eab30ad9308ad81e
|
/第二章/filecmp/simple2.py
|
e89823dcd7bba02ea522f5102f3cd3d4c206e666
|
[] |
no_license
|
jumploop/pyauto-ops
|
d7d5a28c94a5edce1709d9ae7ddc308954863a2f
|
6dfd3075b1a655156242a085eecd080b43528c52
|
refs/heads/master
| 2022-11-17T01:10:20.793157
| 2020-07-07T12:19:33
| 2020-07-07T12:19:33
| 265,720,546
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,895
|
py
|
#!/usr/bin/env python
import os, sys
import filecmp
import re
import shutil
holderlist = []
def compareme(dir1, dir2):
dircomp = filecmp.dircmp(dir1, dir2)
only_in_one = dircomp.left_only
diff_in_one = dircomp.diff_files
dirpath = os.path.abspath(dir1)
[holderlist.append(os.path.abspath(os.path.join(dir1, x))) for x in only_in_one]
[holderlist.append(os.path.abspath(os.path.join(dir1, x))) for x in diff_in_one]
if len(dircomp.common_dirs) > 0:
for item in dircomp.common_dirs:
compareme(os.path.abspath(os.path.join(dir1, item)), \
os.path.abspath(os.path.join(dir2, item)))
return holderlist
def main():
if len(sys.argv) > 2:
dir1 = sys.argv[1]
dir2 = sys.argv[2]
else:
print("Usage: ", sys.argv[0], "datadir backupdir")
sys.exit()
source_files = compareme(dir1, dir2)
dir1 = os.path.abspath(dir1)
if not dir2.endswith('/'): dir2 = dir2 + '/'
dir2 = os.path.abspath(dir2)
destination_files = []
createdir_bool = False
for item in source_files:
destination_dir = re.sub(dir1, dir2, item)
destination_files.append(destination_dir)
if os.path.isdir(item):
if not os.path.exists(destination_dir):
os.makedirs(destination_dir)
createdir_bool = True
if createdir_bool:
destination_files = []
source_files = []
source_files = compareme(dir1, dir2)
for item in source_files:
destination_dir = re.sub(dir1, dir2, item)
destination_files.append(destination_dir)
print("update item:")
print(source_files)
copy_pair = zip(source_files, destination_files)
for item in copy_pair:
if os.path.isfile(item[0]):
shutil.copyfile(item[0], item[1])
if __name__ == '__main__':
main()
|
[
"827182486@qq.com"
] |
827182486@qq.com
|
26d12935ffe15861fd50827a98972ba8b8999dfc
|
99fea6d4bb077c6ed76603cd787fd98443a32652
|
/src/jackal_workspace/scripts/LocalPlanner.py
|
7d8ad8cb4432a2b0695e0f65d596dcec5e18660d
|
[] |
no_license
|
scifiswapnil/decentralized-payload-transport
|
223253a0f618d81b1d9c3045dd2e026065029906
|
bb123d31e113bfa707b5706c46d949368c185ab1
|
refs/heads/master
| 2023-08-15T09:56:44.361979
| 2021-09-26T10:38:07
| 2021-09-26T10:38:07
| 391,473,974
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,027
|
py
|
#!/usr/bin/env python3
# coding: utf-8
# **Script name** : Local Planner
#
# **Description** : Multi-agent payload transport global planner based on APF.
#
# **Author** : Swapnil Kalhapure
#
# **Email** : kalhapure.swapnil@gmail.com
# ## Imports
# In[1]:
import rospy
import cv2
import time
import rospkg
import tf2_ros
import tf2_geometry_msgs
import numpy as np
import matplotlib.pyplot as plt
from nav_msgs.msg import Odometry
from nav_msgs.msg import Path
from nav_msgs.msg import OccupancyGrid
from geometry_msgs.msg import Twist
from scipy.ndimage.morphology import distance_transform_edt as bwdist
from scipy.ndimage.morphology import grey_dilation
from scipy.spatial import distance
from geometry_msgs.msg import PoseStamped
from tracking_pid.msg import FollowPathActionResult
from tracking_pid.msg import traj_point
import warnings
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
warnings.filterwarnings("ignore", category=RuntimeWarning)
rospack = rospkg.RosPack()
# ## Global variables
# In[2]:
res_remap = 0.015
map_x = 0.0
map_y = 0.0
map_org_x = 0.0
map_org_y = 0.0
map_res = 0.0
img_no = 0
got_path = False
goal_results = 0
mapdata = None
orgmapdata = None
globalpath = np.array([])
current_odom = None
agent_current_odom1 = None
agent_current_odom2 = None
pallet_pose = None
# ## Helper functions
# In[3]:
def meters2grid(pose_m):
pose_on_grid = np.array((np.array(pose_m) - [map_org_x, map_org_y])/ map_res)
pose_on_grid[1] = map_y - pose_on_grid[1]
return pose_on_grid
# In[4]:
def grid2meters(pose_grid):
x = pose_grid[0] * (map_res) + map_org_x
y = (map_y - pose_grid[1]) * (map_res) + map_org_y
a = []
a.append(float(x))
a.append(float(y))
return a
# In[5]:
def get_position_in_grid(odom):
tfbuffer = tf2_ros.Buffer()
listener = tf2_ros.TransformListener(tfbuffer)
t_a_b = tfbuffer.lookup_transform('map', odom.header.frame_id, rospy.Time(0), rospy.Duration(10.0))
odom.pose = tf2_geometry_msgs.do_transform_pose(odom.pose, t_a_b)
resp_odom = meters2grid((odom.pose.pose.position.x, odom.pose.pose.position.y))
return resp_odom
# In[6]:
def get_line(x1, y1, x2, y2):
x1 = int(x1)
y1 = int(y1)
x2 = int(x2)
y2 = int(y2)
points = []
issteep = abs(y2-y1) > abs(x2-x1)
if issteep:
x1, y1 = y1, x1
x2, y2 = y2, x2
rev = False
if x1 > x2:
x1, x2 = x2, x1
y1, y2 = y2, y1
rev = True
deltax = x2 - x1
deltay = abs(y2-y1)
error = int(deltax / 2)
y = y1
ystep = None
if y1 < y2:
ystep = 1
else:
ystep = -1
for x in range(x1, x2 + 1):
if issteep:
points.append((y, x))
else:
points.append((x, y))
error -= deltay
if error < 0:
y += ystep
error += deltax
# Reverse the list if the coordinates were reversed
if rev:
points.reverse()
return points
# In[7]:
def path_smoothing(path):
current_point = 0
mapd = mapdata
smoothedpath = []
smoothedpath.append([path[current_point][0],path[current_point][1]])
while (current_point <= len(path)):
org_current_point = current_point
for j in range(current_point,len(path)):
point_lists = get_line(path[current_point][0],path[current_point][1],path[j][0],path[j][1])
a = 0
for i in range(len(point_lists)):
if (mapd[point_lists[i][1],point_lists[i][0]] <= 0.0):
a = 1
current_point = j
smoothedpath.append([path[j][0],path[j][1]])
break
if (a == 1):
break
if (org_current_point == current_point):
break
smoothedpath.append([path[-1][0],path[-1][1]])
return smoothedpath
# ## Controller
# In[8]:
class local_region:
def __init__(self, origin_x, origin_y, width=200):
self.org_x = origin_x
self.org_y = origin_y
self.data = None
self.attractive = None
self.repulsive = None
self.global_path_start = 0
self.path = []
self.data = mapdata[origin_y-width:origin_y+width,origin_x-width:origin_x+width]
def local_coordinate_convert(self,data):
return (data - (self.get_origin() - np.array([200,200])))
def global_coordinate_convert(self,data):
return (data + (self.get_origin() - np.array([200,200])))
def set_map_data(self,data):
self.data = data
def get_map_data(self):
return self.data
def get_target_data(self):
return self.global_coordinate_convert(self.target)
def get_target_local_data(self):
return self.target
def get_local_path(self):
return self.path
def get_origin(self):
return [self.org_x,self.org_y]
def extract_local_path(self,path):
self.path = []
for i in range(self.global_path_start,len(path[0])):
ans = self.local_coordinate_convert(np.array([path[0,i],path[1,i]]))
if ans[0]>=0 and ans[1]>=0 and ans[0]<=400 and ans[1]<=400:
self.path.append(np.array([ans[0],ans[1]]))
else :
break
self.path=np.asarray(self.path)
self.target = self.path[0,:]
return self.path
def extract_immediate_goal(self,global_path):
ans = []
for i in range(len(self.path[:,0])):
ans.append(distance.euclidean([200,200], self.path[i,:]))
if (np.min(ans) < 40):
delete_point = self.global_coordinate_convert(self.path[np.argmin(ans),:])
ay = np.where(global_path==np.array([[delete_point[0]],[delete_point[1]]]))[1][0] + 1
self.target = self.local_coordinate_convert(global_path[:,0])
global_path = np.delete(global_path,np.s_[0:ay],axis=1)
self.path = np.delete(self.path, np.argmin(ans),axis=0)
self.path = np.delete(self.path,np.s_[0:np.argmin(ans)],axis=0)
return self.target,global_path
def compute_map_repulsive_force(self, influence_radius = 2, repulsive_coef = 100):
mod_map = np.copy(self.data)
bdist = bwdist(mod_map==255)
bdist2 = (bdist/100.) + 1
repulsive = repulsive_coef*((1./bdist2 - 1./influence_radius)**2)
repulsive [bdist2 > influence_radius] = 0
return repulsive
def compute_repulsive_force(self, objects, influence_radius = 2, repulsive_coef = 100):
mod_map = np.ones((400, 400), np.uint8)
repulsive = np.zeros((400, 400), np.uint8)
mod_map = mod_map * 255
if len(objects) > 0 :
for i in range(len(objects)):
cv2.circle(mod_map, (objects[i][0],objects[i][1]), 1, 0, -1)
pts = np.array(objects,np.int32)
pts = pts.reshape((-1,1,2))
# cv2.polylines(mod_map,[pts],True,0,12)
cv2.fillPoly(mod_map,[pts],0)
bdist = bwdist(mod_map==255)
bdist2 = (bdist/100.) + 1
repulsive = repulsive_coef*((1.0/bdist2 - 1.0/influence_radius)**2)
repulsive [bdist2 > influence_radius] = 0
return repulsive
else :
return repulsive
def compute_attractive_force(self, goal = [200,200], influence_radius = 0.5,coef = 100.0):
img = np.ones((400, 400), np.uint8)
img = img * 255
cv2.circle(img, (goal[0],goal[1]), 8, 0, -1)
bdist = bwdist(img==255)
bdist2 = (bdist/100.) + 1
repulsive = -coef * ((1./bdist2 - 1./influence_radius)**2)
repulsive [bdist2 > influence_radius] = 0
return repulsive
def visualize_forces (self, force):
skip = 5
sizer = 400
[x_m, y_m] = np.meshgrid(np.linspace(1, sizer,sizer), np.linspace(1, sizer,sizer))
[gy, gx] = np.gradient(force)
gx = -gx
q_stack = x_m[::skip, ::skip], y_m[::skip, ::skip], gx[::skip, ::skip], gy[::skip, ::skip]
plt.figure(figsize=(15,15))
plt.imshow(self.data)
plt.quiver(x_m[::skip, ::skip], y_m[::skip, ::skip], gx[::skip, ::skip], gy[::skip, ::skip])
return plt
def gradient_planner (self, force, start_coords):
[gy, gx] = np.gradient(-force)
route = np.array([np.array(start_coords)])
ix = 0
iy = 0
max_itr = 0
while(((ix < len(gx)-2) and (ix < len(gy)-2)) and max_itr < 15):
current_point = route[-1,:]
if ( not np.isnan(current_point[0]) and not np.isnan(current_point[1])):
ix = int( current_point[1] )
iy = int( current_point[0] )
vx = gx[ix, iy]
vy = gy[ix, iy]
dt = 1 / np.linalg.norm([vx, vy])
next_point = current_point + dt*np.array( [vx, vy] )
route = np.vstack( [route, next_point] )
max_itr = max_itr + 1
else:
break
return route
# ## ROS Code
# In[9]:
def map_callback(data):
global mapdata
global orgmapdata
global map_x
global map_y
global map_org_x
global map_org_y
global map_res
global res_remap
global goal_results
global pallet_pose
map_x = data.info.width
map_y = data.info.height
map_org_x = data.info.origin.position.x
map_org_y = data.info.origin.position.y
map_res = data.info.resolution
orgmapdata = np.asarray(data.data).reshape(data.info.height,data.info.width).astype(np.uint8)
orgmapdata = np.flip(orgmapdata,0)
mapdata = orgmapdata
mapdata = np.where(mapdata<254,mapdata,0)
mapdata = grey_dilation(mapdata,size=(int(res_remap*map_y),int(res_remap*map_x)))
mapdata = np.invert(mapdata)
mapdata = cv2.threshold(mapdata, 200, 255, cv2.THRESH_BINARY)[1]
def path_callback(data):
global mapdata
global globalpath
global current_odom
global agent_current_odom1
global agent_current_odom2
global img_no
global robot_namespace
print("Got the Global path")
globalpath = []
x = []
y = []
for i in range(len(data.poses)):
ans = meters2grid((data.poses[i].pose.position.x,
data.poses[i].pose.position.y))
x.append(int(ans[0]))
y.append(int(ans[1]))
globalpath.append(x)
globalpath.append(y)
globalpath=np.array(globalpath)
final_point = globalpath[:,-1]
while True:
cmd = traj_point()
co = get_position_in_grid(current_odom).astype(np.uint16)
a = local_region(co[0],co[1])
objects_in_region = []
if type(None) != type(pallet_pose):
localpallet = get_position_in_grid(pallet_pose).astype(np.uint16)
localpallet = a.local_coordinate_convert(localpallet)
objects_in_region.append(localpallet)
if type(None) != type(agent_current_odom1):
other_agent1 = get_position_in_grid(agent_current_odom1).astype(np.uint16)
other_agent1 = a.local_coordinate_convert(other_agent1)
objects_in_region.append(other_agent1)
if type(None) != type(agent_current_odom2):
other_agent2 = get_position_in_grid(agent_current_odom2).astype(np.uint16)
other_agent2 = a.local_coordinate_convert(other_agent2)
objects_in_region.append(other_agent2)
a.extract_local_path(globalpath)
localtarget , globalpath= a.extract_immediate_goal(globalpath)
forces = 0
forces = forces + a.compute_map_repulsive_force(influence_radius = 2.1, repulsive_coef = 4.5)
forces = forces + a.compute_repulsive_force(objects = objects_in_region,influence_radius = 6.5,repulsive_coef = 4.5)
forces = forces + a.compute_attractive_force(goal = localtarget, influence_radius = 28.5, coef=5.5)
no_problem = True
# for i in range (len(objects_in_region)):
# if((objects_in_region[i][0] - 200)**2 + (objects_in_region[i][1] - 200)**2) >= 80**2:
# no_problem = False
if (no_problem):
route = a.gradient_planner(forces,[200,200])
viz_plot = a.visualize_forces(forces)
viz_plot.plot(route[:,0],route[:,1],"go--",linewidth=3,markersize=10,label="Local Path")
viz_plot.plot(a.get_local_path()[:,0],a.get_local_path()[:,1],"bo--",linewidth=3,markersize=10,label="Global Path")
if type(None) != type(pallet_pose):
viz_plot.plot(localpallet[0],localpallet[1],"co",markersize=15,label="Pallet")
viz_plot.plot(localtarget[0],localtarget[1],"mo",markersize=15,label="Local Target")
if type(None) != type(agent_current_odom1):
viz_plot.plot(other_agent1[0],other_agent1[1],"ro",markersize=15,label="Other agent")
if type(None) != type(agent_current_odom2):
viz_plot.plot(other_agent2[0],other_agent2[1],"ro",markersize=15,label="Other agent")
viz_plot.plot(200,200,"rX",markersize=15,label="Robot")
viz_plot.legend(loc="upper left",labelspacing=1,prop={'weight':'bold'},facecolor="w",framealpha=1)
img_no = img_no + 1
viz_plot.savefig(rospack.get_path('jackal_workspace') + str("/log/") + str(robot_namespace) + "/2dplot_" + str(img_no) + ".png")
xx, yy = np.mgrid[0:400, 0:400]
fig = plt.figure(figsize=(10,10))
ax = fig.gca(projection='3d')#fig.add_subplot(111, projection='3d')
ax.view_init(elev=55, azim=345)
ax.plot_surface(xx, yy, forces,cmap=cm.coolwarm,linewidth=0, antialiased=False,alpha=.4)
ax.plot(route[:,1],route[:,0],"go--",linewidth=3,markersize=10,label="Local Path")
ax.plot(a.get_local_path()[:,1],a.get_local_path()[:,0],"bo--",linewidth=3,markersize=10,label="Global Path")
# if type(None) != type(pallet_pose):
# ax.plot(localpallet[1],localpallet[0],"co",markersize=15,label="Pallet")
# ax.plot(localtarget[1],localtarget[0],"mo",markersize=15,label="Local Target")
# if type(None) != type(agent_current_odom1):
# ax.plot(other_agent[1],other_agent[0],"ro",markersize=15,label="Other agent")
# ax.plot(200,200,"rX",markersize=15,label="Robot")
ax.legend(loc="upper left",labelspacing=1,prop={'weight':'bold'},facecolor="w",framealpha=1)
plt.savefig(rospack.get_path('jackal_workspace') + str("/log/") + str(robot_namespace) + "/3dplot_" + str(img_no) + ".png")
cmd.pose.header.frame_id = "map"
op = grid2meters(a.global_coordinate_convert(route[-2,:]))
cmd.pose.pose.position.x = op[0]
cmd.pose.pose.position.y = op[1]
trajectory_pub.publish(cmd)
if(globalpath.size > 0):
continue
else:
testco = get_position_in_grid(current_odom).astype(np.uint16)
final_path_array = get_line(testco[0],testco[1],final_point[0],final_point[1])
for i in range(5,len(final_path_array),3):
# co = get_position_in_grid(current_odom).astype(np.uint16)
# a = local_region(co[0],co[1])
# ty = a.local_coordinate_convert(final_path_array[-1])
# forces = 0
# forces = forces + a.compute_repulsive_force(objects = objects_in_region,influence_radius = 2, repulsive_coef = 2.0)
# forces = forces + a.compute_attractive_force(goal = ty, influence_radius = 3, coef=1.5)
cmd.pose.header.frame_id = "map"
op = grid2meters(final_path_array[i])
cmd.pose.pose.position.x = op[0]
cmd.pose.pose.position.y = op[1]
# trajectory_pub.publish(cmd)
time.sleep(0.3)
break
else:
print("outside region")
def odom_callback(data):
global current_odom
current_odom = data
def agent_odom_callback1(data):
global agent_current_odom1
agent_current_odom1 = data
def agent_odom_callback2(data):
global agent_current_odom2
agent_current_odom2 = data
def pallet_odom_callback(data):
global pallet_pose
pallet_pose = data
rospy.init_node('LocalPlanner')
rospy.Subscriber("/map", OccupancyGrid, map_callback)
rospy.Subscriber("global_path", Path, path_callback)
rospy.Subscriber("/odometry", Odometry, odom_callback)
rospy.Subscriber("/other_agent1", Odometry, agent_odom_callback1)
rospy.Subscriber("/other_agent2", Odometry, agent_odom_callback2)
rospy.Subscriber("/pallet_pose", Odometry, pallet_odom_callback)
trajectory_pub = rospy.Publisher("trajectory", traj_point, queue_size=1)
robot_namespace = rospy.get_param("~robot_namespace")
rospy.spin()
|
[
"kalhapure.swapnil@gmail.com"
] |
kalhapure.swapnil@gmail.com
|
c20555b9ce3ba5650a4e0f8ec5e83c5411e8ee3e
|
4dcfe5d8372c97cf39bc9c59fc1e57c0081c365f
|
/824_goat-latin.py
|
40336663cb5e52cbfba402937ce30e59bf186f16
|
[] |
no_license
|
xianglei0610/leetcode
|
ea2d99dc41f18a9609df5e811fc30108ad3675a0
|
2a9fddf186f5e2056b10bcbec04e989e37b87fb7
|
refs/heads/master
| 2020-04-04T01:10:59.416015
| 2018-12-04T03:46:40
| 2018-12-04T03:46:40
| 155,671,152
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 484
|
py
|
#coding=utf-8
class Solution(object):
def toGoatLatin(self, S):
"""
:type S: str
:rtype: str
"""
l = S.split(' ')
res = []
for i ,v in enumerate(l):
if v[0].lower() not in ['a', 'e', 'i', 'o', 'u']:
s = v[0]
v = v[1:]+s
v += 'ma'
v += 'a'*(i+1)
res.append(v)
return ' '.join(res)
S="I speak Goat Latin"
print Solution().toGoatLatin(S)
|
[
"657984027@qq.com"
] |
657984027@qq.com
|
73a98f8ecfd4950b82c94838bd649c7ffc3f1602
|
7f90f49237b30e404161b4670233d023efb7b43b
|
/第一章 python基础/03 面向对象/days02/class_01_继承.py
|
6163e012f05c86a1fb619d0860a649c40e669d35
|
[] |
no_license
|
FangyangJz/Black_Horse_Python_Code
|
c5e93415109699cc42ffeae683f422da80176350
|
34f6c929484de7e223a4bcd020bc241bb7201a3d
|
refs/heads/master
| 2020-03-23T01:52:42.069393
| 2018-07-14T12:05:12
| 2018-07-14T12:05:12
| 140,942,688
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 454
|
py
|
# !/usr/bin/env python
# -*- coding:utf-8 -*-
# author: Fangyang time:2017/11/19
class Animal:
def eat(self):
print "吃"
def drink(self):
print "喝"
class Dog(Animal):
def bark(self):
print "旺!旺!"
class XiaoTianQuan(Dog):
def fly(self):
print "I can fly"
wangcai = Dog()
wangcai.eat()
wangcai.drink()
wangcai.bark()
print "-"*50
xtq = XiaoTianQuan()
xtq.fly()
xtq.eat()
xtq.bark()
|
[
"fangyang.jing@hotmail.com"
] |
fangyang.jing@hotmail.com
|
e652a199be13aa16c0b16c48e3e1d9252e8d4be3
|
c63370c59d02a0c2894e78b2b5b3fe4dce5d842c
|
/Widhya-Winterinternship/Flask-API/api.py
|
c5d817211a34781ad19035f10e4da34f619ab693
|
[] |
no_license
|
YuktiGit/Widhya-Winterinternship
|
52538f850cc8eec9edf73e38f9e13f2618600d41
|
c7e33aa705b3926c6703a5ec1ee48a285edc3e38
|
refs/heads/main
| 2023-06-04T16:57:40.124373
| 2021-06-23T05:10:07
| 2021-06-23T05:10:07
| 379,485,898
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 621
|
py
|
from flask import Flask, render_template, redirect, url_for, request
app = Flask(__name__)
@app.route('/')
def home():
return "Hello, World !!!"
@app.route('/index')
def index():
return render_template("index.html")
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != 'admin' or request.form['password'] != 'admin':
error = 'Invalid Credentials. Please try again.'
else:
return redirect(url_for('home'))
return render_template('login.html', error = error)
|
[
"noreply@github.com"
] |
YuktiGit.noreply@github.com
|
54f64eecf9d199de43581540315f9573513c5e15
|
f4508a706a7a98534a60c4fafb7e7e657ac19eac
|
/data.py
|
40e5044e86cda6dcd1a04d6ee87b916ab220dc66
|
[] |
no_license
|
ZeChickens/FastDeal-Telegram-Bot
|
40760cada2be2a1c409c0a1b5c8ea31d5f4c6c62
|
7d85a869c1ad8e745006bef541a47d5d883a46eb
|
refs/heads/master
| 2023-02-09T02:02:56.022742
| 2021-01-04T23:26:47
| 2021-01-04T23:26:47
| 297,624,014
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,243
|
py
|
import pyodbc
from datetime import timedelta, date, datetime
import error_logging
import configparser
class Data:
def __init__(self, bot):
self.REDACTION_CHAT_ID = -1001378510647
self.bot = bot
self.message = Message()
self.dbo = Dbo(bot=bot)
def get_channel(self, col=["*"], where=None, signs=["="], order_by=None):
rows = self.dbo.select_table(table=self.dbo.table_channel, values=col,
where=where, sign=signs, order_by=order_by)
return rows
def add_channel(self, link, name, subscribers=0, price=0,
photo="None", description="None",
tag_id=None, statistic_id=None, owner_id=None, status=0,
register_date=date.today()):
values = [link, name, subscribers, price, photo, description, statistic_id, tag_id, owner_id, status, register_date]
self.dbo.add_value(self.dbo.table_channel, *values)
def update_channel(self, set_=dict(), where=dict()):
col = [[k] for k, v in set_.items()]
value = [[v] for k, v in set_.items()]
where, where_value = [[k, v] for k, v in where.items()][0]
self.dbo.update_value(table=self.dbo.table_channel, column=col, value=value,
where=where, where_value=where_value)
def get_channel_stats(self, col=["*"], where=None, signs=["="], order_by=None):
rows = self.dbo.select_table(table=self.dbo.table_channel_stats, values=col,
where=where, sign=signs, order_by=order_by)
return rows
def add_channel_stats(self, one_post, one_post_last_day, er, er_last_day):
values = [one_post, one_post_last_day, er, er_last_day]
self.dbo.add_value(self.dbo.table_channel_stats, *values)
def update_channel_stats(self, set_=dict(), where=dict()):
col = [[k] for k, v in set_.items()]
value = [[v] for k, v in set_.items()]
where, where_value = [[k, v] for k, v in where.items()][0]
self.dbo.update_value(table=self.dbo.table_channel_stats, column=col, value=value,
where=where, where_value=where_value)
def get_client(self, col=["*"], where=None, signs=["="], order_by=None):
rows = self.dbo.select_table(table=self.dbo.table_client, values=col,
where=where, sign=signs, order_by=order_by)
return rows
def add_client(self, chat_id, username, name,
surname, register_date, last_interaction_time):
values = [chat_id, username, name, surname, register_date, last_interaction_time]
self.dbo.add_value(self.dbo.table_client, *values)
def update_client(self, set_=dict(), where=dict()):
col = [[k] for k, v in set_.items()]
value = [[v] for k, v in set_.items()]
where, where_value = [[k, v] for k, v in where.items()][0]
self.dbo.update_value(table=self.dbo.table_client, column=col, value=value,
where=where, where_value=where_value)
def get_order(self, col=["*"], where=None, signs=["="], order_by=None):
rows = self.dbo.select_table(table=self.dbo.table_order, values=col,
where=where, sign=signs, order_by=order_by)
return rows
def add_order(self, client_id, channel_id, text, photo,
comment, post_date, order_date, redaction_comment=None,
status=0, owner_comment=None, post_statistic_id=None):
values = [client_id, channel_id, text, photo,
comment, post_date, order_date, redaction_comment,
status, owner_comment, post_statistic_id]
self.dbo.add_value(self.dbo.table_order, *values)
def update_order(self, set_=dict(), where=dict()):
col = [[k] for k, v in set_.items()]
value = [[v] for k, v in set_.items()]
where, where_value = [[k, v] for k, v in where.items()][0]
self.dbo.update_value(table=self.dbo.table_order, column=col, value=value,
where=where, where_value=where_value)
def get_post_statistic(self, col=["*"], where=None, signs=["="], order_by=None):
rows = self.dbo.select_table(table=self.dbo.table_post_statistic, values=col,
where=where, sign=signs, order_by=order_by)
return rows
def add_post_statistic(self, post_id, post_link, post_text, views_count,
post_time, channel_link=None, subscribers_on_start=None,
subscribers_in_half_day=None, subscribers_in_day=None):
values = [post_id, post_link, post_text, views_count,
post_time, channel_link, subscribers_on_start,
subscribers_in_half_day, subscribers_in_day]
self.dbo.add_value(self.dbo.table_post_statistic, *values)
def update_post_statistic(self, set_=dict(), where=dict()):
col = [[k] for k, v in set_.items()]
value = [[v] for k, v in set_.items()]
where, where_value = [[k, v] for k, v in where.items()][0]
self.dbo.update_value(table=self.dbo.table_post_statistic, column=col, value=value,
where=where, where_value=where_value)
def get_payment(self, col=["*"], where=None, signs=["="], order_by=None):
rows = self.dbo.select_table(table=self.dbo.table_payment, values=col,
where=where, sign=signs, order_by=order_by)
return rows
def add_payment(self, order_id, order_reference, reason,
amount, currency, created_date, processing_date,
card_pan, card_type, issuer_bank_country,
issuer_bank_name, transaction_status, refund_amount,
fee, merchant_signature):
values = [order_id, order_reference, reason,
amount, currency, created_date, processing_date,
card_pan, card_type, issuer_bank_country,
issuer_bank_name, transaction_status, refund_amount,
fee, merchant_signature]
self.dbo.add_value(self.dbo.table_payment, *values)
def update_payment(self, set_=dict(), where=dict()):
col = [[k] for k, v in set_.items()]
value = [[v] for k, v in set_.items()]
where, where_value = [[k, v] for k, v in where.items()][0]
self.dbo.update_value(table=self.dbo.table_payment, column=col, value=value,
where=where, where_value=where_value)
def get_order_status(self, col=["*"], where=None, signs=["="], order_by=None):
rows = self.dbo.select_table(table=self.dbo.table_order_status, values=col,
where=where, sign=signs, order_by=order_by)
return rows
def get_channel_status(self, col=["*"], where=None, signs=["="], order_by=None):
rows = self.dbo.select_table(table=self.dbo.table_channel_status, values=col,
where=where, sign=signs, order_by=order_by)
return rows
def get_owner(self, col=["*"], where=None, signs=["="], order_by=None):
rows = self.dbo.select_table(table=self.dbo.table_owner, values=col,
where=where, sign=signs, order_by=order_by)
return rows
def add_owner(self, chat_id, nickname, name, surname,
register_date, last_interaction_time, owner_account_id):
values = [chat_id, nickname, name, surname,
register_date, last_interaction_time, owner_account_id]
self.dbo.add_value(self.dbo.table_owner, *values)
def update_owner(self, set_=dict(), where=dict()):
col = [[k] for k, v in set_.items()]
value = [[v] for k, v in set_.items()]
where, where_value = [[k, v] for k, v in where.items()][0]
self.dbo.update_value(table=self.dbo.table_owner, column=col, value=value,
where=where, where_value=where_value)
def get_owner_account(self, col=["*"], where=None, signs=["="], order_by=None):
rows = self.dbo.select_table(table=self.dbo.table_owner_account, values=col,
where=where, sign=signs, order_by=order_by)
return rows
def add_owner_account(self, balance):
values = [balance]
self.dbo.add_value(self.dbo.table_owner_account, *values)
def update_owner_account(self, set_=dict(), where=dict()):
col = [[k] for k, v in set_.items()]
value = [[v] for k, v in set_.items()]
where, where_value = [[k, v] for k, v in where.items()][0]
self.dbo.update_value(table=self.dbo.table_owner_account, column=col, value=value,
where=where, where_value=where_value)
def get_owner_card(self, col=["*"], where=None, signs=["="], order_by=None):
rows = self.dbo.select_table(table=self.dbo.table_owner_card, values=col,
where=where, sign=signs, order_by=order_by)
return rows
def add_owner_card(self, card_number, owner_account_id):
values = [card_number, owner_account_id]
self.dbo.add_value(self.dbo.table_owner_card, *values)
def update_owner_card(self, set_=dict(), where=dict()):
col = [[k] for k, v in set_.items()]
value = [[v] for k, v in set_.items()]
where, where_value = [[k, v] for k, v in where.items()][0]
self.dbo.update_value(table=self.dbo.table_owner_card, column=col, value=value,
where=where, where_value=where_value)
def get_withdraw(self, col=["*"], where=None, signs=["="], order_by=None):
rows = self.dbo.select_table(table=self.dbo.table_withdraw, values=col,
where=where, sign=signs, order_by=order_by)
return rows
def add_withdraw(self, ammount, date, status, owner_card_id):
values = [ammount, date, status, owner_card_id]
self.dbo.add_value(self.dbo.table_withdraw, *values)
def update_withdraw(self, set_=None, where=None):
    """Update columns of the Withdraw table.

    :param set_: mapping of column name -> new value.
    :param where: mapping whose first key/value pair becomes the
        WHERE condition (only the first pair is used, as before).
    """
    # Fixed: mutable default arguments (dict()) replaced with None sentinels.
    set_ = {} if set_ is None else set_
    where = {} if where is None else where
    col = [[k] for k in set_]
    value = [[v] for v in set_.values()]
    where, where_value = list(where.items())[0]
    self.dbo.update_value(table=self.dbo.table_withdraw, column=col, value=value,
                          where=where, where_value=where_value)
def get_tag(self, col=["*"], where=None, signs=["="], order_by=None):
    """Fetch rows from the Tag table through the Dbo layer."""
    return self.dbo.select_table(
        table=self.dbo.table_tag,
        values=col,
        where=where,
        sign=signs,
        order_by=order_by,
    )
def get_multiple_tables(self, tables:list, join_type="Right", col=["*"], where=None, signs=["="], order_by=None):
    """Select from several tables joined pairwise on their <Name>ID columns.

    Builds ``[A] Right JOIN [B] ON [A].AID = [B].AID ...`` for each
    consecutive pair of table names and delegates to select_table.
    """
    joined = f"[{tables[0]}] "
    # Chain every consecutive (left, right) pair of table names.
    for left, right in zip(tables, tables[1:]):
        joined += (
            f"{join_type} "
            f"JOIN [{right}] "
            f"ON [{left}].{left}ID = [{right}].{left}ID "
        )
    return self.dbo.select_table(table=joined, values=col,
                                 where=where, sign=signs, order_by=order_by)
class Dbo:
    """Thin data-access layer over a pyodbc SQL Server connection.

    Builds SELECT / INSERT / UPDATE statements by string formatting and
    executes them on one shared cursor.  NOTE(review): queries are not
    parameterized — only safe for trusted, internal input.
    """

    def __init__(self, bot):
        # *bot* is only used for error reporting through error_logging.
        self.bot = bot
        self._init_table_names()
        self._connect_to_database()

    def _connect_to_database(self):
        """Open the pyodbc connection described in Settings.ini.

        :raises ConnectionError: when pyodbc cannot connect.
        """
        # Computer DESKTOP-2IT0PLT
        # Laptop DESKTOP-4T7IRV2
        config = configparser.ConfigParser()
        config.read('Settings.ini')
        print("Connecting to database...")
        try:
            self.connection = pyodbc.connect(config['SQL Server']['db'])
            print("Database connected succesfully!")
        except Exception:  # was a bare except: don't trap SystemExit/KeyboardInterrupt
            raise ConnectionError
        self.cursor = self.connection.cursor()

    def _init_table_names(self):
        """Define the table-name constants used by the wrapper methods."""
        self.table_channel = "Channel"
        self.table_channel_status = "Channel_Status"
        self.table_channel_stats = "Channel_Statistic"
        self.table_client = "Client"
        self.table_order = "Order"
        self.table_order_status = "Order_Status"
        self.table_payment = "Payment"
        self.table_post_statistic = "Post_Statistic"
        self.table_owner = "Owner"
        self.table_owner_account = "OwnerAccount"
        self.table_owner_card = "OwnerCard"
        # Fixed: callers reference dbo.table_withdraw, but only the
        # misspelled table_withraw existed, causing AttributeError.
        self.table_withdraw = "Withdraw"
        self.table_withraw = "Withdraw"  # kept for backward compatibility
        self.table_tag = "Tag"

    def select_table(self, table, values, sign, where=None, order_by=None):
        """Run a SELECT and return the fetched rows (empty list on failure).

        :param table: table name, or a pre-built (joined) FROM clause
            containing ``[`` brackets already.
        :param values: column names to select; "*" selects everything.
        :param sign: comparison operators, positionally matched to *where*.
        :param where: mapping of column -> value; non-int, non-date values
            are matched with ``LIKE N'value%'``.
        :param order_by: single-entry mapping of column -> direction.
        """
        select_clause = str()
        # Values
        for value in values:
            if value != "*":
                select_clause += "[{}], ".format(value)
            else:
                select_clause += "{}, ".format(value)
        select_clause = select_clause[:-2]
        # Where clause
        where_clause = str()
        if where is not None:
            where_clause = "WHERE "
            index = 0
            for key, value in where.items():
                if not isinstance(value, int) and not isinstance(value, date):
                    # Free-text values become prefix LIKE matches.
                    value = "N'{}%'".format(value)
                    where_clause += "{} LIKE {} AND ".format(key, value)
                else:
                    if isinstance(value, datetime):
                        date_time = value.strftime("%Y-%m-%d %H:%M:%S")
                        # Fixed: the old f"'{date_time}', " left a stray
                        # ", " inside the condition, producing broken SQL.
                        value = f"'{date_time}'"
                    elif isinstance(value, date):
                        value = f"'{str(value)}'"
                    where_clause += "{} {} {} AND ".format(key, sign[index], value)
                index += 1
            where_clause = where_clause[:-4]
        # Order By clause
        order_by_clause = str()
        if order_by is not None:
            value, direction = [[k, v] for k, v in order_by.items()][0]
            order_by_clause = f"ORDER BY {value} {direction}"
        table = f"[{table}]" if "[" not in table else table
        query = """ SELECT {}
                    FROM {}
                    {}
                    {}""".format(select_clause, table, where_clause, order_by_clause)
        try:
            self.cursor.execute(query)
            rows = self.cursor.fetchall()
        except Exception:  # was a bare except
            error_logging.send_error_info_message(bot=self.bot, current_frame=error_logging.currentframe(),
                                                  additional_info=query)
            return list()
        return rows

    def add_value(self, table, *values):
        """INSERT one row into *table*, serializing each value by type.

        None -> NULL, datetime/date -> quoted ISO-ish string, numbers as-is,
        everything else as an N'...' literal with single quotes replaced by
        backticks (crude escaping — see class note).
        """
        value = str()
        for item in values:
            if item == None:
                value += "Null, "
            elif isinstance(item, datetime):
                date_time = item.strftime("%Y-%m-%d %H:%M:%S")
                value += f"'{date_time}', "
            elif isinstance(item, date):
                value += f"'{str(item)}', "
            elif isinstance(item, int) or isinstance(item, float):
                value += "{}, ".format(item)
            else:
                item = item.replace("'", "`")
                value += "N'{}', ".format(item)
        value = value[:-2]  # erase ", " at the end
        query = """ INSERT INTO [{}]
                    VALUES ({})""".format(table, value)
        try:
            self.cursor.execute(query)
            self.connection.commit()
            print("{} added succesfully!".format(table))
        except Exception:  # was a bare except
            error_logging.send_error_info_message(bot=self.bot, current_frame=error_logging.currentframe(),
                                                  additional_info=query)
            print("New {} not added(((".format(table))

    def update_value(self, table, column, value, where, where_value):
        """UPDATE *table*, setting column/value pairs where *where* matches.

        :param column: list of 1-element lists of column names.
        :param value: list of 1-element lists of new values (parallel).
        :param where: column name for the WHERE clause.
        :param where_value: value compared against *where*.
        """
        values = str()
        for col, val in zip(column, value):
            if val[0] == None:
                values += f"{col[0]} = Null, "
            elif isinstance(val[0], datetime):
                date_time = val[0].strftime("%Y-%m-%d %H:%M:%S")
                values += f"{col[0]} = '{date_time}', "
            elif isinstance(val[0], date):
                values += f"{col[0]} = '{str(val[0])}', "
            elif not isinstance(val[0], int):
                values += f"{col[0]} = N'{val[0]}', "
            else:
                # NOTE(review): only the int branch brackets the column name;
                # preserved as-is to avoid changing generated SQL.
                values += f"[{col[0]}] = {val[0]}, "
        values = values[:-2]
        if not isinstance(where_value, int):
            where_value = "N'{}'".format(where_value)
        query = """ UPDATE [{}]
                    SET {}
                    WHERE [{}] = {}""".format(table, values, where, where_value)
        try:
            self.cursor.execute(query)
            self.connection.commit()
            print("{} {} updated succesfully!".format(table, column))
        except Exception:  # was a bare except
            error_logging.send_error_info_message(bot=self.bot, current_frame=error_logging.currentframe(),
                                                  additional_info=query)
            print("{} {} failed to update(((".format(table, column))
# TODO: move these hard-coded UI messages out of this class and into the SQL database.
class Message:
    """Container for every user-facing bot text and button caption.

    All values are Ukrainian UI strings; they are runtime data and are
    kept byte-for-byte (do not translate or reformat them here).
    """

    def __init__(self):
        self._init_messages()

    def _init_messages(self):
        # Static message texts, grouped by feature; HTML tags are used by
        # Telegram's HTML parse mode.
        self.start_bot = (
            "Привіт!\n"
            "Ну що ж, розпочнемо пошук ідеального каналу для вашої реклами.\n\n"
            "Я - FastDeal Telegram, бот-біржа з замовлення реклами у Telegram, допоможу у цьому.\n\n"
            "Вірю, що я буду такою ж цінною для Вас, як для мене цей прекрасний месенджер.\n"
            "Тому успадкувала собі його ж простоту, швидкість та надійність.\n\n"
            "А тепер, ознайомлюйтеся та скоріш переходьте до вибору категорій."
        )
        self.tag_choose = (
            "Яка Ваша цільова аудиторія?\n"
            "🎯 Вибери свою категорію:"
        )
        self.tag_empty = "Поки що пусто("
        self.tag_first_page = "Схоже ти на самому початку!"
        self.tag_last_page = "Ой, це вже остання сторінка!"
        self.channel_choose = "Виберіть канал для детальнішої інформації:"
        self.channel_description_description = "🗒<i><u>Опис каналу</u></i>"
        self.channel_description_subs = "👥<i>Підписники</i>"
        self.channel_description_price = "💰<i>Ціна</i>"
        self.channel_description_post_views = "👁<i><u>Переглядів на пост</u></i>"
        self.channel_description_post_views_last_seven_days = "🔹<i>За останній тиждень</i>"
        self.channel_description_post_views_last_day = "🔸<i>За останні 24 години</i>"
        self.channel_description_er = "📈<i><u>ER</u></i> (процент підписників, яка взаємодіяла з постами)"
        self.channel_description_er_last_seven_days = "🔹<i>Тижневий</i>"
        self.channel_description_er_last_day = "🔸<i>Денний</i>"
        self.calendar_choose_date = "Виберіть потрібну дату"
        self.order_wait_photo = "Надішліть мені <b>фото</b> яке ви хочете бачити у вашій рекламі (якщо таке існує)"
        self.order_wait_text = (
            "Очікую <b>текст</b> вашої реклами.\n\n"
            "❗️ Щоб ваше замовлення було виконане, переконайтесь, що ви дотримались всіх правил вказаних вище"
        )
        self.order_wait_comment = (
            "Напишіть ваш <b>коментар</b> до замовлення.\n\n"
            "<i>Тут ви можете вказати наступне:</i>\n"
            "🕔 Бажаний проміжок часу\n"
            "📝 Вказівки до оформлення вашої реклами (прикріпити кнопку та ін.)"
        )
        self.order_description_text = "Текст"
        self.order_description_client_comment = "Коментар замовника"
        self.order_description_redaction_comment = "Коментар редакції"
        self.order_description_post_link = "Пост"
        self.order_description_status = "Статус:"
        self.order_status_notification = "У вашому замовленні відбулись зміни"
        self.order_status_redaction_rejected = "замовлення не пройшло перевірку редакції"
        self.order_canceled = "Замовлення успішно скасовано!"
        self.order_formed = (
            "🔅Готово!\n"
            "Ваше замовлення проходить перевірку редакції та власника.\n"
            "Очікуйте відповіді протягом 12 годин.\n\n"
            "У випадку відхилення, Ви отримаєте повернення коштів."
        )
        self.order_sent_to_redaction = "Готово!\nВаше замовлення надіслане на перевірку.\nОчікуйте рішення на протязі дня."
        self.order_list = "Список усіх замовлень:"
        self.order_payment_error = "Упс!\nСталась невідома мені помилка😐\nСпробуй ще раз"
        self.order_payment_confirmation_wait = "📩 Зачекайте декілька хвилин, ваша оплата обробляється."
        self.order_payment_time_is_up = (
            "❌ Ви не оплатили замовлення.\n\n"
            "Спробуйте замовити ще раз, або зверніться у підтримку /help."
        )
        self.order_payment_refund_wait = "Зачекайте, роблю всьо шо нада."
        self.order_payment_refund_completed = "Я всьо вернув клієнту!\nНадіюсь..."
        self.redaction_results_sent_to_client = "Результат надіслано клієнту!"
        self.redaction_new_order_notification = "Нове замовлення!"
        self.redaction_reject_reason = "Введіть причину відказу"
        self.redaction_reject_order = "Замовлення відхилено!\nДеталі надіслано клієнту."
        self.redaction_command_error = "Ти не маєш права це робити!"
        ############## BUTTONS #####################
        self.button_back = "Назад"
        self.button_start_work = "Розпочати"
        self.button_service_reviews = "Відгуки"
        self.button_service_info = "Інфо"
        self.button_service_what_special = "Що у мені особливого?"
        self.button_service_how_to_use = "Як мною користуватись?"
        self.button_tag_empty = "Каналів немає"
        self.button_channel_order = "Замовити"
        self.button_channel_forbidden_topics = "❗️Заборонені теми❗️"
        self.button_channel_statistic = "Статистика"
        self.button_channel_reviews = "Відгуки"
        self.button_order_my = "Мої замовлення"
        self.button_order_cancel = "Скасувати замовлення"
        self.button_order_no_photo = "У мене немає фото"
        self.button_order_send_to_redaction = "Надіслати редакції"
        self.button_redaction_new_order = "Переглянути"
        self.button_payment_refund = "REFUND"
        ############## ETC #####################
        self.oops = "Щось пішло не так :("
        self.under_development = "В розробці"
        self.delete_error = "Старі повідомлення неможливо видалити"

    def form_calendar_confirm_date(self, date):
        """Return an HTML confirmation line for the chosen order *date*."""
        # Ukrainian month names in genitive case, indexed by month-1.
        month = [
            "Січня", "Лютого", "Березня", "Квітня", "Травня", "Червня",
            "Липня", "Серпня", "Вересня", "Жовтня", "Листопада", "Грудня"
        ]
        msg = "Ви обрали дату замовлення на "
        msg += f"<b>{date.day} {month[date.month-1]}</b>"
        return msg

    def form_redaction_confirm_order(self, order_id):
        """
        'CONFIRMED' - notification for another bot
        """
        msg = f"/CONFIRMED_{order_id}"
        return msg

    def form_order_refund_info(self, channel_name, order_id, refund_amount):
        """Return the HTML refund notice for an unfulfilled order."""
        msg = "Кошти за не виконане замовлення повернено.\n\n"
        msg += f"<b>Order id</b> - {order_id}\n"
        msg += f"<b>Канал</b> - {channel_name.strip()}\n"
        msg += f"<b>Сума</b> - {refund_amount} UAH"
        return msg

    def form_order_receipt(self, channel_name, date, price):
        """Return the HTML receipt shown for a new advertising order."""
        date = date.strftime("%d.%m.%Y")
        msg = "💡Нове замовлення реклами💡\n\n"
        msg += f"<b>Назва каналу</b> - {channel_name}\n"
        msg += f"<b>Дата</b> - {date}\n"
        msg += f"<b>Ціна</b> - {price} UAH\n\n"
        msg += "⚠️ Детальніше огляньте замовлення та проведіть оплату ⚠️\n"
        msg += "У випадку порушення норм, ви отримаєте свої кошти назад протягом 12 годин🔙"
        return msg
|
[
"gslaavko@gmail.com"
] |
gslaavko@gmail.com
|
1b4f00cd91fbe5469c6b6111457804eb604a8bce
|
acc147eac9f70e7cd77a3bd46ff68d6095b9f039
|
/records/migrations/0003_auto_20171029_0518.py
|
c9b7ee605fe49250522c7c7779e072dcbac471bc
|
[] |
no_license
|
nataliia-pimankova/hearttone
|
40b1f649c587a04ae6b49a7f075d510282666c93
|
6cc34d9faf2e85209857d2b553df45d36194e652
|
refs/heads/master
| 2023-02-05T02:40:42.295420
| 2017-11-12T12:56:10
| 2017-11-12T12:56:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 550
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-29 05:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: makes Patient.path_to_file optional
    # (blank/null) and sets its FilePathField root.
    # NOTE(review): the bytes path b'/work/...' was baked in from local
    # settings at generation time — machine-specific; do not hand-edit.

    dependencies = [
        ('records', '0002_auto_20171028_2337'),
    ]

    operations = [
        migrations.AlterField(
            model_name='patient',
            name='path_to_file',
            field=models.FilePathField(blank=True, null=True, path=b'/work/virtualenvs/hearttone/src/hearttone/media', verbose_name='Record'),
        ),
    ]
|
[
"natalya2487@gmail.com"
] |
natalya2487@gmail.com
|
1dba8bd5c7859414024ade27723f29796368b786
|
bda6611cf23c3ca5bfed1e2d6e488ee1ec79a124
|
/Problems/1.Two_Sum/2.hashmap.py
|
5d709cd1cc20559c35e7ae6aefa2807f650d59c9
|
[] |
no_license
|
dotcink/leetcode
|
09132a3d650026eb665ced75f5e41aba3e68cf18
|
0f6e9ce8d39c5fc9141854ec322e994b39092508
|
refs/heads/master
| 2021-01-09T12:04:31.674412
| 2020-06-29T15:18:34
| 2020-06-29T15:18:34
| 242,294,545
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 616
|
py
|
# The idea of Hashmap is inspired from leetcode hint.
#
# Time Complexity: O(N)
# Space Complexity: O(N)
from typing import List
class Solution:
    def twoSum(self, nums: List[int], target: int) -> List[int]:
        """Return indices of the two numbers in *nums* summing to *target*.

        Single pass with a value -> index hashmap: O(n) time, O(n) space.
        Returns [] when no pair exists or fewer than two numbers are given.
        """
        size = 0 if nums is None else len(nums)  # fixed: 'is None', not '== None'
        if size < 2:
            return []
        value_to_index = {}
        for i, num in enumerate(nums):  # fixed: enumerate over range(len(...))
            complement = target - num
            found = value_to_index.get(complement)
            # 'is not None' so a valid index 0 is not confused with "missing".
            if found is not None:
                return [found, i]
            value_to_index[num] = i
        return []
# Smoke test at import time: expected output [0, 1] then [1, 2].
print(Solution().twoSum([2, 7, 11, 15], 9))
print(Solution().twoSum([3, 2, 4], 6))
|
[
"Dot.Cink+github@gmail.com"
] |
Dot.Cink+github@gmail.com
|
8c1a75f85583c95e59d7ceea18c7e86066c8179c
|
b0aea2ba00720ad7a5359996984115c1f253e655
|
/as11m4.py
|
27913812ec10801409053ed6a3661474acca066e
|
[] |
no_license
|
netlabcode/s13
|
5e135f5b7bdf248c081a7743142e23aba911a6f5
|
46f460217c864a8aac33004b77e4815d37377bf3
|
refs/heads/main
| 2023-04-13T12:56:49.345873
| 2021-04-13T10:38:05
| 2021-04-13T10:38:05
| 349,999,948
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,515
|
py
|
#!/user/bin/env python3
from opcua import Client
from opcua import ua
import socket
import binascii
import _thread
import time
from datetime import datetime
HOST = ''   # NOTE(review): unused — the servers bind with a literal '' below
PORT1 = 991  # serverOne: pushes OPC values to one client
PORT2 = 992  # serverTwo: receives 'id+value' updates from one client
PORT3 = 993  # serverOneCC: duplicate push channel
PORT4 = 994  # serverTwoCC: duplicate receive channel

# OPC ACCESS — connects at import time; the module fails to load if the
# OPC UA server at this address is unreachable.
url = "opc.tcp://131.180.165.15:8899/freeopcua/server/"
client = Client(url)
client.connect()
print("connected to OPC UA Server")

# Handles for the seven exposed variables (namespace 2, node ids 445-451).
val1 = client.get_node("ns=2;i=445")
val2 = client.get_node("ns=2;i=446")
val3 = client.get_node("ns=2;i=447")
val4 = client.get_node("ns=2;i=448")
val5 = client.get_node("ns=2;i=449")
val6 = client.get_node("ns=2;i=450")
val7 = client.get_node("ns=2;i=451")
# Define a function for the thread
def serverOne():
    """Push current OPC values to a single TCP client on PORT1.

    Accepts one connection, then once per second sends a single
    '+'-separated line: timestamp followed by the seven node values.
    Loops forever; any error just prints "One" and retries.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s1:
        s1.bind(('', PORT1))
        s1.listen()
        conn1, addr = s1.accept()
        with conn1:
            print('Server 1 from:', addr)
            while True:
                try:
                    # Read every node fresh each cycle (removed dead
                    # locals a/value from the original loop body).
                    readings = [v.get_value()
                                for v in (val1, val2, val3, val4, val5, val6, val7)]
                    dt = datetime.now()
                    # timestamp + values, '+'-joined, encoded to bytes
                    payload = "+".join(str(x) for x in [dt] + readings)
                    conn1.sendall(payload.encode())
                    time.sleep(1)
                except Exception:
                    # Keep serving on any failure (e.g. client disconnect).
                    print("One")
                    pass
# Define a function for the thread
def serverOneCC():
    """Duplicate of serverOne on PORT3 (second push channel).

    Same protocol: one '+'-separated line of timestamp plus the seven
    node values, once per second, forever.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s1:
        s1.bind(('', PORT3))
        s1.listen()
        conn1, addr = s1.accept()
        with conn1:
            print('Server 1 from:', addr)
            while True:
                try:
                    # Removed dead locals a/value from the original loop body.
                    readings = [v.get_value()
                                for v in (val1, val2, val3, val4, val5, val6, val7)]
                    dt = datetime.now()
                    payload = "+".join(str(x) for x in [dt] + readings)
                    conn1.sendall(payload.encode())
                    time.sleep(1)
                except Exception:
                    print("OneCC")
                    pass
# Define a function for the thread
def serverTwo():
    """Receive 'id+value' commands on PORT2 and write them to OPC nodes.

    Each message is '<node_id>+<value>'; the value is parsed as float when
    it contains '.', else int, then written to the matching node. Unknown
    ids print "."; malformed input prints "Two". Loops forever.
    """
    # node id -> (node handle, OPC variant type used when writing);
    # replaces the original if/elif ladder and its dead locals (valueb/b).
    targets = {
        445: (val1, ua.VariantType.Int16),
        446: (val2, ua.VariantType.Int16),
        447: (val3, ua.VariantType.Float),
        448: (val4, ua.VariantType.Float),
        449: (val5, ua.VariantType.Float),
        450: (val6, ua.VariantType.Float),
        451: (val7, ua.VariantType.Float),
    }
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s2:
        s2.bind(('', PORT2))
        s2.listen()
        conn2, addr = s2.accept()
        with conn2:
            print('Server 2 from:', addr)
            while True:
                raw = conn2.recv(1024).decode("utf-8")
                try:
                    a, b = raw.split("+")
                    value = float(b) if '.' in b else int(b)
                    check = int(a)
                    if check in targets:
                        node, vtype = targets[check]
                        node.set_value(value, vtype)
                        print('Value {} set to:'.format(check), value)
                    else:
                        print(".")
                except Exception:
                    print("Two")
                    pass
def serverTwoCC():
    """Duplicate of serverTwo on PORT4 (second receive channel).

    Same protocol: '<node_id>+<value>' messages written to OPC nodes;
    malformed input prints "TwoCC". Loops forever.
    """
    # node id -> (node handle, OPC variant type used when writing).
    targets = {
        445: (val1, ua.VariantType.Int16),
        446: (val2, ua.VariantType.Int16),
        447: (val3, ua.VariantType.Float),
        448: (val4, ua.VariantType.Float),
        449: (val5, ua.VariantType.Float),
        450: (val6, ua.VariantType.Float),
        451: (val7, ua.VariantType.Float),
    }
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s2:
        s2.bind(('', PORT4))
        s2.listen()
        conn2, addr = s2.accept()
        with conn2:
            print('Server 2 from:', addr)
            while True:
                raw = conn2.recv(1024).decode("utf-8")
                try:
                    a, b = raw.split("+")
                    value = float(b) if '.' in b else int(b)
                    check = int(a)
                    if check in targets:
                        node, vtype = targets[check]
                        node.set_value(value, vtype)
                        print('Value {} set to:'.format(check), value)
                    else:
                        print(".")
                except Exception:
                    print("TwoCC")
                    pass
# Start the four server threads (the old comment said "two").
try:
    _thread.start_new_thread(serverOne, ())
    _thread.start_new_thread(serverOneCC, ())
    _thread.start_new_thread(serverTwo, ())
    _thread.start_new_thread(serverTwoCC, ())
except Exception:  # was a bare except
    print("Error: unable to start thread")

# Keep the main thread alive; sleep instead of the original
# 'while 1: pass', which busy-spun a CPU core at 100%.
while True:
    time.sleep(1)
|
[
"noreply@github.com"
] |
netlabcode.noreply@github.com
|
31df57ddc996845a0b3905b85f011312aa1a196c
|
0c9ec5d4bafca45505f77cbd3961f4aff5c10238
|
/openapi-python-client/openapi_client/models/process_instance_suspension_state_dto.py
|
60921c83d07860e0e616f8876d3084348e95f597
|
[
"Apache-2.0"
] |
permissive
|
yanavasileva/camunda-bpm-examples
|
98cd2930f5c8df11a56bf04845a8ada5b3bb542d
|
051f8f28c62845e68ce4059ab64264c5a0bdc009
|
refs/heads/master
| 2022-10-19T20:07:21.278160
| 2020-05-27T15:28:27
| 2020-05-27T15:28:27
| 267,320,400
| 0
| 0
|
Apache-2.0
| 2020-05-27T14:35:22
| 2020-05-27T13:00:01
| null |
UTF-8
|
Python
| false
| false
| 14,303
|
py
|
# coding: utf-8
"""
Camunda BPM REST API
OpenApi Spec for Camunda BPM REST API. # noqa: E501
The version of the OpenAPI document: 7.13.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class ProcessInstanceSuspensionStateDto(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # NOTE(review): the second triple-quoted string above is a bare
    # expression (not the class docstring) — kept exactly as generated.
    openapi_types = {
        'suspended': 'bool',
        'process_definition_id': 'str',
        'process_definition_key': 'str',
        'process_definition_tenant_id': 'str',
        'process_definition_without_tenant_id': 'bool',
        'process_instance_ids': 'list[str]',
        'process_instance_query': 'ProcessInstanceQueryDto',
        'historic_process_instance_query': 'HistoricProcessInstanceQueryDto'
    }

    attribute_map = {
        'suspended': 'suspended',
        'process_definition_id': 'processDefinitionId',
        'process_definition_key': 'processDefinitionKey',
        'process_definition_tenant_id': 'processDefinitionTenantId',
        'process_definition_without_tenant_id': 'processDefinitionWithoutTenantId',
        'process_instance_ids': 'processInstanceIds',
        'process_instance_query': 'processInstanceQuery',
        'historic_process_instance_query': 'historicProcessInstanceQuery'
    }

    def __init__(self, suspended=None, process_definition_id=None, process_definition_key=None, process_definition_tenant_id=None, process_definition_without_tenant_id=None, process_instance_ids=None, process_instance_query=None, historic_process_instance_query=None, local_vars_configuration=None):  # noqa: E501
        """ProcessInstanceSuspensionStateDto - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._suspended = None
        self._process_definition_id = None
        self._process_definition_key = None
        self._process_definition_tenant_id = None
        self._process_definition_without_tenant_id = None
        self._process_instance_ids = None
        self._process_instance_query = None
        self._historic_process_instance_query = None
        self.discriminator = None

        # Optional fields are only assigned when supplied; 'suspended' and
        # 'process_definition_without_tenant_id' are always set (may be None).
        self.suspended = suspended
        if process_definition_id is not None:
            self.process_definition_id = process_definition_id
        if process_definition_key is not None:
            self.process_definition_key = process_definition_key
        if process_definition_tenant_id is not None:
            self.process_definition_tenant_id = process_definition_tenant_id
        self.process_definition_without_tenant_id = process_definition_without_tenant_id
        if process_instance_ids is not None:
            self.process_instance_ids = process_instance_ids
        if process_instance_query is not None:
            self.process_instance_query = process_instance_query
        if historic_process_instance_query is not None:
            self.historic_process_instance_query = historic_process_instance_query

    @property
    def suspended(self):
        """Gets the suspended of this ProcessInstanceSuspensionStateDto.  # noqa: E501

        A `Boolean` value which indicates whether to activate or suspend a given process instance. When the value is set to `true`, the given process instance will be suspended and when the value is set to `false`, the given process instance will be activated.  # noqa: E501

        :return: The suspended of this ProcessInstanceSuspensionStateDto.  # noqa: E501
        :rtype: bool
        """
        return self._suspended

    @suspended.setter
    def suspended(self, suspended):
        """Sets the suspended of this ProcessInstanceSuspensionStateDto.

        A `Boolean` value which indicates whether to activate or suspend a given process instance. When the value is set to `true`, the given process instance will be suspended and when the value is set to `false`, the given process instance will be activated.  # noqa: E501

        :param suspended: The suspended of this ProcessInstanceSuspensionStateDto.  # noqa: E501
        :type: bool
        """
        self._suspended = suspended

    @property
    def process_definition_id(self):
        """Gets the process_definition_id of this ProcessInstanceSuspensionStateDto.  # noqa: E501

        The process definition id of the process instances to activate or suspend. **Note**: This parameter can be used only with combination of `suspended`.  # noqa: E501

        :return: The process_definition_id of this ProcessInstanceSuspensionStateDto.  # noqa: E501
        :rtype: str
        """
        return self._process_definition_id

    @process_definition_id.setter
    def process_definition_id(self, process_definition_id):
        """Sets the process_definition_id of this ProcessInstanceSuspensionStateDto.

        The process definition id of the process instances to activate or suspend. **Note**: This parameter can be used only with combination of `suspended`.  # noqa: E501

        :param process_definition_id: The process_definition_id of this ProcessInstanceSuspensionStateDto.  # noqa: E501
        :type: str
        """
        self._process_definition_id = process_definition_id

    @property
    def process_definition_key(self):
        """Gets the process_definition_key of this ProcessInstanceSuspensionStateDto.  # noqa: E501

        The process definition key of the process instances to activate or suspend. **Note**: This parameter can be used only with combination of `suspended`, `processDefinitionTenantId`, and `processDefinitionWithoutTenantId`.  # noqa: E501

        :return: The process_definition_key of this ProcessInstanceSuspensionStateDto.  # noqa: E501
        :rtype: str
        """
        return self._process_definition_key

    @process_definition_key.setter
    def process_definition_key(self, process_definition_key):
        """Sets the process_definition_key of this ProcessInstanceSuspensionStateDto.

        The process definition key of the process instances to activate or suspend. **Note**: This parameter can be used only with combination of `suspended`, `processDefinitionTenantId`, and `processDefinitionWithoutTenantId`.  # noqa: E501

        :param process_definition_key: The process_definition_key of this ProcessInstanceSuspensionStateDto.  # noqa: E501
        :type: str
        """
        self._process_definition_key = process_definition_key

    @property
    def process_definition_tenant_id(self):
        """Gets the process_definition_tenant_id of this ProcessInstanceSuspensionStateDto.  # noqa: E501

        Only activate or suspend process instances of a process definition which belongs to a tenant with the given id. **Note**: This parameter can be used only with combination of `suspended`, `processDefinitionKey`, and `processDefinitionWithoutTenantId`.  # noqa: E501

        :return: The process_definition_tenant_id of this ProcessInstanceSuspensionStateDto.  # noqa: E501
        :rtype: str
        """
        return self._process_definition_tenant_id

    @process_definition_tenant_id.setter
    def process_definition_tenant_id(self, process_definition_tenant_id):
        """Sets the process_definition_tenant_id of this ProcessInstanceSuspensionStateDto.

        Only activate or suspend process instances of a process definition which belongs to a tenant with the given id. **Note**: This parameter can be used only with combination of `suspended`, `processDefinitionKey`, and `processDefinitionWithoutTenantId`.  # noqa: E501

        :param process_definition_tenant_id: The process_definition_tenant_id of this ProcessInstanceSuspensionStateDto.  # noqa: E501
        :type: str
        """
        self._process_definition_tenant_id = process_definition_tenant_id

    @property
    def process_definition_without_tenant_id(self):
        """Gets the process_definition_without_tenant_id of this ProcessInstanceSuspensionStateDto.  # noqa: E501

        Only activate or suspend process instances of a process definition which belongs to no tenant. Value may only be true, as false is the default behavior. **Note**: This parameter can be used only with combination of `suspended`, `processDefinitionKey`, and `processDefinitionTenantId`.  # noqa: E501

        :return: The process_definition_without_tenant_id of this ProcessInstanceSuspensionStateDto.  # noqa: E501
        :rtype: bool
        """
        return self._process_definition_without_tenant_id

    @process_definition_without_tenant_id.setter
    def process_definition_without_tenant_id(self, process_definition_without_tenant_id):
        """Sets the process_definition_without_tenant_id of this ProcessInstanceSuspensionStateDto.

        Only activate or suspend process instances of a process definition which belongs to no tenant. Value may only be true, as false is the default behavior. **Note**: This parameter can be used only with combination of `suspended`, `processDefinitionKey`, and `processDefinitionTenantId`.  # noqa: E501

        :param process_definition_without_tenant_id: The process_definition_without_tenant_id of this ProcessInstanceSuspensionStateDto.  # noqa: E501
        :type: bool
        """
        self._process_definition_without_tenant_id = process_definition_without_tenant_id

    @property
    def process_instance_ids(self):
        """Gets the process_instance_ids of this ProcessInstanceSuspensionStateDto.  # noqa: E501

        A list of process instance ids which defines a group of process instances which will be activated or suspended by the operation. **Note**: This parameter can be used only with combination of `suspended`, `processInstanceQuery`, and `historicProcessInstanceQuery`.  # noqa: E501

        :return: The process_instance_ids of this ProcessInstanceSuspensionStateDto.  # noqa: E501
        :rtype: list[str]
        """
        return self._process_instance_ids

    @process_instance_ids.setter
    def process_instance_ids(self, process_instance_ids):
        """Sets the process_instance_ids of this ProcessInstanceSuspensionStateDto.

        A list of process instance ids which defines a group of process instances which will be activated or suspended by the operation. **Note**: This parameter can be used only with combination of `suspended`, `processInstanceQuery`, and `historicProcessInstanceQuery`.  # noqa: E501

        :param process_instance_ids: The process_instance_ids of this ProcessInstanceSuspensionStateDto.  # noqa: E501
        :type: list[str]
        """
        self._process_instance_ids = process_instance_ids

    @property
    def process_instance_query(self):
        """Gets the process_instance_query of this ProcessInstanceSuspensionStateDto.  # noqa: E501


        :return: The process_instance_query of this ProcessInstanceSuspensionStateDto.  # noqa: E501
        :rtype: ProcessInstanceQueryDto
        """
        return self._process_instance_query

    @process_instance_query.setter
    def process_instance_query(self, process_instance_query):
        """Sets the process_instance_query of this ProcessInstanceSuspensionStateDto.


        :param process_instance_query: The process_instance_query of this ProcessInstanceSuspensionStateDto.  # noqa: E501
        :type: ProcessInstanceQueryDto
        """
        self._process_instance_query = process_instance_query

    @property
    def historic_process_instance_query(self):
        """Gets the historic_process_instance_query of this ProcessInstanceSuspensionStateDto.  # noqa: E501


        :return: The historic_process_instance_query of this ProcessInstanceSuspensionStateDto.  # noqa: E501
        :rtype: HistoricProcessInstanceQueryDto
        """
        return self._historic_process_instance_query

    @historic_process_instance_query.setter
    def historic_process_instance_query(self, historic_process_instance_query):
        """Sets the historic_process_instance_query of this ProcessInstanceSuspensionStateDto.


        :param historic_process_instance_query: The historic_process_instance_query of this ProcessInstanceSuspensionStateDto.  # noqa: E501
        :type: HistoricProcessInstanceQueryDto
        """
        self._historic_process_instance_query = historic_process_instance_query

    def to_dict(self):
        """Returns the model properties as a dict"""
        # Recursively converts nested models (anything with to_dict),
        # lists of models, and dicts of models.
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ProcessInstanceSuspensionStateDto):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, ProcessInstanceSuspensionStateDto):
            return True

        return self.to_dict() != other.to_dict()
|
[
"noreply@github.com"
] |
yanavasileva.noreply@github.com
|
fccf527986a4f59df75908e0376c0c261188dc8a
|
c8cd691e8364804731889873ac5ea182bb682122
|
/blog/feeds.py
|
b4774bfb7b6f1dbecf306bdc3784263d82f104f4
|
[] |
no_license
|
isaif1/Blogging_With_Admin
|
c26473e615bea2c5ecf968323c49507c7ba1804c
|
09d2b576e6b61324eeebb74e73203e3cb1fe6708
|
refs/heads/main
| 2023-05-13T16:01:32.789274
| 2021-05-24T08:32:23
| 2021-05-24T08:32:23
| 370,078,996
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 448
|
py
|
from django.contrib.syndication.views import Feed
from django.template.defaultfilters import truncatewords
from .models import Post
class LatestPostsFeed(Feed):
    """Syndication feed of the five most recent published blog posts."""
    title = 'My blog'
    link = '/blog/'
    description = 'New posts of my blog.'

    def items(self):
        # Latest five posts from the model's custom 'published' manager.
        return Post.published.all()[:5]

    def item_title(self, item):
        return item.title

    def item_description(self, item):
        # Trim the post body to its first 30 words for the feed summary.
        return truncatewords(item.body, 30)
|
[
"saifcnb@gmail.com"
] |
saifcnb@gmail.com
|
18708b672b64006d8ea5bfe638985c062670b7cc
|
49b1b98e9bbba2e377d8b5318fcc13776bbcb10b
|
/museum/spiders/exhibition121.py
|
ebac13fe9c38ef25797901fc7be0856a6fe8fc05
|
[] |
no_license
|
BUCT-CS1808-SoftwareEngineering/MusemData_Collection_System
|
fa4e02ec8e8aaa9a240ba92cf7be33dbc0e8e31f
|
023e829c77037ba6d2183d8d64dcb20696b66931
|
refs/heads/master
| 2023-04-29T05:43:01.627439
| 2021-05-23T03:02:09
| 2021-05-23T03:02:09
| 360,040,880
| 0
| 0
| null | 2021-05-23T03:02:09
| 2021-04-21T05:18:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,340
|
py
|
import scrapy
from museum.items import exhibitionItem
class ExhibitionSpider(scrapy.Spider):
    """Scrapy spider for permanent-exhibition listings on zgtcbwg.com.

    parse() walks the listing page, extracting exhibition name/location per
    entry and following each detail link; parse_detail() collects the
    introduction text.  NOTE(review): fields are printed but never stored on
    the item, and no item is ever yielded — confirm the pipeline expects this.
    """
    name = 'exhibition121'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['http://www.zgtcbwg.com/index.php?s=/Home/Article/lists/category/cszl.html']
    def parse_detail(self, response):
        # Detail page callback: concatenate all <span> text into the intro.
        item = response.meta["item"]
        exhib_time ='常设'  # "permanent" exhibition; NOTE(review): unused after assignment
        y=response.xpath('/html/body/div[1]/div/div[2]/div[2]//span/text()').extract()
        exhib_intro=''
        for txt in y:
            exhib_intro+=txt
        print(exhib_intro)
    def parse(self, response):
        # Listing page: one <div> per exhibition entry.
        item = exhibitionItem()
        div_list=response.xpath('/html/body/div[1]/div/div[2]/div[2]/div')
        for div in div_list:
            if(div.xpath('./div[2]/h4/text()').extract_first()!=None):
                exhib_name=div.xpath('./div[2]/h4/text()').extract_first()
                print(exhib_name)
                exhib_location = div.xpath('./div[2]/p[1]/text()').extract_first()
                # Drop the 5-character label prefix (e.g. "展览地点:") from the location.
                x=exhib_location[5:len(exhib_location)]
                detail_url = 'http://www.zgtcbwg.com/' + div.xpath('./div[2]//a/@href').extract_first()
                #print(detail_url)
                exhib_location=x
                print(exhib_location)
                yield scrapy.Request(detail_url,callback=self.parse_detail,meta={'item':item})
|
[
"3100776572@qq.com"
] |
3100776572@qq.com
|
31f05951c3b5a5ab365a875bb35906e5aadcf18a
|
703fbf13862d6b8cf997ff1966b5a13e18daaa7a
|
/env/bin/normalizer
|
ca85485c52a286dfa9181c8341fe1e6b3ee2cd14
|
[] |
no_license
|
jonathansutton1/Projeto2-TecWeb
|
2a6af276609262a0a0c6c09bf4ca5178b8a7d03e
|
83e1ee9b42cee8576dc9a1e6fce02696174d7409
|
refs/heads/main
| 2023-09-04T17:15:21.174145
| 2021-11-11T20:00:31
| 2021-11-11T20:00:31
| 422,325,650
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 285
|
#!/home/cesar/Insper/TecWeb/Projeto2-TecWeb/env/bin/python3
# -*- coding: utf-8 -*-
# Console-script shim generated by pip for charset-normalizer's `normalizer`
# entry point; it only cleans argv[0] and delegates to the CLI function.
import re
import sys
from charset_normalizer.cli.normalizer import cli_detect
if __name__ == '__main__':
    # Strip the "-script.pyw"/".exe" suffix Windows launchers append to
    # argv[0] so usage/help output shows the clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(cli_detect())
|
[
"cesarea@al.insper.edu.br"
] |
cesarea@al.insper.edu.br
|
|
ec0059c0632cf589501c86609ca5e0a4f9936570
|
b2044bc20316d3f2d8ac81ead8f016e79bd65eaa
|
/user/migrations/0003_auto_20181204_1142.py
|
b485bb89dabcbfca270396de6deaff6feda26002
|
[] |
no_license
|
z66980437/ttsx_front
|
28b8643ac09a11ed3342d3c5c7becb32fcedac49
|
a91ecf8e5c2011280729c80a50a36325a3461ec2
|
refs/heads/master
| 2020-04-07T07:16:36.798693
| 2018-12-28T12:48:04
| 2018-12-28T12:48:04
| 158,170,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,490
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-12-04 03:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration.

    Adds the standalone Admin model (table ``b_admin``) and two money-related
    integer fields (balance, frozen_capital) to the existing User model.
    """
    dependencies = [
        ('user', '0002_user_ico'),
    ]
    operations = [
        migrations.CreateModel(
            name='Admin',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(blank=True, max_length=20, null=True, unique=True, verbose_name='姓名')),
                ('password', models.CharField(max_length=255, verbose_name='密码')),
                ('birthday', models.DateField(blank=True, null=True, verbose_name='出生年月')),
                ('mobile', models.CharField(blank=True, max_length=11, null=True, verbose_name='电话')),
                ('email', models.EmailField(blank=True, max_length=100, null=True, verbose_name='邮箱')),
            ],
            options={
                'db_table': 'b_admin',
            },
        ),
        migrations.AddField(
            model_name='user',
            name='balance',
            field=models.IntegerField(blank=True, null=True, verbose_name='账户余额'),
        ),
        migrations.AddField(
            model_name='user',
            name='frozen_capital',
            field=models.IntegerField(blank=True, null=True, verbose_name='冻结资金'),
        ),
    ]
|
[
"66980437@qq.com"
] |
66980437@qq.com
|
0c2d18e7c31fbd4603051fc1238f5c7d73d23064
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2503/60706/304035.py
|
5de049e619f4e256466f9392d96cefef241f8b75
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
n = int(input())
# Dispatch-table form of the original if/elif chain: a handful of inputs map
# to fixed answers, every other value is echoed back unchanged.
lookup = {6: 4, 10: 6, 3: 2, 9: 5, 8: 5}
print(lookup.get(n, n))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
34d1ba7ef2410376d679502ba373e2f1828e5147
|
396a6ff6e14d2b9614d4d37e823501936c5b6b60
|
/ejercicio_10.py
|
17496f0f837c90708c6810b40978e93f420669f0
|
[] |
no_license
|
kevinerd/ejercicios-tecnicas
|
e040057e44d96efd5e871328a983dd54d77791a5
|
8120aa2b6b35c4e95728e60351bb8c10159803ec
|
refs/heads/master
| 2023-03-16T13:55:34.777475
| 2017-03-29T18:04:28
| 2017-03-29T18:04:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 106
|
py
|
# Print the even numbers the original counter loop produced: the counter ran
# 42 iterations (c from -2 to 39), stepping num from -1 through 40 and
# printing each even value, i.e. 0, 2, ..., 40.
for num in range(-1, 41):
    if num % 2 == 0:
        print(num)
|
[
"kevinjf2011@gmail.com"
] |
kevinjf2011@gmail.com
|
4dd72f6d0e591476a519d8fd7b8dfd6643701ab2
|
103d5c372371ceb8235becd0b66e504594b61e4b
|
/100daysDay4-random.py
|
94d8f082e632e4d1a1181b8ac53b86ae40d725be
|
[] |
no_license
|
NeerajaLanka/100daysofcode
|
29dc194742a9b14565e50f8674724aed85380f18
|
5a42fa02c773f4949ad1acc07c88cef9c702f088
|
refs/heads/master
| 2023-07-09T04:45:02.757056
| 2021-08-19T19:56:06
| 2021-08-19T19:56:06
| 327,458,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
#randomisation
# Day-4 practice script: random integers/floats plus values pulled from a
# local companion module.  Requires my_module.py (defining `c` and `d`) on
# the import path — TODO confirm it ships alongside this script.
import random
random_int = random.randint(1,10)  # inclusive on both ends: 1..10
print(random_int)
import my_module
print(my_module.c)
#random floating numbers inbetween (0,5).usually it prints(0,1)
random_float = random.random()  # uniform in [0.0, 1.0)
print(random_float)
x = random_float*5  # scaled to [0.0, 5.0)
print(x)
print(my_module.d)
|
[
"prasadkundeti@Prasads-MacBook-Pro.local"
] |
prasadkundeti@Prasads-MacBook-Pro.local
|
b8185bd72c4b1f04fa94141a268362bef928bc05
|
7a4ae475933d65998ad401fa13e8ebdc198446ce
|
/Python/Exercicio21.py
|
e4fbb9db678042e17a429b88f3ddf1dbdcfa2ad5
|
[] |
no_license
|
vlkonageski/Logica
|
b26f20b0ee8e1bdcc4fb0125af18ba44af6eb6a5
|
962aa382dc4af41712f0ca1f59339d8435bfa4b2
|
refs/heads/master
| 2023-04-03T14:33:40.034303
| 2021-04-20T00:48:34
| 2021-04-20T00:48:34
| 303,716,293
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 477
|
py
|
"""
Faça um programa que leia um nome de usuário e a sua senha e não
aceite a senha igual ao nome de usuário, mostrando uma mensagem
de erro e voltando a pedir as informações.
"""
# Keep prompting until the password differs from the username; the prompts,
# error message and success message are identical to the original version.
while True:
    usuario = input("Informe o nome de usuario: ")
    senha = input("Informe sua senha: ")
    if usuario != senha:
        break
    print("Ususario e senha igual!")
print("Usuario e senha cadastrados com sucesso!!!")
|
[
"vinicius@alimentoscenci.com"
] |
vinicius@alimentoscenci.com
|
b85ec5b952ec2af536c49d81b5265ed8eeec7758
|
b2ff3d3c012a437ef844d558c6136cc9abcc8bff
|
/OpenCV_cell_detection&tracking/tracking.py
|
2725bcf4012f490e7addb624de878c375d075f2f
|
[] |
no_license
|
W-echo/20T2
|
1b62cd9265bd7f3693178d5ed242a1e839d70a7f
|
a07926534fb75159a05fe683a4c08c4ed7ad9f56
|
refs/heads/master
| 2022-12-09T23:31:40.972909
| 2020-09-06T07:28:58
| 2020-09-06T07:28:58
| 291,735,041
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,252
|
py
|
import cv2
import sys
import os
import numpy as np
# Cell Tracking Challenge dataset folder names used throughout this script.
dataset1 = "DIC-C2DH-HeLa"
dataset3 = "PhC-C2DL-PSC"
# Sample frame loaded at import time; NOTE(review): only referenced by the
# leftover experiment code below — confirm t425.tif exists or remove.
t_img = cv2.imread("t425.tif")
def preprocess(img):
    """Enhance a cell frame for thresholding: median blur, top-hat, contrast boost."""
    smoothed = cv2.medianBlur(img, 7)
    rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 9))
    tophat = cv2.morphologyEx(smoothed, cv2.MORPH_TOPHAT, rect_kernel)
    boosted = cv2.convertScaleAbs(tophat, alpha=8, beta=0)
    return boosted
# NOTE(review): leftover experiment code — `new_img` and `img` are not bound
# at module level, so this section raises NameError if executed as written;
# it duplicates the per-frame thresholding done inside draw_bounding_box and
# should probably be deleted or moved under an `if __name__ == "__main__":`.
ret, thresh = cv2.threshold(new_img, 190, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(thresh[:,:,0], cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
for i in range(len(contours)):
    x, y, w, h = cv2.boundingRect(contours[i])
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 1)
    rect = cv2.minAreaRect(contours[i])
    cv2.circle(img, (int(rect[0][0]), int(rect[0][1])), 2, (0, 0, 255), 2)
#cv2.imshow("", new_img)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
def draw_bounding_box(dataset, sequence):
    """
    Detect cells in every frame of ``<dataset>/Sequence <sequence>/`` and
    write annotated copies to ``... mask/``.

    dataset: the name of dataset
    sequence: sequence number
    Returns: list (one entry per frame) of (x, y, w, h) bounding boxes.
    NOTE(review): boxes are appended to the result BEFORE the area<=10 filter,
    so tiny boxes are returned even though they are not drawn — confirm intended.
    """
    # Data Path
    TEST_PATH = '{}/Sequence {}/'.format(dataset, sequence)
    test_ids = next(os.walk(TEST_PATH))[2]
    RES_PATH = "{}/Sequence {} mask/".format(dataset, sequence)
    if not os.path.exists(RES_PATH):
        os.mkdir(RES_PATH)
    bound_box=[]
    for id in test_ids:
        bound_box.append([])
        img = cv2.imread(TEST_PATH + id)
        # print(img.shape)
        new_img = preprocess(img)
        ret, thresh = cv2.threshold(new_img, 190, 255, cv2.THRESH_BINARY)
        contours, hierarchy = cv2.findContours(thresh[:,:,0], cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        for i in range(len(contours)):
            x, y, w, h = cv2.boundingRect(contours[i])
            bound_box[-1].append((x,y,w,h))
            # Skip drawing (but keep) boxes whose area is trivially small.
            if w * h <= 10:
                continue
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 1)
            rect = cv2.minAreaRect(contours[i])
            cv2.circle(img, (int(rect[0][0]), int(rect[0][1])), 2, (0, 0, 255), 2)
        #cv2.imshow(id,img)
        #cv2.waitKey(0)
        #cv2.cv2.destroyAllWindows()
        #print(id, ":", len(contours))
        new_id = "{}/Sequence {} mask/{}_res.tif".format(dataset, sequence, id[:-4])
        cv2.imwrite(new_id, img)
    return bound_box
#BBox_list=draw_bounding_box("PhC-C2DL-PSC", 1)
def Tracking(dataset, sequence,num):
    """Track (up to 31 of) the cells detected in frame ``num`` backwards in
    time and draw each trajectory on that frame.

    NOTE(review): once the o>30 cap is hit, the LAST computed `line` keeps
    being appended for every remaining box — confirm this duplication is
    intended rather than a missing `continue`.
    """
    TEST_PATH = '{}/Sequence {}/'.format(dataset, sequence)
    pic_name=os.path.join(TEST_PATH, os.listdir(TEST_PATH)[num])
    ima=cv2.imread(pic_name)
    bound_box=draw_bounding_box(dataset, sequence)
    if num>len(bound_box)-1:
        print("Wrong id.")
        return 0
    else:
        tracking_aim=bound_box[num]
        line_set=[]
        o=0
        for box in tracking_aim:
            if o<=30:
                # beginning=10: trace each cell back up to 10 frames.
                line=single_track(box,dataset, sequence,num,10)
                o+=1
                print(o)
            else:
                pass
            line_set.append(line)
        # Draw every trajectory as a red polyline on the selected frame.
        for line in line_set:
            for i in range(len(line)-1):
                cv2.line(ima,line[i],line[i+1],(0,0,255),1,4)
        cv2.imshow("2",ima)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
def single_track(box,dataset, sequence,num,beginning):
    """Track one bounding box BACKWARDS from frame ``num`` using a CSRT tracker.

    beginning selects how far back to go: 10 -> up to 10 frames, 1 -> one
    frame, 0 -> back to frame 0.  NOTE(review): any other value leaves
    begin_point unbound (NameError) — confirm callers only pass 0/1/10.
    Returns the list of box-centre points, newest first.
    """
    TEST_PATH = '{}/Sequence {}/'.format(dataset, sequence)
    pic_name=os.path.join(TEST_PATH, os.listdir(TEST_PATH)[num])
    ima=cv2.imread(pic_name)
    multiTracker=cv2.MultiTracker_create()
    if beginning==10:
        begin_point=max(0,num-10)
    elif beginning==1:
        begin_point=max(0,num-1)
    elif beginning==0:
        begin_point=0
    tracker=cv2.TrackerCSRT_create()
    multiTracker.add(tracker,ima,box)
    # Start the trajectory at the centre of the initial box.
    line=[(int(box[0])+int(box[2])//2,int(box[1])+int(box[3])//2)]
    # Walk frames backwards, appending the tracked box centre each step.
    for i in range(num,begin_point-1,-1):
        pic_name = os.listdir(TEST_PATH)[i]
        pic_name = os.path.join(TEST_PATH, pic_name)
        pic=cv2.imread(pic_name)
        success, boxes = multiTracker.update(pic)
        line.append((int(boxes[-1][0])+int(boxes[-1][2])//2,int(boxes[-1][1])+int(boxes[-1][3])//2))
        #cv2.line(pic,line[-1],line[-2],(0,0,255),5,4)
        #cv2.imshow("1",pic)
        #cv2.waitKey(0)
    return line
def distance(pixel_1, pixel_2):
    """Euclidean distance between two (x, y) pixel coordinates."""
    dx = pixel_1[0] - pixel_2[0]
    dy = pixel_1[1] - pixel_2[1]
    return (dx * dx + dy * dy) ** 0.5
def in_box(box_1,box_2):
if ((box_1[0]<=box_2[0]) and (box_1[1]<=box_2[1])):
if ((box_1[0]+box_1[2]>=box_2[0]+box_1[2]) and (box_1[1]+box_1[3]>=box_2[1]+box_2[3])):
return True
else:
return False
elif ((box_1[0]>=box_2[0]) and (box_1[1]>=box_2[1])):
if ((box_1[0]+box_1[2]<=box_2[0]+box_2[2]) and (box_1[1]+box_1[3]<=box_2[1]+box_2[3])):
return True
else:
return False
else:
return False
def Speed(dataset, sequence,num):
    """Average speed (pixels/frame, floor-divided) of a user-selected cell.

    Computed as straight-line displacement between the trajectory's
    endpoints over frame_interval.  NOTE(review): `b_box==None` should
    idiomatically be `is None`; behaviour is the same here.
    """
    b_box=Box_select(dataset, sequence,num)
    if b_box==None:
        return 0
    beginning=0  # trace all the way back to frame 0
    #beginning=begin_detecting(dataset, sequence,num,b_box)
    line=single_track(b_box,dataset, sequence,num,beginning)
    if len(line)==1:
        # Only the starting point: no movement observable.
        return 0
    else:
        frame_interval=1
        cell_speed=distance(line[0],line[-1])//frame_interval
        return cell_speed
def Total_distance(dataset, sequence,num):
    """Total path length of a user-selected cell: sum of segment lengths
    along the tracked trajectory (0 if selection fails or no movement)."""
    b_box=Box_select(dataset, sequence,num)
    if b_box==None:
        return 0
    beginning=0  # trace all the way back to frame 0
    #beginning=begin_detecting(dataset, sequence,num,b_box)
    line=single_track(b_box,dataset, sequence,num,beginning)
    if len(line)==1:
        return 0
    else:
        sum_distance=0
        # Accumulate consecutive point-to-point distances along the path.
        for i in range(len(line)-1):
            sum_distance+=distance(line[i],line[i+1])
        return sum_distance
def Net_distance(dataset, sequence,num):
    """Net (straight-line) displacement of a user-selected cell between the
    first and last tracked positions (0 if selection fails or no movement)."""
    b_box=Box_select(dataset, sequence,num)
    if b_box==None:
        return 0
    beginning=0  # trace all the way back to frame 0
    #beginning=begin_detecting(dataset, sequence,num,b_box)
    line=single_track(b_box,dataset, sequence,num,beginning)
    if len(line)==1:
        return 0
    else:
        NetDistance=distance(line[0],line[-1])
        return NetDistance
def Box_select(dataset, sequence,num):
    """Let the user draw an ROI on frame ``num`` and return the first detected
    bounding box nested with it (either containing the other).

    Returns None implicitly (after printing "Selecting failed") when no
    detected box matches the drawn ROI.
    NOTE(review): `tracking_aim` is assigned but unused — candidate cleanup.
    """
    TEST_PATH = '{}/Sequence {}/'.format(dataset, sequence)
    pic_name=os.path.join(TEST_PATH, os.listdir(TEST_PATH)[num])
    ima=cv2.imread(pic_name)
    box=cv2.selectROI('Select a cell',ima)
    #print(box)
    bound_box=draw_bounding_box(dataset, sequence)
    if num>len(bound_box)-1:
        print("Wrong id.")
        return 0
    else:
        tracking_aim=bound_box[num]
        for b_box in bound_box[num]:
            if not in_box(b_box,box):
                pass
            else:
                return b_box
        print("Selecting failed")
# Ad-hoc driver code: run tracking on sequence 2 of PhC-C2DL-PSC and report
# the net displacement metric.  The commented calls and the trailing block of
# sample numbers are previous experiment runs kept for reference.
Tracking("PhC-C2DL-PSC", 2 ,11)
#speed=Speed("PhC-C2DL-PSC", 2 ,11)
#print("Speed:",speed)
#sum_distance=Total_distance("PhC-C2DL-PSC", 2 ,22)
#print("Total distance:",sum_distance)
net_distance=Net_distance("PhC-C2DL-PSC", 2 ,22)
print("Net distance:",net_distance)
#print("ratio:",sum_distance/net_distance)
'''
Speed: 0.0
Total distance: 19.242640687119287
Net distance: 3.605551275463989
ratio: 5.336948282518324
'''
|
[
"noreply@github.com"
] |
W-echo.noreply@github.com
|
5bfdda64d799de9d456412200fbcb29007a91b1e
|
827af3023ecfe6c099bd8f733c1b714323d6ee46
|
/tests/fixtures/old_top_level_assertion_fail.py
|
c801dc5699f8c38dbb39f9348dc00c5b42371c60
|
[
"MIT"
] |
permissive
|
dequeues/python-test-framework
|
2d9625320c9b6e3068f85c8f116886f0ca1d7a7d
|
072ae3851e861cf2990f726eb938dbe8aaf1b080
|
refs/heads/master
| 2023-06-09T16:13:28.171837
| 2021-05-18T21:16:36
| 2021-05-18T21:16:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 91
|
py
|
# Deprecated and should not be used
# Test fixture: a deliberately FAILING top-level assertion (1 != 2) used to
# verify the framework still reports failures raised via the old-style API.
import codewars_test as test
test.assert_equals(1, 2)
|
[
"kazk.dev@gmail.com"
] |
kazk.dev@gmail.com
|
d3b03c2d3f774eedbbc69b57d328235d36c7efdf
|
516393d104f1a93c6d87882d9bd82f4a1c637882
|
/app/position_calibration.py
|
d02c0e65e94d10d635823c16bd4084c176574dd8
|
[
"BSD-3-Clause"
] |
permissive
|
iwanbolzern/hslu-pren-instrument-mgmt
|
b748af7a19ab46ed0f5fe5b0a0bed27a910a5d88
|
3c11690a38e10e9b9824ee0196858e96c73a19fc
|
refs/heads/master
| 2022-01-31T00:00:33.649892
| 2019-06-23T07:24:43
| 2019-06-23T07:24:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 958
|
py
|
from com.ic_interface import ICInterface, Direction
from mgmt.pos_callculation import Spline, PosCalculation
# Calibration script: evaluate the relative-x -> absolute x/z splines over
# the full encoder range (0..24999) and plot z to eyeball the spline fit.
pos_calc = PosCalculation()
pos_calc.x_rel_to_z_abs_spline  # NOTE(review): bare attribute access, no effect — remove?
x_rel_new = [i for i in range(25000)]
x_abs_new = [Spline.evaluate(pos_calc.x_rel_to_x_abs_spline, x_rel) for x_rel in x_rel_new]
z_abs_new = [Spline.evaluate(pos_calc.x_rel_to_z_abs_spline, x_rel) for x_rel in x_rel_new]
import matplotlib.pyplot as plt
plt.plot(x_rel_new, z_abs_new, 'o')
plt.legend(['z_abs'], loc='best')
axes = plt.gca()
axes.set_ylim([0, 110])  # clamp the z axis to the expected mechanical range
plt.show()
# Earlier interactive calibration loop, kept for reference:
# x_position_rel = 0
#
#
# def __position_update(x_offset, z_offset):
#     global x_position_rel
#     x_position_rel += x_offset
#     print('x_position_rel: ' + str(x_position_rel))
#
#
# ic_interface = ICInterface()
# ic_interface.register_position_callback(__position_update)
#
# while True:
#     input()
#     ic_interface.drive_distance_async(3000, 60, Direction.Backward, lambda: print('done'))
|
[
"iwan.bolzern@bluewin.ch"
] |
iwan.bolzern@bluewin.ch
|
7f4b51bd21113bd2b25774fda2c31fa71461a984
|
401d5b023663acc72edc993fb1f6e2cb76b3c933
|
/billing/migrations/0011_auto_20201003_0712.py
|
d90e784c77f174573e15871c94047017f2445f01
|
[] |
no_license
|
prismkoirala/pumpkeen
|
d2018103bf524929dabbb8173b36ad5eb2872ed6
|
2156adf737197dc914f4c80f4b1c939185280155
|
refs/heads/master
| 2023-01-03T06:13:19.865218
| 2020-10-12T03:30:49
| 2020-10-12T03:30:49
| 301,415,793
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 745
|
py
|
# Generated by Django 3.1.2 on 2020-10-03 07:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration.

    On BillingHistory: drops `total`, adds an integer `amount` (default 100),
    and widens `quantity` to a 2-decimal-place DecimalField.
    """
    dependencies = [
        ('billing', '0010_auto_20201003_0449'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='billinghistory',
            name='total',
        ),
        migrations.AddField(
            model_name='billinghistory',
            name='amount',
            field=models.IntegerField(blank=True, default=100, null=True),
        ),
        migrations.AlterField(
            model_name='billinghistory',
            name='quantity',
            field=models.DecimalField(blank=True, decimal_places=2, default=1.0, max_digits=5, null=True),
        ),
    ]
|
[
"you@example.com"
] |
you@example.com
|
2d1219612edd31e6989c8ad10e4086ce21a7e0b6
|
15f909bc25db41766193d79c9596c0147814fd48
|
/app/config_example.py
|
6a3427e48a0b8fe1750622bfb33bc7345a9a02bf
|
[
"MIT"
] |
permissive
|
paopow/word_similarity_api
|
16cbff5ce4f0919fb257560e14e2555cadb94815
|
26398d7f9284b9937ef1ac58c179ee380b4291dc
|
refs/heads/master
| 2021-01-17T09:10:18.345161
| 2016-03-10T19:09:35
| 2016-03-10T19:09:35
| 40,980,496
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
# Example configuration — copy to the real config module and replace the
# placeholder strings with actual filesystem paths.
GLOVE_NAMES = 'Path to GloVe dict'  # placeholder: path to the GloVe vocabulary file
GLOVE_VECS = 'Path to GloVe vector'  # placeholder: path to the GloVe vectors file
# Topic name -> JSON file of topic words (paths relative to the app dir).
TOPICS = {
    'weddingTheme': '../topicWords/wedding_themes.json',
    'weddingProp': '../topicWords/wedding_props.json'
}
|
[
"ksiangliulue@gmail.com"
] |
ksiangliulue@gmail.com
|
9997800d920a93c8c06fda74fb9f4906a9cde4bd
|
26693bf31ea0d564542f219642b4329f3afeef90
|
/bin/nucleus.spec
|
f7d9591040126c226558932697fdc79469bdf68f
|
[] |
no_license
|
brand-clear/nucleus
|
143b00016b7269639ebcc49f9dc47b56180d37cb
|
3593c6aa71d18aa24a3a960345ce91b98ec0627d
|
refs/heads/master
| 2020-12-02T10:32:57.420038
| 2020-11-19T15:58:24
| 2020-11-19T15:58:24
| 200,423,956
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 986
|
spec
|
# -*- mode: python -*-
# PyInstaller spec file (auto-generated) for building the Nucleus GUI app.
block_cipher = None  # no bytecode encryption
# Analysis: discover the script's imports and data files.
a = Analysis(['E:\\nucleus\\nucleus\\nucleus.py'],
             pathex=['E:\\nucleus\\nucleus'],
             binaries=[],
             datas=[],
             hiddenimports=[],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher,
             noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
             cipher=block_cipher)
# One-folder build: exe excludes binaries, COLLECT gathers everything.
exe = EXE(pyz,
          a.scripts,
          [],
          exclude_binaries=True,
          name='Nucleus',
          debug=False,
          bootloader_ignore_signals=False,
          strip=False,
          upx=True,
          console=False)  # windowed app: no console window
coll = COLLECT(exe,
               a.binaries,
               a.zipfiles,
               a.datas,
               strip=False,
               upx=True,
               name='Nucleus')
|
[
"noreply@github.com"
] |
brand-clear.noreply@github.com
|
dd585fb43f5509a11bb86bc61bebbd4e766a4c73
|
2c45cb2777d5191f92e7fd6582bf8bb00fdb9df4
|
/dna/components/receiver.py
|
93cb1f89c443f0b99b750860469aa6368c4a906c
|
[
"BSD-3-Clause"
] |
permissive
|
mwoc/pydna
|
3d51ed000f914ae6d8e93d11850ed493a786e346
|
25cf3db1fc0188258eacbcf3bcf62e37bae5a620
|
refs/heads/master
| 2021-01-10T20:25:48.966532
| 2013-07-01T20:37:39
| 2013-07-01T20:39:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,703
|
py
|
from dna.states import state
from dna.component import Component
'''
Fixed energy input. Model should be able to find:
*mdot for fixed inlet + outlet temperature
No heat transfer investigation is done, so temperature on solar side not considered
'''
class Receiver(Component):
    """Solar receiver: one inlet, one outlet, fixed heat input Qin.

    Pressure and composition pass straight through; depending on which of
    (inlet state, outlet state, mdot) are known, calc() solves for the
    missing quantity via an enthalpy balance.
    """
    def nodes(self, in1, out1):
        # Register the component's inlet/outlet node ids; fluent return.
        self.addInlet(in1)
        self.addOutlet(out1)
        return self
    def calc(self, Qin):
        '''
        You're supposed to set input+output temperature and energy added, then you'll
        get mdot. Smaller temperature difference = larger mdot = larger receiver = double-plus ungood
        So try to maximise temperature difference
        '''
        n = self.getNodes()
        n1 = n['i'][0]  # inlet node
        n2 = n['o'][0]  # outlet node
        # Receiver is isobaric and does not change composition.
        n2['p'] = n1['p']
        n2['y'] = n1['y']
        if 'media' in n1:
            n2['media'] = n1['media']
        # Resolve thermodynamic states wherever temperature/quality is given.
        if 't' in n1 or 'q' in n1:
            state(n1)
        if 't' in n2 or 'q' in n2:
            state(n2)
        if 'mdot' in n1:
            n2['mdot'] = n1['mdot']
        if not 'mdot' in n1:
            # n1[t] and n2[t] have to be known
            n1['mdot'] = Qin / (n2['h'] - n1['h'])
            n2['mdot'] = n1['mdot']
        elif not 't' in n1 and not 'q' in n1:
            # n2[t] and mdot have to be known
            n1['h'] = n2['h'] - Qin / n1['mdot']
            state(n1)
        elif not 't' in n2 and not 'q' in n2:
            # n1[t] and mdot have to be known
            n2['h'] = n1['h'] + Qin / n1['mdot']
            state(n2)
        else:
            # Those are known. Find Qin?
            Qin = (n2['h'] - n1['h']) * n1['mdot']
            print('Q_in:', Qin, 'kJ/s')
        return self
|
[
"maarten@mwinter.nl"
] |
maarten@mwinter.nl
|
bb54e36610159e308311a2808f9f70ce3e1ae7a2
|
97c41326547d227e0379364e313cb6ad11689ab3
|
/bl6_rt_indel_transcripts/src/filter_rti_intersect.py
|
e1c94b92814d8954d84161858767feb39b00ac46
|
[] |
no_license
|
jmg1297/thesis
|
f890f626a335039265074089ba9d1baf4d61d74c
|
320ec6551b468e4f57f4b0e5c71992f6f9477fe3
|
refs/heads/master
| 2021-09-07T16:16:00.949568
| 2017-10-21T14:51:02
| 2017-10-21T14:51:02
| 105,117,028
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,662
|
py
|
'''
Filter the results from bedtools intersect -wao between a set of exons and
retorcopy indels based on reciprocal overlap as a proportion of total
length. Output a file in the bedtools intersect output format.
'''
import sqlite3
import sys
import tempfile as tf
import subprocess
def get_intersect_dict(intersect_fname):
    """Parse ``bedtools intersect -wao`` output into nested overlap totals.

    Returns {transcript_name: {rti_name: total_overlap_bp}}.  Lines whose
    RTI start is -1 (no overlapping RTI) are skipped; overlaps for the same
    transcript/RTI pair are summed across that transcript's exons.
    """
    column_names = [
        "exon_chromosome", "exon_start", "exon_end",
        "exon_name", "exon_score", "exon_strand",
        "rti_chromosome", "rti_start", "rti_end",
        "rti_name", "rti_score", "rti_strand",
        "overlap"
    ]
    intersect_dict = {}
    with open(intersect_fname, 'r') as handle:
        for raw_line in handle:
            record = dict(zip(column_names, raw_line.strip().split()))
            if record["rti_start"] == "-1":
                # -1 start means bedtools reported no overlapping feature.
                continue
            # Exon names look like "<transcript_id>.<exon_index>".
            transcript_name = ".".join(record["exon_name"].split(".")[0:-1])
            rti_name = record["rti_name"]
            per_transcript = intersect_dict.setdefault(transcript_name, {})
            per_transcript[rti_name] = (
                per_transcript.get(rti_name, 0) + int(record["overlap"])
            )
    return intersect_dict
def get_bed_dict(bed_fname):
    """Load a BED file into {name: {"length": end - start, "line": stripped line}}."""
    bed_dict = {}
    with open(bed_fname, 'r') as f:
        for line in f:
            line_list = line.strip().split()
            name = line_list[3]
            length = int(line_list[2]) - int(line_list[1])
            bed_dict[name] = {"length": length, "line": line.strip()}
    return bed_dict

def get_output_line(transcript, rti_name, exon_cur, rti_dict):
    """Return (transcript_bed_line, rti_bed_line) for one passing pair.

    The transcript line is rebuilt from the exon DB with a 0 score column
    inserted before the strand so it matches BED6 column order.
    """
    transcript_list = list(
        exon_cur.execute(
            '''
            SELECT chromosome, start_coord, end_coord, transcript_id, strand
            FROM transcript WHERE transcript_id=?
            ''',
            (transcript, )).fetchone()
    )
    transcript_list.insert(-1, 0)  # score column, placed before the strand
    transcript_line = "\t".join(map(str, transcript_list))
    rti_line = rti_dict[rti_name]["line"]
    return (transcript_line, rti_line)

def filter_intersects(intersect_dict, exon_db, rti_bed, overlap_cutoff):
    '''
    For each transcript, check whether (total overlap/total exon length) is
    greater than the overlap cutoff. If it is, check the same for the
    RTI it overlaps with. If this also exceeds the overlap_cutoff,
    put the pair in the output list.

    Returns a list of [transcript_line, rti_line, overlap] triples.
    '''
    out_intersects = []
    exon_conn = sqlite3.connect(exon_db)
    exon_cur = exon_conn.cursor()
    rti_dict = get_bed_dict(rti_bed)
    # FIX: .items() instead of the Python-2-only .iteritems(); works on 2 and 3.
    for transcript, intersects in intersect_dict.items():
        try:
            total_exon_length = exon_cur.execute(
                "SELECT exon_length FROM transcript WHERE transcript_id=?",
                (transcript, )
            ).fetchone()[0]
        # FIX: narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed; still reports the offending transcript and aborts.
        except Exception:
            print(transcript)
            print(exon_cur.execute(
                "SELECT * FROM transcript WHERE transcript_id=?",
                (transcript, )
            ).fetchone())
            sys.exit()
        for rti_name, overlap in intersects.items():
            rti_length = rti_dict[rti_name]["length"]
            # Reciprocal overlap fractions for the exon set and the RTI.
            exon_prop = float(overlap)/total_exon_length
            rti_prop = float(overlap)/rti_length
            if exon_prop >= overlap_cutoff and rti_prop >= overlap_cutoff:
                transcript_line, rti_line \
                    = get_output_line(transcript, rti_name, exon_cur, rti_dict)
                out_intersects.append([transcript_line, rti_line, str(overlap)])
    return out_intersects
def write_output_file(out_intersects, output_fname):
    """Write intersect triples to output_fname, sorted by chrom then start.

    Rows are tab-joined into a temp file, then Unix `sort -k1,1 -k2,2n`
    writes the sorted result to output_fname.
    """
    import shlex  # local import: quote paths passed to the shell
    # FIX: mode="w" — the original opened the temp file in the default binary
    # mode, so writing str raised TypeError on Python 3.
    with tf.NamedTemporaryFile(mode="w") as tmp:
        for intersect in out_intersects:
            tmp.write("\t".join(intersect) + "\n")
        tmp.flush()
        # FIX: quote both paths so filenames with spaces/metacharacters are safe.
        subprocess.check_output(
            "sort -k1,1 -k2,2n {} > {}".format(
                shlex.quote(tmp.name), shlex.quote(output_fname)),
            shell=True
        )
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the filter pipeline
    # (parse intersects -> filter by reciprocal overlap -> write sorted output).
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("intersect_fname")
    parser.add_argument("exon_db")
    parser.add_argument("rti_bed")
    parser.add_argument("overlap_cutoff", type=float)
    parser.add_argument("output_fname")
    args = parser.parse_args()
    intersect_dict = get_intersect_dict(args.intersect_fname)
    out_intersects = filter_intersects(
        intersect_dict, args.exon_db, args.rti_bed, args.overlap_cutoff)
    write_output_file(out_intersects, args.output_fname)
|
[
"jg600@cam.ac.uk"
] |
jg600@cam.ac.uk
|
0027ba238b3441730538dffcc2107c90f098bf3f
|
e7d28c832cd7b03d3093546a8defaa7d90d20d5d
|
/perf_eval/cpu_mem_monitor.py
|
db0925c714293ad2fcc021401046e92e3f1f2b2b
|
[
"MIT"
] |
permissive
|
ntrgiang/build-vnf
|
9806f24cedb90a75ab909ca3f2f7e02371c3932a
|
313109da294fc095fb21a1e61860c3d00369f3b0
|
refs/heads/master
| 2022-12-02T09:09:17.038637
| 2020-08-12T14:16:23
| 2020-08-14T12:48:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,956
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
"""
About: Monitor CPU and Memory Usage, used for resource monitoring of different VNF implementations
Email: xianglinks@gmail.com
Problem: The program doesn't know when receiving starts and ends, so it's
diffcult to perfrom event based measurement.
"""
import argparse
import sched
import sys
import time
import psutil
CPU_USAGE = list()
GET_CPU_PERIOD_S = 0.5
LOG_CPU_PERIOD_S = 30 * GET_CPU_PERIOD_S
CPU_LOG_FILE = "cpu_usage.csv"
def find_proc(proc_name):
    """Return the first running process whose name equals proc_name, else None.

    BUGFIX: the original logged ``p.ppid()`` (the PARENT pid) under the label
    "pid"; the process's own ``pid`` attribute is logged instead.
    """
    for proc in psutil.process_iter():
        if proc.name() == proc_name:
            print("[INFO] Find proc for %s, pid: %d" % (proc.name(), proc.pid))
            return proc
    # Loop completed without a match.
    return None
def get_cpu_usage(scheduler, proc):
    """Sample the monitored process's CPU percent and reschedule itself."""
    CPU_USAGE.append(proc.cpu_percent())
    # Self-rescheduling task: sample again after GET_CPU_PERIOD_S (priority 1).
    scheduler.enter(GET_CPU_PERIOD_S, 1, get_cpu_usage, argument=(scheduler, proc))
def log_cpu_usage(scheduler):
    """Flush accumulated CPU samples to the CSV log and reschedule itself.

    Samples are appended comma-separated (with a trailing comma) to the
    current row; ``CPU_USAGE.clear()`` empties the shared buffer (Python 3).
    """
    # Log CPU usage, SHOULD be fast
    with open(CPU_LOG_FILE, "a+") as log_file:
        text = ",".join(map(str, CPU_USAGE)) + ","
        log_file.write(text)
    CPU_USAGE.clear()
    # Add the task in the queue
    scheduler.enter(LOG_CPU_PERIOD_S, 2, log_cpu_usage, argument=(scheduler,))
def stop_monitor():
    """Scheduled at MTR_TIME to end monitoring by exiting the process."""
    print("Stop monitoring. Exit...")
    sys.exit(0)
def main():
    """CLI entry: wait for the VNF process, then sample its CPU on a schedule."""
    # Parse args
    parser = argparse.ArgumentParser(
        description="Resource monitoring for VNF.",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument(
        "-p", type=str, default="udp_append_ts", help="Name of to be monitored process."
    )
    parser.add_argument("-t", type=float, default=5, help="Monitoring time in seconds.")
    parser.add_argument(
        "-f", type=str, default="./cpu_usage.csv", help="Path for the CSV file"
    )
    args = parser.parse_args()
    VNF_PROC_NAME = args.p
    MTR_TIME = args.t
    global CPU_LOG_FILE  # rebind the module-level log path from -f
    CPU_LOG_FILE = args.f
    # Wait until the VNF proc is running
    while True:
        vnf_p = find_proc(VNF_PROC_NAME)
        if vnf_p:
            break
        print("VNF proc: %s is not running, keep waiting..." % VNF_PROC_NAME)
        time.sleep(0.5)
    time.sleep(1)  # Wait for the init procs
    # Start monitoring CPU usage
    print("[INFO] Start monitoring CPU usage of proc: %s..." % VNF_PROC_NAME)
    # MARK: Currently not so optimized for automatic tests
    with open(CPU_LOG_FILE, "a+") as log_file:
        log_file.write("\n")  # start a fresh CSV row for this run
    scheduler = sched.scheduler(timefunc=time.time, delayfunc=time.sleep)
    # Priorities: 1 = sampling, 2 = logging, 3 = shutdown after MTR_TIME.
    scheduler.enter(GET_CPU_PERIOD_S, 1, get_cpu_usage, argument=(scheduler, vnf_p))
    scheduler.enter(LOG_CPU_PERIOD_S, 2, log_cpu_usage, argument=(scheduler,))
    scheduler.enter(MTR_TIME, 3, stop_monitor)
    try:
        scheduler.run(blocking=True)
    except KeyboardInterrupt:
        print("KeyboardInterrupt detected, exit the program...")
        sys.exit(0)
if __name__ == "__main__":
    main()
|
[
"xianglinks@gmail.com"
] |
xianglinks@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.