| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 2-616) | content_id (string, len 40) | detected_licenses (list, len 0-69) | license_type (string, 2 classes) | repo_name (string, len 5-118) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, len 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (string, 246 classes) | content (string, len 2-10.3M) | authors (list, len 1) | author_id (string, len 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d3fefa4ee994579db02f2ad3e22422232f0820dd
|
65000b7417776881d2438bffc077803a857fcdaa
|
/Email_App/Email/tests.py
|
72f84ce3b3044d1d8a4e643b5bead1b48ac4a20e
|
[] |
no_license
|
achugh95/Email_App
|
d19d0b3df1432c1241fae44d6a8a9cc5b29c8981
|
5797ba38df6d779507d90156738e72c8031fb905
|
refs/heads/master
| 2022-05-09T06:34:44.292339
| 2021-09-24T07:52:50
| 2021-09-24T07:52:50
| 222,181,715
| 0
| 0
| null | 2022-04-22T22:44:10
| 2019-11-17T01:30:44
|
Python
|
UTF-8
|
Python
| false
| false
| 286
|
py
|
from django.test import TestCase
from django.urls import reverse, resolve
# Create your tests here.
class TestUrls(TestCase):
    # Send Email
    def test_email(self):
        url = reverse('Send Email')
        # print(url)
        assert resolve(url).view_name == 'Send Email'
|
[
"Anshul.Chugh@hitachiconsulting.com"
] |
Anshul.Chugh@hitachiconsulting.com
|
3c7257d79ae2404a17be3a11ab128ed77c307cfe
|
64a9d58b7dea3678d9eb5e2e9185a3d8a44e441b
|
/AXF/AXF/settings.py
|
2c0e8c70d63f069c7bae69944da6ce45ff5bf173
|
[] |
no_license
|
Mrk2239/KProject
|
f20ec99bd4eb44015c07b120d882cf857a2eaa7f
|
8a16a9ef5dac4b608ba8188b7a9c715d570b4a84
|
refs/heads/master
| 2022-12-10T00:03:37.589071
| 2018-08-26T08:13:23
| 2018-08-26T08:13:23
| 146,062,428
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,352
|
py
|
"""
Django settings for AXF project.
Generated by 'django-admin startproject' using Django 1.11.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
'''
Global settings
'''
import os
# Django's default absolute project path
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# Django's default generated secret key
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^^!^gqh+ol=d1n8p976ll+(@+g04&_4*0=(0%@!-ul%2=9ge_c'
# DEBUG mode, True by default; set to False when deploying to production
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Allow access from any IP
ALLOWED_HOSTS = ["*"]
# Application definition
# Configure the applications
INSTALLED_APPS = [
    # Django's default apps
    # Site administration
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Custom application
    'App',
]
# Django's default middleware
MIDDLEWARE = [
    # Django's default middleware
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# Django's default root URL configuration
ROOT_URLCONF = 'AXF.urls'
# Django's default template configuration
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Template directories
        'DIRS': [
            os.path.join(BASE_DIR, 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
# Django's default WSGI configuration
WSGI_APPLICATION = 'AXF.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# Django's default database configuration
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
#     }
# }
# Configured database
DATABASES = {
    'default': {
        # Database engine
        'ENGINE': 'django.db.backends.mysql',
        # Database name
        'NAME': 'axf',
        # Username and password
        'USER': 'root',
        'PASSWORD': '123456',
        # Host and port
        'HOST': 'localhost',
        'PORT': '3306'
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
# Django's default password validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
# Django's default internationalization settings
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
# Django's default static file URL
STATIC_URL = '/static/'
# Upload directory for media files
MEDIA_ROOT = os.path.join(BASE_DIR, 'static/uploads')
# Custom static file locations
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static'),
]
# Regression test
if __name__ == '__main__':
    print(STATICFILES_DIRS)
    pass
|
[
"839916251@qq.com"
] |
839916251@qq.com
|
94e46fe72a7cb0cef2f91a4ff00ccf807435b804
|
02a23d5e3e89fde40985fd1514274d806c26c4bf
|
/venv/Scripts/pip3.7-script.py
|
8d637258051ce302de878085fbb0a892f573f52a
|
[] |
no_license
|
abbosjon-kudratov/pyLoremIpsum
|
e4025b185eee70907c242408c7eaf0d37524525e
|
05f7384625d43877f73988cf7d162b5320cf4118
|
refs/heads/master
| 2021-06-21T00:33:58.155927
| 2021-06-16T12:34:57
| 2021-06-16T12:34:57
| 193,081,257
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
#!C:\Users\AK\Desktop\untitled\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
    )
|
[
"abbosjon.kudratov@gmail.com"
] |
abbosjon.kudratov@gmail.com
|
6e4523ea31c0b5751e5eb3e0856a9d49e93e48af
|
84b0bccd66eda3ab0f9ee9f11d566791234772bc
|
/annabot.py
|
3ae9e5bafc048abd67157758fd614c9862810428
|
[] |
no_license
|
prositen/boten-anna
|
a6a57c7340aba3b273badd31f03faccb38122d6b
|
b731202fd507d52df2055c92e675c5140b815dca
|
refs/heads/master
| 2021-01-10T04:40:28.963021
| 2019-10-25T10:59:01
| 2019-10-25T10:59:01
| 53,566,095
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
py
|
from urllib.error import HTTPError
from slackbot.bot import Bot
__author__ = 'anna'
def main():
    try:
        bot = Bot()
        bot.run()
    except HTTPError as e:
        print(e.msg)
if __name__ == '__main__':
    main()
|
[
"anna.holmgren@wigetmedia.com"
] |
anna.holmgren@wigetmedia.com
|
bab9b0281dfa8ebcc3593333c0c723ce02e2b0a8
|
9a3a2727c33dbf42a690df7fbb226e0fabb45fab
|
/sem_7/zi/lab_3/main.py
|
acc54867fc138761a5afeee996e2360c3c14f9c6
|
[] |
no_license
|
sloppysloppy1/bmstu
|
4979f1a0d7d08740c8b4bbb320d9df4a72271ad0
|
862fa10dc41a9a0d5d65e3f6c88d2a610082ede8
|
refs/heads/master
| 2023-08-05T00:52:17.830147
| 2021-09-13T15:29:41
| 2021-09-13T15:29:41
| 382,121,406
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,343
|
py
|
from misc import sbox, inv_sbox, rcon, start
from struct import pack
from numpy import empty
Nb = 4   # number of state columns
Nk = 4   # key length in 32-bit words
Nr = 10  # number of encryption rounds
def subbytes(state, mode='encode'):
    if mode == 'encode':
        for i in range(Nb):
            for j in range(Nb):
                state[i][j] = sbox[state[i][j]]
    else:
        for i in range(Nb):
            for j in range(Nb):
                state[i][j] = inv_sbox[state[i][j]]
    return state
def shiftrows(state, mode='encode'):
    if mode == 'encode':
        for i in range(Nb):
            if i != 0:
                state[i] = state[i][1:] + state[i][:1]
    else:
        for i in range(Nb):
            if i != 0:
                state[i] = state[i][-1:] + state[i][:-1]
    return state
def ml(number):
    # multiply by x (0x02) in GF(2^8)
    if number < 0x80:
        number = (number << 1)
    else:
        number = (number << 1) ^ 0x1b
    return number % 0x100
def ml3(number):
    return ml(number) ^ number
def ml9(number):
    return ml(ml(ml(number))) ^ number
def mlb(number):
    return ml(ml(ml(number))) ^ ml(number) ^ number
def mld(number):
    return ml(ml(ml(number))) ^ ml(ml(number)) ^ number
def mle(number):
    return ml(ml(ml(number))) ^ ml(ml(number)) ^ ml(number)
def mixcolumns(state, mode='encode'):
    if mode == 'encode':
        # reference MixColumns matrix (documentation only; not used below)
        fixed = [['02', '03', '01', '01'], ['01', '02', '03', '01'],
                 ['01', '01', '02', '03'], ['03', '01', '01', '02']]
        for i in range(Nb):
            state_0 = (ml(state[0][i]) ^ ml3(state[1][i]) ^
                       state[2][i] ^ state[3][i])
            state_1 = (ml(state[1][i]) ^ ml3(state[2][i]) ^
                       state[0][i] ^ state[3][i])
            state_2 = (ml(state[2][i]) ^ ml3(state[3][i]) ^
                       state[0][i] ^ state[1][i])
            state_3 = (ml(state[3][i]) ^ ml3(state[0][i]) ^
                       state[2][i] ^ state[1][i])
            state[0][i] = state_0
            state[1][i] = state_1
            state[2][i] = state_2
            state[3][i] = state_3
    else:
        for i in range(Nb):
            state_0 = (mle(state[0][i]) ^ mlb(state[1][i]) ^
                       mld(state[2][i]) ^ ml9(state[3][i]))
            state_1 = (mle(state[1][i]) ^ mlb(state[2][i]) ^
                       mld(state[3][i]) ^ ml9(state[0][i]))
            state_2 = (mle(state[2][i]) ^ mlb(state[3][i]) ^
                       mld(state[0][i]) ^ ml9(state[1][i]))
            state_3 = (mle(state[3][i]) ^ mlb(state[0][i]) ^
                       mld(state[1][i]) ^ ml9(state[2][i]))
            state[0][i] = state_0
            state[1][i] = state_1
            state[2][i] = state_2
            state[3][i] = state_3
    return state
def addroundkey(state, roundkey):
    for i in range(Nb):
        for j in range(Nb):
            state[i][j] ^= roundkey[i][j]
    return state
def keyexpansion(key):
    keyschedule = empty([4, 4 * 11], dtype=object)
    for i in range(Nb):
        for j in range(Nb):
            keyschedule[i][j] = key[i][j]
    for j in range(Nb, Nb * (Nr + 1)):
        for i in range(Nb):
            if (j % Nb == 0):
                # rotate the previous word, then substitute and add rcon
                tmp = [keyschedule[row][j - 1] for row in range(1, Nb)]
                tmp.append(keyschedule[0][j - 1])
                keyschedule[i][j] = (sbox[tmp[i]] ^ keyschedule[i][j - Nb]
                                     ^ rcon[i][j // Nb - 1])
            else:
                keyschedule[i][j] = (keyschedule[i][j - 1]
                                     ^ keyschedule[i][j - Nb])
    return keyschedule
def encode(state_start, key_start):
    state = [[0] * Nb for i in range(Nb)]
    key = [[0] * Nb for i in range(Nb)]
    if (len(key_start) < 16):
        for i in range(len(key_start), 16):
            key_start += b'\x00'
    if (len(state_start) < 16):
        for i in range(len(state_start), 16):
            state_start += b'\x00'
    for i in range(Nb):
        for j in range(Nb):
            state[i][j] = state_start[i + Nb * j]
            key[i][j] = key_start[i + 4 * j]
    keyschedule = keyexpansion(key)
    state = addroundkey(state, keyschedule[:, 0:Nb])
    for i in range(1, Nr + 1):
        tmp = i * Nb
        curr_key = keyschedule[:, tmp:tmp + Nb]
        state = subbytes(state)
        state = shiftrows(state)
        if i != 10:
            state = mixcolumns(state)
        state = addroundkey(state, curr_key)
    return state
def decode(state_start, key_start):
    state = [[0] * Nb for i in range(Nb)]
    key = [[0] * Nb for i in range(Nb)]
    if (len(key_start) < 16):
        for i in range(len(key_start), 16):
            key_start += b'\x00'
    if (len(state_start) < 16):
        for i in range(len(state_start), 16):
            state_start += b'\x00'
    for i in range(Nb):
        for j in range(Nb):
            state[i][j] = state_start[i + Nb * j]
            key[i][j] = key_start[i + 4 * j]
    keyschedule = keyexpansion(key)
    tmp = Nr * Nb
    state = addroundkey(state, keyschedule[:, tmp:tmp + Nb])
    for i in range(Nr - 1, -1, -1):
        tmp = i * Nb
        curr_key = keyschedule[:, tmp:tmp + Nb]
        state = shiftrows(state, 'decode')
        state = subbytes(state, 'decode')
        state = addroundkey(state, curr_key)
        if i != 0:
            state = mixcolumns(state, 'decode')
    return state
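# --- Added example (not in the original file): a FIPS-197 known-answer check,
# a minimal sketch assuming the sbox/inv_sbox/rcon tables in `misc` follow the
# standard. It is not called anywhere; invoke it manually to sanity-check.
def _known_answer_check():
    key = bytes(range(16))  # 000102...0f, the FIPS-197 Appendix C.1 key
    plaintext = bytes.fromhex('00112233445566778899aabbccddeeff')
    expected = bytes.fromhex('69c4e0d86a7b0430d8cdb78070b4c55a')
    state = encode(plaintext, key)
    # flatten the column-major 4x4 state back to bytes, as the main loop does
    out = bytes(state[i][j] for j in range(Nb) for i in range(Nb))
    assert out == expected, 'AES-128 known-answer test failed'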
if __name__ == "__main__":
    filename_in = input("input filename: ")
    file = open(filename_in, 'rb')
    temp_key = input("input key: ")
    key = bytes(temp_key, 'utf-8')
    filename_out = input("input output filename: ")
    file_out = open(filename_out, 'wb')
    option = input("encode or decode: ")
    while True:
        temp_char = file.read(16)
        if not temp_char:
            break
        if option == 'encode':
            result = encode(temp_char, key)
        else:
            result = decode(temp_char, key)
        for j in range(Nb):
            for i in range(Nb):
                file_out.write(pack("B", result[i][j]))
    file.close()
    file_out.close()
|
[
"tetetey11235@gmail.com"
] |
tetetey11235@gmail.com
|
60718ef720815b3f844367318a5baacf79a15918
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_landladies.py
|
6672cc8cfdc66753c69f06fa6a2d1f144b459e2a
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
from xai.brain.wordbase.nouns._landlady import _LANDLADY
#class header
class _LANDLADIES(_LANDLADY, ):
    def __init__(self,):
        _LANDLADY.__init__(self)
        self.name = "LANDLADIES"
        self.specie = 'nouns'
        self.basic = "landlady"
        self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
b1c35e0fb55c07bf703ad14443790084425ce3b6
|
76c561562bbcd0b17b50d0ee5de6f7c65aa86f3f
|
/testCase/test_resumeDownloads.py
|
9bcf1174b1d5fa9a67ab0dbd0c7a157bd79f7248
|
[] |
no_license
|
Root9512/APITest
|
f9474032858c71649365836fa29842be0a37356a
|
b1e10d77d42f67c53deb921c5cfae2b7bbb55e85
|
refs/heads/master
| 2020-04-08T07:52:51.905016
| 2018-11-26T11:09:50
| 2018-11-26T11:09:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 960
|
py
|
# encoding:utf-8
'''
Created on 2018-11-26
@author: chengchen
'''
import unittest
import requests
from requests.exceptions import HTTPError
import json
from common import urlParam, paramete, statusCode
class TestAPI(unittest.TestCase):
    def setUp(self):
        print("start test")
        pass
    def tearDown(self):
        print("end test")
        pass
class test_resumeDownloads(TestAPI):
    def test_resumeDownloads(self):
        ResumeDownloads_url = urlParam.urlParam.resumeDownloads_url
        form_data = paramete.paramete.form_resumeDownloads
        headers = {'content-type': 'application/json'}
        if 'true' in form_data.values():
            print("Data integrity is as follows:")
        r = requests.post(url=ResumeDownloads_url, data=json.dumps(form_data), headers=headers)
        print(r.text)
        status = statusCode.codeStatus(r)
        print(status)
if __name__ == '__main__':
    unittest.main()
|
[
"986107670@qq.com"
] |
986107670@qq.com
|
7b05e6cce3777136da14b1da6f218feb04923a1e
|
f82e897eeea8a6ad8d6f0a4f3b662ea4b7b2e6b5
|
/howmanymiles/tests.py
|
d341bdf306eb2d3435fec3782310b598753a917b
|
[] |
no_license
|
dellsystem/estimatemymiles.com
|
44e447599e9f0406674ffa6006a3948f3733e35c
|
a45c03c96020d3427810029ed82ad2d033e0a8d0
|
refs/heads/master
| 2021-01-17T06:04:37.223443
| 2014-05-06T21:11:33
| 2014-05-06T21:11:33
| 14,461,991
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,238
|
py
|
from django.test import TestCase
from howmanymiles.models import Alliance, Airline, FareClass, MileageMultiplier
class AirlineTestCase(TestCase):
    fixtures = ['test/airlines.json']
    def test_get_qualifying_names(self):
        SJ = Airline.objects.get(pk='SJ')
        SJ_miles_name = 'Sleaziness Qualifying Miles (SQM)'
        SJ_segments_name = 'Sleaziness Qualifying Segments (SQS)'
        self.assertEqual(SJ.get_qualifying_miles_name(), SJ_miles_name)
        self.assertEqual(SJ.get_qualifying_segments_name(), SJ_segments_name)
class MileageMultiplierTestCase(TestCase):
    fixtures = ['test/airlines.json', 'test/mileage_multipliers.json']
    def test_get_num_miles(self):
        # Qualifying miles: 25%; regular miles: 50%; minimum miles: 15
        m = MileageMultiplier.objects.get(pk=1)
        normal_miles = m.get_num_miles(100)
        qualifying_miles = m.get_num_miles(100, is_qualifying=True)
        minimum_miles = m.get_num_miles(10)
        minimum_qualifying_miles = m.get_num_miles(10, is_qualifying=True)
        self.assertEqual(normal_miles, 50)
        self.assertEqual(qualifying_miles, 25)
        self.assertEqual(minimum_miles, 15)
        self.assertEqual(minimum_qualifying_miles, 15)
|
[
"ilostwaldo@gmail.com"
] |
ilostwaldo@gmail.com
|
e7c106444bd90e1df4644957dca453b04db6d660
|
da92cde8a4bd0f50595fd35952303367aec12352
|
/loggingconf.py
|
bfacbec77bea82214aa9c121f4280583e14ce3d6
|
[
"MIT"
] |
permissive
|
ocworld/loggingconf
|
91e17b352ccbf42dd68c3676e078d71b93b84928
|
5e218698d1790068adee65ddf9fc16a176819154
|
refs/heads/master
| 2020-05-06T13:48:33.824015
| 2019-04-08T14:08:36
| 2019-04-08T14:08:36
| 180,157,508
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 798
|
py
|
# -*- coding: utf-8 -*-
import os
import logging.config
import yaml
import json
import io
def file_config(fname, encoding='utf-8'):
    fmt2func = {'.yaml': _yaml_config,
                '.yml': _yaml_config,
                '.json': _json_config,
                '.ini': _ini_config}
    _, ext = os.path.splitext(fname)
    fmt2func[ext](fname, encoding)
def _yaml_config(fname, encoding='utf-8'):
    with io.open(fname, mode='r', encoding=encoding) as f:
        data = yaml.safe_load(f)
        logging.config.dictConfig(data)
def _json_config(fname, encoding='utf-8'):
    with io.open(fname, mode='r', encoding=encoding) as f:
        data = json.load(f)
        logging.config.dictConfig(data)
def _ini_config(fname, *unused):
    del unused
    logging.config.fileConfig(fname)
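# --- Added usage sketch (not in the original file), assuming a dictConfig-style
# YAML file named 'logging.yaml' exists next to the caller:
#
#     import logging
#     import loggingconf
#
#     loggingconf.file_config('logging.yaml')
#     logging.getLogger(__name__).info('logging configured')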
|
[
"ocworld@gmail.com"
] |
ocworld@gmail.com
|
7992c91892339efa0474a1d3662b2f8254f39e87
|
668939e86200a534ef6953f3ee4e0c484c5867f2
|
/Classification.py
|
2798c0c3ed0335d60fb9d1dfe88034fed31005b0
|
[] |
no_license
|
x2ever/Word-Classification
|
836a4d8e8d8a9c2cce8dc84d983bd6a2eead531d
|
2d3c087307601639213b9bc713746ce8163f2acf
|
refs/heads/master
| 2020-06-03T18:01:19.434530
| 2019-06-13T05:57:53
| 2019-06-13T05:57:53
| 191,675,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,131
|
py
|
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
RANDOM_SEED = 999
class Classification():
    def __init__(self, data):
        self.X_train, self.X_test, self.y_train, self.y_test = data
    def classify(self, model):
        model.fit(self.X_train, self.y_train)
        y_pred = model.predict(self.X_test)
        print('Accuracy: {}'.format(accuracy_score(self.y_test, y_pred)))
    def Kmeans(self):
        n_clusters = len(np.unique(self.y_train))
        clf = KMeans(n_clusters=n_clusters, random_state=RANDOM_SEED)
        clf.fit(self.X_train)
        y_labels_train = clf.labels_
        y_labels_test = clf.predict(self.X_test)
        self.X_train = y_labels_train[:, np.newaxis]
        self.X_test = y_labels_test[:, np.newaxis]
        return self
if __name__ == "__main__":
    # load_digits() returns a Bunch, not a train/test split; split it first
    # (this uses the train_test_split import that was otherwise unused)
    X, y = load_digits(return_X_y=True)
    Classification(train_test_split(X, y, random_state=RANDOM_SEED))\
        .Kmeans()\
        .classify(model=SVC())
|
[
"x2ever@naver.com"
] |
x2ever@naver.com
|
1356deffbc44010a09de412170f5f02c8eb78359
|
86dedea07b16b11f6804db5603a882783ac021ba
|
/config.py
|
05029a22cdc88d5f05b09b7a8aba54f5328cb7ed
|
[] |
no_license
|
zezhou-zhang/Linux_FileSystem_Silmulation
|
4c5543eff56cd445e9b9efc0568e56dee436e4b2
|
31ced15c1754d559eab2af933f54544cdefdfb82
|
refs/heads/master
| 2020-12-14T06:45:58.337968
| 2020-01-18T16:19:08
| 2020-01-18T16:19:08
| 234,672,668
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
#Total blocks must be greater than block size
#Inode size must be less than Block size
#Min size of file name is 5
'''
TOTAL_NO_OF_BLOCKS = 1024
BLOCK_SIZE = 512
MAX_NUM_INODES = 64
INODE_SIZE = 256
MAX_FILE_NAME_SIZE = 16
'''
TOTAL_NO_OF_BLOCKS = 1024
BLOCK_SIZE = 256
MAX_NUM_INODES = 32
INODE_SIZE = 128
MAX_FILE_NAME_SIZE = 8
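# --- Added sketch (not in the original file): the first two constraints from
# the comments above, made executable. ("Min size of file name is 5" is
# ambiguous, so it is not asserted here.)
assert TOTAL_NO_OF_BLOCKS > BLOCK_SIZE, 'total blocks must exceed block size'
assert INODE_SIZE < BLOCK_SIZE, 'inode size must be less than block size'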
|
[
"noreply@github.com"
] |
zezhou-zhang.noreply@github.com
|
ef29c27637886d0cef2b253b22a2f9354fccdef6
|
663de8810049c93e1c233fc1f6eb6d1adbd720b1
|
/learn-python-the-hard-way/Exercise 29 What If/ex29.py
|
e6a906215a2a2a8797df5fbcde6bba151fbfb256
|
[] |
no_license
|
raindrop-aqua/learn-python
|
781947407261237649cdb874f9b43e6d22397c68
|
ebc0c07e740d731de7923720db29d4bfaa93ecc9
|
refs/heads/master
| 2020-04-09T11:10:50.995887
| 2014-11-16T11:02:34
| 2014-11-16T11:02:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
people = 20
cats = 30
dogs = 15
if people < cats:
    print("Too many cats! The world is doomed!")
if people > cats:
    print("Not many cats! The world is saved!")
if people < dogs:
    print("The world is drooled on!")
if people > dogs:
    print("The world is dry!")
dogs += 5
if people >= dogs:
    print("People are greater than or equal to dogs.")
if people <= dogs:
    print("People are less than or equal to dogs.")
if people == dogs:
    print("People are dogs.")
|
[
"m-atsumi@mail.apsys.co.jp"
] |
m-atsumi@mail.apsys.co.jp
|
561ce50e54e161aed1113ea400eb18b93aa2b7c5
|
cce1e235c2c8e58d83af6dbadeb471ca62b710a1
|
/codewars/6kyu/sum_of_digits_digital_root.py
|
07fbe00ef5d894252cf5835ebb0f854f28aeaeb7
|
[] |
no_license
|
SebastianThomas1/coding_challenges
|
6b51ce046b458c44db809687b6809d16d066566f
|
bd3bc6be7a975b6255e4b2198c953d56bd74e75a
|
refs/heads/master
| 2023-03-03T00:18:00.147369
| 2021-02-08T21:52:02
| 2021-02-08T21:52:02
| 336,688,955
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 327
|
py
|
# Sebastian Thomas (coding at sebastianthomas dot de)
# https://www.codewars.com/kata/541c8630095125aba6000c00
#
# Sum of Digits / Digital Root
def digital_root(n):
    return n if n < 10 else digital_root(sum(map(int, str(n))))
if __name__ == '__main__':
    print(digital_root(16))   # 7
    print(digital_root(456))  # 6
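# An equivalent closed form (added for reference, not part of the original
# kata solution): for non-negative integers the digital root satisfies
# dr(n) = 0 if n == 0 else 1 + (n - 1) % 9.
def digital_root_closed_form(n):
    return 0 if n == 0 else 1 + (n - 1) % 9
assert digital_root_closed_form(16) == 7
assert digital_root_closed_form(456) == 6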
|
[
"sigma.online@gmx.de"
] |
sigma.online@gmx.de
|
e54cd2147be5a55ad55054b01e9133ba8ec8168f
|
58ca8ad15949b96ae566fd598dbbd8e039c453e2
|
/django_04_external_libs/django_04_external_libs/settings.py
|
5b7df9a61a83cb1b487aa4b67b4eef376fe39ccf
|
[
"MIT"
] |
permissive
|
javiromero/pycones2017
|
28fae3c4dacf154fdeafe74059793620b2a389e3
|
a252aed4027ee0fee524ec85c0ee9b9e8970573e
|
refs/heads/master
| 2021-07-05T06:52:13.663499
| 2017-09-25T07:59:22
| 2017-09-25T07:59:22
| 104,718,986
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,631
|
py
|
"""
Django settings for django_04_external_libs project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'z0k5sse%47n5@e6xnhoi1b683r6c#5do!7o$*a9+3z)p)*%&vr'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_04_external_libs.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'django_04_external_libs.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# DJANGO'S DEFAULT LOGGING CONFIGURATION
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse',
        },
        'require_debug_true': {
            '()': 'django.utils.log.RequireDebugTrue',
        },
    },
    'formatters': {
        'django.server': {
            '()': 'django.utils.log.ServerFormatter',
            'format': '[%(server_time)s] %(message)s',
        },
    },
    'handlers': {
        'console': {
            'level': 'INFO',
            'filters': ['require_debug_true'],
            'class': 'logging.StreamHandler',
        },
        'django.server': {
            'level': 'INFO',
            'class': 'logging.StreamHandler',
            'formatter': 'django.server',
        },
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler',
        },
    },
    'loggers': {
        'django': {
            'handlers': ['console', 'mail_admins'],
            'level': 'INFO',
        },
        'django.server': {
            'handlers': ['django.server'],
            'level': 'INFO',
            'propagate': False,
        },
    },
    ########################## ADD CONFIGURATION TO ROOT ###########################
    'root': {
        'level': 'ERROR',
        'handlers': ['console'],
    },
}
|
[
"javi.azuaga@gmail.com"
] |
javi.azuaga@gmail.com
|
51a344220414e865543db0af532016ae778992ae
|
702574ec18a35258ce1a028c8ecf3dd91197b514
|
/triangular_lattice/correlation_analyze.py
|
dbed0d8a4c9f862fd55ca1d25c8b3605cf27aaec
|
[
"MIT"
] |
permissive
|
ssh0/growing-string
|
4d5096225e4478913c654646d664f59d4bf0e88b
|
2e43916e91157dfb4253775149b35ec9d81ef14d
|
refs/heads/master
| 2020-04-12T03:10:44.130839
| 2017-03-24T07:51:44
| 2017-03-24T07:51:44
| 56,047,471
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,236
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# written by Shotaro Fujimoto
# 2016-10-13
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
if __name__ == '__main__':
    fig, ax = plt.subplots()
    result_data_paths = [
        ## frame=1000, before modified Lp, strings=100, [0, 4]
        # "./results/data/correlation/beta=0.00_170125_012601.npz",
        # "./results/data/correlation/beta=2.00_170125_013838.npz",
        # "./results/data/correlation/beta=4.00_170125_015505.npz",
        # "./results/data/correlation/beta=6.00_170125_021432.npz",
        # "./results/data/correlation/beta=8.00_170125_023702.npz",
        # "./results/data/correlation/beta=10.00_170125_025938.npz",
        ## frame=1000, after modified Lp, strings=100, [0, 0]
        "./results/data/correlation/beta=0.00_170125_171427.npz",
        "./results/data/correlation/beta=2.00_170125_174836.npz",
        "./results/data/correlation/beta=4.00_170125_182723.npz",
        "./results/data/correlation/beta=6.00_170125_191135.npz",
        "./results/data/correlation/beta=8.00_170125_200849.npz",
        "./results/data/correlation/beta=10.00_170125_210225.npz",
        "./results/data/correlation/beta=12.00_170125_193941.npz",
        "./results/data/correlation/beta=14.00_170125_203620.npz",
        "./results/data/correlation/beta=16.00_170125_212026.npz",
        "./results/data/correlation/beta=18.00_170125_225012.npz",
        "./results/data/correlation/beta=20.00_170125_233341.npz",
    ]
    for result_data_path in result_data_paths:
        # for beta in [0., 5., 10., 15., 20.]:
        # for beta in [float(i) for i in range(18)]:
        # result_data_path = "./results/data/correlation/beta=%2.2f_161013_141137.npz" % beta
        # result_data_path = "./results/data/correlation/beta=%2.2f_161013_154842.npz" % beta
        # result_data_path = "./results/data/correlation/beta=%2.2f_161015_155449.npz" % beta
        # result_data_path = "./results/data/correlation/beta=%2.2f_161018_220923.npz" % beta
        # result_data_path = "./results/data/correlation/beta=%2.2f_161122_152015.npz" % beta
        data = np.load(result_data_path)
        beta = data['beta']
        num_of_strings = data['num_of_strings']
        L = data['L']
        frames = data['frames']
        Lp = data['Lp']
        Cs = data['Cs']
        ax.plot(Lp, Cs, '.',
                color=cm.viridis(float(beta) / (2 * len(result_data_paths))),
                label=r'$\beta = %2.2f$' % beta)
        # The graph shows that the values split between even and odd indices,
        # so the even- and odd-indexed entries can be plotted separately:
        # Lp_even, Lp_odd = Lp[::2], Lp[1::2]
        # Cs_even, Cs_odd = Cs[::2], Cs[1::2]
        # ax.plot(Lp_even, Cs_even, '.', label=r'(even) $\beta = %2.2f$' % beta)
        # ax.plot(Lp_odd, Cs_odd, '.', label=r'(odd) $\beta = %2.2f$' % beta)
    fig.subplots_adjust(right=0.8)
    ax.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0)
    ax.set_xlabel('Path length')
    ax.set_ylabel('Correlation of the vectors')
    ax.set_title('Correlation of the vectors')
    ax.set_ylim(ax.get_ylim()[0], 1.001)
    plt.show()
|
[
"fuji101ijuf@gmail.com"
] |
fuji101ijuf@gmail.com
|
af050aadec9261133daf6994fe057fcb40d1bccc
|
2cd62248839ab346e688d1fa7e60ac6656a1a809
|
/apps/cart/context_processors.py
|
cdacdca08013dd60f42595ad0e8c5c9f6fcc6e35
|
[] |
no_license
|
pbpoon/dda
|
5899c5b823f7132075089385e0fd23440cc17c59
|
b0b699dab3cc8efef2a91fb0706adcca130e0911
|
refs/heads/master
| 2022-12-08T21:46:40.304772
| 2019-04-09T05:02:37
| 2019-04-09T05:02:37
| 160,053,343
| 0
| 0
| null | 2022-12-08T01:30:58
| 2018-12-02T14:02:38
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 159
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by pbpoon on 2018/11/8
from .cart import Cart
def cart(request):
    return {'cart': Cart(request)}
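# --- Added note (not in the original file): a context processor only runs if
# it is listed in settings; a sketch, assuming this module is importable as
# 'cart.context_processors':
#
#     TEMPLATES[0]['OPTIONS']['context_processors'].append(
#         'cart.context_processors.cart')
#
# after which templates can reference {{ cart }} directly.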
|
[
"pbpoon@gmail.com"
] |
pbpoon@gmail.com
|
d5afc0eab559fa7d6e948a07ff38d4fc0877bb65
|
0b9fa16ec8ff2d006d67b3d6ef07bc2aca64bf43
|
/timeline_tweets.py
|
fff5534035d7bb5cc983f3bb741570cf896dc2cd
|
[] |
no_license
|
pratik-anurag/Streaming-Tweets-Dashboard
|
4f8e0b1eae2fa3f311fdb727ee7af6ba9a80c9ab
|
836fbdd2bd2d512cc8183cf35c029be2d945e891
|
refs/heads/master
| 2020-11-24T23:08:10.917576
| 2020-02-13T07:05:21
| 2020-02-13T07:05:21
| 228,378,884
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,210
|
py
|
import tweepy
import csv
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth)
def get_all_tweets(screen_name):
    alltweets = []
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    alltweets.extend(new_tweets)
    oldest = alltweets[-1].id - 1
    while len(new_tweets) > 0:
        # print("getting tweets before %s" % (oldest))
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        alltweets.extend(new_tweets)
        oldest = alltweets[-1].id - 1
        # print("...%s tweets downloaded so far" % (len(alltweets)))
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text.encode("utf-8")] for tweet in alltweets]
    with open('%s_tweets.csv' % screen_name, 'w') as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
    pass
def followers(screen_name):
    allfollowers = []
    followers = api.followers(screen_name=screen_name, count=200)
    allfollowers.extend(followers)
    for i in range(0, len(allfollowers)):
        print(allfollowers[i].name)
followers("Pratikanurag")
|
[
"noreply@github.com"
] |
pratik-anurag.noreply@github.com
|
57316fb2b7eb230937a40de9de0ec04b4ed70bbe
|
db2c985d72a05cff4557b63101376d7eea953984
|
/test_uppgift39.py
|
4ef15a5e75fc73973bdd2d1d50afc9670d11de77
|
[] |
no_license
|
rupertpupkin-coder/kyh-practice
|
dbbdddfc0068c5ac84157a16778fa5e2807fc362
|
673c36e777f8ae36d330c89d903e1072002cbcfe
|
refs/heads/master
| 2023-01-06T03:17:40.607749
| 2020-10-14T12:46:52
| 2020-10-14T12:46:52
| 291,699,472
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,031
|
py
|
import uppgift39
def test_maximum_number_a():
    a = 123
    b = 33
    c = 22
    assert uppgift39.maximum(a, b, c) == 123
def test_maximum_number_b():
    a = 1
    b = 3
    c = 2
    assert uppgift39.maximum(a, b, c) == 3
def test_maximum_number_c():
    a = 1
    b = 2
    c = 22
    assert uppgift39.maximum(a, b, c) == 22
def test_add_all_numbers_a():
    expected = 300
    got = uppgift39.add([100, 99, 101])
    assert expected == got
def test_add_all_numbers_b():
    expected = 5
    got = uppgift39.add([2, 1, 2])
    assert expected == got
def test_add_all_numbers_c():
    expected = 14
    got = uppgift39.add([7, 6, 1])
    assert expected == got
def test_product_numbers_a():
    expected = 10
    got = uppgift39.multiply([5, 2, 1])
    assert expected == got
def test_product_numbers_b():
    expected = 20
    got = uppgift39.multiply([5, 2, 2])
    assert expected == got
def test_product_numbers_c():
    expected = 100
    got = uppgift39.multiply([50, 2, 1])
    assert expected == got
|
[
"jacob.nordlund@student.kyh.se"
] |
jacob.nordlund@student.kyh.se
|
821b778589dea12a5a23150e4651c7036c3f8e25
|
2a54e8d6ed124c64abb9e075cc5524bb859ba0fa
|
/.history/3-lists_20200405001050.py
|
a8cc662d2b4e4efa3fa7a450b46a3f2cf65c568d
|
[] |
no_license
|
CaptainStorm21/Python-Foundation
|
01b5fbaf7a913506518cf22e0339dd948e65cea1
|
a385adeda74f43dd7fb2d99d326b0be23db25024
|
refs/heads/master
| 2021-05-23T01:29:18.885239
| 2020-04-23T19:18:06
| 2020-04-23T19:18:06
| 253,171,611
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
# A List is a collection which is ordered and changeable. Allows duplicate members.
numbers = [ 3, 23, 111, 3423, 352]
print(numbers)
print(type(numbers))
#using a constructor
listNum = list((213, 11, 342, 2342, 55432))
print(listNum)
fruits = ['Apples', 'Oranges', 'Grapes', 'Pears']
print(fruits[2])
#Get len
print(len(fruits))
#append to the list
fruits.append('Mango')
print(fruits)
#remove from the list
fruits.remove('Grapes')
print(fruits)
#insert into a spot
fruits.insert(2, 'Coconut')
print(fruits)
#remove from a spot
fruits.pop(4)
print(fruits)
#reverse list
fruits.reverse()
print(fruits)
|
[
"tikana4@yahoo.com"
] |
tikana4@yahoo.com
|
da8128a5008aa88db4003c335a46badc556ef918
|
53abaa06681e773b69d07ad88a29eb83a1b3a42a
|
/247. Strobogrammatic Number II.py
|
647c4d7b744a0de87ccf6c389030ed0635e10845
|
[] |
no_license
|
wym1993/Leetcode
|
991c38f8c9f69cb79b6840d077dc961b6dc89a1e
|
2316300ce52cd8e2cead5be0310ec6de80a8d357
|
refs/heads/master
| 2021-01-20T05:13:59.390724
| 2017-11-15T01:16:56
| 2017-11-15T01:16:56
| 89,764,400
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 656
|
py
|
class Solution(object):
    def findStrobogrammatic(self, n):
        result = []
        # each digit mapped to its 180-degree rotation
        hash = {'0': '0', '1': '1', '6': '9', '8': '8', '9': '6'}
        self.helper(result, [None] * n, 0, n - 1, hash)
        return result
    def helper(self, result, item, start, end, hash):
        if start > end:
            result.append(''.join(item))
            return
        for key in hash:
            # the middle digit must map to itself, so '6' and '9' are out
            if start == end and key in ('6', '9'):
                continue
            # no leading zero on multi-digit numbers
            if start != end and start == 0 and key == '0':
                continue
            item[start], item[end] = key, hash[key]
            self.helper(result, item, start + 1, end - 1, hash)
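# --- Added usage example (not in the original file). The solver pairs each
# outer digit with its rotation and recurses inward; e.g. for n = 2:
#
#     print(Solution().findStrobogrammatic(2))  # ['11', '69', '88', '96']
#
# (element order depends on dict iteration order)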
|
[
"wang532@illinois.edu"
] |
wang532@illinois.edu
|
f9c657605b595502917fd601bcc709a4b0304b0d
|
57dea986afe3bb4e4eb1b20340399648c6fd647e
|
/lab2.py
|
1e4a7ea8964a0b1df09f3390a4b40cd16e678df1
|
[] |
no_license
|
clawdebob/selenium_labs
|
24f7271cd249ebb057328080033248dda8727743
|
8422246ba5573b09b8ce6174dad32cf93cf30246
|
refs/heads/master
| 2021-03-04T14:07:14.853284
| 2020-03-09T19:04:16
| 2020-03-09T19:04:16
| 246,040,319
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,199
|
py
|
import unittest
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class Labs(unittest.TestCase):
    def setUp(self):
        self.driver = webdriver.Chrome()
    def test_convert_tables(self):
        writer = pd.ExcelWriter('output/output.xlsx', engine='xlsxwriter')
        driver = self.driver
        main_link = "http://ab-w.net/HTML/table.php"
        tag = "table"
        driver.get(main_link)
        # the expected title means "Creating tables in HTML"; kept verbatim
        # because the assertion matches the live Russian-language page
        self.assertIn("Создание таблиц в HTML", driver.title)
        WebDriverWait(driver, 10).until(
            EC.visibility_of_element_located((By.TAG_NAME, tag))
        )
        tables = driver.find_elements_by_css_selector("table[border]")
        for idx, element in enumerate(tables):
            source = "<table>" + element.get_attribute("innerHTML") + "</table>"
            data = pd.read_html(source)
            data[0].to_excel(writer, sheet_name="Table " + str(idx + 1))
        writer.save()
if __name__ == "__main__":
    unittest.main()
|
[
"mok938@gmail.com"
] |
mok938@gmail.com
|
640181fd94a01f424dc43dbd9ddf797593d0b7a9
|
ddeceb24e7fb0335a769dfc537b254f8e7da8c0b
|
/pyramid_wiring/scope.py
|
ca1ea358ec09d2a401cfd291d8ffbd0422c4abeb
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
veeti/pyramid_wiring
|
1aa9d0787134671d9f2c60b4f5a0c0bb3e1a4310
|
517a2366b9816fa32531376ba26da0a04c309be0
|
refs/heads/master
| 2021-01-10T02:10:34.017416
| 2015-06-02T14:55:15
| 2015-06-02T14:55:15
| 36,740,540
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 757
|
py
|
from pyramid.threadlocal import get_current_request
from wiring import interface
from wiring.scopes import IScope
@interface.implements(IScope)
class RequestScope(object):
"""
A wiring scope where provided instances are cached per request.
"""
def _get(self):
request = get_current_request()
if not hasattr(request, '_wiring_scope'):
request._wiring_scope = {}
return request._wiring_scope
def __getitem__(self, specification):
return self._get().__getitem__(specification)
def __setitem__(self, specification, instance):
return self._get().__setitem__(specification, instance)
def __contains__(self, specification):
return self._get().__contains__(specification)
|
[
"veeti.paananen@rojekti.fi"
] |
veeti.paananen@rojekti.fi
|
418d205b9add068cf4cebeae5eda985ec9c89fb6
|
25279eb6ef2b9646852c12c008ba9bfaf3565646
|
/ValoresLista.py
|
14e4034c130617f6d4409768a3b110f5070cc77d
|
[] |
no_license
|
Gabrielgjs/python
|
5c6dfdb35bf42cf47cffef24778b62ec3a1916d5
|
f4b2dca07c8df50490ec0ea51365259e82fa914e
|
refs/heads/main
| 2023-08-04T06:41:40.492310
| 2021-08-31T21:10:51
| 2021-08-31T21:10:51
| 401,487,576
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 339
|
py
|
numeros = []
while True:
    n = int(input('Enter a value: '))
    if n not in numeros:
        numeros.append(n)
        print('Value added')
    else:
        print('Duplicate value, not adding it')
    r = str(input('Do you want to continue [Y/N]? ')).strip()
    if r in 'Nn':
        break
print(f'The values are {sorted(numeros)}')
|
[
"gjsgabriel83@gmail.com"
] |
gjsgabriel83@gmail.com
|
315cd0f3a46ccea82af893ed13206c1766c2b58a
|
90f62b35b1ae137aab6d3835c71211937df9e5ee
|
/newssum/parsers/__init__.py
|
788da3fb68e8ea39cb6746874e6e179636c0df05
|
[] |
no_license
|
mikelkl/news_summarization
|
2ebfad3f10b481c405b43849851958f93ea0ef3b
|
d5cc27521fdec82ccd273b6aac4f07e83dcb56ac
|
refs/heads/master
| 2021-09-09T12:56:06.252228
| 2018-03-16T09:59:35
| 2018-03-16T09:59:35
| 118,727,762
| 17
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 102
|
py
|
from .story import StoryParser
from .plaintext import PlaintextParser
from .tweets import TweetsParser
|
[
"mikelkl@foxmail.com"
] |
mikelkl@foxmail.com
|
78c1bfde08978675cf06dfb8a6c2923fde34c79d
|
1bb3c20ce5a53889eac280826d7d54194c7db33f
|
/math1/2839.py
|
96138db8e605efa844eb6edafdd7856bb3fd63d9
|
[] |
no_license
|
yejiiha/BaekJoon_step
|
bd2c040597766613985ae8d3a943999cb35d6671
|
3eaedbb832f14c51f3fb990e7e140f00d732df1e
|
refs/heads/master
| 2023-03-24T06:36:28.851139
| 2021-03-21T14:15:40
| 2021-03-21T14:15:40
| 285,825,071
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
n = int(input())
box = 0
if 3 <= n <= 5000:
    while True:
        if n % 5 == 0:
            box = box + (n // 5)
            print(box)
            break
        n = n - 3
        box += 1
        if n < 0:
            print(-1)
            break
|
[
"1126yezy@gmail.com"
] |
1126yezy@gmail.com
|
5a65881b50835a602343bd7b929b52b0d41c6425
|
ce8563184a6cf8210279b79cd1ccfd4040e35b55
|
/src/ebay_rest/api/sell_logistics/models/order.py
|
b38e234854aa3ce71ae6c1c27ea126404975fc6f
|
[
"MIT"
] |
permissive
|
craiga/ebay_rest
|
43dc107f9eeba9a04924f36ee8cf57af3854bc9a
|
a0be2677c65a787e9566df848ffa3ad0c309a9d9
|
refs/heads/main
| 2023-08-29T09:14:08.896434
| 2021-09-05T23:07:36
| 2021-09-05T23:07:36
| 411,585,541
| 0
| 0
|
MIT
| 2021-09-29T08:10:44
| 2021-09-29T08:10:43
| null |
UTF-8
|
Python
| false
| false
| 5,731
|
py
|
# coding: utf-8
"""
Logistics API
<span class=\"tablenote\"><b>Note:</b> This is a <a href=\"https://developer.ebay.com/api-docs/static/versioning.html#limited\" target=\"_blank\"> <img src=\"/cms/img/docs/partners-api.svg\" class=\"legend-icon partners-icon\" title=\"Limited Release\" alt=\"Limited Release\" />(Limited Release)</a> API available only to select developers approved by business units.</span><br /><br />The <b>Logistics API</b> resources offer the following capabilities: <ul><li><b>shipping_quote</b> – Consolidates into a list a set of live shipping rates, or quotes, from which you can select a rate to ship a package.</li> <li><b>shipment</b> – Creates a \"shipment\" for the selected shipping rate.</li></ul> Call <b>createShippingQuote</b> to get a list of live shipping rates. The rates returned are all valid for a specific time window and all quoted prices are at eBay-negotiated rates. <br><br>Select one of the live rates and using its associated <b>rateId</b>, create a \"shipment\" for the package by calling <b>createFromShippingQuote</b>. Creating a shipment completes an agreement, and the cost of the base service and any added shipping options are summed into the returned <b>totalShippingCost</b> value. This action also generates a shipping label that you can use to ship the package. The total cost of the shipment is incurred when the package is shipped using the supplied shipping label. <p class=\"tablenote\"><b>Important!</b> Sellers must set up a payment method via their eBay account before they can use the methods in this API to create a shipment and the associated shipping label.</p> # noqa: E501
OpenAPI spec version: v1_beta.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Order(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'channel': 'str',
        'order_id': 'str'
    }
    attribute_map = {
        'channel': 'channel',
        'order_id': 'orderId'
    }
    def __init__(self, channel=None, order_id=None):  # noqa: E501
        """Order - a model defined in Swagger"""  # noqa: E501
        self._channel = None
        self._order_id = None
        self.discriminator = None
        if channel is not None:
            self.channel = channel
        if order_id is not None:
            self.order_id = order_id
    @property
    def channel(self):
        """Gets the channel of this Order.  # noqa: E501
        The e-commerce platform or environment where the order was created. Use the value EBAY to get the rates available for eBay orders.  # noqa: E501
        :return: The channel of this Order.  # noqa: E501
        :rtype: str
        """
        return self._channel
    @channel.setter
    def channel(self, channel):
        """Sets the channel of this Order.
        The e-commerce platform or environment where the order was created. Use the value EBAY to get the rates available for eBay orders.  # noqa: E501
        :param channel: The channel of this Order.  # noqa: E501
        :type: str
        """
        self._channel = channel
    @property
    def order_id(self):
        """Gets the order_id of this Order.  # noqa: E501
        The unique ID of the order supplied by the channel of origin. For eBay orders, this would be the orderId.  # noqa: E501
        :return: The order_id of this Order.  # noqa: E501
        :rtype: str
        """
        return self._order_id
    @order_id.setter
    def order_id(self, order_id):
        """Sets the order_id of this Order.
        The unique ID of the order supplied by the channel of origin. For eBay orders, this would be the orderId.  # noqa: E501
        :param order_id: The order_id of this Order.  # noqa: E501
        :type: str
        """
        self._order_id = order_id
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(Order, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Order):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"matecsaj@gmail.com"
] |
matecsaj@gmail.com
|
2c4300c2a660d0c5fdf063c3a024a00110bd7d3f
|
74b97e20b06a58ada94278f82ce511403fcddf21
|
/test/scenarios/synapse/output/extflatten/src/synapse/azext_synapse/vendored_sdks/synapse/aio/operations/_sql_pool_restore_point_operations.py
|
82112426d08bd39a69a7952163fa375e87b1288b
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/autorest.az
|
b171eb495efdb815dc051dface3800c3e5e35b8e
|
64f403a5fe74be28e46a90b6b77f8d2bc9a12baf
|
refs/heads/master
| 2023-09-01T13:22:21.784354
| 2022-11-01T02:34:12
| 2022-11-01T02:34:12
| 226,059,721
| 24
| 17
|
MIT
| 2023-02-08T00:46:07
| 2019-12-05T09:04:00
|
Python
|
UTF-8
|
Python
| false
| false
| 13,222
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SQLPoolRestorePointOperations:
"""SQLPoolRestorePointOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~synapse_management_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
workspace_name: str,
sql_pool_name: str,
**kwargs
) -> AsyncIterable["models.RestorePointListResult"]:
"""Get SQL pool backup.
Get SQL pool backup information.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param sql_pool_name: SQL pool name.
:type sql_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RestorePointListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~synapse_management_client.models.RestorePointListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.RestorePointListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'sqlPoolName': self._serialize.url("sql_pool_name", sql_pool_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RestorePointListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/restorePoints'} # type: ignore
async def _create_initial(
self,
resource_group_name: str,
workspace_name: str,
sql_pool_name: str,
restore_point_label: str,
**kwargs
) -> Optional["models.RestorePoint"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.RestorePoint"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
parameters = models.CreateSQLPoolRestorePointDefinition(restore_point_label=restore_point_label)
api_version = "2019-06-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'sqlPoolName': self._serialize.url("sql_pool_name", sql_pool_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'CreateSQLPoolRestorePointDefinition')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RestorePoint', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RestorePoint', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/restorePoints'} # type: ignore
async def begin_create(
self,
resource_group_name: str,
workspace_name: str,
sql_pool_name: str,
restore_point_label: str,
**kwargs
) -> AsyncLROPoller["models.RestorePoint"]:
"""Creates a restore point for a data warehouse.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param sql_pool_name: SQL pool name.
:type sql_pool_name: str
:param restore_point_label: The restore point label to apply.
:type restore_point_label: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RestorePoint or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~synapse_management_client.models.RestorePoint]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.RestorePoint"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
restore_point_label=restore_point_label,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RestorePoint', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'sqlPoolName': self._serialize.url("sql_pool_name", sql_pool_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/restorePoints'} # type: ignore
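    # --- Editor's note: a minimal usage sketch, not part of the generated client. ---
    # Assumptions for illustration: `client` is an authenticated management client and
    # `sql_pool_restore_points` is the operations attribute exposing this class; the
    # resource names are placeholders.
    #
    #     poller = await client.sql_pool_restore_points.begin_create(
    #         resource_group_name="my-rg",
    #         workspace_name="my-workspace",
    #         sql_pool_name="my-pool",
    #         restore_point_label="pre-migration",
    #     )
    #     restore_point = await poller.result()  # waits for the LRO to finish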
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
a9e739c3260d11561e816c441abaa8c039d92793
|
b1fb42b72f91d1a1375da6c86c2fff2f765d47af
|
/COOL_Py/__init__.py
|
82ecead75d94397a80e059caed1f7cf17798de3d
|
[] |
no_license
|
Sanny26/COOL_Py
|
4e07450506ed753f7e923dc44090f972e3f97ddb
|
7a0ff6baf049781ae9e1f8cb43c60f9ab2a9ddfe
|
refs/heads/master
| 2021-08-17T00:59:41.269191
| 2017-11-20T16:16:35
| 2017-11-20T16:16:35
| 109,790,359
| 0
| 0
| null | 2017-11-20T16:16:35
| 2017-11-07T05:22:13
|
Python
|
UTF-8
|
Python
| false
| false
| 197
|
py
|
"""Init Script."""
# COOL_Py #
# An interpreter to convert Stanford's COOL Programming Language to Python.
# Authors: Santhoshini Reddy(github.com/Sanny26) and Chris Andrew(github.com/chrizandr)
|
[
"chris.g14@iiits.in"
] |
chris.g14@iiits.in
|
ae25e164b3de18879ba35f674d2f6da9fecc7fe5
|
ee3fd7cd456eeb7e557068b338d0c25811a20fe3
|
/python_grammer/python_grammer/lambda_.py
|
f39100be4cd066a7db515e94713485dabf951931
|
[] |
no_license
|
YuSunjo/algo_study
|
25f819f5e87b90f2ab9731abf95da8e7ee61e3bd
|
5849915afeefacbf58a9f6f31dc7a5b7852fce99
|
refs/heads/master
| 2023-09-04T00:29:19.893800
| 2021-11-14T09:23:11
| 2021-11-14T09:23:21
| 397,067,435
| 0
| 0
| null | 2021-11-12T01:51:15
| 2021-08-17T02:45:32
|
Python
|
UTF-8
|
Python
| false
| false
| 273
|
py
|
# Lambda expression practice
from functools import reduce
x = int(input())
plus_ten = lambda x: x + 10
print(plus_ten(x))
####################################
array = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20]
result = list(filter(lambda x: x > 5 and x < 15, array))
print(result)
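####################################
# Editor's note: `reduce` is imported above but never used; a minimal sketch of
# the same lambda style with reduce, summing the filtered values (6+8+10+12+14):
total = reduce(lambda acc, n: acc + n, result, 0)
print(total)  # 50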
|
[
"tnswh2023@gmail.com"
] |
tnswh2023@gmail.com
|
6bd500fcc614046db97e8ea5d519d3220dec7997
|
f1c77538f89171190e730a3ca1faf36051d1969a
|
/setup.py
|
c4b1b2527ba368c2aea02d0c7da46f7a45928eb7
|
[] |
no_license
|
manyids2/mateintwo
|
3d7438be2a1c66702967cb4e1ebda80f0801e3af
|
6d46ebf71388ef83712759fad13f75014f353b7f
|
refs/heads/master
| 2020-03-29T12:38:57.047859
| 2018-09-22T19:37:04
| 2018-09-22T19:37:04
| 149,910,297
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 306
|
py
|
from setuptools import setup
setup(
name='mateintwo',
version='0.1',
description='The mate in two trainer - Lazlo Polgar',
url='http://github.com/why/mateintwo',
author='F T Horde',
author_email='why@example.com',
license='MIT',
packages=['mateintwo'],
zip_safe=False)
|
[
"manyids2@gmail.com"
] |
manyids2@gmail.com
|
4c787a293e5b423e13c7eb460146d8f18003ede6
|
b477da605201a2e173c2e867a910e7d2db382eba
|
/_posts/coding/summary/sort/quick_select.py
|
21e4763bfac090cd090dcbf605d373dcd29c295b
|
[
"MIT"
] |
permissive
|
teckoo/teckoo.github.io
|
9222f7e3e7e8d0572ad23c6b8a8046c98c94fcf7
|
097063d86e6372f2b5a7e85f9a5d1bf8d744e459
|
refs/heads/master
| 2023-04-28T21:49:21.860709
| 2022-10-26T20:07:18
| 2022-10-26T20:07:18
| 23,817,328
| 2
| 2
|
MIT
| 2023-04-12T05:48:00
| 2014-09-09T02:59:09
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,378
|
py
|
# Python3 program of Quick Select
# Standard partition process of QuickSort().
# It considers the last element as pivot
# and moves all smaller element to left of
# it and greater elements to right
def partition(arr, l, r):
x = arr[r]
cur = l
for i in range(l, r):
if arr[i] <= x:
arr[cur], arr[i] = arr[i], arr[cur]
cur += 1
arr[cur], arr[r] = arr[r], arr[cur]
return cur
# finds the kth position (of the sorted array)
# in a given unsorted array i.e this function
# can be used to find both kth largest and
# kth smallest element in the array.
# ASSUMPTION: all elements in arr[] are distinct
def kthSmallest(arr, l, r, k):
# if k is smaller than number of
# elements in array
if (k > 0 and k <= r - l + 1):
# Partition the array around last
# element and get position of pivot
# element in sorted array
index = partition(arr, l, r)
# if position is same as k
if (index - l == k - 1):
return arr[index]
# If position is more, recur
# for left subarray
if (index - l > k - 1):
return kthSmallest(arr, l, index - 1, k)
# Else recur for right subarray
return kthSmallest(arr, index + 1, r,
k - index + l - 1)
    return float('inf')  # k out of range; Python has no INT_MAX, so signal with infinity
# Driver Code
arr = [ 10, 4, 5, 11, 6, 26, 8 ]
n = len(arr)
k = 3
print("K-th smallest element is ", end = "")
print(kthSmallest(arr, 0, n - 1, k))
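# Editor's note: a sanity check added for illustration: the quick-select result
# should match plain sorting; partition() mutates the list, so pass a copy.
assert kthSmallest(arr[:], 0, n - 1, k) == sorted(arr)[k - 1]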
|
[
"c2.programmer@gmail.com"
] |
c2.programmer@gmail.com
|
2a27a7720b6273098962a537332e897cc3ea044f
|
79f7cb78a7b0aeb40b44d0e5e56651c94e178569
|
/homework4.py
|
6d774cff43fa60dcca523a4c6521138f771a056f
|
[] |
no_license
|
oleksiiberezhnyi/BeetrootAcademy
|
8ae2b3db6fbb7b012a1b4a871ae5f40a08c73be3
|
e19891cb0ae9d1201d59d477f839c0d46e668164
|
refs/heads/master
| 2023-03-07T22:27:58.303497
| 2021-02-18T07:09:06
| 2021-02-18T07:09:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,254
|
py
|
# Import
import random
import math
from datetime import datetime
# Check correct inputs
while True:
# User input task number
    task_number = input("\nEnter a task number from 1 to 4. To finish, enter 0: ")
    if task_number.isalpha():  # If a letter, repeat the input
        print("You must enter a digit from 1 to 4!")
continue
elif int(task_number) == 1: # Run task 1
print("\nВиконуємо завдання №1\n")
random_number = random.randrange(1, 10, 1) # Random number
while True: # Compare random_number with user_number
            user_number = input("Enter a number from 1 to 10: ")
if user_number.isalpha():
print(f"Потрібно ввести число!")
continue
elif int(user_number) == random_number:
print(f"Вітаю! Ти вгадав!\nПрограма дійсно згенерувала число {user_number}!")
break
else:
print(f"Нажаль, це не число {user_number}. Спробуй ще раз!")
continue
continue
elif int(task_number) == 2: # Run task 2
print("\nВиконуємо завдання №2\n")
user_name = input("Привіт! Як тебе звати? ")
print(f"Приємно познайомитися, {user_name}")
# Check correct data
while True:
            user_birthday = input("Enter your date of birth (DD.MM.YYYY): ")
            # Check the entered date format
if len(user_birthday) != 10 \
or user_birthday[2] != "." \
or user_birthday[5] != "." \
or user_birthday[0: 2].isalpha() \
or user_birthday[3: 5].isalpha() \
or user_birthday[6:].isalpha():
print("Ти використав неправильний формат дати!")
continue
# Calculate
else:
today = datetime.now()
                birthday = datetime.strptime(user_birthday, "%d.%m.%Y")  # parse birthday as DD.MM.YYYY
age = today - birthday # calculate full age
next_birthday = datetime(birthday.year + int(age.days // 365.2425) + 1, birthday.month, birthday.day)
next_birthday_days = next_birthday - today # calculate remaining days
print(f"ОГО! На сьогодні тобі повних аж: "
f"{int(age.days // 365.2425)} років та "
f"{int((age.days / 365.2425 - age.days // 365.2425) * 365.2425 // 30)} місяців! "
f"До {int(age.days // 365.2425 + 1)}-річчя тобі лишилося: {next_birthday_days.days} днів",
sep=("")
)
break
continue
elif int(task_number) == 3: # Run task 3
print("\nВиконуємо завдання №3\n")
text = input("Введіть довільний текст: ")
i = 1
# random letters from text
while i <= 5:
random_text = random.sample(text, k=len(text))
print(f"Варіант №{i}:", "".join(random_text))
i += 1
continue
elif int(task_number) == 4: # Run task 4
print("\nВиконуємо завдання №4\n")
print("Перевіримо, чи пам'ятаєш ти тригонометрію")
alpha = math.radians(30) # translate to degrees
beta = math.radians(60) # translate to degrees
res = math.sin(alpha) * math.cos(beta) + math.cos(alpha) * math.sin(beta)
print("Дано:\n"
"\u03B1 = 30\u00B0\n"
"\u03B2 = 60\u00B0\n"
)
        user_res = input("What is the value of the expression \x1B[3msin\x1B[23m\u03B1\x1B[3mcos\x1B[23m\u03B2 + "
                         "\x1B[3mcos\x1B[23m\u03B1\x1B[3msin\x1B[23m\u03B2 = "
                         )  # italic(sin a cos b + cos a sin b)
if user_res.isalpha():
print("Результат - це число, а не літера!")
elif float(user_res) == float(res):
print("ОГО! Да ти геній!")
else:
print(f"Неправильно!\nНічого, я також не знав, поки в Google не спитав. Насправді, значення виразу:\n"
f"\x1B[3msin\x1B[23m\u03B1\x1B[3mcos\x1B[23m\u03B2 + "
f"\x1B[3mcos\x1B[23m\u03B1\x1B[3msin\x1B[23m\u03B2 = "
f"\x1B[3msin\x1B[23m(\u03B1+\u03B2) = \x1B[3msin\x1B[23m(30\u00B0 + 60\u00B0) = 1"
) # italic(sin a cos b + cos a sin b = sin(a + b) = sin(30 + 60) = )
continue
elif int(task_number) == 0: # Exit
print("Бувай! Гарного тобі дня!")
break
else: # if other number input
print("Такого завдання не існує!")
continue
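# Editor's note (illustration only, kept as comments so the script's behaviour
# is unchanged): the age arithmetic above uses the mean Gregorian year of
# 365.2425 days. The same idea as a self-contained helper:
#
#     def full_years(days: int) -> int:
#         return int(days // 365.2425)
#
#     assert full_years(365) == 0 and full_years(366) == 1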
|
[
"o.v.ber@me.com"
] |
o.v.ber@me.com
|
e01174dfdba0df3f15f8197479c59ee549454632
|
21a11c5d319861d5d9ec02a385ef673efa410b1c
|
/bot/telegrambot.py
|
339c8db713a9ee1885182d3818e4650a3d6f0b4e
|
[] |
no_license
|
Muslim2209/orderBot
|
89ec6b227a0639f4cdde16ccff24ae3cdb4dccb6
|
93f04f73bd482a8292d5c8e022762a52912aaaaf
|
refs/heads/master
| 2022-12-23T21:55:28.572279
| 2020-02-25T11:30:59
| 2020-02-25T11:30:59
| 242,982,926
| 0
| 0
| null | 2022-12-08T03:41:03
| 2020-02-25T11:25:32
|
Python
|
UTF-8
|
Python
| false
| false
| 2,489
|
py
|
from django_telegrambot.apps import DjangoTelegramBot
from telegram.ext import MessageHandler, Filters
from bot import constants
from bot.base_commands import register, sticker
from bot.controller import Controller
from bot.models import TelegramUser, Command
def bot_control(bot, update):
user, created = TelegramUser.objects.get_or_create(
telegram_id=update.message.chat_id)
if created:
user.first_name = update.message.from_user.first_name
user.last_name = update.message.from_user.last_name
user.username = update.message.from_user.username
user.save()
try:
cart = user.cart.all()
    except Exception:  # tolerate users without a cart
cart = None
if not user.is_registered:
return register(bot, user, update)
if update.message.text == '/start':
Controller(bot, update, user).start()
try:
last_command = Command.objects.filter(user=user).last()
except Command.DoesNotExist:
last_command = None
if update.message.text == 'Home':
Controller(bot, update, user).go_home()
elif update.message.text in ["Orqaga", 'Back', "Назад", "orqaga", 'back', "назад"]:
Controller(bot, update, user).go_home()
elif last_command.to_menu == constants.language:
Controller(bot, update, user, cart, last_command).language_select()
elif last_command.to_menu == constants.category:
Controller(bot, update, user, cart, last_command).category_select()
elif last_command.to_menu == constants.product:
Controller(bot, update, user, cart, last_command).product_select()
elif last_command.to_menu == constants.pieces:
Controller(bot, update, user, cart, last_command).pieces_select()
elif last_command.to_menu == constants.add_to_cart:
Controller(bot, update, user, cart, last_command).add_to_card()
elif last_command.current_menu == constants.finish_order:
Controller(bot, update, user, cart, last_command).finish_order()
elif last_command.current_menu == constants.feedback:
Controller(bot, update, user, cart, last_command).feedback()
elif last_command.to_menu == constants.home:
Controller(bot, update, user, cart, last_command).home_control()
else:
bot.sendMessage(update.message.chat_id, text='???')
def main():
dp = DjangoTelegramBot.dispatcher
dp.add_handler(MessageHandler(Filters.all, bot_control))
dp.add_handler(MessageHandler(Filters.sticker, sticker))
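    # Editor's note: in python-telegram-bot, only the first matching handler in a
    # group runs, and Filters.all matches stickers too, so as registered above the
    # sticker handler never fires. Registering it first would route stickers to
    # `sticker` instead (sketch):
    #
    #     dp.add_handler(MessageHandler(Filters.sticker, sticker))
    #     dp.add_handler(MessageHandler(Filters.all, bot_control))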
|
[
"abdulhakiym2209@gmail.com"
] |
abdulhakiym2209@gmail.com
|
fb2b2833d0209bd72fb8680a1ce9862be9c4b473
|
34db6d2ead4589ac8d360bdefc7b66c65a82fa77
|
/API Executable.py
|
7ce9e1fdccf5b0e2f478ac851fa4ce860b02671d
|
[] |
no_license
|
glowsplint/api-executable
|
e2cd891725a7bb7bf1db8ff3d8682a1a33096d16
|
b3e3eebc89653a8b23b6b27861b9ef04fcbaa9c4
|
refs/heads/master
| 2023-04-06T10:00:39.104443
| 2021-04-09T06:05:46
| 2021-04-09T06:05:46
| 356,155,749
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,881
|
py
|
'''
# BigSchedules Production API Caller
This script calls the BigSchedules API using our production credentials. Ensure that the call rate does not exceed one call per second.
For the production environment, the API call limit is a maximum of 2,000 calls per day and 150 calls per minute, with at most 30,000 calls per month.
The script requires an input file 'BigSchedules Port Pairs - *.csv'.
--------
Features
--------
1. If calls have already been done today (i.e. the json files exist in the <today_path> directory), it will not rerun those calls.
2. Will raise an exception if PulseSecure is on
'''
import pkg_resources.py2_warn
import requests
import json
import os
import pandas as pd
import numpy as np
import joblib
from datetime import datetime, timedelta
from pathlib import Path
from tqdm import tqdm
from secrets import PRODUCTION_KEY
'''
Complete all API calls and save the responses by date and batch into respective folders.
'''
# Check if the directory already exists: if it does, compute the outstanding calls; otherwise use df as-is
today_path = Path('responses/' + datetime.now().strftime('%Y-%m-%d'))
port_pairs_files = list(Path('.').glob('BigSchedules Port Pairs - *.csv'))
if len(port_pairs_files) != 1:
    print('Exception: Expected exactly one file matching the pattern "BigSchedules Port Pairs - *.csv". Please ensure that exactly one such file exists.')
    input()
    raise Exception(
        'Expected exactly one file matching the pattern "BigSchedules Port Pairs - *.csv". Please ensure that exactly one such file exists.')
try:
df = pd.read_csv(port_pairs_files[0])
os.makedirs(today_path)
except FileExistsError:
path = Path(os.getcwd()) / today_path / 'batch one'
completed_df = pd.DataFrame([item.name for item in sorted(
path.glob('*.json'))]).rename({0: 'pol_pod'}, axis=1)
df['pol_pod'] = df.port_of_loading + '-' + df.port_of_discharge
if len(completed_df):
completed_df['pol_pod'] = completed_df.pol_pod.str[:11]
print(f'Detected {len(completed_df)} existing API calls today.')
outstanding = set(df.pol_pod) - set(completed_df.pol_pod)
else:
outstanding = set(df.pol_pod)
if len(outstanding) == 0:
df = pd.DataFrame({'pol_pod': []})
else:
df = pd.DataFrame(outstanding)[0].str.split(
'-', expand=True).rename({0: 'port_of_loading', 1: 'port_of_discharge'}, axis=1)
os.chdir(today_path)
batch_paths = ['batch one', 'batch two']
for batch in batch_paths:
try:
os.makedirs(batch)
except FileExistsError:
pass
url = 'https://apis.cargosmart.com/openapi/schedules/routeschedules'
weeks = 6
def set_params(df, index, departureFrom):
credentials = {
'appKey': PRODUCTION_KEY,
'departureFrom': departureFrom,
'searchDuration': weeks
}
parameters = {
'porID': df.iloc[index]['port_of_loading'],
'fndID': df.iloc[index]['port_of_discharge']
}
credentials.update(parameters)
return credentials
def write_json(response, output_file):
with open(output_file, 'w') as w:
json.dump(response, w, indent=2)
print('Running first batch of API calls...')
os.chdir(batch_paths[0])
for i in tqdm(range(len(df))):
departureFrom = datetime.now().strftime('%Y-%m-%d') + 'T00:00:00.000Z'
try:
response = requests.get(url, params=set_params(df, i, departureFrom))
write_json(response.json(),
f'{set_params(df, i, departureFrom)["porID"]}-{set_params(df, i, departureFrom)["fndID"]}.json')
except requests.exceptions.SSLError:
raise Exception(
'You are connected on PulseSecure! You will need to turn off PulseSecure to run this script.')
except Exception as e:
pass
print('Running second batch of API calls...')
os.chdir('../' + batch_paths[1])
for i in tqdm(range(len(df))):
departureFrom = (datetime.now() + timedelta(weeks=weeks)
).strftime('%Y-%m-%d') + 'T00:00:00.000Z'
try:
response = requests.get(url, params=set_params(df, i, departureFrom))
write_json(response.json(),
f'{set_params(df, i, departureFrom)["porID"]}-{set_params(df, i, departureFrom)["fndID"]}.json')
except Exception as e:
pass
'''
Assemble the file from the saved API responses.
'''
class Hasher(dict):
def __missing__(self, key):
value = self[key] = type(self)()
return value
def replaceHasherWithBlank(variable, blank):
if isinstance(variable, Hasher):
return blank
return variable
def find_cy(data, i, j, total_legs, port_of_loading, blank):
'''
Find the first leg where transportMode == "VESSEL", take the defaultCutoff in the same leg,
checking that the fromPoint of that leg is the POL.
'''
for k in range(total_legs):
if data['routeGroupsList'][i]['route'][j]['leg'][k]['transportMode'] == 'VESSEL':
if port_of_loading == data['routeGroupsList'][i]['route'][j]['leg'][k]['fromPoint']['location']['unlocode']:
return Hasher(data['routeGroupsList'][i]['route'][j]['leg'][k]['fromPoint'])['defaultCutoff']
return blank
def find_routing(data, i, j, total_legs, port_of_discharge, blank):
'''
Find the first leg where transportMode == "VESSEL", take the toPoint in the same leg,
checking that the toPoint of that leg is not the POD.
'''
for k in range(total_legs):
if data['routeGroupsList'][i]['route'][j]['leg'][k]['transportMode'] == 'VESSEL':
routing = data['routeGroupsList'][i]['route'][j]['leg'][k]['toPoint']['location']['unlocode']
if port_of_discharge != routing:
return routing
return blank
def find_vsv(data, i, j, total_legs, blank):
'''
Find the first leg where transportMode == "VESSEL",
take the vessel name in the same leg,
take the service name in the same leg,
take the externalVoyageNumber.
'''
for k in range(total_legs):
if data['routeGroupsList'][i]['route'][j]['leg'][k]['transportMode'] == 'VESSEL':
vessel = Hasher(Hasher(data['routeGroupsList'][i]['route'][j]['leg'][k])[
'vessel'])['name']
service = Hasher(Hasher(data['routeGroupsList'][i]['route'][j]['leg'][k])[
'service'])['name']
voyage = Hasher(data['routeGroupsList'][i]['route'][j]['leg'][k])[
'externalVoyageNumber']
return vessel, service, voyage
return blank, blank, blank
def get_relevant_fields(data, i, j):
'''Every route should be a single row in the spreadsheet'''
blank = np.nan
# Fields that don't crash the script
port_of_loading = data['routeGroupsList'][i]['por']['location']['unlocode']
port_of_discharge = data['routeGroupsList'][i]['fnd']['location']['unlocode']
departure_date = data['routeGroupsList'][i]['route'][j]['por']['etd']
arrival_date = data['routeGroupsList'][i]['route'][j]['fnd']['eta']
transit = data['routeGroupsList'][i]['route'][j]['transitTime']
carrier = data['routeGroupsList'][i]['carrier']['name']
update_date = data['routeGroupsList'][i]['route'][j]['touchTime']
# created_date = pd.Timestamp.today()
total_legs = len(data['routeGroupsList'][i]['route'][j]['leg'])
cy_cutoff_date = find_cy(data, i, j, total_legs, port_of_loading, blank)
routing = find_routing(data, i, j, total_legs, port_of_discharge, blank)
'''
Loading terminal is currently:
loading_terminal = Hasher(
data['routeGroupsList'][i]['route'][j]['por']['location'])['facility']['name']
Loading terminal in a future release could be:
Within the route, take the first leg, get the fromPoint.location.facility.name
'''
loading_terminal = Hasher(
data['routeGroupsList'][i]['route'][j]['por']['location'])['facility']['name']
'''
Discharge terminal is currently:
discharge_terminal = Hasher(
data['routeGroupsList'][i]['route'][j]['fnd']['location'])['facility']['name']
Discharge terminal in a future release could be:
Within the route, take the last leg, get the toPoint.location.facility.name
'''
discharge_terminal = Hasher(
data['routeGroupsList'][i]['route'][j]['fnd']['location'])['facility']['name']
cy_cutoff_date = replaceHasherWithBlank(cy_cutoff_date, blank)
loading_terminal = replaceHasherWithBlank(loading_terminal, blank)
discharge_terminal = replaceHasherWithBlank(discharge_terminal, blank)
vessel, service, voyage = find_vsv(data, i, j, total_legs, blank)
vessel = replaceHasherWithBlank(vessel, blank)
service = replaceHasherWithBlank(service, blank)
voyage = replaceHasherWithBlank(voyage, blank)
return {
"port_of_loading": port_of_loading,
"port_of_discharge": port_of_discharge,
"cy_cutoff_date": cy_cutoff_date,
"departure_date": departure_date,
"arrival_date": arrival_date,
"transit": transit,
"service": service,
"vessel": vessel,
"voyage": voyage,
"carrier": carrier,
"routing": routing,
"loading_terminal": loading_terminal,
"discharge_terminal": discharge_terminal,
"update_date": update_date,
# "created_date": created_date,
"source": "BigSchedules API"
}
def extract_data(data):
if len(data['routeGroupsList']):
df = pd.DataFrame(([get_relevant_fields(data, i, j)
for i in range(len(data['routeGroupsList']))
for j in range(len(data['routeGroupsList'][i]['route']))]))
df.sort_values('departure_date', inplace=True)
df.reset_index(drop=True, inplace=True)
return df
else:
return
def create_data_list(current_path):
data_list = []
for _, file in enumerate(sorted(current_path.glob('*.json'))):
with open(file, 'r') as jsonfile:
data = json.load(jsonfile)
data_list.append(data)
return data_list
def create_df(data_list):
df = pd.concat([extract_data(Hasher(data))
for data in data_list], ignore_index=True)
df.cy_cutoff_date = pd.to_datetime(
df.cy_cutoff_date, utc=True).dt.strftime('%d/%m/%Y')
df.departure_date = pd.to_datetime(
df.departure_date).dt.strftime('%d/%m/%Y')
df.arrival_date = pd.to_datetime(df.arrival_date).dt.strftime('%d/%m/%Y')
df.update_date = pd.to_datetime(df.update_date).dt.strftime('%d/%m/%Y')
# df.created_date = pd.to_datetime(df.created_date).dt.strftime('%d/%m/%Y')
return df
list_of_dfs = []
for batch in batch_paths:
os.chdir('../' + batch)
path = Path(os.getcwd())
data_list = create_data_list(path)
list_of_dfs.append(create_df(data_list))
full_df = pd.concat(list_of_dfs).reset_index(drop=True)
# # Replace Unicode characters
# full_df.carrier = full_df.carrier.str.replace('Ü', 'U')
# full_df.carrier = full_df.carrier.str.replace('Ç', 'C')
# # Prepare APAC version with US port pairs removed
# apac_df = full_df.loc[full_df.port_of_loading.str[:2]
# != 'US'].reset_index(drop=True).copy()
# Determine number of failed (timeout) API Calls
def isAPICallError(path_to_file, error_msg):
with open(path_to_file) as jsonfile:
data = json.load(jsonfile)
try:
if data['message'][:len(error_msg)] == error_msg:
return path_to_file.name
except KeyError:
pass
files_in_folder = sorted(path.glob('*.json'))
timeout_list = [isAPICallError(item, "Call ssm2014 timeout")
for item in files_in_folder if isAPICallError(item, "Call ssm2014 timeout") is not None]
timeout_list = timeout_list + [isAPICallError(item, "Call ssm2014 Request failed with status code 500")
for item in files_in_folder if isAPICallError(item, "Call ssm2014 Request failed with status code 500") is not None]
print(f'There are {len(timeout_list)} timeout errors in this run.')
os.chdir('../../../')
full_df_name = f'schedules_{len(full_df)} - {datetime.now().strftime("%d.%m.%Y")}.csv'
full_df.to_csv(f'{full_df_name}', index=False)
print(f'{full_df_name} has been created in the current directory.')
# apac_df_name = f'schedules_{len(apac_df)} - {datetime.now().strftime("%d.%m.%Y")} (APAC).csv'
# apac_df.to_csv(f'{apac_df_name}', index=False)
# print(f'{apac_df_name} has been created in the current directory.')
# log_df_name = f'error_list - {datetime.now().strftime("%d.%m.%Y")}.csv'
# log_df.to_csv(f'{log_df_name}', index=False)
# print(f'{log_df_name} has been created in the current directory.')
print('The API Executable has finished running. Press any key to exit.')
input()
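# Editor's note: `Hasher` above is an auto-vivifying dict: a missing key creates
# and returns a nested Hasher instead of raising KeyError, which is why deep
# lookups like Hasher(point)['facility']['name'] never crash, and why
# replaceHasherWithBlank() maps a still-empty Hasher back to NaN. Sketch:
#
#     h = Hasher()
#     value = h['a']['b']['c']    # no KeyError; value is an empty Hasher
#     isinstance(value, Hasher)   # True -> replaceHasherWithBlank(value, blank) -> blank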
|
[
"tym.teo.23@gmail.com"
] |
tym.teo.23@gmail.com
|
8df6a09d9f9f8209e6e2fa4e19fe04bc964d2b7f
|
a019d939d94ec583b27e3f729b30e6c6a33f677f
|
/blog/migrations/0001_initial.py
|
4e0a53b0190cc6a470847efa997acc1aee54a92a
|
[] |
no_license
|
Sakulaczx/lo-blog
|
85b8f6d588dc3255ed951717377e81cbc6e3d5c2
|
9a02396c899147f607d7926bc89dbdf46a81fb9e
|
refs/heads/master
| 2020-04-01T09:25:22.739907
| 2018-10-15T08:54:56
| 2018-10-15T08:54:56
| 153,074,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,885
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2018-09-14 16:34
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('date_time', models.DateField(auto_now_add=True)),
('content', models.TextField(blank=True, null=True)),
('digest', models.TextField(blank=True, null=True)),
('view', models.BigIntegerField(default=0)),
('comment', models.BigIntegerField(default=0)),
('picture', models.CharField(max_length=200)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='\u4f5c\u8005')),
],
options={
'ordering': ['-date_time'],
},
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, verbose_name='\u6a21\u5757')),
('created_time', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
('last_mod_time', models.DateTimeField(auto_now=True, verbose_name='\u4fee\u6539\u65f6\u95f4')),
],
options={
'ordering': ['name'],
'verbose_name': '\u6a21\u5757',
'verbose_name_plural': '\u6a21\u5757',
},
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, verbose_name='\u6807\u9898')),
('source_id', models.CharField(max_length=25, verbose_name='\u6587\u7ae0id\u6216source\u540d\u79f0')),
('create_time', models.DateTimeField(auto_now=True, verbose_name='\u8bc4\u8bba\u65f6\u95f4')),
('user_name', models.CharField(max_length=25, verbose_name='\u8bc4\u8bba\u7528\u6237')),
('url', models.CharField(max_length=100, verbose_name='\u94fe\u63a5')),
('comment', models.CharField(max_length=500, verbose_name='\u8bc4\u8bba\u5185\u5bb9')),
],
),
migrations.CreateModel(
name='MindMap',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('data', models.TextField()),
('title', models.CharField(default=b'new mindmap', max_length=200)),
('modify_user', models.IntegerField(default=-1)),
('create_user', models.IntegerField(default=-1)),
('category_id', models.IntegerField(blank=True, null=True)),
('modify_time', models.DateTimeField(auto_now=True)),
('create_time', models.DateTimeField(auto_now_add=True)),
('group', models.ManyToManyField(blank=True, to='auth.Group')),
],
),
migrations.CreateModel(
name='MindMapImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('filename', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tag_name', models.CharField(max_length=30, verbose_name='\u6807\u7b7e\u540d')),
],
),
migrations.AddField(
model_name='mindmap',
name='image',
field=models.ManyToManyField(blank=True, to='blog.MindMapImage'),
),
migrations.AddField(
model_name='article',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Category', verbose_name='\u6a21\u5757'),
),
migrations.AddField(
model_name='article',
name='tag',
field=models.ManyToManyField(to='blog.Tag'),
),
]
|
[
"291290367@qq.com"
] |
291290367@qq.com
|
0db314792d79f06fc66bc54cd045b4f8982e0602
|
20527de15bff7810c8cb3d8b45d919ce413c22ec
|
/scripts/run_integration_test.py
|
38401ce3f9149c3431aa3c8713a118d3fe132a38
|
[
"MIT"
] |
permissive
|
modulexcite/pyre-check
|
7fd921b4f323e149aebe9a3eb03f6db124d23822
|
a84e85f735fa80a95cd4a0a10d1b88bff3495c8e
|
refs/heads/master
| 2020-06-27T07:56:18.019456
| 2019-07-31T01:11:01
| 2019-07-31T01:14:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,200
|
py
|
#!/usr/bin/env python3
import argparse
import filecmp
import fileinput
import json
import logging
import os
import shutil
import subprocess
import sys
import tempfile
from contextlib import contextmanager
LOG = logging.getLogger(__name__)
def is_readable_directory(directory: str) -> bool:
return os.path.isdir(directory) and os.access(directory, os.R_OK)
def assert_readable_directory(directory: str) -> None:
if not os.path.isdir(directory):
raise Exception("{} is not a valid directory.".format(directory))
if not os.access(directory, os.R_OK):
raise Exception("{} is not a readable directory.".format(directory))
def extract_typeshed(configuration_file: str):
try:
with open(configuration_file) as file:
configuration = json.load(file)
typeshed = configuration.get("typeshed")
version_hash = configuration.get("version")
if not typeshed:
return None
if version_hash:
typeshed = typeshed.replace("%V", version_hash)
return typeshed
except Exception as e:
LOG.error("Exception raised while reading %s:", configuration_file)
LOG.error("%s", e)
return None
def get_typeshed_from_github(base_directory: str):
typeshed = os.path.join(base_directory, "typeshed")
os.mkdir(typeshed)
result = subprocess.run(
["git", "clone", "https://github.com/python/typeshed.git", typeshed]
)
if result.returncode != 0:
return None
assert_readable_directory(typeshed)
# Prune all non-essential directories.
for entry in os.listdir(typeshed):
if entry in ["stdlib", "third_party"]:
continue
full_path = os.path.join(typeshed, entry)
if os.path.isfile(full_path):
os.remove(full_path)
elif os.path.isdir(full_path):
shutil.rmtree(full_path)
return typeshed
def find_test_typeshed(base_directory: str) -> str:
test_typeshed = os.getenv("PYRE_TEST_TYPESHED_LOCATION")
if test_typeshed and is_readable_directory(test_typeshed):
LOG.info("Using typeshed from environment: %s", test_typeshed)
return test_typeshed
# Check if we can infer typeshed from a .pyre_configuration
# file living in a directory above.
path = os.getcwd()
while True:
configuration = os.path.join(path, ".pyre_configuration")
if os.path.isfile(configuration):
test_typeshed = extract_typeshed(configuration)
if test_typeshed and is_readable_directory(test_typeshed):
LOG.info("Using typeshed from configuration: %s", test_typeshed)
return test_typeshed
parent_directory = os.path.dirname(path)
if parent_directory == path:
# We have reached the root.
break
path = parent_directory
# Try and fetch it from the web in a temporary directory.
temporary_typeshed = get_typeshed_from_github(base_directory)
if temporary_typeshed and is_readable_directory(temporary_typeshed):
LOG.info("Using typeshed from the web: %s", temporary_typeshed)
return temporary_typeshed
raise Exception("Could not find a valid typeshed to use")
def poor_mans_rsync(source_directory, destination_directory, ignored_files=None):
ignored_files = ignored_files or []
# Do not delete the server directory while copying!
assert_readable_directory(source_directory)
source_files = [
entry
for entry in os.listdir(source_directory)
if entry not in ignored_files
and os.path.isfile(os.path.join(source_directory, entry))
]
assert_readable_directory(destination_directory)
destination_files = [
entry
for entry in os.listdir(destination_directory)
if entry not in ignored_files
and os.path.isfile(os.path.join(destination_directory, entry))
]
source_directories = [
entry
for entry in os.listdir(source_directory)
if os.path.isdir(os.path.join(source_directory, entry))
]
destination_directories = [
entry
for entry in os.listdir(destination_directory)
if os.path.isdir(os.path.join(destination_directory, entry))
]
# Copy all directories over blindly.
for directory in source_directories:
source = os.path.join(source_directory, directory)
destination = os.path.join(destination_directory, directory)
if os.path.isdir(destination):
shutil.rmtree(destination)
shutil.copytree(source, destination)
# Delete any missing directories.
for directory in destination_directories:
if directory not in source_directories:
destination = os.path.join(destination_directory, directory)
shutil.rmtree(destination)
for filename in destination_files:
if filename not in source_files:
LOG.info("Removing file '%s' from destination" % filename)
os.remove(os.path.join(destination_directory, filename))
# Compare files across source and destination.
(match, mismatch, error) = filecmp.cmpfiles(
source_directory, destination_directory, source_files, shallow=False
)
for filename in match:
LOG.info("Skipping file '%s' because it matches" % filename)
for filename in mismatch:
LOG.info("Copying file '%s' due to mismatch" % filename)
shutil.copy2(os.path.join(source_directory, filename), destination_directory)
for filename in error:
LOG.info("Copying file '%s' because it is missing" % filename)
shutil.copy2(os.path.join(source_directory, filename), destination_directory)
class Repository:
def __init__(self, base_directory: str, repository_path: str) -> None:
self._test_typeshed_location = find_test_typeshed(base_directory)
# Parse list of fake commits.
assert_readable_directory(repository_path)
self._base_repository_path = os.path.realpath(repository_path)
commits_list = os.listdir(self._base_repository_path)
list.sort(commits_list)
for commit in commits_list:
assert_readable_directory(os.path.join(self._base_repository_path, commit))
self._commits_list = iter(commits_list)
# Move into the temporary repository directory.
self._pyre_directory = os.path.join(base_directory, "repository")
os.mkdir(self._pyre_directory)
os.chdir(self._pyre_directory)
# Seed the repository with the base commit.
self.__next__()
def get_repository_directory(self) -> str:
return self._pyre_directory
def __iter__(self):
return self
def __next__(self):
self._current_commit = self._commits_list.__next__()
LOG.info("Moving to commit named: %s" % self._current_commit)
# Last empty path is needed to terminate the path with a directory separator.
original_path = os.path.join(
self._base_repository_path, self._current_commit, ""
)
self._copy_commit(original_path, ".")
self._resolve_typeshed_location(".pyre_configuration")
return self._current_commit
def _copy_commit(self, original_path, destination_path):
"""
Copies the next commit at original_path to destination path. Can be
overridden by child classes to change copying logic.
"""
# I could not find the right flags for rsync to touch/write
# only the changed files. This is crucial for watchman to
# generate the right notifications. Hence, this.
poor_mans_rsync(original_path, destination_path)
def _resolve_typeshed_location(self, filename):
with fileinput.input(filename, inplace=True) as f:
for line in f:
print(
line.replace(
"PYRE_TEST_TYPESHED_LOCATION", self._test_typeshed_location
),
end="",
)
def get_pyre_errors(self):
# Run the full check first so that watchman updates have time to propagate.
check_errors = self.run_pyre("check")
incremental_errors = self.run_pyre("incremental")
return (incremental_errors, check_errors)
def run_pyre(self, command: str, *arguments: str) -> str:
pyre_client = os.getenv("PYRE_TEST_CLIENT_LOCATION", "pyre")
try:
output = subprocess.check_output(
[
pyre_client,
"--noninteractive",
"--show-parse-errors",
"--output=json",
command,
*arguments,
]
)
except subprocess.CalledProcessError as error:
if error.returncode not in [0, 1]:
raise error
output = error.output
return output.decode("utf-8")
def run_integration_test(repository_path) -> int:
if not shutil.which("watchman"):
LOG.error("The integration test cannot work if watchman is not installed!")
return 1
with tempfile.TemporaryDirectory() as base_directory:
discrepancies = {}
repository = Repository(base_directory, repository_path)
with _watch_directory(repository.get_repository_directory()):
try:
repository.run_pyre("start")
for commit in repository:
(actual_error, expected_error) = repository.get_pyre_errors()
if actual_error != expected_error:
discrepancies[commit] = (actual_error, expected_error)
repository.run_pyre("stop")
except Exception as uncaught_pyre_exception:
LOG.error("Uncaught exception: `%s`", str(uncaught_pyre_exception))
LOG.info("Pyre rage: %s", repository.run_pyre("rage"))
raise uncaught_pyre_exception
if discrepancies:
LOG.error("Pyre rage:")
print(repository.run_pyre("rage"), file=sys.stderr)
LOG.error("Found discrepancies between incremental and complete checks!")
for revision, (actual_error, expected_error) in discrepancies.items():
print("Difference found for revision: {}".format(revision))
print("Actual errors (pyre incremental): {}".format(actual_error))
print("Expected errors (pyre check): {}".format(expected_error))
return 1
return 0
# In general, saved state load/saves are a distributed system problem - the file systems
# are completely different. Make sure that Pyre doesn't rely on absolute paths when
# loading via this test.
def run_saved_state_test(repository_path: str) -> int:
# Copy files over to a temporary directory.
original_directory = os.getcwd()
saved_state_path = tempfile.NamedTemporaryFile().name
with tempfile.TemporaryDirectory() as saved_state_create_directory:
repository = Repository(saved_state_create_directory, repository_path)
repository.run_pyre("--save-initial-state-to", saved_state_path, "incremental")
repository.__next__()
expected_errors = repository.run_pyre("check")
repository.run_pyre("stop")
os.chdir(original_directory)
with tempfile.TemporaryDirectory() as saved_state_load_directory:
repository = Repository(saved_state_load_directory, repository_path)
repository.__next__()
repository.run_pyre("--load-initial-state-from", saved_state_path, "start")
actual_errors = repository.run_pyre("incremental")
repository.run_pyre("stop")
if actual_errors != expected_errors:
LOG.error("Actual errors are not equal to expected errors.")
print("Actual errors (pyre incremental): {}".format(actual_errors))
print("Expected errors (pyre check): {}".format(expected_errors))
return 1
return 0
@contextmanager
def _watch_directory(source_directory):
subprocess.check_call(
["watchman", "watch", source_directory],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
yield
subprocess.check_call(
["watchman", "watch-del", source_directory],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
if __name__ == "__main__":
logging.basicConfig(
level=logging.INFO, format=" >>> %(asctime)s %(levelname)s %(message)s"
)
parser = argparse.ArgumentParser()
parser.add_argument(
"repository_location", help="Path to directory with fake commit list"
)
arguments = parser.parse_args()
retries = 3
while retries > 0:
try:
exit_code = run_integration_test(arguments.repository_location)
if exit_code != 0:
sys.exit(exit_code)
sys.exit(run_saved_state_test(arguments.repository_location))
except Exception:
# Retry the integration test for uncaught exceptions. Caught issues will
# result in an exit code of 1.
retries = retries - 1
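# Editor's note: usage, per the argparse definition above:
#     python run_integration_test.py path/to/fake_commit_list
# Caveat: if all three retries raise, the while loop simply falls through and the
# script exits with status 0, so callers should not rely on the exit code alone
# to detect repeated crashes.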
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
aa47efbc4e1cf1b997bbbcb0139310b0393aca0f
|
e8e50cd161c26be486f70fe78c67ede364613ee5
|
/News/models.py
|
97e2927d4c84da33529b1dee7c916ea2e5e48916
|
[] |
no_license
|
kuzhelny/my-apartment-app-project
|
28ac730a61563c1bab235581e3e92d36b69fa52d
|
c11edeb8246c6a2613c544857206460575225a57
|
refs/heads/master
| 2021-01-10T08:01:06.026096
| 2015-11-05T16:37:37
| 2015-11-05T16:37:37
| 45,148,676
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
from django.db import models
from django.contrib import admin
from django.core.files.images import ImageFile
class NewsItem(models.Model):
title = models.CharField(max_length=50)
subtitle = models.CharField(max_length=50)
content = models.TextField()
image = models.ImageField(upload_to='News/images')
def __unicode__(self):
return self.title
admin.site.register(NewsItem)
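# Editor's note: __unicode__ is the Python 2 string hook; under Python 3 / modern
# Django the equivalent would be (a sketch, not part of the original):
#
#     def __str__(self):
#         return self.title
#
# Also note that ImageFile is imported above but never used.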
|
[
"kuzhelny@mail.com"
] |
kuzhelny@mail.com
|
e57d1a4c67e73780bdd1b919d7a80cc6e1d24e07
|
6b2a23ffce99bcd76d4eba47e37cd059b1af5fef
|
/Mxonline/apps/users/urls.py
|
14b0f0a41ea9b5732bbee61729c0ae10db39f4d8
|
[] |
no_license
|
Kylin92/OnlineEdu
|
80d91bd76d248d5172a5869d1b2f619477fbb448
|
7d091cb1c6d2045ade12a915273e6a689067ca00
|
refs/heads/master
| 2022-12-17T08:31:56.811263
| 2019-09-01T08:26:14
| 2019-09-01T08:26:14
| 133,017,817
| 0
| 1
| null | 2022-11-22T02:18:22
| 2018-05-11T09:14:36
|
CSS
|
UTF-8
|
Python
| false
| false
| 1,251
|
py
|
from django.conf.urls import url
from users.views import UserInfoView, UploadImageView, UpdatePwdView, SendEmailCodeView, UpdateEmailView, MyCourseView, \
MyFavOrgView, MyFavTeacherView, MyFavCourseView, MyMessageView
app_name = "users"
urlpatterns = [
    # User info
    url(r'^info/$', UserInfoView.as_view(), name="user_info"),
    # User avatar upload
    url(r'image/upload/$', UploadImageView.as_view(), name="image_upload"),
    # Change password from the user profile center
    url(r'update/pwd/$', UpdatePwdView.as_view(), name="update_pwd"),
    # Dedicated to sending the email verification code
    url(r'sendemail_code/$', SendEmailCodeView.as_view(), name="sendemail_code"),
    url(r'update_email/$', UpdateEmailView.as_view(), name="update_email"),
    # My courses in the user center
    url(r'mycourse/$', MyCourseView.as_view(), name="mycourse"),
    # Course organizations I have favorited
    url(r'myfav/org/$', MyFavOrgView.as_view(), name="myfav_org"),
    # Teachers I have favorited
    url(r'myfav/teacher/$', MyFavTeacherView.as_view(), name="myfav_teacher"),
    # Courses I have favorited
    url(r'myfav/course/$', MyFavCourseView.as_view(), name="myfav_course"),
    # My messages
    url(r'my_message/$', MyMessageView.as_view(), name="my_message"),
]
|
[
"2169927630@qq.com"
] |
2169927630@qq.com
|
054e99527f45c4e5d0b459d66048d8538c26a041
|
97aa17aaa27d4dcb1b23a35ec42118ed0a5a5dc1
|
/DifferenceMethod.py
|
9d3565cbac20247cc07d9b3a23a734ddeab23c31
|
[] |
no_license
|
billadona/Numerical-Analysis-Tool
|
74bd63c1636f271d0554a3863880b8ee3bd16ede
|
f507e0bd44f3d346815bc113a07cfe682aa89b35
|
refs/heads/master
| 2021-01-20T05:09:24.337626
| 2017-04-29T00:35:41
| 2017-04-29T00:35:41
| 89,754,492
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,665
|
py
|
from sympy import *
from Tkinter import *
from mpmath import *
import ttk
import sys
MAX_SIZE = 50
lst_x = [0] * MAX_SIZE
lst_y = [0] * MAX_SIZE
lengt = 0
def closePop(pop):
pop.destroy()
def checkInt(strg):
try:
int(strg)
return True
except ValueError:
return False
def differenceM():
Sfunc = StringVar() #Holds the given function by the user
Sh_start = StringVar() #String var for starting h
Sh_end = StringVar() #String var for ending h
Sx = StringVar() #String var for x
#Create new pop up window
top = Toplevel()
top.title("Difference Method")
diffMFrame = ttk.Frame(top, padding="15 20 15 20")
diffMFrame.grid(column=0, row=0)
diffMFrame.columnconfigure(0, weight=1)
diffMFrame.rowconfigure(0, weight=1)
#create label
Label(diffMFrame, text="Function").grid(row=0, column=0, pady=(0,10))
Label(diffMFrame, text="Initial h (step)").grid(row=1, column=0, pady=(0, 10))
Label(diffMFrame, text="Ending h (step)").grid(row=2, column=0, pady=(0, 10))
Label(diffMFrame, text="x").grid(row=3, column=0, pady=(0,10))
#Input field for function
Entry(diffMFrame, textvariable=Sfunc).grid(row=0, column=1, pady=(0, 10))
Entry(diffMFrame, textvariable=Sh_start).grid(row=1, column=1, pady=(0, 10))
Entry(diffMFrame, textvariable=Sh_end).grid(row=2, column=1, pady=(0, 10))
Entry(diffMFrame, textvariable=Sx).grid(row=3, column=1, pady=(0, 10))
buttn_two_frwrd = ttk.Button(diffMFrame, text="Two-Point Forward", command = lambda: twoPointDiff(Sfunc.get(), Sh_start, Sh_end, Sx)).grid(row=4, column=0, pady=(0, 10))
ttk.Button(diffMFrame, text="Centered Difference Forward", command = lambda: centralDiff(Sfunc.get(), Sh_start, Sh_end, Sx)).grid(row=5, column=0, pady=(0, 10))
def twoPointDiff(sf, numh, numh_end, numx):
    #no function input from user
    if len(sf) == 0:
        print 'input needed in the function entry field'
        sys.exit()
    #no initial h input from user
    if len(numh.get()) == 0:
        print 'input needed for initial h'
        sys.exit()
#Check if the input value is an Int
if checkInt(numh.get()):
numH = int(numh.get())
else:
numH = float(numh.get())
    #No x input from the user
    if len(numx.get()) == 0:
        print 'input needed for x'
        sys.exit()
#Check if the input value is an int
if checkInt(numx.get()):
numX = int(numx.get())
else:
numX = float(numx.get())
x, y = symbols("x y")
#sympify converts the string to a sympy expr
try:
#create a sympy expression if input is valid
expr = sympify(sf)
except ValueError:
#exit program if input function is not valid
print 'Inputted function is not valid'
sys.exit()
    #no ending h input from user
if len(numh_end.get()) == 0:
num = numX + numH #f(x + h)
n = expr.evalf(5, subs={x: num})
n = n - ( expr.evalf(5, subs={x: numX}) ) #f(x + h) - f(x)
        fx = n / numH #(f(x + h) - f(x)) / h
aproximation_str = "The approximation: " + str(fx)
#Value of f'(x)
#store the derivative of f(x)
dervFx = expr.diff(x)
#compute f'(x)
dervFx_num = expr.evalf(5, subs={x: numX})
#store the derivative of f'(x)
snd_dervFx = dervFx.diff(x)
apprx_error = fx - dervFx.evalf(5, subs={x: numX})
aprx = "aprroximation error (f'(x) - f''(x)) : " + str(apprx_error)
c = num / 2
predicted_err = (numH * (snd_dervFx.evalf(5, subs={x: c}))) / 2
pred_err = "predicted error (hf''(c)/2) = " + str(predicted_err)
pop = Toplevel()
pop.title("Difference Method")
diffMFrame = ttk.Frame(pop, padding="15 20 15 20")
diffMFrame.grid(column=0, row=0)
diffMFrame.columnconfigure(0, weight=1)
diffMFrame.rowconfigure(0, weight=1)
Label(diffMFrame, text=aproximation_str).grid(row=0, column=0, pady=(0,10))
Label(diffMFrame, text=aprx).grid(row=1, column=0, pady=(0, 10))
Label(diffMFrame, text=pred_err).grid(row=2, column=0, pady=(0,10))
else:
#check if int
if checkInt(numh_end.get()):
numH_end = int(numh_end.get())
else:
            numH_end = float(numh_end.get())
#check if initial h and ending h is the same
if numH_end == numH:
            print 'ERROR: starting h and ending h are the same'
sys.exit()
elif numH < numH_end:
pop = Toplevel()
pop.title("Difference Method")
diffMFrame = ttk.Frame(pop, padding="15 20 15 20")
diffMFrame.grid(column=0, row=0)
diffMFrame.columnconfigure(0, weight=1)
diffMFrame.rowconfigure(0, weight=1)
lb1 = Listbox(diffMFrame) #holds the approximation value
lb2 = Listbox(diffMFrame) #holds the approximation error
lb3 = Listbox(diffMFrame) #holds the predicted approximation error
inc = 0 #used as an incrementer for Listbox
h_itr = numH #initializes h_itr to the starting h so while loop will start with h init
while h_itr != numH_end:
if h_itr > numH_end:
break
number = numX + h_itr #x + h
new_num = expr.evalf(5, subs={x: number}) #f(x + h)
new_num = new_num - (expr.evalf(5, subs={x: numX})) #f(x + h) - f(x)
func_x = new_num / h_itr #f(x + h) - f(x) / h
derv_fx = expr.diff(x) #f'(x)
snd_derv_fx = derv_fx.diff(x) #f''(x)
approx_err = func_x -( derv_fx.evalf(5, subs={x: numX}) ) #f'(x) (using the formula) - f'(x) (using derivative)
c2 = number / 2
predic_err = (h_itr * (snd_derv_fx.evalf(5, subs={x: c2}))) / 2
lb1.insert(inc, func_x)
lb2.insert(inc, approx_err)
lb3.insert(inc, predic_err)
h_itr += numH
num2 = numX + numH_end
new_num2 = expr.evalf(5, subs={x: num2})
new_num2 = new_num2 - (expr.evalf(5, subs={x: numX}))
func_x2 = new_num2 / numH_end
derv_fx2 = expr.diff(x)
snd_derv_fx2 = derv_fx2.diff(x)
approx_err2 = func_x2 -( derv_fx2.evalf(5, subs={x: numX}) )
c_2 = number / 2
predic_err2 = (numH_end * (snd_derv_fx2.evalf(5, subs={x: c_2}))) / 2
lb1.insert(inc, func_x2)
lb2.insert(inc, approx_err2)
lb3.insert(inc, predic_err2)
Label(diffMFrame, text="Approximation Values:").grid(row=0, column=0, pady=(0,10))
Label(diffMFrame, text="Approximation Error:").grid(row=0, column=1, pady=(0,10))
Label(diffMFrame, text="Predicted Approximation Error:").grid(row=0, column=2, pady=(0,10))
lb1.grid(row=1, column=0)
lb2.grid(row=1, column=1)
lb3.grid(row=1,column=2)
else:
#numh > numh_end
print 'ERROR: Initial h is bigger than ending h'
sys.exit()
def centralDiff(sf, numh, numh_end, numx):
    #no function input from user
    if len(sf) == 0:
        print 'input needed in the function entry field'
        sys.exit()
    #no h input from user
    if len(numh.get()) == 0:
        print 'input needed for h'
        sys.exit()
#Check if the input value is an Int
if checkInt(numh.get()):
numH = int(numh.get())
else:
numH = float(numh.get())
    #No x input from the user
    if len(numx.get()) == 0:
        print 'input needed for x'
        sys.exit()
#Check if the input value is an int
if checkInt(numx.get()):
numX = int(numx.get())
else:
numX = float(numx.get())
x, y = symbols("x y")
#sympify converts the string to a sympy expr
try:
#create a sympy expression if input is valid
expr = sympify(sf)
except ValueError:
#exit program if input function is not valid
print 'Inputted function is not valid'
sys.exit()
    #no ending h input from user
if len(numh_end.get()) == 0:
#f(x + h)
num = numX + numH
n = expr.evalf(5, subs={x: num})
#f(x + h) - f(x - h)
n = n - ( expr.evalf(5, subs={x: (numX - numH)}) )
#f(x + h) - f(x - h) / 2h
fx = n / (2*numH)
aproximation_str = "The approximation: " + str(fx)
#Value of f'(x)
#store the derivative of f(x)
dervFx = expr.diff(x)
#compute f'(x)
dervFx_num = expr.evalf(5, subs={x: numX})
#store the derivative of f'(x)
snd_dervFx = dervFx.diff(x)
apprx_error = fx - dervFx.evalf(5, subs={x: numX})
aprx = "aprroximation error (f'(x) - f''(x)) : " + str(apprx_error)
c = num / 2
predicted_err = (numH * (snd_dervFx.evalf(5, subs={x: c}))) / 2
pred_err = "predicted error (hf''(c)/2) = " + str(predicted_err)
pop = Toplevel()
pop.title("Difference Method")
diffMFrame = ttk.Frame(pop, padding="15 20 15 20")
diffMFrame.grid(column=0, row=0)
diffMFrame.columnconfigure(0, weight=1)
diffMFrame.rowconfigure(0, weight=1)
Label(diffMFrame, text=aproximation_str).grid(row=0, column=0, pady=(0,10))
Label(diffMFrame, text=aprx).grid(row=1, column=0, pady=(0, 10))
Label(diffMFrame, text=pred_err).grid(row=2, column=0, pady=(0,10))
else:
#check if int
if checkInt(numh_end.get()):
numH_end = int(numh_end.get())
else:
            numH_end = float(numh_end.get())
#check if initial h and ending h is the same
if numH_end == numH:
            print 'ERROR: starting h and ending h are the same'
sys.exit()
elif numH < numH_end:
pop = Toplevel()
pop.title("Three-point Central Difference Method")
diffMFrame = ttk.Frame(pop, padding="15 20 15 20")
diffMFrame.grid(column=0, row=0)
diffMFrame.columnconfigure(0, weight=1)
diffMFrame.rowconfigure(0, weight=1)
lb1 = Listbox(diffMFrame) #holds the approximation value
lb2 = Listbox(diffMFrame) #holds the approximation error
lb3 = Listbox(diffMFrame) #holds the predicted approximation error
inc = 0 #used as an incrementer for Listbox
h_itr = numH #initializes h_itr to the starting h so while loop will start with h init
while h_itr != numH_end:
if h_itr > numH_end:
break
number = numX + h_itr #x + h
new_num = expr.evalf(5, subs={x: number}) #f(x + h)
new_num = new_num - (expr.evalf(5, subs={x: (numX - h_itr)})) #f(x + h) - f(x - h)
func_x = new_num / (2*h_itr) #f(x + h) - f(x - h) / 2h
derv_fx = expr.diff(x) #f'(x)
snd_derv_fx = derv_fx.diff(x) #f''(x)
approx_err = func_x -( derv_fx.evalf(5, subs={x: numX}) ) #f'(x) (using the formula) - f'(x) (using derivative)
c2 = number / 2
predic_err = (h_itr * (snd_derv_fx.evalf(5, subs={x: c2}))) / 2
lb1.insert(inc, func_x)
lb2.insert(inc, approx_err)
lb3.insert(inc, predic_err)
h_itr += numH
num2 = numX + numH_end
new_num2 = expr.evalf(5, subs={x: num2})
new_num2 = new_num2 - (expr.evalf(5, subs={x: numX}))
func_x2 = new_num2 / numH_end
derv_fx2 = expr.diff(x)
snd_derv_fx2 = derv_fx2.diff(x)
approx_err2 = func_x2 -( derv_fx2.evalf(5, subs={x: numX}) )
c_2 = number / 2
predic_err2 = (numH_end * (snd_derv_fx2.evalf(5, subs={x: c_2}))) / 2
lb1.insert(inc, func_x2)
lb2.insert(inc, approx_err2)
lb3.insert(inc, predic_err2)
Label(diffMFrame, text="Approximation Values:").grid(row=0, column=0, pady=(0,10))
Label(diffMFrame, text="Approximation Error:").grid(row=0, column=1, pady=(0,10))
Label(diffMFrame, text="Predicted Approximation Error:").grid(row=0, column=2, pady=(0,10))
lb1.grid(row=1, column=0)
lb2.grid(row=1, column=1)
lb3.grid(row=1,column=2)
else:
#numh > numh_end
print 'ERROR: Initial h is bigger than ending h'
sys.exit()
def ADmultip(num ,x1, y1, x2, y2, exp):
if num == 1:
#Variable with just exponent, no constant: x**2
num_x = lst_x[0]
num_y = lst_y[0]
for x in range(0, exp-1):
#(u, u')*(v,v')=(u*v, u'v + uv')
num_y = (num_y * lst_x[0]) + (num_x * lst_y[0])
num_x = num_x * lst_x[0]
print 'num y = ', num_y
print 'num x = ', num_x
lst_y[y1] = num_y
lst_x[x1] = num_x
elif num == 2:
num_x = lst_x[0]
num_y = lst_y[0]
for x in range(0, exp):
#(u, u')*(v,v')=(u*v, u'v + uv')
num_y = (num_y * lst_x[y1]) + (num_x * lst_y[y1])
num_x = num_x * lst_x[x1]
print 'num y = ', num_y
print 'num x = ', num_x
lst_y[y1] = num_y
lst_x[x1] = num_x
else:
num_x = lst_x[0]
num_y = lst_y[0]
for x in range(0, exp-1):
#(u, u')*(v,v')=(u*v, u'v + uv')
num_y = (num_y * lst_x[0]) + (num_x * lst_y[0])
num_x = num_x * lst_x[0]
print 'num y = ', num_y
print 'num x = ', num_x
lst_y[y1] = num_y
lst_x[x1] = num_x
lst_y[y1-1] = (lst_y[y1-1] * lst_x[x1]) + (lst_x[x1-1] * lst_y[y1])
lst_x[x1-1] = lst_x[x1-1] * lst_x[x1]
lst_y[y1] = 0
lst_x[x1] = 0
def autoD(sf, x1):
x, y = symbols("x y")
#sympify converts the string to a sympy expr
try:
#create a sympy expression if input is valid
express = sympify(sf)
except ValueError:
#exit program if input function is not valid
print 'Inputted function is not valid'
sys.exit()
    #no function input from user
    if len(sf) == 0:
        print 'input needed in the function entry field'
        sys.exit()
    #No x input from the user
    if len(x1.get()) == 0:
        print 'input needed for x'
        sys.exit()
#Check if the input value is an int
if checkInt(x1.get()):
X = int(x1.get())
else:
X = float(x1.get())
expected_x = express.evalf(5, subs={x: X})
deriv_fx = express.diff(x)
expected_y = deriv_fx.evalf(5, subs={x: X})
exp_str = "Expected pair is (" + str(expected_x) + ", " + str(expected_y) + ")"
lengt = 0
lst_x[lengt] = int(X)
lst_y[lengt] = 1
lengt += 1
incr = 0
while(incr != len(sf) or incr > len(sf)):
# print 'size of sf: ', len(sf), ' size of incr: ', incr
if incr == len(sf) or incr > len(sf):
break
if sf[incr] == '+' or sf[incr] == '-':
print '3'
lengt += 1
incr += 1
if sf[incr] == '1' or sf[incr] == '2' or sf[incr] == '3' or sf[incr] == '4' or sf[incr] == '5' or sf[incr] == '6' or sf[incr] == '7' or sf[incr] == '8' or sf[incr] == '9' or sf[incr] == '0':
#Scenario: just a constant 1, ... 9
print '1'
if sf[incr + 1] == '+' or sf[incr + 1] == '-':
lst_x[lengt] = int(sf[incr])
lst_y[lengt] = 0
lengt += 1
incr+=1 #move next operator
if incr == len(sf) or incr > len(sf):
break
else:
incr+=1 #move next operator
#Scenario: constant * x -> 9*x
elif sf[incr + 1] == '*' and sf[incr+2] == 'x':
print '2'
lst_x[lengt] = int(sf[incr])
lst_y[lengt] = 0
print
incr+=1 #move to '*'
incr+=1 #move to 'x'
incr+=1
if incr == len(sf) or incr > len(sf):
ADmultip(2 ,lengt, lengt, 0, 0, 1)
break
elif sf[incr] != '*':
ADmultip(2 ,lengt, lengt, 0, 0, 1)
lengt += 1
else:
lengt += 1
#Scenario: 9*x**2
if sf[incr + 1] == '*':
incr+=1 #move to '*'
print sf[incr + 1]
print lengt
ADmultip(3, lengt, lengt, 0, 0, int(sf[incr + 1]))
incr+=1 #move to exponent
if incr == len(sf) or incr > len(sf):
break
else:
incr += 1 #move next operator
#Scenario: 9*x
elif sf[incr + 1] == '+' or sf[incr + 1] == '-':
ADmultip(2 ,lengt, lengt, 0, 0, 1)
incr+=1
else:
print 'ERROR: invalid format. Please read the instructions for Automatic Differentiation'
#check if the first character is a variable
elif sf[incr] == 'x' or sf[incr] == 'X':
if sf[incr + 1] == '*' and sf[incr + 2] == '*':
ADmultip(1, lengt, lengt, 0, 0, int(sf[incr + 3]))
lengt += 1
incr+=1 #move to '*'
incr+=1 #move to '*'
incr+=1 #move to exp
incr+=1
#its just a variable x without constant or exponent
elif sf[incr + 1] != '*':
lst_x[lengt] = lst_x[0]
lst_y[lengt] = lst_y[0]
else:
print 'ERROR: Function format is not valid HERE'
sys.exit()
for x in range(0, lengt):
print 'x[', x, '] = ' ,lst_x[x]
print 'y[', x, '] = ' ,lst_y[x]
if lengt >= 2:
while(lengt != 1):
lst_x[lengt - 1] = lst_x[lengt - 1] + lst_x[lengt]
lst_y[lengt - 1] = lst_y[lengt - 1] + lst_y[lengt]
lengt -= 1
pop = Toplevel()
pop.title("Automatic Differentiation")
diffMFrame = ttk.Frame(pop, padding="15 20 15 20")
diffMFrame.grid(column=0, row=0)
diffMFrame.columnconfigure(0, weight=1)
diffMFrame.rowconfigure(0, weight=1)
output_AD = "Automatic Differentiation output: (" + str(lst_x[1]) + ", " + str(lst_y[1]) + ")"
Label(diffMFrame, text=exp_str).grid(row=0, column=0, pady=(0,10))
Label(diffMFrame, text=output_AD).grid(row=1, column=0, pady=(0,10))
def autoDiffM():
Sfunc = StringVar()
Sx = StringVar()
#Create new pop up window
top = Toplevel()
top.title("Difference Method")
diffMFrame = ttk.Frame(top, padding="15 20 15 20")
diffMFrame.grid(column=0, row=0)
diffMFrame.columnconfigure(0, weight=1)
diffMFrame.rowconfigure(0, weight=1)
#create label
Label(diffMFrame, text="Function").grid(row=0, column=0, pady=(0,10))
Label(diffMFrame, text="x").grid(row=1, column=0, pady=(0,10))
#Input field for function
Entry(diffMFrame, textvariable=Sfunc).grid(row=0, column=1, pady=(0, 10))
Entry(diffMFrame, textvariable=Sx).grid(row=1, column=1, pady=(0, 10))
buttn_two_frwrd = ttk.Button(diffMFrame, text="Forward Automatic Differentiation", command = lambda: autoD(Sfunc.get(), Sx)).grid(row=2, column=0, pady=(0, 10))
root = Tk()
root.title("Differentiation")
mainframe = ttk.Frame(root, padding="15 20 15 20")
mainframe.grid(column=0, row=0)
mainframe.columnconfigure(0, weight=1)
mainframe.rowconfigure(0, weight=1)
#Buttons
ttk.Button (mainframe, text="Difference Method/Extrapolation", command = differenceM).grid(row=0, pady=(0, 10))
ttk.Button (mainframe, text="Automatic Differentiation", command = autoDiffM).grid(row=1, pady=(0, 0))
root.mainloop()
|
[
"noreply@github.com"
] |
billadona.noreply@github.com
|
f051ec7fbe8099906730789c40ac2e996ce061eb
|
587dbdf730b6cc3e693efc5dca5d83d1dd35ee1a
|
/leetcode/LCP173+/5367.py
|
c7ba9cc17df3d16b763f7abcf1b9b8e3361d5e9a
|
[] |
no_license
|
Rivarrl/leetcode_python
|
8db2a15646d68e4d84ab263d8c3b6e38d8e3ea99
|
dbe8eb449e5b112a71bc1cd4eabfd138304de4a3
|
refs/heads/master
| 2021-06-17T15:21:28.321280
| 2021-03-11T07:28:19
| 2021-03-11T07:28:19
| 179,452,345
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,208
|
py
|
# -*- coding: utf-8 -*-
# ======================================
# @File : 5367.py
# @Time : 2020/3/22 10:59
# @Author : Rivarrl
# ======================================
from algorithm_utils import *
class Solution:
"""
[5367. 最长快乐前缀](https://leetcode-cn.com/problems/longest-happy-prefix/)
"""
@timeit
def longestPrefix(self, s: str) -> str:
res = ''
pre = ''
suf = ''
n = len(s)
for i in range(n-1):
pre += s[i]
j = n - 1 - i
suf = s[j] + suf
if suf == pre:
res = pre
return res
@timeit
def longestPrefix2(self, s: str) -> str:
n = len(s)
nxt = [0] * (n + 1)
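        # nxt[i] = length of the longest proper prefix of s[:i] that is also
        # a suffix of s[:i] (the KMP failure function); the answer is s[:nxt[n]]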
i, j = 0, -1
nxt[0] = -1
while i < n:
if j == -1 or s[j] == s[i]:
nxt[i+1] = j + 1
i += 1
j += 1
else:
j = nxt[j]
return s[:nxt[n]]
if __name__ == '__main__':
a = Solution()
a.longestPrefix2(s = "level")
a.longestPrefix2(s = "ababab")
a.longestPrefix2(s = "leetcodeleet")
a.longestPrefix2(s = "a")
a.longestPrefix2("aaaaa")
|
[
"1049793871@qq.com"
] |
1049793871@qq.com
|
4fd2d7568274d6b202d96aa416e1ee95e56663f3
|
8a1ab23b056886965fec2a3e4064c5ed55e22bfb
|
/домашка/CRDN_PRO_Tests/PASS1/test08_my.py
|
120650b1c4546e1dd5ed008ff40650dc94eabce8
|
[] |
no_license
|
django-group/python-itvdn
|
5d8a59f06618f993d20d2f60374c36aae4ae8ab0
|
62ef87dfac947ed4bf1f5b6b890461f56814d893
|
refs/heads/master
| 2021-01-07T11:16:37.996524
| 2020-02-14T13:53:47
| 2020-02-14T13:53:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 813
|
py
|
# Create a function find_palindromes(numbers) that takes a list of non-negative
# integers as input and returns a list of the palindrome numbers found in the
# input list.
#
# The output list must be sorted in ascending order (smaller numbers first)
# and contain no duplicates.
def is_palindrome(n):
sn = str(n)
return sn == sn[::-1]
def find_palindromes(numbers):
x_set = set([])
for x in numbers:
if is_palindrome(x):
            x_set.add(x)
res_lst = list(x_set)
res_lst.sort()
return res_lst
print(find_palindromes([124, 121, 444, 21, 33, 44322, 956659, 9992, 33, 56743, 75357]))
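# Equivalent set-comprehension one-liner (an illustrative alternative, not
# part of the original solution):
# def find_palindromes(numbers):
#     return sorted({x for x in numbers if is_palindrome(x)})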
|
[
"osm@ukr.net"
] |
osm@ukr.net
|
7636228f1a02a3085f298f32c34df79c5bf70ad4
|
acc175926a0773b3cb68f2212d055a521fab68ed
|
/main.py
|
177cc05b2ff9b7d3c1ff4a792f9e481e5aabee9a
|
[] |
no_license
|
pkshingleton/rogue
|
75772131c1a8ac02163921cb273bd6e33eeaa832
|
d1a42b50ccfff8d977c2cee31bbf9a7ef539f403
|
refs/heads/master
| 2022-12-06T11:37:30.055442
| 2020-08-24T18:53:39
| 2020-08-24T18:53:39
| 285,871,928
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,588
|
py
|
'''
The engine instantiates the window/console and main game loop.
Drawing/rendering occurs by updating the state of the console, and then printing it to the screen.
'''
#_______________________________________________________________________// MODULES
import tcod
import copy
from engine import Engine
import entity_factories
from procgen import (generate_static_dungeon, generate_random_dungeon)
#_______________________________________________________________________// FUNCTION
def main() -> None:
# Starting / default values
screen_width = 80
screen_height = 50
map_width = 80
map_height = 45 # -5 for a space between bottom of map and screen (for text area)
room_max_size = 10 # Largest tile-size a room can be
room_min_size = 6 # Smallest tile-size a room will be
max_rooms = 30 # Total rooms that can occupy a single map
max_enemies = 2 # The most monsters/enemies that can appear in a single room
# Use the root-level included font sprite sheet for characters
tileset = tcod.tileset.load_tilesheet("dejavu10x10_gs_tc.png", 32, 8, tcod.tileset.CHARMAP_TCOD)
# Instance of the 'player' entity
player = copy.deepcopy(entity_factories.player)
# Instantiate the Engine class
engine = Engine(player = player)
# Auto-generated map
engine.game_map = generate_random_dungeon(
max_rooms = max_rooms,
room_min_size = room_min_size,
room_max_size = room_max_size,
map_width = map_width,
map_height = map_height,
max_enemies = max_enemies,
engine = engine
)
# Recalculates tile visibility around the player ('explored', 'visible', or 'SHROUD')
engine.update_fov()
# Terminal/canvas: main state that gets continually updated and re-drawn.
with tcod.context.new_terminal(
screen_width,
screen_height,
tileset = tileset,
title = "Rogue",
vsync = True,
) as context:
# (Numpy array default is [y/x] - 'F' reverses the read order to [x/y] which is more conventional)
root_console = tcod.Console(screen_width, screen_height, order="F")
'''
>>> MAIN - GAME LOOP
'''
while True:
# Get context for the console, draw it to the screen, and clear it
engine.render(console=root_console, context=context)
# Await user input/event and store it
            engine.event_handler.handle_events()


#_______________________________________________________________________// ENTRY POINT
# Start the game when this file is executed directly (main() is defined
# above but otherwise never invoked)
if __name__ == "__main__":
    main()
|
[
"phillip.shingleton@turkeyhill.com"
] |
phillip.shingleton@turkeyhill.com
|
45147d9806372db752840b1eb3d75f30f67a93e2
|
e2686fe17c4953572c767f60d79204273ac8aebe
|
/bot_test/bot4 (v301220).py
|
78e03163f6e750e460dd1dc7dae89fdbdc079e42
|
[] |
no_license
|
darkmatter999/robotics
|
a481d60a7ff7d3d8b820c85aea37a7bf4d2f2e57
|
6dda932155f8cade991eed08b22fca1fd0f0bca7
|
refs/heads/master
| 2023-03-16T06:24:50.384869
| 2021-01-29T18:56:09
| 2021-01-29T18:56:09
| 263,405,799
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,485
|
py
|
#************************************************************************************************************
#***************************AIML/DEEP LEARNING HYBRID VOICE CHATBOT V0.01************************************
#************************************************************************************************************
#This code has been tested on Ubuntu 20.04 and Windows 10.
#In Windows, it apparently does not work with the venv because it does not accept the pre-trained DeepSpeech model
#Furthermore, to get pyaudio to work in the Win10 environment, it is necessary to move the libdeepspeech.so file from
#lib/ to root/
#As of now, pyttsx3 does not work in Python 3.8. It has been successfully tested in Python 3.7.9, though.
#Future challenges:
#implement a true audio stream (i.e. getting rid of saving the recording first and then post-process it)
#implement AIML 2.0 instead of 1.0
#post-process conversational elements
import aiml
import deepspeech #Mozilla Speech Recognition Framework
import wave #Module for processing .wav audio
import numpy as np
import pyaudio #Audio processing library
import time #for (optional) timing
import pyttsx3 #Library for TTS
from scipy.io import wavfile
import struct
import os
import sys
import math
#initialize two empty lists for keeping track of messages and responses
response_list = []
message_list = []
#Since DeepSpeech audio recording streaming doesn't currently work, below is a naive 'fake audio streamer'.
#The function records an audio snippet via PyAudio and saves it to 'output.wav' for further processing via
#DeepSpeech and AIML
def record_question():
Threshold = 30
SHORT_NORMALIZE = (1.0/32768.0)
chunk = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
swidth = 2
TIMEOUT_LENGTH = 2
#f_name_directory = r'C:\Users\oliver'
class Recorder:
@staticmethod
def rms(frame):
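            # root-mean-square amplitude of one 16-bit frame, normalized to
            # [-1, 1] via SHORT_NORMALIZE and scaled by 1000 so it can be
            # compared against Threshold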
count = len(frame) / swidth
format = "%dh" % (count)
shorts = struct.unpack(format, frame)
sum_squares = 0.0
for sample in shorts:
n = sample * SHORT_NORMALIZE
sum_squares += n * n
rms = math.pow(sum_squares / count, 0.5)
return rms * 1000
def __init__(self):
self.p = pyaudio.PyAudio()
self.stream = self.p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
output=True,
frames_per_buffer=chunk)
def record(self):
#print('Noise detected, recording beginning')
rec = []
current = time.time()
end = time.time() + TIMEOUT_LENGTH
while current <= end:
data = self.stream.read(chunk)
if self.rms(data) >= Threshold: end = time.time() + TIMEOUT_LENGTH
current = time.time()
rec.append(data)
self.write(b''.join(rec))
def write(self, recording):
#n_files = len(os.listdir(f_name_directory))
#filename = os.path.join(f_name_directory, '{}.wav'.format(n_files))
filename = 'output.wav'
wf = wave.open(filename, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(self.p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(recording)
wf.close()
#print('Written to file: {}'.format(filename))
#print('Returning to listening')
def listen(self):
print('Listening beginning')
listen = True
while listen == True:
input = self.stream.read(chunk)
rms_val = self.rms(input)
if rms_val > Threshold:
self.record()
listen = False
a = Recorder()
a.listen()
def record_wakeup():
chunk = 1024 # Record in chunks of 1024 samples
sample_format = pyaudio.paInt16 # 16 bits per sample
channels = 1 #IMPORTANT: Don't play with this, channels has to be set to 1
fs = 16000 # Record at 16000 samples per second, has to be compatible with DeepSpeech
seconds = 2 #length of recording, how can this be made more flexible?
filename = "wakeup.wav"
p = pyaudio.PyAudio() # Create an interface to PortAudio
#print('Waiting for initiation')
stream = p.open(format=sample_format,
channels=channels,
rate=fs,
frames_per_buffer=chunk,
input=True)
frames = [] # Initialize array to store frames
# Store data in chunks for n seconds
for i in range(0, int(fs / chunk * seconds)):
data = stream.read(chunk)
frames.append(data)
# Stop and close the stream
stream.stop_stream()
stream.close()
# Terminate the PortAudio interface
p.terminate()
#print('Finished recording')
# Save the recorded data as a WAV file
wf = wave.open(filename, 'wb')
wf.setnchannels(channels)
wf.setsampwidth(p.get_sample_size(sample_format))
wf.setframerate(fs)
wf.writeframes(b''.join(frames))
wf.close()
#time.sleep(5) #optional, for better sync. But proven to be unnecessary at this point.
#do STT with DeepSpeech and the (very time-intensive) main trained model from Mozilla
def audio(fn):
model_filepath = 'deepspeech-0.9.3-models.tflite'
model = deepspeech.Model(model_filepath)
#scorer_filepath = 'deepspeech-0.9.3-models.scorer'
#model.enableExternalScorer(scorer_filepath)
#lm_alpha = 0.75
#lm_beta = 1.85
#model.setScorerAlphaBeta(lm_alpha, lm_beta)
filename = fn
w = wave.open(filename, 'r')
rate = w.getframerate()
frames = w.getnframes()
buffer = w.readframes(frames)
data16 = np.frombuffer(buffer, dtype=np.int16)
text = model.stt(data16)
return (text)
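#Sketch of the 'true audio stream' goal from the header notes, using the
#streaming API that DeepSpeech 0.9 also exposes (illustrative only; the
#chunk source and its framing are assumptions, not tested here)
def audio_stream(model, chunks):
    ds_stream = model.createStream()
    for chunk in chunks: #chunks of np.int16 samples, e.g. read via PyAudio
        ds_stream.feedAudioContent(chunk)
    return ds_stream.finishStream() #decode and return the final transcript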
# Create the kernel and learn AIML files
kernel = aiml.Kernel()
kernel.learn("std-startup.xml")
kernel.respond("load aiml b")
#We toggle between 'waiting for initiation' (i.e. hotword utterance) and 'talk mode'
#In practice, the system should work more 'Alexa-like', i.e. 'always-on' and always listening for the hotword before the actual
#conversational content or question.
first_answers = ['yes', 'how can i help you', 'here for you', 'ready', 'at your service']
def initiation():
engine = pyttsx3.init()
wakeup_wait = True
while wakeup_wait:
record_wakeup() #wait for 'talk' hotword
recorded = 'wakeup.wav'
data = wavfile.read(recorded)
if data[1].max() > 2000:
message = audio('wakeup.wav')
print (message)
if message == "hello":
#make a 'silent' response so that certain AIML attributes can be loaded
#(see basic_chat.aiml script)
bot_response = kernel.respond(message)
time.sleep(1)
engine.say(first_answers[np.random.randint(5)])
engine.runAndWait()
wakeup_wait = False
conversation() #if hotword is heard, switch to open conversation mode
def conversation():
while True:
record_question()
message = audio('output.wav')
#message = message.split(' ')[:1]
#message = ' '.join(message)
print (message)
if message == "hello exit":
initiation()
elif message == "hello save":
kernel.saveBrain("bot_brain.brn")
elif message == "hello type":
message = input("Enter your message to the bot: ")
bot_response = kernel.respond(message)
print (bot_response)
elif message == "":
#engine = pyttsx3.init()
#engine.say('You said nothing')
#engine.runAndWait()
pass
else:
bot_response = kernel.respond(message)
#message_list.append(message)
#response_list.append(bot_response)
# Do something with bot_response
#print (bot_response)
#time.sleep(3)
            engine = pyttsx3.init() #output the bot's response via pyttsx3 (TTS)
engine.say(bot_response)
engine.runAndWait()
initiation()
#conversation()
#good morning 1 1
#can you go with me 1 1
#happy birthday 0 0
#what can you teach me 1 1
#you are bad 1 0
#that is my name 1 1
#seriously 1 1
#show me a picture of 0 0
#teach me how to read 1 1
#do you smoke 1 0
#14/20 (70%) points reached in field test Oliver/Nicolina on 30/12/2020
#The first number on the right of the test expression refers to whether the system understood what Oliver said:
#1 for understood, 0 for not understood. Correspondingly, the second number refers to Nicolina's input.
#Here, we tried with multiple AIML <think> tags in one template, so predefining some attributes.
#On initiation (hotword utterance) the AIML is handled and these attributes are loaded into the system.
#Ideas:
#Overcome latency: 'Eye contact' as threshold for speaking
#Overcome signal strength/threshold issues: implement flexible ambient noise meter
#Overcome hard-coded recording duration: measure microphone level also while recording and shut
#recording off upon silence/signal strength falling below threshold
|
[
"oliverhaack@protonmail.com"
] |
oliverhaack@protonmail.com
|
85ad4c37a1aba7e2317fea6e6bf4688ae944962a
|
947bcfe8d68dc3f7ead38b58618316d67bbf998b
|
/assertpy/helpers.py
|
e5099a1a555f50a5c24e8c575cbf7653fb5d4260
|
[
"BSD-3-Clause"
] |
permissive
|
ActivisionGameScience/assertpy
|
bc37f3309468d1d2bfdb620aeb01ebd681ee740f
|
c0989de171bcf3e21dbad9415ff9d3b8f5fe78fc
|
refs/heads/master
| 2020-04-12T06:26:04.934779
| 2019-10-30T17:50:10
| 2019-10-30T17:50:10
| 222,271,895
| 21
| 1
|
BSD-3-Clause
| 2019-11-17T15:47:15
| 2019-11-17T15:47:15
| null |
UTF-8
|
Python
| false
| false
| 10,621
|
py
|
# Copyright (c) 2015-2019, Activision Publishing, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import numbers
import datetime
import collections
if sys.version_info[0] == 3:
Iterable = collections.abc.Iterable
else:
Iterable = collections.Iterable
class HelpersMixin(object):
"""Helpers mixin."""
def _fmt_items(self, i):
if len(i) == 0:
return '<>'
elif len(i) == 1 and hasattr(i, '__getitem__'):
return '<%s>' % i[0]
else:
return '<%s>' % str(i).lstrip('([').rstrip(',])')
def _fmt_args_kwargs(self, *some_args, **some_kwargs):
"""Helper to convert the given args and kwargs into a string."""
if some_args:
out_args = str(some_args).lstrip('(').rstrip(',)')
if some_kwargs:
out_kwargs = ', '.join([str(i).lstrip('(').rstrip(')').replace(', ',': ') for i in [
(k,some_kwargs[k]) for k in sorted(some_kwargs.keys())]])
if some_args and some_kwargs:
return out_args + ', ' + out_kwargs
elif some_args:
return out_args
elif some_kwargs:
return out_kwargs
else:
return ''
def _validate_between_args(self, val_type, low, high):
low_type = type(low)
high_type = type(high)
if val_type in self.NUMERIC_NON_COMPAREABLE:
raise TypeError('ordering is not defined for type <%s>' % val_type.__name__)
if val_type in self.NUMERIC_COMPAREABLE:
if low_type is not val_type:
raise TypeError('given low arg must be <%s>, but was <%s>' % (val_type.__name__, low_type.__name__))
if high_type is not val_type:
                raise TypeError('given high arg must be <%s>, but was <%s>' % (val_type.__name__, high_type.__name__))
elif isinstance(self.val, numbers.Number):
if isinstance(low, numbers.Number) is False:
raise TypeError('given low arg must be numeric, but was <%s>' % low_type.__name__)
if isinstance(high, numbers.Number) is False:
raise TypeError('given high arg must be numeric, but was <%s>' % high_type.__name__)
else:
raise TypeError('ordering is not defined for type <%s>' % val_type.__name__)
if low > high:
raise ValueError('given low arg must be less than given high arg')
def _validate_close_to_args(self, val, other, tolerance):
if type(val) is complex or type(other) is complex or type(tolerance) is complex:
raise TypeError('ordering is not defined for complex numbers')
if isinstance(val, numbers.Number) is False and type(val) is not datetime.datetime:
raise TypeError('val is not numeric or datetime')
if type(val) is datetime.datetime:
if type(other) is not datetime.datetime:
raise TypeError('given arg must be datetime, but was <%s>' % type(other).__name__)
if type(tolerance) is not datetime.timedelta:
raise TypeError('given tolerance arg must be timedelta, but was <%s>' % type(tolerance).__name__)
else:
if isinstance(other, numbers.Number) is False:
raise TypeError('given arg must be numeric')
if isinstance(tolerance, numbers.Number) is False:
raise TypeError('given tolerance arg must be numeric')
if tolerance < 0:
raise ValueError('given tolerance arg must be positive')
def _check_dict_like(self, d, check_keys=True, check_values=True, check_getitem=True, name='val', return_as_bool=False):
if not isinstance(d, Iterable):
if return_as_bool:
return False
else:
raise TypeError('%s <%s> is not dict-like: not iterable' % (name, type(d).__name__))
if check_keys:
if not hasattr(d, 'keys') or not callable(getattr(d, 'keys')):
if return_as_bool:
return False
else:
raise TypeError('%s <%s> is not dict-like: missing keys()' % (name, type(d).__name__))
if check_values:
if not hasattr(d, 'values') or not callable(getattr(d, 'values')):
if return_as_bool:
return False
else:
raise TypeError('%s <%s> is not dict-like: missing values()' % (name, type(d).__name__))
if check_getitem:
if not hasattr(d, '__getitem__'):
if return_as_bool:
return False
else:
raise TypeError('%s <%s> is not dict-like: missing [] accessor' % (name, type(d).__name__))
if return_as_bool:
return True
def _check_iterable(self, l, check_getitem=True, name='val'):
if not isinstance(l, Iterable):
raise TypeError('%s <%s> is not iterable' % (name, type(l).__name__))
if check_getitem:
if not hasattr(l, '__getitem__'):
raise TypeError('%s <%s> does not have [] accessor' % (name, type(l).__name__))
def _dict_not_equal(self, val, other, ignore=None, include=None):
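        # Returns True when the two dict-likes differ. `ignore` drops keys
        # from the comparison, `include` restricts it to the listed keys, and
        # nested dicts recurse with one tuple level stripped per call.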
if ignore or include:
ignores = self._dict_ignore(ignore)
includes = self._dict_include(include)
# guarantee include keys are in val
if include:
missing = []
for i in includes:
if i not in val:
missing.append(i)
if missing:
self._err('Expected <%s> to include key%s %s, but did not include key%s %s.' % (
val,
'' if len(includes) == 1 else 's',
self._fmt_items(['.'.join([str(s) for s in i]) if type(i) is tuple else i for i in includes]),
'' if len(missing) == 1 else 's',
self._fmt_items(missing)))
if ignore and include:
k1 = set([k for k in val if k not in ignores and k in includes])
elif ignore:
k1 = set([k for k in val if k not in ignores])
else: # include
k1 = set([k for k in val if k in includes])
if ignore and include:
k2 = set([k for k in other if k not in ignores and k in includes])
elif ignore:
k2 = set([k for k in other if k not in ignores])
else: # include
k2 = set([k for k in other if k in includes])
if k1 != k2:
return True
else:
for k in k1:
if self._check_dict_like(val[k], check_values=False, return_as_bool=True) and self._check_dict_like(other[k], check_values=False, return_as_bool=True):
return self._dict_not_equal(val[k], other[k],
ignore=[i[1:] for i in ignores if type(i) is tuple and i[0] == k] if ignore else None,
include=[i[1:] for i in self._dict_ignore(include) if type(i) is tuple and i[0] == k] if include else None)
elif val[k] != other[k]:
return True
return False
else:
return val != other
def _dict_ignore(self, ignore):
return [i[0] if type(i) is tuple and len(i) == 1 else i \
for i in (ignore if type(ignore) is list else [ignore])]
def _dict_include(self, include):
return [i[0] if type(i) is tuple else i \
for i in (include if type(include) is list else [include])]
def _dict_err(self, val, other, ignore=None, include=None):
def _dict_repr(d, other):
out = ''
ellip = False
for k,v in d.items():
if k not in other:
out += '%s%s: %s' % (', ' if len(out) > 0 else '', repr(k), repr(v))
elif v != other[k]:
out += '%s%s: %s' % (', ' if len(out) > 0 else '', repr(k),
_dict_repr(v, other[k]) if self._check_dict_like(v, check_values=False, return_as_bool=True) and self._check_dict_like(other[k], check_values=False, return_as_bool=True) else repr(v)
)
else:
ellip = True
return '{%s%s}' % ('..' if ellip and len(out) == 0 else '.., ' if ellip else '', out)
if ignore:
ignores = self._dict_ignore(ignore)
ignore_err = ' ignoring keys %s' % self._fmt_items(['.'.join([str(s) for s in i]) if type(i) is tuple else i for i in ignores])
if include:
includes = self._dict_ignore(include)
include_err = ' including keys %s' % self._fmt_items(['.'.join([str(s) for s in i]) if type(i) is tuple else i for i in includes])
self._err('Expected <%s> to be equal to <%s>%s%s, but was not.' % (
_dict_repr(val, other),
_dict_repr(other, val),
ignore_err if ignore else '',
include_err if include else ''
))
|
[
"justin@saturnboy.com"
] |
justin@saturnboy.com
|
91e5081d064f663a4c960ec5453f6aaa8847f30e
|
837b7f01e602f2c1480a9140c28fd2c58d746959
|
/setup.py
|
6aebce5b1d71a8f05edb33f22b57835301a22f2c
|
[
"Apache-2.0"
] |
permissive
|
markllama/powerusb
|
742b1f491712c507813f534cb74d7fca8ebac8f6
|
d33f0b5bfd2c175f23a3cd5cf41f6a09053b1ffb
|
refs/heads/master
| 2022-11-29T06:42:24.858781
| 2022-11-16T22:05:12
| 2022-11-16T22:05:12
| 7,856,166
| 16
| 7
| null | 2022-11-16T22:05:13
| 2013-01-27T18:36:01
|
Python
|
UTF-8
|
Python
| false
| false
| 672
|
py
|
from distutils.core import setup
setup(name="powerusb",
version="2.0.2",
description="Control PowerUSB power strips",
long_description="""
Library and CLI tools to Control PowerUSB power strips.
This version only controls Basic power strips. Watchdog, IO and Smart
features TBD.
""",
author="Mark Lamourine",
author_email="markllama@gmail.com",
license="Apache License 2.0",
url="http://github.com/markllama/powerusb",
packages=["powerusb"],
# install_requires=[
# "lxml",
# "hidapi"
# ],
scripts=["bin/powerusb"],
data_files=[("/lib/udev/rules.d", ["99-powerusb.rules"])]
)
|
[
"markllama@gmail.com"
] |
markllama@gmail.com
|
8bd065e2dff1acdf1b0b1bcbbb57fd76339f6f01
|
12f664c45e338772832ce8a65213f12ee59451f6
|
/build/vicos_ros/communication/speech_proxy/catkin_generated/pkg.develspace.context.pc.py
|
f9de8a5c0ad944da178a615a09b391c63c632bfb
|
[] |
no_license
|
Jose-Pedro/NAO-KINECT-ROS
|
89eefd3956a2d739496fb4e7199b7e523f47a2ec
|
efb01e20983788e62baac26d2aab7949729609b6
|
refs/heads/master
| 2021-01-19T04:13:42.647533
| 2016-06-22T19:46:45
| 2016-06-22T19:46:45
| 61,549,669
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "speech_proxy"
PROJECT_SPACE_DIR = "/home/jose/medeiros_ws/devel"
PROJECT_VERSION = "0.1.0"
|
[
"zepedro.medeiros@gmail.com"
] |
zepedro.medeiros@gmail.com
|
c71688ea3f918d399454150faec6a959e5a109ab
|
f737a6ddb4e498d197fe3a9f4f89fdfa865b81d8
|
/tests/test_cardinality_checks.py
|
19ebfe34d2a73b83825ca0781cccccc80e59fa01
|
[] |
no_license
|
joshsalvi/healthcareai-py
|
c9c227ae97cf6e432f11d0bb7fb3ff292e4f3865
|
1b938f34dbc2d9abab3268499986b2d043c3df6e
|
refs/heads/master
| 2022-12-18T07:30:33.199562
| 2017-10-14T02:45:48
| 2017-10-14T02:45:48
| 106,892,091
| 0
| 0
| null | 2020-09-25T02:44:19
| 2017-10-14T02:40:17
|
Python
|
UTF-8
|
Python
| false
| false
| 4,350
|
py
|
"""Test the cardinality checks module."""
import unittest
import pandas as pd
import healthcareai.common.cardinality_checks as cardinality
from healthcareai.common.healthcareai_error import HealthcareAIError
class TestCalculateCardinality(unittest.TestCase):
"""Test `calculate_cardinality()` method."""
def setUp(self):
self.df = pd.DataFrame({
'id': [1, 2, 3, 4],
'category': ['a', 'b', 'c', 'a'],
'gender': ['F', 'M', 'F', 'M'],
'age': [1, 1, 2, 3],
'boring': [1, 1, 1, 1]
})
def test_returns_dataframe(self):
self.assertIsInstance(
cardinality.calculate_cardinality(self.df),
pd.DataFrame)
def test_calculates_cardinality(self):
expected = pd.DataFrame({
'Feature Name': ['id', 'age', 'category', 'gender', 'boring'],
'unique_value_count': [4, 3, 3, 2, 1],
'unique_ratio': [1, 0.75, 0.75, 0.5, 0.25]
})
result = cardinality.calculate_cardinality(self.df)
for column in expected:
self.assertEqual(result[column].all(), expected[column].all())
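        # note: Series.all() collapses each column to a single boolean, so
        # this is a loose check; pandas.testing.assert_frame_equal would
        # compare the frames element-wise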
class TestCardinalityThreshold(unittest.TestCase):
"""Test `cardinality_threshold_filter()` method."""
def setUp(self):
self.df = pd.DataFrame({
'id': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'category': ['a', 'b', 'c', 'd', 'a', 'b', 'c', 'd', 'a', 'b'],
'gender': ['F', 'M', 'F', 'M', 'F', 'M', 'F', 'M', 'F', 'M'],
'age': [1, 2, 3, 1, 2, 3, 1, 2, 3, 1]
})
self.cardinality = cardinality.calculate_cardinality(self.df)
def test_returns_dataframe(self):
self.assertIsInstance(
cardinality.cardinality_threshold_filter(self.cardinality,
'unique_ratio'),
pd.DataFrame)
def test_returns_with_default_threshold(self):
expected = pd.DataFrame({
'Feature Name': ['id', 'category', 'age'],
'unique_value_count': [10, 4, 3],
'unique_ratio': [1, 4 / 10, 3 / 10]
})
result = cardinality.cardinality_threshold_filter(
self.cardinality,
'unique_ratio')
for column in result:
self.assertEqual(result[column].all(), expected[column].all())
def test_raise_error_with_threshold_greater_than_one(self):
self.assertRaises(
HealthcareAIError,
cardinality.cardinality_threshold_filter,
self.cardinality,
'unique_ratio',
warning_threshold=2)
class TestZeroCardinalityFilter(unittest.TestCase):
"""Test `cardinality_threshold_filter()` method."""
def setUp(self):
self.df = pd.DataFrame({
'id': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'category': ['a', 'b', 'c', 'd', 'a', 'b', 'c', 'd', 'a', 'b'],
'gender': ['F', 'M', 'F', 'M', 'F', 'M', 'F', 'M', 'F', 'M'],
'age': [1, 2, 3, 1, 2, 3, 1, 2, 3, 1],
'boring': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
})
self.df['bad_string'] = 'yup'
self.df['bad_float'] = 3.33
self.df['bad_int'] = 1
self.df['bad_bool'] = False
self.cardinality = cardinality.calculate_cardinality(self.df)
def test_returns_dataframe(self):
self.assertIsInstance(
cardinality.cardinality_low_filter(self.cardinality),
pd.DataFrame)
def test_raises_error_on_missing_key(self):
# intentionally pass in a dataframe without `unique_value_count`
self.assertRaises(
HealthcareAIError,
cardinality.cardinality_low_filter,
self.df)
def test_returns_zero_cardinal_features(self):
expected = pd.DataFrame({
'Feature Name': ['boring', 'bad_string', 'bad_int', 'bad_float', 'bad_bool'],
'unique_value_count': [1, 1, 1, 1, 1],
'unique_ratio': [0.1, 0.1, 0.1, 0.1, 0.1]
})
result = cardinality.cardinality_low_filter(self.cardinality)
print(expected)
print(result)
for column in result:
print('checking {}'.format(column))
self.assertEqual(result[column].all(), expected[column].all())
if __name__ == '__main__':
unittest.main()
|
[
"jsalvi@rockefeller.edu"
] |
jsalvi@rockefeller.edu
|
edc722cd4a64af8de530779bdf571c8d29331bec
|
46bff46d767e16e4b78221db2d040b3346b83c18
|
/292~296-QProgressBar/296-QProgressBar-信号使用.py
|
6fa93d9491ce9741e3f9056373bf09307f1918e6
|
[] |
no_license
|
anyuhanfei/study_PyQt5
|
ef5a537795ff979838481ac2253dfe65b32a7796
|
ed9f0595abfde3b0b3ba0f5dce2acd9dc9d2ef1d
|
refs/heads/master
| 2020-12-09T04:24:21.821808
| 2020-06-08T09:52:57
| 2020-06-08T09:52:57
| 233,191,765
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,157
|
py
|
'''
296-QProgressBar - using signals
    valueChanged(int)  --  emitted when the value changes
Appendix:
    QTimer
        API:
            start(int)  --  start the timer; int: interval between firings, in ms
            stop()  --  stop the timer
        Signal:
            timeout  --  emitted each time the interval elapses
'''
import sys
from PyQt5.QtWidgets import QWidget, QApplication, QProgressBar
from PyQt5.QtCore import QTimer
class Window(QWidget):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        self.setWindowTitle('296-QProgressBar - using signals')
self.resize(1000, 500)
self.pb = QProgressBar(self)
self.pb.move(10, 10)
self.pb.setValue(30)
        '''value-changed signal'''
        self.pb.valueChanged.connect(lambda val: print('value changed: %s' % val))
self.time = QTimer(self.pb)
self.time.timeout.connect(self._timeout)
self.time.start(1000)
def _timeout(self):
self.pb.setValue(self.pb.value() + 1)
if self.pb.value() >= self.pb.maximum():
self.time.stop()
if __name__ == "__main__":
app = QApplication(sys.argv)
window = Window()
window.show()
sys.exit(app.exec_())
|
[
"1223050251@qq.com"
] |
1223050251@qq.com
|
acbd9216028a741186b02774bb5e3c7fabeb1fd9
|
109e758b97c11b30371a188e27a82d31804545cd
|
/netv2_platform.py
|
4576763902941f04ff56b890bf0eed9f51068223
|
[] |
no_license
|
mithro/netv2-soc
|
5fd948cb91446cd902dd2919f5c72bd93f6cd15b
|
fde29298e965fb5f7146bf2ea95eefa66b297083
|
refs/heads/master
| 2021-01-12T07:18:44.225944
| 2017-03-12T05:31:20
| 2017-03-12T05:31:20
| 76,943,485
| 0
| 0
| null | 2016-12-20T09:38:46
| 2016-12-20T09:38:45
| null |
UTF-8
|
Python
| false
| false
| 4,578
|
py
|
from litex.build.generic_platform import *
from litex.build.xilinx import XilinxPlatform, VivadoProgrammer
_io = [
("clk50", 0, Pins("R2"), IOStandard("LVCMOS33")),
("user_led", 0, Pins("U2"), IOStandard("LVCMOS33")),
("serial", 0,
Subsignal("tx", Pins("M5")),
Subsignal("rx", Pins("N6")),
IOStandard("LVCMOS33"),
),
("ddram", 0,
Subsignal("a", Pins(
"U15 M17 N18 U16 R18 P18 T18 T17",
"U17 N16 R16 N17 V17 R17"),
IOStandard("SSTL15")),
Subsignal("ba", Pins("T15 M16 P15"), IOStandard("SSTL15")),
Subsignal("ras_n", Pins("L18"), IOStandard("SSTL15")),
Subsignal("cas_n", Pins("K17"), IOStandard("SSTL15")),
Subsignal("we_n", Pins("P16"), IOStandard("SSTL15")),
Subsignal("dm", Pins("D9 B14 F14 C18"), IOStandard("SSTL15")),
Subsignal("dq", Pins(
"D11 B11 D8 C11 C8 B10 C9 A10 "
"A15 A14 E13 B12 C13 A12 D13 A13 "
"H18 G17 G16 F17 G14 E18 H16 H17 "
"C17 D16 B17 E16 C16 E17 D15 D18 "
),
IOStandard("SSTL15"),
Misc("IN_TERM=UNTUNED_SPLIT_50")),
Subsignal("dqs_p", Pins("B9 C14 G15 B16"), IOStandard("DIFF_SSTL15")),
Subsignal("dqs_n", Pins("A9 B15 F15 A17"), IOStandard("DIFF_SSTL15")),
Subsignal("clk_p", Pins("P14"), IOStandard("DIFF_SSTL15")),
Subsignal("clk_n", Pins("R15"), IOStandard("DIFF_SSTL15")),
Subsignal("cke", Pins("K15"), IOStandard("SSTL15")),
Subsignal("odt", Pins("K18"), IOStandard("SSTL15")),
Subsignal("reset_n", Pins("V16"), IOStandard("LVCMOS15")),
Subsignal("cs_n", Pins("J16"), IOStandard("SSTL15")),
Misc("SLEW=FAST"),
),
("pcie_x1", 0,
Subsignal("rst_n", Pins("N1"), IOStandard("LVCMOS33")),
Subsignal("clk_p", Pins("D6")),
Subsignal("clk_n", Pins("D5")),
Subsignal("rx_p", Pins("E4")),
Subsignal("rx_n", Pins("E3")),
Subsignal("tx_p", Pins("H2")),
Subsignal("tx_n", Pins("H1"))
),
("hdmi_in", 0,
Subsignal("clk_p", Pins("P4")),
Subsignal("clk_n", Pins("P3")),
Subsignal("data0_p", Pins("U4")),
Subsignal("data0_n", Pins("V4")),
Subsignal("data1_p", Pins("P6")),
Subsignal("data1_n", Pins("P5")),
Subsignal("data2_p", Pins("R7")),
Subsignal("data2_n", Pins("T7")),
IOStandard("TDMS_33")
),
("hdmi_out", 0,
Subsignal("clk_p", Pins("R3")),
Subsignal("clk_n", Pins("T2")),
Subsignal("data0_p", Pins("T4")),
Subsignal("data0_n", Pins("T3")),
Subsignal("data1_p", Pins("U6")),
Subsignal("data1_n", Pins("U5")),
Subsignal("data2_p", Pins("V7")),
Subsignal("data2_n", Pins("V6")),
IOStandard("TMDS_33")
),
]
class Platform(XilinxPlatform):
default_clk_name = "clk100"
default_clk_period = 20.0
def __init__(self, toolchain="vivado", programmer="vivado"):
XilinxPlatform.__init__(self, "xc7a50t-csg325-2", _io,
toolchain=toolchain)
self.add_platform_command(
"set_property CONFIG_VOLTAGE 1.5 [current_design]")
self.add_platform_command(
"set_property CFGBVS GND [current_design]")
self.add_platform_command(
"set_property BITSTREAM.CONFIG.CONFIGRATE 22 [current_design]")
self.add_platform_command(
"set_property BITSTREAM.CONFIG.SPI_BUSWIDTH 1 [current_design]")
self.toolchain.bitstream_commands = [
"set_property CONFIG_VOLTAGE 1.5 [current_design]",
"set_property CFGBVS GND [current_design]",
"set_property BITSTREAM.CONFIG.CONFIGRATE 22 [current_design]",
"set_property BITSTREAM.CONFIG.SPI_BUSWIDTH 1 [current_design]",
]
self.toolchain.additional_commands = \
["write_cfgmem -verbose -force -format bin -interface spix1 -size 64 "
"-loadbit \"up 0x0 {build_name}.bit\" -file {build_name}.bin"]
self.programmer = programmer
self.add_platform_command("set_property INTERNAL_VREF 0.750 [get_iobanks 35]")
def create_programmer(self):
if self.programmer == "vivado":
return VivadoProgrammer(flash_part="n25q128-3.3v-spi-x1_x2_x4")
else:
raise ValueError("{} programmer is not supported"
.format(self.programmer))
def do_finalize(self, fragment):
XilinxPlatform.do_finalize(self, fragment)
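# Minimal usage sketch (illustrative; resource names follow the _io list above):
# platform = Platform()
# serial = platform.request("serial")
# programmer = platform.create_programmer()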
|
[
"florent@enjoy-digital.fr"
] |
florent@enjoy-digital.fr
|
a81370b72362413513bd007a2001288bfe11bc26
|
df515c328a64532fd01a1e93dca16986ec70f42c
|
/Python_Projects-master/fizzbuzz.py
|
6fa102cde1ecf37e8d4e3d2955181db57da88af9
|
[
"MIT"
] |
permissive
|
Nathan-Laney/Python_Projects
|
0ee0b863def7afe87a91115d26eade14da5ff457
|
3979db5171963e923701a5083b883801cdeb37d1
|
refs/heads/master
| 2022-09-24T11:22:07.822386
| 2017-10-30T23:43:29
| 2017-10-30T23:43:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 308
|
py
|
#!/usr/bin/env python3
def fizz_buzz(num):
print(num)
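    # check divisibility by 15 first: multiples of both 3 and 5 must print
    # FizzBuzz rather than fall through to Fizz or Buzz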
if num % 15 == 0:
print("FizzBuzz")
elif num % 5 == 0:
print("Buzz")
elif num % 3 == 0:
print("Fizz")
else:
print("Number is not divisible by 3 or 5 or 15")
for i in range(1, 101):
fizz_buzz(i)
|
[
"noreply@github.com"
] |
Nathan-Laney.noreply@github.com
|
c593b1cfe789eeef68b2663aec48775e4219d4ab
|
5b7f290bad4be870c5f0a5ff4f520f7faa94f5da
|
/apps/debate/migrations/0001_initial.py
|
9853272d72e012760ede173dc903c3d16784227f
|
[] |
no_license
|
PUYUP/saturn
|
14d5468ced8e0332471a35977ae5a918934a1d7e
|
8f7901be1899c553edcd51542dab824650f29bf2
|
refs/heads/master
| 2022-10-25T00:32:34.532317
| 2020-02-28T12:41:28
| 2020-02-28T12:41:28
| 230,210,233
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,333
|
py
|
# Generated by Django 2.2.7 on 2019-11-21 04:35
import apps.debate.utils.files
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('person', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False)),
('label', models.CharField(max_length=255)),
('slug', models.SlugField(editable=False)),
('date_created', models.DateTimeField(auto_now_add=True)),
('date_updated', models.DateTimeField(auto_now=True)),
('parent', models.ForeignKey(blank=True, limit_choices_to={'parent__isnull': True}, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='childs', to='debate.Category')),
],
options={
'verbose_name': 'Category',
'verbose_name_plural': 'Categories',
'db_table': 'debate_category',
'abstract': False,
'unique_together': {('label',)},
},
),
migrations.CreateModel(
name='Topic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False)),
('label', models.CharField(max_length=355)),
('description', models.TextField()),
('status', models.CharField(choices=[('pending', 'Tertunda'), ('reviewed', 'Ditinjau'), ('published', 'Terbit'), ('returned', 'Dikembalikan'), ('rejected', 'Ditolak'), ('draft', 'Konsep')], default='draft', max_length=100)),
('date_created', models.DateTimeField(auto_now_add=True, null=True)),
('date_updated', models.DateTimeField(auto_now=True, null=True)),
('response_count', models.IntegerField(blank=True, editable=False, null=True)),
('vote_up_count', models.IntegerField(blank=True, editable=False, null=True)),
('vote_down_count', models.IntegerField(blank=True, editable=False, null=True)),
('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='topics', to='debate.Category')),
('creator', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='topics', to='person.Person')),
],
options={
'verbose_name': 'Topic',
'verbose_name_plural': 'Topics',
'db_table': 'debate_topic',
'abstract': False,
'unique_together': {('label', 'creator')},
},
),
migrations.CreateModel(
name='Response',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False)),
('label', models.CharField(blank=True, max_length=355, null=True)),
('description', models.TextField()),
('date_created', models.DateTimeField(auto_now_add=True, null=True)),
('date_updated', models.DateTimeField(auto_now=True, null=True)),
('vote_up_count', models.IntegerField(blank=True, editable=False, null=True)),
('vote_down_count', models.IntegerField(blank=True, editable=False, null=True)),
('creator', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='responses', to='person.Person')),
('response_to', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='responses', to='debate.Response')),
('topic', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='responses', to='debate.Topic')),
],
options={
'verbose_name': 'Response',
'verbose_name_plural': 'Responses',
'db_table': 'debate_response',
'abstract': False,
},
),
migrations.CreateModel(
name='Discussed',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', models.DateTimeField(auto_now_add=True, null=True)),
('date_updated', models.DateTimeField(auto_now=True, null=True)),
('topic', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='discusseds', to='debate.Topic')),
],
options={
'verbose_name': 'Discussed',
'verbose_name_plural': 'Discusseds',
'db_table': 'debate_discussed',
'abstract': False,
},
),
migrations.CreateModel(
name='Attachment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False)),
('value_image', models.ImageField(blank=True, max_length=500, null=True, upload_to=apps.debate.utils.files.directory_image_path)),
('value_file', models.FileField(blank=True, max_length=500, null=True, upload_to=apps.debate.utils.files.directory_file_path)),
('featured', models.BooleanField(null=True)),
('caption', models.TextField(blank=True, max_length=500, null=True)),
('date_created', models.DateTimeField(auto_now_add=True, null=True)),
('date_updated', models.DateTimeField(auto_now=True, null=True)),
('object_id', models.PositiveIntegerField()),
('content_type', models.ForeignKey(limit_choices_to=models.Q(app_label='debate'), on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
('creator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='attachments', to='person.Person')),
],
options={
'verbose_name': 'Attachment',
'verbose_name_plural': 'Attachments',
'db_table': 'debate_attachment',
'ordering': ('-date_updated',),
'abstract': False,
},
),
migrations.CreateModel(
name='Vote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('vote_type', models.CharField(choices=[('U', 'Up Vote'), ('D', 'Down Vote')], max_length=1)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False)),
('date_created', models.DateTimeField(auto_now_add=True)),
('date_updated', models.DateTimeField(auto_now=True)),
('object_id', models.PositiveIntegerField()),
('content_type', models.ForeignKey(limit_choices_to=models.Q(app_label='debate'), on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
('creator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='votes', to='person.Person')),
],
options={
'verbose_name': 'Vote',
'verbose_name_plural': 'Votes',
'db_table': 'debate_vote',
'abstract': False,
'unique_together': {('object_id', 'creator')},
},
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False)),
('label', models.CharField(max_length=255)),
('slug', models.SlugField(editable=False)),
('date_created', models.DateTimeField(auto_now_add=True)),
('date_updated', models.DateTimeField(auto_now=True)),
('object_id', models.PositiveIntegerField()),
('content_type', models.ForeignKey(limit_choices_to=models.Q(app_label='debate'), on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
('creator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='tags', to='person.Person')),
],
options={
'verbose_name': 'Tag',
'verbose_name_plural': 'Tags',
'db_table': 'debate_tag',
'abstract': False,
'unique_together': {('label',)},
},
),
]
|
[
"hellopuyup@gmail.com"
] |
hellopuyup@gmail.com
|
cbc898da7428decb7c7cb1d88aaa3c116d99473e
|
30d2035626907509328494bdce8394d268b6e8b5
|
/oldjaratest/nick/clustercutting.py
|
e682f7a30820226584a51f09b81a21d39856cd1f
|
[] |
no_license
|
sjara/jaratoolbox
|
5f32b81b04002ce2ee736970701ced4a370e4ccc
|
0a4a0d2700427acf00de0b9ed66f0b64c02fdc43
|
refs/heads/master
| 2023-08-19T14:54:39.612875
| 2023-08-13T00:05:28
| 2023-08-13T00:05:28
| 21,258,341
| 3
| 4
| null | 2018-11-01T04:16:13
| 2014-06-26T23:43:44
|
Python
|
UTF-8
|
Python
| false
| false
| 13,297
|
py
|
import numpy as np
from jaratoolbox import spikesorting
import matplotlib.pyplot as plt
from scipy.spatial import Delaunay
import itertools
class ClusterCutter(object):
'''
Nick Ponvert 05-10-2015
GUI window for cutting a cluster. Accepts an N by M array of points,
where N is the number of spikes and M is the number of attributes
(i.e. peak amplitude on each wire). The window allows the user to click
to define a set of points that are used to form a convex hull,
and the cluster is cut using the hull. Multiple cuts can be performed in
any of the different dimensions. After cutting is complete, the
attribute self.inCluster will contain a boolean list which can be used to
select only the spikes that are in the cut cluster.
'''
def __init__(self, points):
#Scatter the points
self.points = points
#Figure out the dimensions of the data and how many combos there are
self.numDims=self.points.shape[1]
self.combinations=[c for c in itertools.combinations(range(self.numDims), 2)]
self.maxDim=len(self.combinations)-1
#Start with the first combination
self.dimNumber=0
#All points start inside the cluster
self.inCluster=np.ones(len(self.points), dtype=bool)
self.outsideCluster=np.logical_not(self.inCluster)
#Preserve the last cluster state for undo
self.oldInsideCluster=self.inCluster
self.oldOutsideCluster=self.outsideCluster
#Make the fig and ax, and draw the initial plot
self.fig=plt.figure()
self.ax = self.fig.add_subplot(111)
self.ax.hold(True)
self.draw_dimension(self.dimNumber)
        #Start the mouse handler and make an attribute to hold click pos
self.mpid=self.fig.canvas.mpl_connect('button_press_event', self.on_click)
self.mouseClickData=[]
#Start the key press handler
self.kpid = self.fig.canvas.mpl_connect('key_press_event', self.on_key_press)
#show the plot
#plt.hold(True)
self.fig.show()
def on_click(self, event):
'''
Method to record mouse clicks in the mouseClickData attribute
and plot the points on the current axes
'''
self.mouseClickData.append([event.xdata, event.ydata])
self.ax.plot(event.xdata, event.ydata, 'r+')
self.fig.canvas.draw()
def on_key_press(self, event):
'''
Method to listen for keypresses and take action
'''
#Function to cut the cluster
if event.key=='c':
#Only cut the cluster if there are 3 points or more
if len(self.mouseClickData)>2:
hullArray=np.array(self.mouseClickData)
self.cut_cluster(self.points[:, self.combinations[self.dimNumber]], hullArray)
self.draw_dimension(self.dimNumber)
self.mouseClickData=[]
else:
pass
#Function to undo the last cut
if event.key=='u':
self.mouseClickData=[]
self.inCluster=self.oldInsideCluster
self.outsideCluster=self.oldOutsideCluster
self.draw_dimension(self.dimNumber)
#Functions to cycle through the dimensions
elif event.key=="<":
if self.dimNumber>0:
self.dimNumber-=1
else:
self.dimNumber=self.maxDim
self.draw_dimension(self.dimNumber)
elif event.key=='>':
if self.dimNumber<self.maxDim:
self.dimNumber+=1
else:
self.dimNumber=0
self.draw_dimension(self.dimNumber)
def draw_dimension(self, dimNumber):
'''
Method to draw the points on the axes using the current dimension number
'''
#Clear the plot and any saved mouse click data for the old dimension
self.ax.cla()
self.mouseClickData=[]
#Find the point array indices for the dimensions to be plotted
dim0 = self.combinations[self.dimNumber][0]
dim1 = self.combinations[self.dimNumber][1]
#Plot the points in the cluster in green, and points outside as light grey
self.ax.plot(self.points[:,dim0][self.inCluster], self.points[:, dim1][self.inCluster], 'g.')
self.ax.plot(self.points[:, dim0][self.outsideCluster], self.points[:,dim1][self.outsideCluster], marker='.', color='0.8', linestyle='None')
#Label the axes and draw
self.ax.set_xlabel('Dimension {}'.format(dim0))
self.ax.set_ylabel('Dimension {}'.format(dim1))
plt.title('press c to cut, u to undo last cut, < or > to switch dimensions')
self.fig.canvas.draw()
def cut_cluster(self, points, hull):
''' Method to take the current points from mouse input,
convert them to a convex hull, and then update the inCluster and
outsideCluster attributes based on the points that fall within
the hull'''
#If the hull is not already a Delaunay instance, make it one
if not isinstance(hull, Delaunay):
hull=Delaunay(hull)
#Save the old cluster for undo
self.oldInsideCluster=self.inCluster
self.oldOutsideCluster=self.outsideCluster
        #Find the points that are inside the hull; find_simplex returns -1
        #for points outside the triangulation, so >= 0 means inside
        inHull = hull.find_simplex(points)>=0
#Only take the points that are inside the hull and the cluster
#so we can cut from different angles and preserve old cuts
newInsidePoints = self.inCluster & inHull
self.inCluster = newInsidePoints
self.outsideCluster = np.logical_not(self.inCluster)
class AdvancedClusterCutter(object):
def __init__(self, samples):
self.samples = samples
self.points = spikesorting.calculate_features(self.samples, ["peak", "valley", "energy"])
self.fig = plt.figure()
self.cloudax = plt.subplot2grid((3, 4), (0, 0), rowspan=2, colspan=2)
self.wave0ax = plt.subplot2grid((3, 4), (0, 2), rowspan=1, colspan=1)
self.wave1ax = plt.subplot2grid((3, 4), (0, 3), rowspan=1, colspan=1)
self.wave2ax = plt.subplot2grid((3, 4), (1, 2), rowspan=1, colspan=1)
self.wave3ax = plt.subplot2grid((3, 4), (1, 3), rowspan=1, colspan=1)
self.wavaxes = [self.wave0ax, self.wave1ax, self.wave2ax, self.wave3ax]
self.loghistax = plt.subplot2grid((3, 4), (2, 0), rowspan=1, colspan=2)
self.timehistax = plt.subplot2grid((3, 4), (2, 2), rowspan=1, colspan=2)
self.numDims=self.points.shape[1]
self.combinations=[c for c in itertools.combinations(range(self.numDims), 2)]
self.maxDim=len(self.combinations)-1
#Start with the first combination
self.dimNumber=0
#All points start inside the cluster
self.inCluster=np.ones(len(self.points), dtype=bool)
self.outsideCluster=np.logical_not(self.inCluster)
#Preserve the last cluster state for undo
self.oldInsideCluster=self.inCluster
self.oldOutsideCluster=self.outsideCluster
#Make the fig and ax, and draw the initial plot
self.draw_dimension(self.dimNumber)
        #Start the mouse handler and make an attribute to hold click pos
self.mpid=self.fig.canvas.mpl_connect('button_press_event', self.on_click)
self.cloudMouseClickData=[]
#Start the key press handler
self.kpid = self.fig.canvas.mpl_connect('key_press_event', self.on_key_press)
#Start the handler to monitor the axis that the mouse is over
self.axiswatcher = self.fig.canvas.mpl_connect('axes_enter_event', self.on_axis_enter)
#show the plot
plt.hold(True)
self.fig.show()
def on_axis_enter(self, event):
self.currentSelectedAxis = event.inaxes
def on_click(self, event):
'''
Method to record mouse clicks in the mouseClickData attribute
and plot the points on the current axes
'''
if self.currentSelectedAxis==self.cloudax:
self.cloudMouseClickData.append([event.xdata, event.ydata])
self.cloudax.plot(event.xdata, event.ydata, 'r+')
self.fig.canvas.draw()
def on_key_press(self, event):
'''
Method to listen for keypresses and take action
'''
#Function to cut the cluster
if event.key=='c':
#Only cut the cluster if there are 3 points or more
if len(self.cloudMouseClickData)>2:
hullArray=np.array(self.cloudMouseClickData)
self.cut_cluster(self.points[:, self.combinations[self.dimNumber]], hullArray)
self.draw_dimension(self.dimNumber)
self.cloudMouseClickData=[]
else:
pass
#Function to undo the last cut
if event.key=='u':
self.mouseClickData=[]
self.inCluster=self.oldInsideCluster
self.outsideCluster=self.oldOutsideCluster
self.draw_dimension(self.dimNumber)
#Functions to cycle through the dimensions
elif event.key=="<":
if self.dimNumber>0:
self.dimNumber-=1
else:
self.dimNumber=self.maxDim
self.draw_dimension(self.dimNumber)
elif event.key=='>':
if self.dimNumber<self.maxDim:
self.dimNumber+=1
else:
self.dimNumber=0
self.draw_dimension(self.dimNumber)
def draw_dimension(self, dimNumber):
'''
Method to draw the points on the axes using the current dimension number
'''
#Clear the plot and any saved mouse click data for the old dimension
self.cloudax.cla()
self.cloudMouseClickData=[]
#Clear the waveform plots and then plot the waveforms
ntraces=40
for indWave, waveax in enumerate(self.wavaxes):
waveax.cla()
if sum(self.inCluster)>0:
inSamples = self.samples[self.inCluster]
(nSpikesIn,nChannels,nSamplesPerSpike) = inSamples.shape
spikesToPlotIn = np.random.randint(nSpikesIn,size=ntraces)
alignedWaveformsIn = spikesorting.align_waveforms(inSamples[spikesToPlotIn,:,:])
wavesToPlotIn = alignedWaveformsIn[:, indWave, :]
for wave in wavesToPlotIn:
waveax.plot(wave, 'g', zorder=1)
if sum(self.outsideCluster)>0:
outSamples = self.samples[self.outsideCluster]
(nSpikesOut,nChannels,nSamplesPerSpike) = outSamples.shape
spikesToPlotOut = np.random.randint(nSpikesOut,size=ntraces)
alignedWaveformsOut = spikesorting.align_waveforms(outSamples[spikesToPlotOut,:,:])
wavesToPlotOut = alignedWaveformsOut[:, indWave, :]
for wave in wavesToPlotOut:
waveax.plot(wave, color='0.8', zorder=0)
# meanWaveforms = np.mean(alignedWaveforms,axis=0)
#Find the point array indices for the dimensions to be plotted
dim0 = self.combinations[self.dimNumber][0]
dim1 = self.combinations[self.dimNumber][1]
#Plot the points in the cluster in green, and points outside as light grey
self.cloudax.plot(self.points[:,dim0][self.inCluster], self.points[:, dim1][self.inCluster], 'g.', zorder=1)
self.cloudax.plot(self.points[:, dim0][self.outsideCluster], self.points[:,dim1][self.outsideCluster], marker='.', color='0.8', linestyle='None', zorder=0)
#Label the axes and draw
self.cloudax.set_xlabel('Dimension {}'.format(dim0))
self.cloudax.set_ylabel('Dimension {}'.format(dim1))
plt.suptitle('press c to cut, u to undo last cut, < or > to switch dimensions')
self.fig.canvas.draw()
def cut_cluster(self, points, hull):
''' Method to take the current points from mouse input,
convert them to a convex hull, and then update the inCluster and
outsideCluster attributes based on the points that fall within
the hull'''
#If the hull is not already a Delaunay instance, make it one
if not isinstance(hull, Delaunay):
hull=Delaunay(hull)
#Save the old cluster for undo
self.oldInsideCluster=self.inCluster
self.oldOutsideCluster=self.outsideCluster
        #Find the points that are inside the hull; find_simplex returns -1
        #for points outside the triangulation, so >= 0 means inside
        inHull = hull.find_simplex(points)>=0
#Only take the points that are inside the hull and the cluster
#so we can cut from different angles and preserve old cuts
newInsidePoints = self.inCluster & inHull
self.inCluster = newInsidePoints
self.outsideCluster = np.logical_not(self.inCluster)
if __name__=='__main__':
mean=[1, 1, 1, 1]
cov=np.random.random([4, 4])
data = np.random.multivariate_normal(mean, cov, 1000)
cw = ClusterCutter(data)
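    # AdvancedClusterCutter works on raw spike samples instead, e.g.:
    # acc = AdvancedClusterCutter(samples)
    # where samples has shape (nSpikes, nChannels, nSamplesPerSpike)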
|
[
"nickponvert@gmail.com"
] |
nickponvert@gmail.com
|
48b82901dc9072791b8dd3892d0c1acf1ef7c032
|
306d770730d0dcaecad813eb0a16af7e421e32ea
|
/venv/bin/pip
|
7ecfa1f69214d19dc7a84162605919932d3631b9
|
[] |
no_license
|
Srijan0130/commandProcess
|
321049adf8807ab5f7563ec4fe13ecd66b2a2e08
|
fb89445d85fcc9b9e2dbe4dc6a3790226281147e
|
refs/heads/master
| 2023-05-10T20:08:05.433258
| 2021-05-28T06:21:58
| 2021-05-28T06:21:58
| 371,592,709
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 271
|
#!/Users/SrijanGurung/PycharmProjects/commandProcess/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"gurungmotey9@gmail.com"
] |
gurungmotey9@gmail.com
|
|
7646df1b30ce72974b3aef4d311bfab21c5b9b4b
|
92ae6f40627dcac10f5e391379a7febc8c6607db
|
/evaluation/eta_plot.py
|
58d2726856f763766741559819b1cc56502d9689
|
[] |
no_license
|
fkocovski/optimaltaskassignment
|
a8b910ccd43f110f5ca6cde6b35aab5910d89d3b
|
9065c8e86f50f0d014e6a6159b7be379087a17c1
|
refs/heads/master
| 2021-01-13T14:39:45.626200
| 2017-05-08T09:08:47
| 2017-05-08T09:08:47
| 79,536,063
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 591
|
py
|
import matplotlib.pyplot as plt
import numpy as np
def eta_plot(etas, rewards, filename,outfile=False):
avg_rewards = [np.mean(rwd) for rwd in rewards]
std_rewards = [np.std(rwd) for rwd in rewards]
plt.figure(figsize=plt.figaspect(0.25))
plt.xlabel("eta value")
plt.ylabel("average reward")
plt.grid(True)
plt.errorbar(etas, avg_rewards, yerr=std_rewards, fmt="d", capsize=5, label="SD")
plt.plot(etas, avg_rewards, label="Mean lateness")
plt.legend()
if not outfile:
plt.show()
else:
plt.savefig("{}_ETA.pdf".format(filename))
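# Editor's sketch (hypothetical data, not from the original experiments):
# sweep a few eta values with noisy reward samples and write my_run_ETA.pdf.
#
#     etas = [0.1, 0.5, 1.0]
#     rewards = [np.random.normal(e, 0.1, 50) for e in etas]
#     eta_plot(etas, rewards, "my_run", outfile=True)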
|
[
"filip.kocovski.90@gmail.com"
] |
filip.kocovski.90@gmail.com
|
53f0b12dac11908c42fbae66b0c06f22a12026fd
|
c92ea417b96b3e34d10f5eec06ced5766f8b6ff3
|
/AAL-LEX/models.py
|
e7e0465f972e4dd0b47e9018b758db6dff763630
|
[] |
no_license
|
NLPWM-WHU/AAL
|
862843ed411770adf6c4b76410f93d3636158a50
|
27ac74c678f42b4e00cf9420c8f9425624243190
|
refs/heads/master
| 2020-11-24T12:33:02.908286
| 2019-12-19T13:31:38
| 2019-12-19T13:31:38
| 228,144,417
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,752
|
py
|
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import numpy as np
from layers import *
import utils
class Model(object):
def __init__(self, config, embedding_matrix):
# self.word_cell = config.word_cell
self.device = config.device
self.word_output_size = config.word_output_size
self.classes = config.classes
self.aspnum = config.aspnum
self.max_grad_norm = config.max_grad_norm
self.embedding_size = config.embedding_size
self.hidden_size = config.hidden_size
self.dropout_keep_proba = config.dropout_keep_proba
self.lr = config.lr
self.seed = config.seed
# self.seed = None
self.attRandomBase = config.attRandomBase
self.biRandomBase = config.biRandomBase
self.aspRandomBase = config.aspRandomBase
self.Winit = tf.random_uniform_initializer(minval=-0.01, maxval=0.01, seed=self.seed)
# self.Winit = None
# self.Winit = tf.truncated_normal_initializer(seed=self.seed)
self.word_cell = tf.contrib.rnn.LSTMCell
# self.word_cell = tf.contrib.rnn.GRUCell
with tf.variable_scope('tcm') as scope:
self.global_step = tf.Variable(0, name='global_step', trainable=False)
if embedding_matrix is None:
self.embedding_matrix = tf.placeholder(shape=(None, None), dtype=tf.float32, name='embedding_matrix')
self.embedding_C = tf.placeholder(shape=(None, None), dtype=tf.float32, name='embedding_C')
else:
self.embedding_matrix = tf.Variable(initial_value=embedding_matrix, name='embedding_matrix', dtype=tf.float32, trainable=False)
self.context_vector = tf.Variable(tf.random_uniform(shape=[self.word_output_size * 2], minval=-1.0 * self.aspRandomBase, maxval=self.aspRandomBase, seed=self.seed),
name='attention_context_vector', dtype=tf.float32, trainable=True)
self.aspect_embedding = tf.Variable(tf.random_uniform(shape=[self.aspnum, self.embedding_size], minval=-1.0 * self.aspRandomBase, maxval=self.aspRandomBase, seed=self.seed),
name='aspect_embedding', dtype=tf.float32, trainable=True)
# self.aspect_embedding_c = tf.Variable(tf.random_uniform(shape=[self.aspnum, self.embedding_size], minval=-1.0 * self.aspRandomBase, maxval=self.aspRandomBase, seed=self.seed),
# name='aspect_embedding_c', dtype=tf.float32, trainable=True)
# self.aspect_embedding = tf.Variable(initial_value=asp_embedding_matrix, name='asp_embedding_matrix',
# dtype=tf.float32, trainable=True)
# self.context_vector = tf.Variable(tf.truncated_normal(shape=[self.word_output_size * 2]),
# name='attention_context_vector', dtype=tf.float32, trainable=True)
# self.aspect_embedding = tf.Variable(tf.truncated_normal(shape=[5, self.embedding_size]),
# name='aspect_embedding', dtype=tf.float32, trainable=True)
self.is_training = tf.placeholder(dtype=tf.bool, name='is_training')
# [document x word]
self.inputs = tf.placeholder(shape=(None, None), dtype=tf.int32, name='inputs')
self.targets = tf.placeholder(shape=(None, None), dtype=tf.int32, name='targets')
self.textwm = tf.placeholder(shape=(None, None), dtype=tf.float32, name='textwordmask')
self.targetwm = tf.placeholder(shape=(None, None), dtype=tf.int32, name='targetwordmask')
self.posmask = tf.placeholder(shape=(None, None), dtype=tf.float32, name='positionmask')
self.posweight = tf.placeholder(shape=(None, None), dtype=tf.float32, name='positionwei')
self.text_word_lengths = tf.placeholder(shape=(None,), dtype=tf.int32, name='text_word_lengths')
self.target_word_lengths = tf.placeholder(shape=(None,), dtype=tf.int32, name='target_word_lengths')
self.labels = tf.placeholder(shape=(None,), dtype=tf.int32, name='labels')
self.category = tf.placeholder(shape=(None,), dtype=tf.int32, name='category')
self.aspcat = tf.placeholder(shape=(None, None), dtype=tf.int32, name='aspcat')
self.location = tf.placeholder(shape=(None,None), dtype=tf.float32, name='location')
with tf.variable_scope('embedding'):
with tf.variable_scope("word_emb"):
self.inputs_embedding = tf.nn.embedding_lookup(self.embedding_matrix, self.inputs)
# self.inputs_embedding_c = tf.nn.embedding_lookup(self.embedding_matrix_c, self.inputs)
# with tf.variable_scope("cate_emb"):
# self.cate_embedding = tf.nn.embedding_lookup(self.aspect_embedding, tf.expand_dims(self.category, -1))
(self.batch_size, self.text_word_size) = tf.unstack(tf.shape(self.inputs))
# (self.batch_size, self.target_word_size) = tf.unstack(tf.shape(self.targets))
def train(self, logits):
with tf.variable_scope('train'):
self.cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.labels, logits=logits)
# self.cross_entropy = tf.nn.weighted_cross_entropy_with_logits(labels=self.labels, logits=self.logits)
# regu = tf.contrib.layers.l2_regularizer(0.00001, scope=None)
# tvars = tf.trainable_variables()
# self.loss_regu = tf.contrib.layers.apply_regularization(regu, tvars)
# self.loss_cla = tf.reduce_mean(self.cross_entropy)
# self.loss = self.loss_cla + self.loss_regu
self.loss = tf.reduce_mean(self.cross_entropy)
# dif = tf.cast(self.labels, tf.float32) - self.logits_up
# self.loss_up = tf.reduce_mean(dif * dif)
# self.loss = self.loss_t + 0.1 * self.loss_up
self.accuracy = tf.reduce_mean(tf.cast(tf.nn.in_top_k(logits, self.labels, 1), tf.float32))
tvars = tf.trainable_variables()
grads, global_norm = tf.clip_by_global_norm(
tf.gradients(self.loss, tvars),
self.max_grad_norm)
tf.summary.scalar('global_grad_norm', global_norm)
opt = tf.train.AdamOptimizer(self.lr)
# opt = tf.train.GradientDescentOptimizer(self.lr)
# opt = tf.train.AdadeltaOptimizer(self.lr, rho=0.9, epsilon=1e-6)
self.train_op = opt.apply_gradients(
zip(grads, tvars), name='train_op',
global_step=self.global_step)
class AALlex(Model):
def __init__(self, config, embedding_matrix, lex, sess):
        super(AALlex, self).__init__(config, embedding_matrix)  # initialize the AALlex instance through Model's __init__
self.word_cell = tf.contrib.rnn.LSTMCell
self.nhop = config.nhop
self.hid = []
self.lex = tf.Variable(initial_value=lex, name='lex', dtype=tf.float32, trainable=False)
        # Words unrelated to PMI are filtered out here by assigning them weight 0; only the truly PMI-relevant words are kept, and the loss is computed on their average
self.m = tf.cast(self.posmask, tf.float32)
with tf.variable_scope("cate_emb"):
self.cate_embedding = tf.nn.embedding_lookup(self.aspect_embedding, tf.expand_dims(self.category, -1))
onehot_matrix = np.eye(self.aspnum)
self.cate_onehot = tf.cast(tf.nn.embedding_lookup(onehot_matrix,tf.expand_dims(self.category, -1)),tf.float32)
with tf.device(self.device):
self.build()
# self.train(self.logits_t)
with tf.variable_scope('train'):
            self.cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.labels, logits=self.logits_t)  # sparse: labels are 1-D mutually exclusive class indices (only appropriate when classes are mutually exclusive)
#self.cross_entropy_c = tf.nn.softmax_cross_entropy_with_logits(labels=self.asplabel, logits=self.logits_c)
#self.cross_entropy_c = tf.reduce_mean(tf.square(self.asplabel-self.logits_c))
self.cross_entropy_c = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=self.asplabel, logits=self.logits_c),axis= -1)
#self.cross_entropy_c = tf.nn.softmax_cross_entropy_with_logits(labels=self.asplabel, logits=self.logits_c)
self.loss_senti = tf.reduce_mean(self.cross_entropy)
            mask_sum = tf.reduce_sum(self.m, axis=1, keep_dims=True) + 1e-6
            weight = self.m / mask_sum  # m can be all zeros; the tiny 1e-6 keeps the denominator non-zero and avoids division by zero; weight is the per-word weight
            lc = tf.reduce_mean(tf.multiply(self.cross_entropy_c, weight), axis=-1)  # aggregate to sentence level
            self.loss_c = tf.reduce_mean(lc) * 10  # aggregate to batch level
self.loss = 0.7 * self.loss_senti + 0.3 * self.loss_c
# self.loss = self.loss_senti
self.accuracy = tf.reduce_mean(tf.cast(tf.nn.in_top_k(self.logits_t, self.labels, 1), tf.float32))
tvars = tf.trainable_variables()
grads, global_norm = tf.clip_by_global_norm(
tf.gradients(self.loss, tvars),
self.max_grad_norm)
tf.summary.scalar('global_grad_norm', global_norm)
opt = tf.train.AdamOptimizer(self.lr)
# opt = tf.train.AdadeltaOptimizer(self.lr, rho=0.9)
# opt = tf.train.AdagradOptimizer(self.lr)
# opt = tf.train.GradientDescentOptimizer(self.lr)
self.train_op = opt.apply_gradients(
zip(grads, tvars), name='train_op',
global_step=self.global_step)
def build(self):
with tf.variable_scope('model'):
            ########################### Input preparation ###################################
            self.asplabel = tf.nn.embedding_lookup(self.lex, self.inputs)  # PMI lexicon entries (after softmax) matched to the category
# cate_emb = tf.tile(self.cate_embedding, [1, self.text_word_size, 1])
# lstminputs = tf.concat([self.inputs_embedding, cate_emb], 2)
lstminputs = self.inputs_embedding
# self.memory = self.inputs_embedding
with tf.variable_scope('text') as scope:
text_rnn = BiDynamicRNNLayer(
# text_rnn = DynamicRNNLayer(
inputs=lstminputs,
cell_fn=self.word_cell, # tf.nn.rnn_cell.LSTMCell,
                    n_hidden=self.hidden_size // 2,  # integer division so the two directions together give hidden_size
sequence_length=self.text_word_lengths,
)
text_encoder_output = text_rnn.outputs
text_final = text_rnn.finalout
#######################################################################
            ############################# Aspect category learning #############################
finalrep_asp = text_encoder_output
# finalrep_asp = layers.fully_connected(aspsenti_output, self.embedding_size, weights_initializer=self.Winit, activation_fn=None)
with tf.variable_scope('aspsentidropout'):
finalrep_asp = layers.dropout(
finalrep_asp, keep_prob=self.dropout_keep_proba,
is_training=self.is_training,
)
self.logits_c = layers.fully_connected(finalrep_asp, self.aspnum, weights_initializer=self.Winit, activation_fn=None)
# TODO: parameter 1
#self.m = self.m * 5
self.cate_onehot = tf.tile(self.cate_onehot,[1,self.text_word_size,1])
pmi_category = tf.reduce_max(tf.multiply(self.cate_onehot, tf.sigmoid(self.logits_c)), axis=-1)
pmi_masked = tf.multiply(pmi_category,self.m ) + 1
pmi_3dim = tf.tile(tf.expand_dims(pmi_masked, 2), [1, 1, self.embedding_size])
self.TEST1 = tf.sigmoid(self.logits_c)
self.TEST2 = tf.multiply(self.cate_onehot, tf.sigmoid(self.logits_c))
######################################################################
            ############################# Sentiment classification learning #############################
            self.memory = text_encoder_output  # hidden states from the bidirectional LSTM serve as the memory
# TODO: parameter 2
self.memory = tf.multiply(self.memory,pmi_3dim)
self.hid.append(self.cate_embedding)
self.attprob = []
#tmp = tf.matmul(self.memory, tf.cast(self.posmask, tf.float32))
with tf.variable_scope('multihop') as scope:
cate_emb = tf.tile(self.hid[-1], [1, self.text_word_size, 1]) # b * len * d
inputs = tf.concat([self.inputs_embedding, cate_emb], 2) # b * len * 2d
# sim = tf.matmul(self.inputs_embedding, self.hid[-1])
# sim = layers.fully_connected(inputs, 1, weights_initializer=tf.random_uniform_initializer(-0.01, 0.01),
# biases_initializer=tf.random_uniform_initializer(-0.01, 0.01), activation_fn=tf.tanh, scope='att') # b * len * 1
sim = layers.fully_connected(inputs, 1, activation_fn=tf.tanh, scope='att')
# sim = tf.reduce_sum(tf.multiply(layers.fully_connected(inputs, self.embedding_size * 2, activation_fn=tf.tanh, scope='att'),
# self.context_vector), axis=2, keep_dims=True) # b * n * 1
# prob = tf.nn.softmax(sim, dim=1)
                prob = tf.expand_dims(softmask(tf.squeeze(sim, [2]), self.textwm), axis=-1)  # attention between the category embedding and the memory
out = tf.matmul(tf.transpose(prob, perm=[0, 2, 1]), self.memory)
# finalout = out + layers.fully_connected(self.hid[-1], self.hidden_size,
# weights_initializer=tf.random_uniform_initializer(-0.01, 0.01, seed=self.seed),
# biases_initializer=tf.random_uniform_initializer(-0.01, 0.01, seed=self.seed),
# activation_fn=None, scope='linear')
finalout = out + self.hid[-1]
self.hid.append(finalout)
self.attprob.append(prob)
scope.reuse_variables()
for h in range(self.nhop):
cate_emb = tf.tile(self.hid[-1], [1, self.text_word_size, 1]) # b * len * d
inputs = tf.concat([self.memory, cate_emb], 2) # b * len * 2d
# sim = tf.matmul(self.inputs_embedding, self.hid[-1])
# sim = layers.fully_connected(inputs, 1,
# weights_initializer=tf.random_uniform_initializer(-0.01, 0.01),
# biases_initializer=tf.random_uniform_initializer(-0.01, 0.01),
# activation_fn=tf.tanh, scope='att') # b * len * 1
sim = layers.fully_connected(inputs, 1, activation_fn=tf.tanh, scope='att')
# sim = tf.reduce_sum(tf.multiply(
# layers.fully_connected(inputs, self.embedding_size * 2, activation_fn=tf.tanh, scope='att'),
# self.context_vector), axis=2, keep_dims=True) # b * n * 1
# prob = tf.nn.softmax(sim, dim=1)
prob = tf.expand_dims(softmask(tf.squeeze(sim, [2]), self.textwm), axis=-1)
out = tf.matmul(tf.transpose(prob, perm=[0, 2, 1]), self.memory)
#print('out', out.shape)
# finalout = out + layers.fully_connected(self.hid[-1], self.hidden_size,
# weights_initializer=tf.random_uniform_initializer(-0.01, 0.01, seed=self.seed),
# biases_initializer=tf.random_uniform_initializer(-0.01, 0.01, seed=self.seed),
# activation_fn=None, scope='linear')
finalout = out + self.hid[-1]
self.hid.append(finalout)
self.attprob.append(prob)
finalrep = tf.squeeze(self.hid[-1], [1])
with tf.variable_scope('dropout'):
finalrep = layers.dropout(
finalrep, keep_prob=self.dropout_keep_proba,
is_training=self.is_training,
)
# rep = layers.fully_connected(finaloutput, self.hidden_size, activation_fn=tf.tanh)
# rep = layers.dropout(rep, keep_prob=self.dropout_keep_proba,is_training=self.is_training)
self.logits_t = layers.fully_connected(finalrep, self.classes, weights_initializer=self.Winit, activation_fn=None)
# self.logits_c = tf.matmul(layers.fully_connected(finalrep_asp, self.hidden_size, weights_initializer=self.Winit, activation_fn=None, biases_initializer=None), self.aspect_embedding, transpose_b=True)
self.prediction = tf.argmax(self.logits_t, axis=-1)
######################################################################
def get_feed_data(self, x, c, y=None, a=None, e=None, class_weights=None, is_training=True):
x_m, x_sizes, xwordm, pm = utils.batch_lexmask(x, a)
#print('pm',pm)
fd = {
self.inputs: x_m,
self.text_word_lengths: x_sizes,
self.textwm: xwordm,
self.category: c,
self.posmask: pm
}
if y is not None:
fd[self.labels] = y
if e is not None:
fd[self.embedding_matrix] = e
fd[self.is_training] = is_training
return fd
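# Editor's worked example of the masked loss averaging in AALlex.__init__
# (hypothetical numbers): with per-word losses [2.0, 4.0, 6.0] and mask
# m = [1, 0, 1], mask_sum is 2, the weights become [0.5, 0, 0.5], and the
# sentence-level loss is mean([1.0, 0.0, 3.0]) ~= 1.33, so only the
# PMI-relevant words contribute.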
|
[
"zhchen18@whu.edu.cn"
] |
zhchen18@whu.edu.cn
|
9bb1a0e2edacde679cd2d9528a5d28eb2ac1f4ae
|
fffcf18c3d3d05831a67f2bb66a547bde523a952
|
/busqueda/apps.py
|
428999126b84f10f00492d243bd4e68603b8c53d
|
[] |
no_license
|
DanpezPlay/buscarProfe
|
757527a7a238014cb39fe5ab9afa23b5a42fa8a3
|
2f483c5ba82b28ce8e227ba65ed496787fca397d
|
refs/heads/master
| 2022-11-08T08:40:14.535085
| 2018-10-26T18:24:00
| 2018-10-26T18:24:00
| 154,871,752
| 0
| 1
| null | 2022-10-21T02:27:42
| 2018-10-26T17:41:28
|
CSS
|
UTF-8
|
Python
| false
| false
| 156
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class BusquedaConfig(AppConfig):
name = 'busqueda'
|
[
"danpezplay@gmail.com"
] |
danpezplay@gmail.com
|
5fd1c667581a665d6c69db78e81b76c54afb0056
|
d7b7dbad9dea308b95f98c7f2323866423b1cf7a
|
/todoList/settings.py
|
b977c6266ef330bfcf9d6ff0640b4fd9b10a9ac0
|
[] |
no_license
|
Ishan-Ajmera/ToDo-App
|
f8050045d70783427faaf2f2e07587a612017842
|
fd9a198a11699b604b5426bbbd66be7bab73d851
|
refs/heads/master
| 2022-12-28T06:30:54.838294
| 2020-10-10T06:20:16
| 2020-10-10T06:20:16
| 302,829,314
| 0
| 1
| null | 2020-10-10T10:11:20
| 2020-10-10T06:14:17
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,195
|
py
|
"""
Django settings for todoList project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'clfz-h^@)-3-=q_7608h8x@$t#fox_$7z=#03^rc_d9#p(b=fh'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'home',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todoList.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'todoList.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
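# Editor's sketch (assumption: DJANGO_SECRET_KEY is whatever variable you
# export in production). Reading the key from the environment keeps the
# hard-coded value above out of deployed settings:
#
#     SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)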
|
[
"ajmeraishan11@gmail.com"
] |
ajmeraishan11@gmail.com
|
46e01ba801591f9f0b272f2d88e28ca395d4fa93
|
3ff97503dfe1237dc4ec47a10ac9d0cb50d82df0
|
/DATA/Show.py
|
aac0e244a01e33fa5722aa4bf01dcc63d7d091ae
|
[
"MIT"
] |
permissive
|
IewNixIl/graduation_project_under
|
c420f6ff1d4268276f2d716393eab3ab103e0871
|
67d0345208511bb06c35c3453227b2fa4ebef4a3
|
refs/heads/master
| 2022-11-18T10:19:42.045403
| 2020-07-14T11:21:05
| 2020-07-14T11:21:05
| 279,558,252
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,786
|
py
|
import numpy
from matplotlib import pyplot
import gdal
from skimage import io,exposure
from skimage.segmentation import slic,mark_boundaries
import os
from PIL import Image
import shelve
import sys
sys.path.append('..')
from Config import config
def seg(path, n_segments=500, compactness=20):
    i = io.imread(path)[:, :, [3, 2, 1, 7]]
    img = i[:, :, :3]
    img = (img - img.min()) / (img.max() - img.min())
    img = img * 255
    img = img.astype(numpy.uint8)
    img = exposure.adjust_gamma(img, 0.5)
    # Editor's reconstruction (assumption): compute the three return values
    # that were otherwise undefined, as SLIC superpixels, their boundary
    # overlay, and the fourth selected band kept as the wdi index map.
    segment = slic(img, n_segments=n_segments, compactness=compactness)
    out = mark_boundaries(img, segment)
    wdi = i[:, :, 3]
    return segment, out, img, wdi
def getname(path,namelist):
if namelist[0]==0:
season='ROIs1158_spring'
elif namelist[0]==1:
season='ROIs1868_summer'
elif namelist[0]==2:
season='ROIs1970_fall'
elif namelist[0]==3:
season='ROIs2017_winter'
path_s2=path+'\\'+season+'\\s2_'+str(namelist[1])+'\\'+season+'_s2_'+str(namelist[1])+'_p'+str(namelist[2])+'.tif'
return path_s2
def transform(name):
if 'spring' in name:
season=0
elif 'summer' in name:
season=1
elif 'fall' in name:
season=2
elif 'winter' in name:
season=3
l=[]
l.append(season)
l.append(int(name.split('_')[3]))
l.append(int(name.split('_')[4].split('.')[0][1:]))
return l
class UI:
def __init__(self,mode='normal',init=0):
        '''mode = normal: normal operation
        mode = review: show only items that have already been labeled
        '''
self.mode=mode
self.path_merge='D:\\Codes\\test_dataset\\1ite\\predict_1_merge'
self.path_mask='D:\\Codes\\test_dataset\\1ite\\predict_1_mask'
self.path_label='D:\\Codes\\test_dataset\\label'
self.path_random='D:\\Codes\\test_dataset\\1ite\\predict_1_random'
self.path_smooth='D:\\Codes\\test_dataset\\1ite\\predict_1_merge_smooth_5'
self.path_predict1='D:\\Codes\\test_dataset\\1ite\\predict_1_model1'
self.path_predict2='D:\\Codes\\test_dataset\\1ite\\predict_1_model2'
self.path_predict3='D:\\Codes\\test_dataset\\1ite\\predict_1_model3'
self.n=init
self.l=os.listdir(self.path_label)
fig=pyplot.figure()
fig.canvas.mpl_disconnect(fig.canvas.manager.key_press_handler_id)
fig.canvas.mpl_connect('key_press_event',self.on_key_press)
self.fig=fig
self.ax1=fig.add_subplot(3,1,1)
self.ax2=fig.add_subplot(3,3,4)
self.ax3=fig.add_subplot(3,3,5)
self.ax4=fig.add_subplot(3,3,6)
self.ax5=fig.add_subplot(3,1,3)
#self.ax4=fig.add_subplot(2,2,4)
pyplot.get_current_fig_manager().window.state('zoomed')
#self.ax2=fig.add_subplot(1,2,2)
self.draw()
pyplot.show()
def on_key_press(self,event):
if event.key=='a' or event.key=='left':
self.n-=1
print(self.n)
self.draw()
if event.key=='d' or event.key=='right':
if self.n+1>=len(self.l):
return
self.n+=1
self.draw()
def draw(self):
print(self.l[self.n])
self.label=io.imread(self.path_label+'\\'+self.l[self.n])
name=self.l[self.n].split('_')
name[2]='predict'
name='_'.join(name)
#self.predict_downsample=io.imread(self.path_predict_down+'\\'+name)
self.merge=io.imread(self.path_merge+'\\'+name)
self.mask=io.imread(self.path_mask+'\\'+name)
self.random=io.imread(self.path_random+'\\'+name)
self.smooth=io.imread(self.path_smooth+'\\'+name)
self.predict1=io.imread(self.path_predict1+'\\'+name)
self.predict2=io.imread(self.path_predict2+'\\'+name)
self.predict3=io.imread(self.path_predict3+'\\'+name)
self.ax1.cla()
self.ax2.cla()
self.ax3.cla()
self.ax4.cla()
self.ax5.cla()
#self.ax1.imshow(img)
self.ax1.imshow(self.label,cmap='gray')
self.ax2.imshow(self.predict1,cmap='gray')
self.ax3.imshow(self.predict2,cmap='gray')
self.ax4.imshow(self.predict3,cmap='gray')
self.ax5.imshow(self.merge,cmap='gray')
self.fig.canvas.draw_idle()
def statistic():
d=os.listdir(config.path_labels)
n=numpy.array([0,0,0,0])
for i in d:
if 'spring' in i:
n[0]=n[0]+1
if 'summer' in i:
n[1]=n[1]+1
if 'fall' in i:
n[2]=n[2]+1
if 'winter' in i:
n[3]=n[3]+1
print(n)
n=n/len(d)
print(n)
if __name__=='__main__':
test=UI(mode='normal',init=0)
#statistic()
|
[
"Xinwei.lee@outlook.com"
] |
Xinwei.lee@outlook.com
|
a1e6871ac1735ed0e31f4105725d8a59055ad1c0
|
d8233a120f46fd07e604cfcf0fd05506e8b4a1ec
|
/MULAN_universal_lesion_analysis/maskrcnn/data/datasets/__init__.py
|
de1af851bd040149f37212bf1368edf77a1a977a
|
[
"MIT"
] |
permissive
|
rsummers11/CADLab
|
aeb4e8b66563c33d2fae9a73a3f35da647c1b2eb
|
78766a3609a16d6ac8e1d22344f6bebef509aef7
|
refs/heads/master
| 2023-07-04T23:30:49.791094
| 2023-06-22T16:47:28
| 2023-06-22T16:47:28
| 23,888,481
| 441
| 222
| null | 2022-02-19T15:42:24
| 2014-09-10T19:16:19
|
C++
|
UTF-8
|
Python
| false
| false
| 274
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# from .coco import COCODataset
# from .voc import PascalVOCDataset
from .concat_dataset import ConcatDataset
from .DeepLesion import DeepLesionDataset
__all__ = ["ConcatDataset", "DeepLesionDataset"]
|
[
"kyan@panacea-ai.org"
] |
kyan@panacea-ai.org
|
e32ca7681e462bb5e09bbeb2e68e921a0d47ea4b
|
220a39b2912522238aa07932d84f750cdab5d862
|
/belt_exam/urls.py
|
cb1db37e2314bc903afa7704fa1cfefa2e53b087
|
[] |
no_license
|
mikeybags/python_belt
|
a513b1fcedd5e8674458c11d064effade45f02bb
|
133cba06b3f1b23ff792ded97f877a7858758e89
|
refs/heads/master
| 2021-01-11T14:39:14.408110
| 2017-06-13T23:47:51
| 2017-06-13T23:47:51
| 80,185,897
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 936
|
py
|
"""belt_exam URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('apps.login_app.urls', namespace="login")),
url(r'^appointments/', include('apps.appointments_app.urls', namespace = 'appointments')),
]
|
[
"mikeybags@gmail.com"
] |
mikeybags@gmail.com
|
a3e6259da92de0d8798d61930112881a899a46af
|
5371248dbb75db605ea1ca424daa11b5e86597f0
|
/auto_tester/spring_festival/spring_plan.py
|
ca4e5c3fe0c828370197ca4a0446d4c330da5424
|
[] |
no_license
|
Paladin1412/Python-Learning
|
7312b4134ae0dcff63acb85d9a1b6563e4bbfe27
|
06f0aa04456e104daffa425353b4c550759285e1
|
refs/heads/master
| 2022-10-15T17:08:35.802868
| 2020-06-13T04:02:23
| 2020-06-13T04:02:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,183
|
py
|
from normal_modules.normal import NormalTester
#taskid:
#check-in: 216, sid=10662
#coin deposit: 217
class SpringPlan(NormalTester):
def __init__(self, sess_cookies: dict):
super(SpringPlan, self).__init__(sess_cookies)
def sp_get_url(self):
print(self.get_url_json(self.url))
def sp_post_url(self):
print(self.post_url_json(url=self.url, data=self.data))
def t57(self, sid):
        # 57. Fetch the task list
self.url = "http://api.bilibili.com/x/activity/task/list?sid=" + str(sid)
self.sp_get_url()
def t60(self, csrf, taskId):
        # 60. Claim the task reward (coin deposit)
self.url = "http://api.bilibili.com/x/activity/task/award"
self.data = {
"csrf": csrf,
"task_id": taskId
}
self.sp_post_url()
def t72(self, sid, taskId, token=None):
        # 72. Endpoint for completing special activity tasks
self.url = "http://api.bilibili.com/x/activity/single/task/do"
self.data = {
"sid": sid,
"task_id": taskId,
"token": token # Not Necessary
}
self.sp_post_url()
def t93(self, sid, tid):
        # 93. Featured ranking list (operations data source)
self.url = "http://api.bilibili.com/x/activity/single/arc/list?sid=" + str(sid) + "&tid=" + str(tid)
self.sp_get_url()
def t96(self, sid):
        # 96. Issue a token for invite-type activities/tasks (call before sharing)
self.url = "http://uat-api.bilibili.co/x/activity/likeact/token?sid=" + str(sid)
self.sp_get_url()
def t97(self, sid, token):
        # 97. Aggregation endpoint called after an invited friend registers
self.data = {
"sid": sid,
"token": token
}
self.url = "http://api.bilibili.com/x/activity/single/task/token/do"
self.sp_post_url()
def t98(self, sid):
        # 98. Endpoint for the number of complete card sets in the card-collecting activity
self.url = "http://api.bilibili.com/x/activity/single/card/num?sid=" + str(sid)
self.sp_get_url()
def t100(self, sid, token):
        # 100. Check task status before inviting a friend to register
self.url = "http://api.bilibili.com/x/activity/task/check?sid=" + str(sid) + "&token=" + token
self.sp_post_url()
def t_get_card(self,csrf):
self.url="https://uat-api.bilibili.com/x/activity/lottery/do"
self.data={
"sid":"2d82d70e-25f9-11ea-bfa0-246e9693a590",
"type":1,
"csrf":csrf
}
self.sp_post_url()
def extra_test(self):
self.get_url("http://www.bilibili.com")
self.url="http://api.bilibili.com/x/activity/likes/add/other"
self.data={
"sid":10691,
"csrf":"23d391b78f1f16e7bad365298e3139b0"
}
self.sp_post_url()
if __name__ == "__main__":
cook = {
"buvid3":"6099ADA3-B383-435A-89CC-886533BE9E0F6103infoc",
"_uuid":"C0AEFB9C-4BD8-7198-F241-0B0FBDA3C4F876004infoc",
"fts":"1578020592",
"LIVE_BUVID":"AUTO7415780206028841",
"sid":"kiqkxs9h",
"bp_t_offset_13090258":"341349861655486121",
"CURRENT_FNVAL":"16",
"stardustvideo":"1",
"rpdid":"|(RY|lkJk|R0J'ul~)J~Ru|R",
"laboratory":"1-1",
"bp_t_offset_15555180":"341947824181624853",
"innersign":"1",
"bp_t_offset_2232":"344542233538609933",
"im_notify_type_27515430":"0",
"DedeUserID":"1461342",
"DedeUserID__ckMd5":"ca9659b1002c264d",
"SESSDATA":"fecabdea%2C1581663167%2C37188411",
"bili_jct":"23d391b78f1f16e7bad365298e3139b0",
"stardustpgcv":"0606",
"CURRENT_QUALITY":"80"
}
sids=[10662,10664]
sp = SpringPlan(sess_cookies=cook)
# sp.t57(10691)
# sp.extra_test()
# [sp.t57(i) for i in sids]
# sp.t93(62,372)
# sp.t98("2d82d70e-25f9-11ea-bfa0-246e9693a590")
#
# sp.t_get_card("a03f389e0d0f0427ccddb6a3c6ee4f7b")
# sp.t60(csrf="2e82d292d090f396494ea575692cf8d2",taskId=217)
# sp.t60()
# sp.t_get_card("2e82d292d090f396494ea575692cf8d2")
|
[
"354307169@qq.com"
] |
354307169@qq.com
|
64f2316305b4e8cedb9489d57e1a510ae5adf081
|
3d56b9d7c62e672a217eaedbc5c9cce9cf67658c
|
/Python3/Manipulacao de Strings/regex.py
|
b313073f550c2b20952ccfef4ff489a2079454ed
|
[] |
no_license
|
trypedro/Python
|
79088b5650856e6bd9754db702e2fc8ad6b33ed9
|
e5ae35603ad4173a5cc68aeef37541063fc7fda9
|
refs/heads/master
| 2022-03-27T16:57:42.312561
| 2019-12-20T03:04:48
| 2019-12-20T03:04:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 335
|
py
|
import re
email_um = "My number is 1234-1234"
email_dois = "Call me at 1234-1234, that is my phone"
email_tres = "1234-1234 is my mobile"
email_quatro = "lalala 981231209 lalala lalala lal alalala 93425-0912 lalalalala 1234-1234"
padrao = "[0-9]{4,5}[-]*[0-9]{4}"
retorno = re.findall(padrao, email_quatro)
print(retorno)
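# The pattern "[0-9]{4,5}[-]*[0-9]{4}" matches 4-5 digits, an optional run of
# hyphens, then 4 more digits, so the call above prints:
#     ['981231209', '93425-0912', '1234-1234']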
|
[
"pedroivodefaria@gmail.com"
] |
pedroivodefaria@gmail.com
|
fd5a1c3a206b881819e9eb364e0c0132374501a8
|
e1ea36f7f1928c4ffb649ff0b4dbffc3182622b1
|
/app/core/migrations/0002_tag.py
|
5e79c9d00d0604b45574a3ec5d4783c1b6f4278e
|
[
"MIT"
] |
permissive
|
elieish/recipe-api-django
|
e2455bbaa7d9095c118dc25c79dbd9e874d8f76c
|
19e3dff1111b2e570925c7b263363994258eaf22
|
refs/heads/master
| 2022-11-08T06:04:57.063018
| 2020-06-27T06:27:54
| 2020-06-27T06:27:54
| 262,095,314
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 682
|
py
|
# Generated by Django 3.0.6 on 2020-05-18 06:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"elie@mukuru.com"
] |
elie@mukuru.com
|
3dc6f0ca7b4a8948a0035567cd9096524e1bb2e2
|
e2e0934aa9402f1742d0c742d618cda3e0186db1
|
/venv/Lib/site-packages/google/cloud/aiplatform_v1/__init__.py
|
d765cc599dc7446aca5028cb106f15266c63984f
|
[] |
no_license
|
rajmadhu0406/FlaskMarket
|
5a449c712050864942a2f8b82615880364d13ab7
|
9531c1893a814992c20428abf16d36e4bde9bbef
|
refs/heads/master
| 2023-08-26T10:46:51.755799
| 2021-11-13T16:49:04
| 2021-11-13T16:49:04
| 427,720,997
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,190
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .services.dataset_service import DatasetServiceClient
from .services.dataset_service import DatasetServiceAsyncClient
from .services.endpoint_service import EndpointServiceClient
from .services.endpoint_service import EndpointServiceAsyncClient
from .services.job_service import JobServiceClient
from .services.job_service import JobServiceAsyncClient
from .services.migration_service import MigrationServiceClient
from .services.migration_service import MigrationServiceAsyncClient
from .services.model_service import ModelServiceClient
from .services.model_service import ModelServiceAsyncClient
from .services.pipeline_service import PipelineServiceClient
from .services.pipeline_service import PipelineServiceAsyncClient
from .services.prediction_service import PredictionServiceClient
from .services.prediction_service import PredictionServiceAsyncClient
from .services.specialist_pool_service import SpecialistPoolServiceClient
from .services.specialist_pool_service import SpecialistPoolServiceAsyncClient
from .types.accelerator_type import AcceleratorType
from .types.annotation import Annotation
from .types.annotation_spec import AnnotationSpec
from .types.batch_prediction_job import BatchPredictionJob
from .types.completion_stats import CompletionStats
from .types.custom_job import ContainerSpec
from .types.custom_job import CustomJob
from .types.custom_job import CustomJobSpec
from .types.custom_job import PythonPackageSpec
from .types.custom_job import Scheduling
from .types.custom_job import WorkerPoolSpec
from .types.data_item import DataItem
from .types.data_labeling_job import ActiveLearningConfig
from .types.data_labeling_job import DataLabelingJob
from .types.data_labeling_job import SampleConfig
from .types.data_labeling_job import TrainingConfig
from .types.dataset import Dataset
from .types.dataset import ExportDataConfig
from .types.dataset import ImportDataConfig
from .types.dataset_service import CreateDatasetOperationMetadata
from .types.dataset_service import CreateDatasetRequest
from .types.dataset_service import DeleteDatasetRequest
from .types.dataset_service import ExportDataOperationMetadata
from .types.dataset_service import ExportDataRequest
from .types.dataset_service import ExportDataResponse
from .types.dataset_service import GetAnnotationSpecRequest
from .types.dataset_service import GetDatasetRequest
from .types.dataset_service import ImportDataOperationMetadata
from .types.dataset_service import ImportDataRequest
from .types.dataset_service import ImportDataResponse
from .types.dataset_service import ListAnnotationsRequest
from .types.dataset_service import ListAnnotationsResponse
from .types.dataset_service import ListDataItemsRequest
from .types.dataset_service import ListDataItemsResponse
from .types.dataset_service import ListDatasetsRequest
from .types.dataset_service import ListDatasetsResponse
from .types.dataset_service import UpdateDatasetRequest
from .types.deployed_model_ref import DeployedModelRef
from .types.encryption_spec import EncryptionSpec
from .types.endpoint import DeployedModel
from .types.endpoint import Endpoint
from .types.endpoint_service import CreateEndpointOperationMetadata
from .types.endpoint_service import CreateEndpointRequest
from .types.endpoint_service import DeleteEndpointRequest
from .types.endpoint_service import DeployModelOperationMetadata
from .types.endpoint_service import DeployModelRequest
from .types.endpoint_service import DeployModelResponse
from .types.endpoint_service import GetEndpointRequest
from .types.endpoint_service import ListEndpointsRequest
from .types.endpoint_service import ListEndpointsResponse
from .types.endpoint_service import UndeployModelOperationMetadata
from .types.endpoint_service import UndeployModelRequest
from .types.endpoint_service import UndeployModelResponse
from .types.endpoint_service import UpdateEndpointRequest
from .types.env_var import EnvVar
from .types.hyperparameter_tuning_job import HyperparameterTuningJob
from .types.io import BigQueryDestination
from .types.io import BigQuerySource
from .types.io import ContainerRegistryDestination
from .types.io import GcsDestination
from .types.io import GcsSource
from .types.job_service import CancelBatchPredictionJobRequest
from .types.job_service import CancelCustomJobRequest
from .types.job_service import CancelDataLabelingJobRequest
from .types.job_service import CancelHyperparameterTuningJobRequest
from .types.job_service import CreateBatchPredictionJobRequest
from .types.job_service import CreateCustomJobRequest
from .types.job_service import CreateDataLabelingJobRequest
from .types.job_service import CreateHyperparameterTuningJobRequest
from .types.job_service import DeleteBatchPredictionJobRequest
from .types.job_service import DeleteCustomJobRequest
from .types.job_service import DeleteDataLabelingJobRequest
from .types.job_service import DeleteHyperparameterTuningJobRequest
from .types.job_service import GetBatchPredictionJobRequest
from .types.job_service import GetCustomJobRequest
from .types.job_service import GetDataLabelingJobRequest
from .types.job_service import GetHyperparameterTuningJobRequest
from .types.job_service import ListBatchPredictionJobsRequest
from .types.job_service import ListBatchPredictionJobsResponse
from .types.job_service import ListCustomJobsRequest
from .types.job_service import ListCustomJobsResponse
from .types.job_service import ListDataLabelingJobsRequest
from .types.job_service import ListDataLabelingJobsResponse
from .types.job_service import ListHyperparameterTuningJobsRequest
from .types.job_service import ListHyperparameterTuningJobsResponse
from .types.job_state import JobState
from .types.machine_resources import AutomaticResources
from .types.machine_resources import BatchDedicatedResources
from .types.machine_resources import DedicatedResources
from .types.machine_resources import DiskSpec
from .types.machine_resources import MachineSpec
from .types.machine_resources import ResourcesConsumed
from .types.manual_batch_tuning_parameters import ManualBatchTuningParameters
from .types.migratable_resource import MigratableResource
from .types.migration_service import BatchMigrateResourcesOperationMetadata
from .types.migration_service import BatchMigrateResourcesRequest
from .types.migration_service import BatchMigrateResourcesResponse
from .types.migration_service import MigrateResourceRequest
from .types.migration_service import MigrateResourceResponse
from .types.migration_service import SearchMigratableResourcesRequest
from .types.migration_service import SearchMigratableResourcesResponse
from .types.model import Model
from .types.model import ModelContainerSpec
from .types.model import Port
from .types.model import PredictSchemata
from .types.model_evaluation import ModelEvaluation
from .types.model_evaluation_slice import ModelEvaluationSlice
from .types.model_service import DeleteModelRequest
from .types.model_service import ExportModelOperationMetadata
from .types.model_service import ExportModelRequest
from .types.model_service import ExportModelResponse
from .types.model_service import GetModelEvaluationRequest
from .types.model_service import GetModelEvaluationSliceRequest
from .types.model_service import GetModelRequest
from .types.model_service import ListModelEvaluationSlicesRequest
from .types.model_service import ListModelEvaluationSlicesResponse
from .types.model_service import ListModelEvaluationsRequest
from .types.model_service import ListModelEvaluationsResponse
from .types.model_service import ListModelsRequest
from .types.model_service import ListModelsResponse
from .types.model_service import UpdateModelRequest
from .types.model_service import UploadModelOperationMetadata
from .types.model_service import UploadModelRequest
from .types.model_service import UploadModelResponse
from .types.operation import DeleteOperationMetadata
from .types.operation import GenericOperationMetadata
from .types.pipeline_service import CancelTrainingPipelineRequest
from .types.pipeline_service import CreateTrainingPipelineRequest
from .types.pipeline_service import DeleteTrainingPipelineRequest
from .types.pipeline_service import GetTrainingPipelineRequest
from .types.pipeline_service import ListTrainingPipelinesRequest
from .types.pipeline_service import ListTrainingPipelinesResponse
from .types.pipeline_state import PipelineState
from .types.prediction_service import PredictRequest
from .types.prediction_service import PredictResponse
from .types.specialist_pool import SpecialistPool
from .types.specialist_pool_service import CreateSpecialistPoolOperationMetadata
from .types.specialist_pool_service import CreateSpecialistPoolRequest
from .types.specialist_pool_service import DeleteSpecialistPoolRequest
from .types.specialist_pool_service import GetSpecialistPoolRequest
from .types.specialist_pool_service import ListSpecialistPoolsRequest
from .types.specialist_pool_service import ListSpecialistPoolsResponse
from .types.specialist_pool_service import UpdateSpecialistPoolOperationMetadata
from .types.specialist_pool_service import UpdateSpecialistPoolRequest
from .types.study import Measurement
from .types.study import StudySpec
from .types.study import Trial
from .types.training_pipeline import FilterSplit
from .types.training_pipeline import FractionSplit
from .types.training_pipeline import InputDataConfig
from .types.training_pipeline import PredefinedSplit
from .types.training_pipeline import TimestampSplit
from .types.training_pipeline import TrainingPipeline
from .types.user_action_reference import UserActionReference
__all__ = (
"DatasetServiceAsyncClient",
"EndpointServiceAsyncClient",
"JobServiceAsyncClient",
"MigrationServiceAsyncClient",
"ModelServiceAsyncClient",
"PipelineServiceAsyncClient",
"PredictionServiceAsyncClient",
"SpecialistPoolServiceAsyncClient",
"AcceleratorType",
"ActiveLearningConfig",
"Annotation",
"AnnotationSpec",
"AutomaticResources",
"BatchDedicatedResources",
"BatchMigrateResourcesOperationMetadata",
"BatchMigrateResourcesRequest",
"BatchMigrateResourcesResponse",
"BatchPredictionJob",
"BigQueryDestination",
"BigQuerySource",
"CancelBatchPredictionJobRequest",
"CancelCustomJobRequest",
"CancelDataLabelingJobRequest",
"CancelHyperparameterTuningJobRequest",
"CancelTrainingPipelineRequest",
"CompletionStats",
"ContainerRegistryDestination",
"ContainerSpec",
"CreateBatchPredictionJobRequest",
"CreateCustomJobRequest",
"CreateDataLabelingJobRequest",
"CreateDatasetOperationMetadata",
"CreateDatasetRequest",
"CreateEndpointOperationMetadata",
"CreateEndpointRequest",
"CreateHyperparameterTuningJobRequest",
"CreateSpecialistPoolOperationMetadata",
"CreateSpecialistPoolRequest",
"CreateTrainingPipelineRequest",
"CustomJob",
"CustomJobSpec",
"DataItem",
"DataLabelingJob",
"Dataset",
"DatasetServiceClient",
"DedicatedResources",
"DeleteBatchPredictionJobRequest",
"DeleteCustomJobRequest",
"DeleteDataLabelingJobRequest",
"DeleteDatasetRequest",
"DeleteEndpointRequest",
"DeleteHyperparameterTuningJobRequest",
"DeleteModelRequest",
"DeleteOperationMetadata",
"DeleteSpecialistPoolRequest",
"DeleteTrainingPipelineRequest",
"DeployModelOperationMetadata",
"DeployModelRequest",
"DeployModelResponse",
"DeployedModel",
"DeployedModelRef",
"DiskSpec",
"EncryptionSpec",
"Endpoint",
"EndpointServiceClient",
"EnvVar",
"ExportDataConfig",
"ExportDataOperationMetadata",
"ExportDataRequest",
"ExportDataResponse",
"ExportModelOperationMetadata",
"ExportModelRequest",
"ExportModelResponse",
"FilterSplit",
"FractionSplit",
"GcsDestination",
"GcsSource",
"GenericOperationMetadata",
"GetAnnotationSpecRequest",
"GetBatchPredictionJobRequest",
"GetCustomJobRequest",
"GetDataLabelingJobRequest",
"GetDatasetRequest",
"GetEndpointRequest",
"GetHyperparameterTuningJobRequest",
"GetModelEvaluationRequest",
"GetModelEvaluationSliceRequest",
"GetModelRequest",
"GetSpecialistPoolRequest",
"GetTrainingPipelineRequest",
"HyperparameterTuningJob",
"ImportDataConfig",
"ImportDataOperationMetadata",
"ImportDataRequest",
"ImportDataResponse",
"InputDataConfig",
"JobServiceClient",
"JobState",
"ListAnnotationsRequest",
"ListAnnotationsResponse",
"ListBatchPredictionJobsRequest",
"ListBatchPredictionJobsResponse",
"ListCustomJobsRequest",
"ListCustomJobsResponse",
"ListDataItemsRequest",
"ListDataItemsResponse",
"ListDataLabelingJobsRequest",
"ListDataLabelingJobsResponse",
"ListDatasetsRequest",
"ListDatasetsResponse",
"ListEndpointsRequest",
"ListEndpointsResponse",
"ListHyperparameterTuningJobsRequest",
"ListHyperparameterTuningJobsResponse",
"ListModelEvaluationSlicesRequest",
"ListModelEvaluationSlicesResponse",
"ListModelEvaluationsRequest",
"ListModelEvaluationsResponse",
"ListModelsRequest",
"ListModelsResponse",
"ListSpecialistPoolsRequest",
"ListSpecialistPoolsResponse",
"ListTrainingPipelinesRequest",
"ListTrainingPipelinesResponse",
"MachineSpec",
"ManualBatchTuningParameters",
"Measurement",
"MigratableResource",
"MigrateResourceRequest",
"MigrateResourceResponse",
"MigrationServiceClient",
"Model",
"ModelContainerSpec",
"ModelEvaluation",
"ModelEvaluationSlice",
"ModelServiceClient",
"PipelineServiceClient",
"PipelineState",
"Port",
"PredefinedSplit",
"PredictRequest",
"PredictResponse",
"PredictSchemata",
"PredictionServiceClient",
"PythonPackageSpec",
"ResourcesConsumed",
"SampleConfig",
"Scheduling",
"SearchMigratableResourcesRequest",
"SearchMigratableResourcesResponse",
"SpecialistPool",
"SpecialistPoolServiceClient",
"StudySpec",
"TimestampSplit",
"TrainingConfig",
"TrainingPipeline",
"Trial",
"UndeployModelOperationMetadata",
"UndeployModelRequest",
"UndeployModelResponse",
"UpdateDatasetRequest",
"UpdateEndpointRequest",
"UpdateModelRequest",
"UpdateSpecialistPoolOperationMetadata",
"UpdateSpecialistPoolRequest",
"UploadModelOperationMetadata",
"UploadModelRequest",
"UploadModelResponse",
"UserActionReference",
"WorkerPoolSpec",
)
|
[
"Raj.madhu0406@gmail.com"
] |
Raj.madhu0406@gmail.com
|
8f78a6b6003075c6334f8401b5cfedfd044f667f
|
f13acd0d707ea9ab0d2f2f010717b35adcee142f
|
/ABC/abc251-abc300/abc271/e/main.py
|
6f507caff1587d49089872b581043264bf982fc5
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
KATO-Hiro/AtCoder
|
126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7
|
bf43320bc1af606bfbd23c610b3432cddd1806b9
|
refs/heads/master
| 2023-08-18T20:06:42.876863
| 2023-08-17T23:45:21
| 2023-08-17T23:45:21
| 121,067,516
| 4
| 0
|
CC0-1.0
| 2023-09-14T21:59:38
| 2018-02-11T00:32:45
|
Python
|
UTF-8
|
Python
| false
| false
| 546
|
py
|
# -*- coding: utf-8 -*-
def main():
import sys
input = sys.stdin.readline
n, m, k = map(int, input().split())
abc = [tuple(map(int, input().split())) for _ in range(m)]
e = list(map(int, input().split()))
inf = 10 ** 18
dist = [inf] * n
dist[0] = 0
for ei in e:
ei -= 1
ai, bi, ci = abc[ei]
ai -= 1
bi -= 1
dist[bi] = min(dist[bi], dist[ai] + ci)
ans = dist[n - 1]
if ans == inf:
ans = -1
print(ans)
if __name__ == "__main__":
main()
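# Editor's note: each listed edge is relaxed exactly once, in the given
# order, so dist[v] ends up as the cheapest walk from vertex 1 whose edge
# sequence is a subsequence of e; the whole pass runs in O(n + m + k).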
|
[
"k.hiro1818@gmail.com"
] |
k.hiro1818@gmail.com
|
c3c7a15813f9ef32ed20d79e7a25c32f2d16ac8d
|
62975432654dd862cb182ea088657970216bf927
|
/python/pluralsight/py-dktp-app-dev/LayoutExample/LayoutExample.py
|
42d6972ca6a4c86442bb0f97d1d09bd56168db49
|
[] |
no_license
|
d03r/do
|
452b85bcdab4dd7c965cc9b6bb13d96ff1b514a6
|
d3102da8f2ece83457392d85b954625bab18bbf6
|
refs/heads/master
| 2021-01-10T12:44:44.682262
| 2016-03-27T06:14:55
| 2016-03-27T06:14:55
| 54,321,753
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,255
|
py
|
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import sys
class HelloWorld(QDialog):
    def __init__(self):
QDialog.__init__(self)
# super(HelloWorld, self).__init__()
#layout = QVBoxLayout()
#layout = QHBoxLayout()
layout = QGridLayout()
#label = QLabel("Hello World!")
self.label = QLabel("Hello World!")
line_edit = QLineEdit()
button = QPushButton("Close")
#layout.addWidget(self.label)
layout.addWidget(self.label, 0, 0) # Grid Layout are zero-based
#layout.addWidget(line_edit)
layout.addWidget(line_edit, 0, 1) # Same row, second column
#layout.addWidget(button)
layout.addWidget(button, 1, 1)
self.setLayout(layout)
button.clicked.connect(self.close)
        #
        # connect() attaches the "clicked" signal to the self.close
        # event handler.
        # Pass the handler itself; do not add () after it.
#line_edit.textChanged.connect(label.setText)
line_edit.textChanged.connect(self.changeTextLabel)
def changeTextLabel(self, text):
self.label.setText(text)
app = QApplication(sys.argv)
dialog = HelloWorld()
dialog.show()
sys.exit(app.exec_()) # exit with an exit code
|
[
"noo8@gatech.edu"
] |
noo8@gatech.edu
|
35a5a2a32b80f97959c81636f5e0b106dba6343e
|
ed3e270f4d6e550830c59f7115ac80338dd53fb6
|
/hello_world.py
|
d540fb1f03ceaa8130048fe038ef1a3f13545b55
|
[] |
no_license
|
cr8ivecodesmith/py3dp_book
|
d9ce384aaced1c4f9c5fd52d0d5bc41802491c11
|
cf0e7fe172f38327807522ac4ef5465e37afc2c7
|
refs/heads/master
| 2023-06-06T06:06:13.757896
| 2021-06-26T10:25:53
| 2021-06-26T10:25:53
| 373,122,343
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
import openscad as o
# Create the world
o.fragments = 100 # Increase resolution of created objects
o.sphere(10)
earth = o.result()
# Send result to OpenSCAD
o.output(earth)
|
[
"matt@lebrun.org"
] |
matt@lebrun.org
|
d2c0916ce86e3bdb13a7731d933ce8d8f2ef2832
|
f455e78c1327b47d430b025bc165aa61f24a190c
|
/payment/views.py
|
0ad6e566d9b8fd2b54c46be21695218534f2b12c
|
[
"MIT"
] |
permissive
|
FanLgchen/alipay
|
d84966b91a68df7c3dc0a7c517821a33d5f70965
|
3697aa01b260abd5c0bd7a20627076aa248b0efc
|
refs/heads/master
| 2020-06-27T19:38:58.694721
| 2019-08-01T11:22:37
| 2019-08-01T11:22:37
| 200,031,788
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,750
|
py
|
import os
from alipay import AliPay
from django import http
from django.conf import settings
from django.shortcuts import render
# Create your views here.
from django.views import View
from orders.models import OrderInfo
from payment.models import Payment
class PaymentView(View):
"""订单支付功能"""
def get(self,request,order_id):
"""查询要支付的订单"""
user = request.user
try:
order = OrderInfo.objects.get(order_id =order_id,user=user,
status=OrderInfo.ORDER_STATUS_ENUM['UNPAID'])
except OrderInfo.DoesNotExist:
            return http.HttpResponseForbidden('Invalid order information')
        # Create the Alipay payment client
alipay = AliPay(
appid=settings.ALIPAY_APPID,
            app_notify_url=None,  # default callback URL
app_private_key_path=os.path.join(os.path.dirname(os.path.abspath(__file__)), "keys/app_private_key.pem"),
alipay_public_key_path=os.path.join(os.path.dirname(os.path.abspath(__file__)),
"keys/alipay_public_key.pem"),
sign_type="RSA2",
debug=settings.ALIPAY_DEBUG
)
        # Generate the Alipay payment link
order_string = alipay.api_alipay_trade_page_pay(
out_trade_no=order_id,
total_amount=str(order.total_amount),
subject="demo%s" % order_id,
return_url=settings.ALIPAY_RETURN_URL,
)
        # Respond with the Alipay payment link
        # Production desktop gateway: https://openapi.alipay.com/gateway.do? + order_string
        # Sandbox desktop gateway: https://openapi.alipaydev.com/gateway.do? + order_string
alipay_url = settings.ALIPAY_URL + "?" + order_string
return http.JsonResponse({'code': 200,
'errmsg': 'OK',
'alipay_url': alipay_url})
class PaymentStatusView(View):
"""保存订单支付结果"""
def get(self, request):
        # Read the request parameters passed in by the front end
query_dict = request.GET
data = query_dict.dict()
        # Extract the signature and remove it from the request parameters
signature = data.pop('sign')
        # Create the Alipay payment client
alipay = AliPay(
appid=settings.ALIPAY_APPID,
app_notify_url=None,
app_private_key_path=os.path.join(os.path.dirname(os.path.abspath(__file__)), "keys/app_private_key.pem"),
alipay_public_key_path=os.path.join(os.path.dirname(os.path.abspath(__file__)), "keys/alipay_public_key.pem"),
sign_type="RSA2",
debug=settings.ALIPAY_DEBUG
)
        # Verify that this redirect really came from Alipay
success = alipay.verify(data, signature)
if success:
            # Read the order_id
order_id = data.get('out_trade_no')
            # Read the Alipay transaction number
trade_id = data.get('trade_no')
            # Save a Payment model record
Payment.objects.create(
order_id=order_id,
trade_id=trade_id
)
            # Update the order status to "awaiting review"
OrderInfo.objects.filter(order_id=order_id,
status=OrderInfo.ORDER_STATUS_ENUM['UNPAID']).update(
status=OrderInfo.ORDER_STATUS_ENUM["UNCOMMENT"])
            # Respond with the trade_id
context = {
'trade_id':trade_id
}
return render(request, 'pay_success.html', context)
else:
            # Order payment failed; reject the request
            return http.HttpResponseForbidden('Illegal request')
|
[
"1353193840@qq.com"
] |
1353193840@qq.com
|
944969d7142785b1d02e4be61ae93455c74189c5
|
52a69d6f934209c7eee9a0e18b5d9ec3206ea487
|
/이진 탐색/트리 자료구조/solution.py
|
348661d96e786f418c58aa5795a3ee6e20fc59dd
|
[] |
no_license
|
ssudev/algorithm
|
be5f43da7887fc76dbddf1fd088198913bbdc604
|
a7fa7be152a4680ce3028ab9d89b617dcac12289
|
refs/heads/main
| 2023-03-27T19:29:00.039854
| 2021-04-01T13:18:33
| 2021-04-01T13:18:33
| 333,075,252
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,027
|
py
|
# Binary search implementation (recursive)
def binary_search(array, target, start, end):
if start > end:
return None
mid = (start + end) // 2
    # Target found at the midpoint
if array[mid] == target:
return mid
    # If the target is smaller than the midpoint value, search the left half
elif array[mid] > target:
return binary_search(array, target, start, mid - 1)
else:
return binary_search(array, target, mid + 1, end)
# Binary search implementation (iterative)
def binary_search2(array, target, start, end):
while start <= end:
mid = (start+end) // 2
if array[mid] == target:
return mid
elif array[mid] > target:
end = mid - 1
else:
start = mid + 1
return None
n, target = list(map(int,input().split()))
array = list(map(int, input().split()))
result = binary_search(array, target, 0, n-1)
if result is None:
    print("Element not found.")
else:
print(result + 1)
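# Editor's sketch: the standard library offers the same lookup via bisect
# (assumes the input array is sorted ascending, as binary search requires):
#
#     from bisect import bisect_left
#     def binary_search3(array, target):
#         i = bisect_left(array, target)
#         return i if i < len(array) and array[i] == target else None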
|
[
"goodys1011@naver.com"
] |
goodys1011@naver.com
|
61bc89271f6b6bad338296fcb3a95c7e8327c146
|
06ee2f24e160904b0021907bd892249dad97f3b9
|
/mysite/urls.py
|
02d246f5220d418e8b792e362acc7f41496597b2
|
[] |
no_license
|
prasutagus/project2019
|
21c8af2be4a722d122ee8143c1c1cd870948843d
|
805974c7988056df1a5777bf9ad8131c3edafb68
|
refs/heads/master
| 2023-05-01T12:11:11.509215
| 2019-08-18T15:41:13
| 2019-08-18T15:41:13
| 202,613,071
| 0
| 0
| null | 2023-04-21T20:37:17
| 2019-08-15T21:21:17
|
Python
|
UTF-8
|
Python
| false
| false
| 902
|
py
|
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.contrib.auth import views
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/login/', views.LoginView.as_view(), name='login'),
path('', include('blog.urls')),
]
|
[
"dawheel@davidwheeler.me.uk"
] |
dawheel@davidwheeler.me.uk
|
0c71c3dd6064d4774c43f36145db41c9ed503ae6
|
84ca6b9e107ba0be61df7be4bc845efa439a5efb
|
/getUsbCam.py
|
c2f830e109d139391d823ec0962232e044e8158e
|
[] |
no_license
|
lzh107u/final_project
|
1bc168b478bf58b867d7073c448f92262c4c7b1d
|
6f1a294ca759e615cb7d0abcc8d3b6406694bc3f
|
refs/heads/main
| 2023-06-07T04:27:49.509857
| 2021-07-07T13:29:38
| 2021-07-07T13:29:38
| 357,826,808
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 923
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 17:53:01 2021
@author: user
"""
import cv2
def get_usbCam():
cap = cv2.VideoCapture( 1 )
if cap.isOpened():
ret = 1
print('camera opened successfully -- USB')
else:
ret = 0
print("Unable to open camera")
return ret, cap
def close_usbCam( cap ):
cap.release()
return
def self_test():
ret, cap = get_usbCam()
ret, img = cap.read()
cv2.namedWindow('self_test( USB )', cv2.WINDOW_NORMAL )
while( ret == 1 ):
ret, img = cap.read()
cv2.imshow( 'self_test( USB )', img )
if cv2.waitKey( 1 ) & 0xFF == ord('q'):
print('break out')
break
cv2.destroyAllWindows()
close_usbCam( cap )
return
if __name__ == "__main__":
self_test()
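# Editor's note: cv2.VideoCapture(1) opens the second enumerated camera
# (the external USB device here); pass 0 to use the default built-in one.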
|
[
"noreply@github.com"
] |
lzh107u.noreply@github.com
|
a1e247e9f710b3abc29e7fc23288a8c4a0b40ee7
|
830107f53ffc646074f5e969b72e352032903c53
|
/stacking/model_list.py
|
7f04494066620e0f7202a33a8567cb0c8d546345
|
[] |
no_license
|
HirotakeIto/titanic
|
f46b7bec21b55c15541e4f99f80dd9f9a0da0a6f
|
f9fb33d16e001763b53fb2a346b4834cae6ce2e5
|
refs/heads/master
| 2020-12-30T10:49:11.345857
| 2017-08-01T02:18:17
| 2017-08-01T02:18:17
| 98,855,584
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,113
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 24 15:22:45 2017
@author: hirotake_ito
"""
import os
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from .stacked_generalization import StackedGeneralization
file_directory = os.path.dirname(__file__)
class Generalizer:
def __init__(self):
self.model = None
def name(self):
raise NotImplementedError
def guess_partial(self, sg):
assert (isinstance(sg, StackedGeneralization))
        layer_predict = np.zeros(sg.train_target.shape)  # container for the results
for train_index, test_index in sg.skf.split(sg.train_target):
self.train(sg.train_data[train_index, :],
sg.train_target[train_index])
layer_predict[test_index] = self.predict(sg.train_data[test_index, :])
return layer_predict
def guess_whole(self, sg):
assert (isinstance(sg, StackedGeneralization))
self.guess(sg.train_data, sg.train_target)
return
def guess(self, input_data, input_target):
return self.train(input_data, input_target)
def train(self, data, taisyou):
raise NotImplementedError
def predict(self, data):
raise NotImplementedError
def score(self, x, y_true):
pred = self.predict(x)
return mean_squared_error(y_true, pred)
    @staticmethod  # no self needed; defined inside the class
def load_partial(name):
return pd.read_pickle(file_directory + '/trained_model/layer_' + 'partial_' + name )
@staticmethod
def load_whole(name):
return pd.read_pickle(file_directory + '/trained_model/layer_' + 'whole_' + name )
@staticmethod
def save_partial(name, prediction):
prediction.to_pickle(file_directory + '/trained_model/layer_' + 'partial_' + name )
@staticmethod
def save_whole(name, prediction):
prediction.to_pickle(file_directory + '/trained_model/layer_' + 'whole_' + name )
class RFRegressor(Generalizer):
def __init__(self, **argv):
super().__init__()
self.model = RandomForestRegressor(**argv)
def name(self):
return "RFRegressor"
def train(self, data, taisyou, **argv):
self.model = self.model.fit(data, taisyou)
def predict(self, data):
return self.model.predict(data)
class RFClassifier(Generalizer):
def __init__(self, **argv):
super().__init__()
self.model = RandomForestClassifier(**argv)
def name(self):
return "RFClassifier"
def train(self, data, taisyou, **argv):
self.model = self.model.fit(data, taisyou)
def predict(self, data):
return self.model.predict(data)
class RFClassifier2(Generalizer):
"""
予測値として連続の確率値を返すクラス
"""
def __init__(self, **argv):
super().__init__()
self.model = RandomForestClassifier(**argv)
def name(self):
return "RFClassifier"
def train(self, data, taisyou, **argv):
self.model = self.model.fit(data, taisyou)
def predict(self, data):
return self.model.predict_proba(data)
class GBRegressor(Generalizer):
def __init__(self, **argv):
super().__init__()
self.model = GradientBoostingRegressor(learning_rate=0.1,
max_depth=3,
random_state=0,
**argv)
def name(self):
return "GBRegressor"
def train(self, data, taisyou, ):
self.model = self.model.fit(data, taisyou)
def predict(self, data):
return self.model.predict(data)
class GBClassifier(Generalizer):
def __init__(self, **argv):
super().__init__()
self.model = GradientBoostingClassifier(**argv)
def name(self):
return "GBClassifier"
def train(self, data, taisyou, **argv):
self.model = self.model.fit(data, taisyou)
def predict(self, data):
return self.model.predict(data)
class LRRegressor(Generalizer):
def __init__(self, **argv):
super().__init__()
self.model = LinearRegression(**argv)
def name(self):
return "LRRegressor"
def train(self, data, taisyou):
self.model = self.model.fit(data, taisyou)
def predict(self, data):
return self.model.predict(data)
class LRClassifier(Generalizer):
def __init__(self, **argv):
super().__init__()
self.model = LogisticRegression(**argv)
def name(self):
return "LogisticRegression"
def train(self, data, taisyou):
self.model = self.model.fit(data, taisyou)
def predict(self, data):
return self.model.predict(data)
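# Minimal usage sketch on toy data (illustrative only; the original project
# wires these classes into StackedGeneralization instead):
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X = rng.rand(100, 4)
    y = X @ np.array([1.0, 2.0, 0.5, -1.0])
    reg = RFRegressor(n_estimators=10, random_state=0)
    reg.train(X, y)
    print(reg.name(), 'in-sample MSE:', reg.score(X, y))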
|
[
"hirotake_ito@hirotakoMacBook"
] |
hirotake_ito@hirotakoMacBook
|
ca05d30a565f6c15a8c276a7af02afb8ec61e51b
|
1f5c32624f2017139b77b49f2bb1871d412886be
|
/blogosphere/blogs/api/views/blog.py
|
45743e87053f81b9749802782db4a628f55a9fcf
|
[] |
no_license
|
ErnestoZarza/blogosphere
|
1a7de4522a9130d0fa6447019d312d7c04249fad
|
fc8381a250c8dbb267145beb8284f012719a4dc0
|
refs/heads/master
| 2020-04-17T04:38:10.112290
| 2019-01-17T14:55:22
| 2019-01-17T14:55:22
| 166,238,331
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,743
|
py
|
from rest_framework.generics import ListAPIView
from rest_framework.exceptions import ParseError
from ..serializers import BlogSerializer
from ..utils import parse_string_to_date
from ...models import Blog
def parse_date_parm(str_date):
date = parse_string_to_date(str_date)
if not date:
raise ParseError(detail='Invalid date format')
return date
class BlogListAPIView(ListAPIView):
serializer_class = BlogSerializer
def get_queryset(self):
q = self.request.query_params.get('q', None)
authors = self.request.query_params.get('authors', None)
title = self.request.query_params.get('title', None)
# updated_date = self.request.query_params.get('updated', None)
# created_date = self.request.query_params.get('created', None)
blogs = Blog.objects.published()
if authors is not None:
            blogs = blogs.prefetch_related('authors').filter(authors__slug=authors).distinct()
if q is not None:
blogs = blogs.filter(title__icontains=q)
# if updated_date and created_date:
# updated_date = parse_date_parm(updated_date)
# created_date = parse_date_parm(created_date)
# blogs = blogs.exclude(start_date__gt=to_date,
# end_date__lt=from_date).distinct()
# elif updated_date:
# updated_date = parse_date_parm(updated_date)
# blogs = blogs.exclude(created__lt=updated_date).distinct()
# elif created_date:
# created_date = parse_date_parm(created_date)
# blogs = blogs.exclude(created__gt=created_date).distinct()
return blogs
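# Example requests this view handles (illustrative URLs; the actual route
# prefix is defined in the project's urlconf):
#   GET /blogs/                   -> all published blogs
#   GET /blogs/?q=django          -> published blogs whose title contains "django"
#   GET /blogs/?authors=jane-doe  -> published blogs by the author with slug "jane-doe"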
|
[
"vero@veros-air-2.localdomain"
] |
vero@veros-air-2.localdomain
|
a433bdc7216250bafd595ead962082b48aa8f728
|
3ec1d4ce9bc0f0677fffa4688ca1ee28867d364d
|
/Assignment 4/11.py
|
f141717041c7da2dc8d0cdb2a50c028c2f30afaf
|
[] |
no_license
|
brijesh-989/ITW-lab-1
|
7d01cbf0a69334966186f7a892b297c8ec636710
|
b3a00bddf14c81f2956648d9939f0db50ef074c9
|
refs/heads/master
| 2022-07-02T18:40:11.134191
| 2020-05-11T08:40:08
| 2020-05-11T08:40:08
| 262,983,452
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
user_in=input("enter a sequence of numbers : ")
a=user_in.split(",")
for i in a:
a=int(int(i,2))
if a%5 == 0:
print(i)
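# Example run (illustrative):
#   enter a sequence of numbers : 0100,0011,1010,1001
#   1010        (binary 1010 is decimal 10, which is divisible by 5)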
|
[
"brijesh.kotaria.cse19@itbhu.ac.in"
] |
brijesh.kotaria.cse19@itbhu.ac.in
|
88c52065cf71e9d6f0543a147b2cd739d1885702
|
3f479528a34b2df9e9a001a3537761b259a613eb
|
/leds/blinking/blink_controller.py
|
b0964fbfc5c5d6aabbb799f647863f08c80ee0a6
|
[] |
no_license
|
gitu/soob
|
84e1cee0b7fbc085d6986bd6e66652508330e9f8
|
c9fc9d8f42151eac382287bae8cb4263ad41c525
|
refs/heads/master
| 2021-01-21T12:23:01.282583
| 2014-01-14T15:04:41
| 2014-01-14T15:04:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,034
|
py
|
import re
import threading
from time import sleep
import traceback
import requests
import sys
from blink_worker import BlinkWorker
COMMAND = re.compile('([^ ]*) (.*)')
http_proxy = "proxy.corproot.net:8079"
https_proxy = "proxy.corproot.net:8079"
ftp_proxy = "proxy.corproot.net:8079"
proxyDict = {
"http": http_proxy,
"https": https_proxy,
"ftp": ftp_proxy
}
class BlinkController(threading.Thread):
FADE_SPEED = 20
current_command = ""
def __init__(self, controller, blinkm):
super(BlinkController, self).__init__()
self.exit_flag = 0
self.controller = controller
self.url = controller.config.get("RedFlag", "BaseURL")
if controller.config.has_option("RedFlag", "Proxy"):
self.proxy = controller.config.get("RedFlag", "Proxy")
self.proxies = {"http": self.proxy}
else:
self.proxies = None
self.c_r = "OFF"
self.c_g = "OFF"
self.c_b = "OFF"
self.worker = BlinkWorker(blinkm)
def request_stop(self):
print "blink controller stop requested"
self.exit_flag = 1
self.worker.request_stop()
def check_state(self):
try:
r = requests.get(self.url + "store/blink/red", proxies=self.proxies)
g = requests.get(self.url + "store/blink/green", proxies=self.proxies)
b = requests.get(self.url + "store/blink/blue", proxies=self.proxies)
if r.status_code == 200:
self.c_r = r.text
if b.status_code == 200:
self.c_b = b.text
if g.status_code == 200:
self.c_g = g.text
self.worker.update(self.c_r, self.c_g, self.c_b)
except:
print "error fetching:", sys.exc_info()[0]
traceback.print_exc()
def run(self):
print("will now start "+str(self))
self.worker.start()
while not self.exit_flag:
self.check_state()
sleep(2)
self.worker.join()
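# Illustrative config this controller reads (section and option names are taken
# from the code above; the values are placeholders):
#   [RedFlag]
#   BaseURL = http://redflag.example.com/
#   Proxy = proxy.corproot.net:8079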
|
[
"TAASCFL1@U244921.corproot.net"
] |
TAASCFL1@U244921.corproot.net
|
89a8a6e319984913a3ee672e9d571ccf559a8bd6
|
1a5e8948a3ba9df36a94489ac3f560aa960f107f
|
/app.py
|
d6f2eb906a3a20e1e71461a60e751abbe0f3b19e
|
[] |
no_license
|
el1s7/sanic-api-kit
|
7bb1729b0fc122ae18f93f99bde2cde549cd9191
|
55516680fcc34121df0a18ef545919b3fd444832
|
refs/heads/master
| 2023-05-24T06:15:03.554664
| 2021-06-09T16:27:47
| 2021-06-09T16:27:47
| 375,420,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
import config, warnings, json, os
from framework import Sanic, cors, response_serializer
from api.routes import routes
from api.exceptions.handler import handler
if not config.DEBUG:
warnings.filterwarnings("ignore")
def create_app():
app = Sanic(__name__)
app.config.FALLBACK_ERROR_FORMAT = "json"
app.static('/', './public')
app.blueprint(routes)
app.blueprint(handler)
app.response_serializer(response_serializer)
cors(app, ['http://localhost:3000','http://192.168.1.11:3000'])
return app
if __name__ == '__main__':
app = create_app()
app.run(host="0.0.0.0", port=8001, debug=config.DEBUG)
|
[
"me@elis.cc"
] |
me@elis.cc
|
c264911534763ba9d0da2fddfc1dec3556016fca
|
344bd24eed76052b52d171ba7e8f603bcc4cff00
|
/Test.py
|
e977caeeaf7141877ea5a70b2eb50adfdb2008b8
|
[] |
no_license
|
Beokro/Bomber_temp
|
ca67780e82db0d11263aa5b725bc0337807b7ee8
|
66afcba50f50cd653e391705081aa5ee9ee652c5
|
refs/heads/master
| 2016-09-01T06:37:40.327363
| 2016-01-14T07:07:28
| 2016-01-14T07:07:28
| 49,550,841
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,304
|
py
|
from Player import Player
player_up1 = 'img/n_up1.png'
player_up2 = 'img/n_up2.png'
player_up3 = 'img/n_up3.png'
player_up4 = 'img/n_up4.png'
player_down1 = 'img/n_down1.png'
player_down2 = 'img/n_down2.png'
player_down3 = 'img/n_down3.png'
player_down4 = 'img/n_down4.png'
player_left1 = 'img/n_left1.png'
player_left2 = 'img/n_left2.png'
player_left3 = 'img/n_left3.png'
player_left4 = 'img/n_left4.png'
player_right1 = 'img/n_right1.png'
player_right2 = 'img/n_right2.png'
player_right3 = 'img/n_right3.png'
player_right4 = 'img/n_right4.png'
bomb_image = 'img/bomb.png'
burst_image = 'img/burst.png'
back_ground_name = 'img/Background02.jpg'
import pygame
from pygame.locals import *
from sys import exit
pygame.init()
clock = pygame.time.Clock()
screen = pygame.display.set_mode((1024,698),0,32)
background = pygame.image.load(back_ground_name).convert()
bomb = pygame.image.load(bomb_image).convert_alpha()
bomb = pygame.transform.scale(bomb, (32,32))
burst = pygame.image.load(burst_image).convert_alpha()
burst = pygame.transform.scale(burst,(64,59))
player_images = [player_up1,player_up2,player_up3,player_up4,
player_down1,player_down2,player_down3,player_down4,
player_left1,player_left2,player_left3,player_left4,
player_right1,player_right2,player_right3,player_right4]
p = Player(player_images,bomb_image,80,10,20,1,1)
bomb_queue = []
total_time = 0.05
current_time = 0.0
exploded_queue = []
while True:
time_passed = clock.tick()
time_passed_seconds = time_passed / 1000.0
current_time+=time_passed_seconds
if current_time<total_time:
continue
for event in pygame.event.get():
if event.type == QUIT:
exit()
screen.blit(background, (0,0))
for b in bomb_queue:
if b.TimePassed(current_time) == True :
screen.blit(burst,(b.GetX(),b.GetY()))
bomb_queue.remove(b)
else:
screen.blit(b.GetImage(),(b.GetX(),b.GetY()))
pressed_Key = pygame.key.get_pressed()
    # the third argument passes how much time has elapsed since the last frame
p.Action(screen,pressed_Key,current_time, bomb_queue)
#Reset current time
current_time = 0.0
pygame.display.update()
print "import success"
|
[
"xujiacao@gmail.com"
] |
xujiacao@gmail.com
|
57effa7eaad8f8c4052a25df0a1106b814d5cf4e
|
826ea45107bde5d09b4d4e53b5de5d499990c5a2
|
/myproject/myapp/setting.py
|
73b6e7ebc005c50d7d79675b551edebdb0f7b26e
|
[] |
no_license
|
xiaoqian123/wechat_lawinfo_back
|
26010aa781c043cb87f1ffc5503a8769e7c07bba
|
f15ceab89e28a0b3b7a933ebcec62a31801da2a8
|
refs/heads/master
| 2020-03-24T20:58:07.215532
| 2018-07-31T11:52:28
| 2018-07-31T11:52:28
| 143,006,413
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 132
|
py
|
DEBUG = True
SQLALCHEMY_TRACK_MODIFICATIONS = False
SECRET_KEY = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'
IP = '127.0.0.1'
PORT = 5432
|
[
"luhongxin@weinventing.com"
] |
luhongxin@weinventing.com
|
0a69bb5649f1be782306b4748f756f125bc3af55
|
63f9b6815999def091cbc72c79f4b52ceac3d285
|
/problemset/single-number-ii.py
|
aa6ce82a2df2031ec145020995f9f3713c37fd30
|
[] |
no_license
|
HuangYinhao2019/Leetcode
|
ec8ac257f05fb5ec8c0ab0ebb4d544da8c8a6c59
|
51f5044ea37674c2eb5e54c857cc4430ca077a21
|
refs/heads/master
| 2022-06-08T12:50:23.157612
| 2022-05-22T03:06:58
| 2022-05-22T03:06:58
| 225,741,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
def singleNumber(nums: [int]) -> int:
ones, twos, threes = 0, 0, 0
for num in nums:
twos |= ones & num
ones ^= num
threes = ones & twos
ones &= ~threes
twos &= ~threes
return ones
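# Illustrative check (not part of the original file). Each bit of ones/twos
# counts occurrences modulo 3, so bits seen three times are cleared and only
# the bits of the element that appears once survive in `ones`.
if __name__ == '__main__':
    print(singleNumber([2, 2, 3, 2]))            # 3
    print(singleNumber([0, 1, 0, 1, 0, 1, 99]))  # 99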
|
[
"fzuhyh@foxmail.com"
] |
fzuhyh@foxmail.com
|
338a9ce6f64240e3d3f62a72d22491de75069c32
|
e30f496cfb4ace07f1ee12391c9c2074b9e06a38
|
/chapter09/wraps_example.py
|
252ebe601959f360621497ea3a075eb3633acc42
|
[] |
no_license
|
hanhansoul/PythonCookbook
|
b7b2280e2e4323e64ca37e576e8343d29fb3116b
|
5395cd017d2509b830be6c0eda5ee04ffef5aa70
|
refs/heads/master
| 2021-01-19T20:06:32.380514
| 2018-10-19T14:51:19
| 2018-10-19T14:51:19
| 88,489,715
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,416
|
py
|
def test1():
def partial(func, *args, **kwargs):
def newfunc(*fargs, **fkwargs):
print(fkwargs)
print(fargs)
newkeywords = kwargs.copy()
newkeywords.update(fkwargs)
return func(*args, *fargs, **newkeywords)
newfunc.func = func
newfunc.args = args
newfunc.kwargs = kwargs
return newfunc
def add(x, y):
return x + y
add2 = partial(add, y=2)
add2(3)
add2(x=3)
print(add2.kwargs)
# update_wrapper
def test2():
def wrapper(f):
def wrapper_function(*args, **kwargs):
"""这个是修饰函数"""
return f(*args, **kwargs)
return wrapper_function
@wrapper
def wrapped():
"""这个是被修饰的函数"""
print("wrapped")
def func(x, y):
return x + y
res = wrapper(func)(2, 3)
print(res)
print(wrapped.__doc__)
print(wrapped.__name__)
def test3():
from functools import update_wrapper
def new_wrapper(f):
def wrapper_function(*args, **kwargs):
"""这个是修饰函数"""
return f(*args, **kwargs)
update_wrapper(wrapper_function, f)
return wrapper_function
@new_wrapper
def new_wrapped():
"""这个是被修饰的函数"""
print("new_wrapped")
print(new_wrapped.__doc__)
print(new_wrapped.__name__)
# an implementation of functools.wraps
def test4():
from functools import partial
from functools import update_wrapper
WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__qualname__', '__doc__',
'__annotations__')
WRAPPER_UPDATES = ('__dict__',)
def wraps(wrapped,
assigned=WRAPPER_ASSIGNMENTS,
updated=WRAPPER_UPDATES):
return partial(update_wrapper, wrapped=wrapped,
assigned=assigned, updated=updated)
def test5():
from functools import wraps
def wrapper(f):
@wraps(f)
def wrapper_function(*args, **kwargs):
"""这个是修饰函数"""
return f(*args, **kwargs)
return wrapper_function
@wrapper
def wrapped():
"""这个是被修饰的函数
"""
print('wrapped')
def test6():
"""
返回的是一个闭包,其中的属性值都被保存起来了。
"""
def decorator(*dargs, **dkargs):
def wrapper(func):
def _wrapper(*args, **kargs):
print("decorator param:", dargs, dkargs)
print("function param:", args, kargs)
return func(*args, **kargs)
return _wrapper
return wrapper
@decorator(1, a=2)
def foo(x, y=0):
print("foo", x, y)
foo(3, 4)
def spam(x, y):
print(x, y)
decorator(1, a=2)(spam)(3, 4)
def test7():
def d1(func):
print("d1")
def inner():
print("inner1")
func()
print("inner1")
@d1
def f1():
print("f1")
def test():
def wrapper(f):
def wrapper_function(*args, **kwargs):
print("decorator")
return f(*args, **kwargs)
return wrapper_function
@wrapper
def wrapped():
print("wrapped")
def func(x, y):
return x + y
f = func
wrapped
wrapper(wrapped)
wrapper(func)(1, 2)
g = wrapped
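# Quick illustration (not in the original file): without functools.wraps the
# decorated function's metadata is replaced by the inner wrapper's, while
# update_wrapper/wraps preserve it.
if __name__ == '__main__':
    test2()  # plain decorator: __name__ comes out as 'wrapper_function'
    test3()  # update_wrapper: __name__ stays 'new_wrapped'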
|
[
"whr2290@163.com"
] |
whr2290@163.com
|
aec38f9fa85911a5e4df6dc492c5074996b55bb8
|
398e8dfaca69a2bed6c75373604fd03c5d5a2c7c
|
/test/test_execution.py
|
60df187de4921dfa7df808302f55f1ccd66bcb13
|
[
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain",
"Unlicense"
] |
permissive
|
FiloSottile/youtube-dl
|
1805c6ebc23947a8108190d252a4559c12ef93ab
|
54410a5a4675c6db631532b882259a0ad9f7b4b2
|
refs/heads/master
| 2020-02-27T11:05:04.581383
| 2015-03-19T01:08:30
| 2015-03-19T01:08:30
| 3,597,339
| 16
| 3
| null | 2012-11-28T13:20:11
| 2012-03-01T23:31:48
|
Python
|
UTF-8
|
Python
| false
| false
| 884
|
py
|
#!/usr/bin/env python
from __future__ import unicode_literals
import unittest
import sys
import os
import subprocess
rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
try:
_DEV_NULL = subprocess.DEVNULL
except AttributeError:
_DEV_NULL = open(os.devnull, 'wb')
class TestExecution(unittest.TestCase):
def test_import(self):
subprocess.check_call([sys.executable, '-c', 'import youtube_dl'], cwd=rootDir)
def test_module_exec(self):
if sys.version_info >= (2, 7): # Python 2.6 doesn't support package execution
subprocess.check_call([sys.executable, '-m', 'youtube_dl', '--version'], cwd=rootDir, stdout=_DEV_NULL)
def test_main_exec(self):
subprocess.check_call([sys.executable, 'youtube_dl/__main__.py', '--version'], cwd=rootDir, stdout=_DEV_NULL)
if __name__ == '__main__':
unittest.main()
|
[
"phihag@phihag.de"
] |
phihag@phihag.de
|
fe2e9d59826cefe4edb9ed21cb0ebe071ef51424
|
418c5dffb9564e5011583945632733c4c3ea795f
|
/check_profanity.py
|
9dbd062085c9be5d0b6c4b85bc27074e74966749
|
[] |
no_license
|
sayeefasrar/Python-Practise
|
ac6bb3c9abfd7c35695023af6fcb67d2fd62c7fa
|
e94cccd8da528d6ce589f8fcec38406ff3b46303
|
refs/heads/master
| 2020-03-15T06:42:48.927832
| 2018-05-03T15:35:01
| 2018-05-03T15:35:01
| 132,013,609
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
import urllib
def read_text():
quotes=open("C:\Users\Sayeef\Downloads\movie_quotes.txt")
contents_of_file=quotes.read()
#print(contents_of_file)
quotes.close()
check_profanity(contents_of_file)
def check_profanity(text_to_check):
connection= urllib.urlopen("http://www.wdylike.appspot.com/?q="+text_to_check)
output=connection.read()
print (output)
connection.close()
read_text()
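# Illustrative behaviour: the service replies with a plain-text boolean, so
# check_profanity("shoe") prints something like "false". Note this file is
# Python 2; in Python 3 the equivalent call is urllib.request.urlopen, and
# text_to_check should be URL-encoded first.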
|
[
"noreply@github.com"
] |
sayeefasrar.noreply@github.com
|
f505c5fd3e7f5483ea91f9324fe35a5cabe69866
|
6aa909293ac7c1ba545850a1a049d6c918e58c3b
|
/637.二叉树的层平均值.py
|
c06e4c77ef41d738d67b86a76690499f63ed8e4e
|
[] |
no_license
|
Koweiyi/LC_cpp
|
1d4fffcbadeec9aaf4725a2e408f75aef45c529a
|
2b0b5e0d9fd5c17cb4cd7887b07361c63ed624de
|
refs/heads/master
| 2023-04-27T13:47:33.678559
| 2020-07-22T13:31:32
| 2020-07-22T13:31:32
| 272,361,803
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,248
|
py
|
#
# @lc app=leetcode.cn id=637 lang=python
#
# [637] Average of Levels in Binary Tree
#
# https://leetcode-cn.com/problems/average-of-levels-in-binary-tree/description/
#
# algorithms
# Easy (64.56%)
# Likes: 121
# Dislikes: 0
# Total Accepted: 19.8K
# Total Submissions: 30.7K
# Testcase Example: '[3,9,20,15,7]'
#
# Given a non-empty binary tree, return an array of the average values of the
# nodes on each level.
#
# Example 1:
#
# Input:
#     3
#    / \
#   9  20
#     /  \
#    15   7
# Output: [3, 14.5, 11]
# Explanation:
# The average value on level 0 is 3, on level 1 it is 14.5, and on level 2 it
# is 11. Hence return [3, 14.5, 11].
#
#
# Note:
#
#
# The range of node values is within the range of a 32-bit signed integer.
#
#
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None
class Solution(object):
def averageOfLevels(self, root):
"""
:type root: TreeNode
:rtype: List[float]
"""
ans = []
lvl = [root]
while lvl:
ans.append(sum(n.val for n in lvl) / float(len(lvl)))
lvl = [c for n in lvl for c in [n.left, n.right] if c]
return ans
# @lc code=end
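# Illustrative check with a hand-built tree (TreeNode is normally supplied by
# the judge, so a minimal version is defined inline here):
if __name__ == '__main__':
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None
    root = TreeNode(3)
    root.left = TreeNode(9)
    root.right = TreeNode(20)
    root.right.left = TreeNode(15)
    root.right.right = TreeNode(7)
    print(Solution().averageOfLevels(root))  # [3.0, 14.5, 11.0]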
|
[
"1423376854@qq.com"
] |
1423376854@qq.com
|
6ee78e6c1923586c9bb497d5c1aee59dbe98ffaa
|
5eda736d304eefe68d2a620d1859a250ba2a5398
|
/training/keras-tuner.py
|
17c180d4fc635b54fe970d87b0850550d44f19f9
|
[] |
no_license
|
sammiee5311/facial_expression
|
1fab407218f349c484f4edb49f9c85270d760c82
|
531d0f0dd8ad23902db6e5f6a50f39e0fb6fba56
|
refs/heads/main
| 2023-02-19T13:26:05.784547
| 2021-01-21T11:52:16
| 2021-01-21T11:52:16
| 325,938,635
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,972
|
py
|
import keras
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Activation
from keras.optimizers import Adam
from kerastuner.tuners import RandomSearch
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import numpy as np
import glob
import cv2
from keras.utils import np_utils
path = './data/train/'
dataset = []
labels = []
names = ['angry', 'disgust', 'fear', 'happy', 'neutral', 'sad', 'surprise']
for idx, name in enumerate(names):
    print("%d of %d" % (idx + 1, len(names)))
    for f in glob.glob(path + str(name) + '/*.jpg'):
        img = f.split('/')
        img = img[-1].split('\\')
        image = cv2.imread(path + str(name) + '/' + img[-1])
image = image.astype('float32') / 255.0
dataset.append(image)
labels.append(name)
e = LabelEncoder()
e.fit(labels)
labels = e.transform(labels)
labels = np_utils.to_categorical(labels)
### split train, test set ###
X_train, X_test, y_train, y_test = train_test_split(dataset, labels, test_size=0.2, random_state=1)
### make data into numpy array ###
X_train = np.asarray(X_train)
X_test = np.asarray(X_test)
### build sequential model###
def build_model(hp):
model = keras.models.Sequential()
model.add(
Conv2D(hp.Int("input_units", min_value=32, max_value=512, step=32), (3, 3), input_shape=X_train.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
for i in range(hp.Int("n_layers", min_value=1, max_value=4)): # if there is no step, +1
model.add(Conv2D(hp.Int("conv_%d_units" % i, min_value=32, max_value=512, step=32), (3, 3)))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(7))
model.add(Activation("softmax"))
model.compile(optimizer=Adam(learning_rate=hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])),
loss="categorical_crossentropy",
metrics=["accuracy"])
return model
tuner = RandomSearch(
build_model,
objective="val_accuracy",
max_trials=15, # how many times change the model randomly
executions_per_trial=1 # how many times to train the model selected
)
tuner.search(x=X_train,
y=y_train,
epochs=20,
batch_size=64,
validation_data=(X_test, y_test))
best_hps = tuner.get_best_hyperparameters(num_trials=1)[0] # save hyperparameters when the val_accuracy is highest.
print("The hyperparameter search is complete. The optimal number of units in the first densely-connected layer is %d, "
"the number of layers is %d and the optimal learning rate for the optimizer is %f." % (
best_hps.get('input_units'),
best_hps.get('learning_rate'),
best_hps.get('n_layers'))) #
model = tuner.hypermodel.build(best_hps) # save the model the best model among best_hps
model.save('save.h5')
model.fit(X_train, y_train, batch_size=64, epochs=5, validation_data = (X_test, y_test))
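# To reuse the tuned model later (illustrative; 'save.h5' is the path used above):
# from keras.models import load_model
# model = load_model('save.h5')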
|
[
"56215891+sammiee5311@users.noreply.github.com"
] |
56215891+sammiee5311@users.noreply.github.com
|
a779438d9f1e5f4c49e407cf796ec1c6d082c56a
|
c31b220c048fdf43dc6e4a0057f743590ee4bc4d
|
/fpga/lib/pcie/example/common/tb/example_core_pcie/test_example_core_pcie.py
|
d1d7c13decdd8cae2da7a63443569088a4700e6c
|
[
"MIT"
] |
permissive
|
warikap/dma-bench
|
47f3440ff70e939a38cfde4143c6633d514c4f7e
|
b2052254a94d1a20266c4554401647803c870fb9
|
refs/heads/master
| 2023-09-05T18:34:49.552496
| 2021-11-14T22:11:35
| 2021-11-14T22:11:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,227
|
py
|
"""
Copyright (c) 2021 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
import os
import sys
import cocotb_test.simulator
import pytest
import cocotb
from cocotb.log import SimLog
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge, Timer
from cocotbext.pcie.core import RootComplex
from cocotbext.axi.utils import hexdump_str
try:
from pcie_if import PcieIfDevice, PcieIfRxBus, PcieIfTxBus
except ImportError:
# attempt import from current directory
sys.path.insert(0, os.path.join(os.path.dirname(__file__)))
try:
from pcie_if import PcieIfDevice, PcieIfRxBus, PcieIfTxBus
finally:
del sys.path[0]
class TB(object):
def __init__(self, dut):
self.dut = dut
self.log = SimLog("cocotb.tb")
self.log.setLevel(logging.DEBUG)
cocotb.fork(Clock(dut.clk, 4, units="ns").start())
# PCIe
self.rc = RootComplex()
self.dev = PcieIfDevice(
clk=dut.clk,
rst=dut.rst,
rx_req_tlp_bus=PcieIfRxBus.from_prefix(dut, "rx_req_tlp"),
tx_cpl_tlp_bus=PcieIfTxBus.from_prefix(dut, "tx_cpl_tlp"),
tx_wr_req_tlp_bus=PcieIfTxBus.from_prefix(dut, "tx_wr_req_tlp"),
wr_req_tx_seq_num=dut.s_axis_wr_req_tx_seq_num,
wr_req_tx_seq_num_valid=dut.s_axis_wr_req_tx_seq_num_valid,
tx_rd_req_tlp_bus=PcieIfTxBus.from_prefix(dut, "tx_rd_req_tlp"),
rd_req_tx_seq_num=dut.s_axis_rd_req_tx_seq_num,
rd_req_tx_seq_num_valid=dut.s_axis_rd_req_tx_seq_num_valid,
cfg_max_payload=dut.max_payload_size,
rx_cpl_tlp_bus=PcieIfRxBus.from_prefix(dut, "rx_cpl_tlp"),
cfg_max_read_req=dut.max_read_request_size,
cfg_ext_tag_enable=dut.ext_tag_enable,
tx_fc_ph_av=dut.pcie_tx_fc_ph_av,
tx_fc_pd_av=dut.pcie_tx_fc_pd_av,
tx_fc_nph_av=dut.pcie_tx_fc_nph_av,
)
self.dev.log.setLevel(logging.DEBUG)
self.rc.make_port().connect(self.dev)
self.dev.functions[0].msi_multiple_message_capable = 5
self.dev.functions[0].configure_bar(0, 2**24)
self.dev.functions[0].configure_bar(2, 2**24)
dut.bus_num.setimmediatevalue(0)
# monitor error outputs
self.status_error_cor_asserted = False
self.status_error_uncor_asserted = False
cocotb.fork(self._run_monitor_status_error_cor())
cocotb.fork(self._run_monitor_status_error_uncor())
async def _run_monitor_status_error_cor(self):
while True:
await RisingEdge(self.dut.status_error_cor)
self.log.info("status_error_cor (correctable error) was asserted")
self.status_error_cor_asserted = True
async def _run_monitor_status_error_uncor(self):
while True:
await RisingEdge(self.dut.status_error_uncor)
self.log.info("status_error_uncor (uncorrectable error) was asserted")
self.status_error_uncor_asserted = True
async def cycle_reset(self):
self.dut.rst.setimmediatevalue(0)
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
self.dut.rst <= 1
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
self.dut.rst <= 0
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
@cocotb.test()
async def run_test(dut):
tb = TB(dut)
await tb.cycle_reset()
await tb.rc.enumerate(enable_bus_mastering=True, configure_msi=True)
mem_base, mem_data = tb.rc.alloc_region(16*1024*1024)
dev_pf0_bar0 = tb.rc.tree[0][0].bar_addr[0]
dev_pf0_bar2 = tb.rc.tree[0][0].bar_addr[2]
tb.dut.bus_num <= tb.dev.bus_num
tb.log.info("Test memory write to BAR 2")
await tb.rc.mem_write(dev_pf0_bar2, b'\x11\x22\x33\x44')
await Timer(100, 'ns')
tb.log.info("Test memory read from BAR 2")
val = await tb.rc.mem_read(dev_pf0_bar2, 4, 1000)
tb.log.info("Read data: %s", val)
assert val == b'\x11\x22\x33\x44'
tb.log.info("Test DMA")
# write packet data
mem_data[0:1024] = bytearray([x % 256 for x in range(1024)])
# enable DMA
await tb.rc.mem_write_dword(dev_pf0_bar0+0x000000, 1)
# enable interrupts
await tb.rc.mem_write_dword(dev_pf0_bar0+0x000008, 0x3)
# write pcie read descriptor
await tb.rc.mem_write_dword(dev_pf0_bar0+0x000100, (mem_base+0x0000) & 0xffffffff)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x000104, (mem_base+0x0000 >> 32) & 0xffffffff)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x000108, 0x100)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x000110, 0x400)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x000114, 0xAA)
await Timer(2000, 'ns')
# read status
val = await tb.rc.mem_read_dword(dev_pf0_bar0+0x000118)
tb.log.info("Status: 0x%x", val)
assert val == 0x800000AA
# write pcie write descriptor
await tb.rc.mem_write_dword(dev_pf0_bar0+0x000200, (mem_base+0x1000) & 0xffffffff)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x000204, (mem_base+0x1000 >> 32) & 0xffffffff)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x000208, 0x100)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x000210, 0x400)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x000214, 0x55)
await Timer(2000, 'ns')
# read status
    val = await tb.rc.mem_read_dword(dev_pf0_bar0+0x000218)
tb.log.info("Status: 0x%x", val)
assert val == 0x80000055
tb.log.info("%s", hexdump_str(mem_data, 0x1000, 64))
assert mem_data[0:1024] == mem_data[0x1000:0x1000+1024]
tb.log.info("Test DMA block operations")
# write packet data
mem_data[0:1024] = bytearray([x % 256 for x in range(1024)])
# enable DMA
await tb.rc.mem_write_dword(dev_pf0_bar0+0x000000, 1)
# disable interrupts
await tb.rc.mem_write_dword(dev_pf0_bar0+0x000008, 0)
# configure operation (read)
# DMA base address
await tb.rc.mem_write_dword(dev_pf0_bar0+0x001080, (mem_base+0x0000) & 0xffffffff)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x001084, (mem_base+0x0000 >> 32) & 0xffffffff)
# DMA offset address
await tb.rc.mem_write_dword(dev_pf0_bar0+0x001088, 0)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x00108c, 0)
# DMA offset mask
await tb.rc.mem_write_dword(dev_pf0_bar0+0x001090, 0x000003ff)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x001094, 0)
# DMA stride
await tb.rc.mem_write_dword(dev_pf0_bar0+0x001098, 256)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x00109c, 0)
# RAM base address
await tb.rc.mem_write_dword(dev_pf0_bar0+0x0010c0, 0)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x0010c4, 0)
# RAM offset address
await tb.rc.mem_write_dword(dev_pf0_bar0+0x0010c8, 0)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x0010cc, 0)
# RAM offset mask
await tb.rc.mem_write_dword(dev_pf0_bar0+0x0010d0, 0x000003ff)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x0010d4, 0)
# RAM stride
await tb.rc.mem_write_dword(dev_pf0_bar0+0x0010d8, 256)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x0010dc, 0)
# clear cycle count
await tb.rc.mem_write_dword(dev_pf0_bar0+0x001008, 0)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x00100c, 0)
# block length
await tb.rc.mem_write_dword(dev_pf0_bar0+0x001010, 256)
# block count
await tb.rc.mem_write_dword(dev_pf0_bar0+0x001018, 32)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x00101c, 0)
# start
await tb.rc.mem_write_dword(dev_pf0_bar0+0x001000, 1)
await Timer(2000, 'ns')
# configure operation (write)
# DMA base address
await tb.rc.mem_write_dword(dev_pf0_bar0+0x001180, (mem_base+0x0000) & 0xffffffff)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x001184, (mem_base+0x0000 >> 32) & 0xffffffff)
# DMA offset address
await tb.rc.mem_write_dword(dev_pf0_bar0+0x001188, 0)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x00118c, 0)
# DMA offset mask
await tb.rc.mem_write_dword(dev_pf0_bar0+0x001190, 0x000003ff)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x001194, 0)
# DMA stride
await tb.rc.mem_write_dword(dev_pf0_bar0+0x001198, 256)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x00119c, 0)
# RAM base address
await tb.rc.mem_write_dword(dev_pf0_bar0+0x0011c0, 0)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x0011c4, 0)
# RAM offset address
await tb.rc.mem_write_dword(dev_pf0_bar0+0x0011c8, 0)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x0011cc, 0)
# RAM offset mask
await tb.rc.mem_write_dword(dev_pf0_bar0+0x0011d0, 0x000003ff)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x0011d4, 0)
# RAM stride
await tb.rc.mem_write_dword(dev_pf0_bar0+0x0011d8, 256)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x0011dc, 0)
# clear cycle count
await tb.rc.mem_write_dword(dev_pf0_bar0+0x001108, 0)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x00110c, 0)
# block length
await tb.rc.mem_write_dword(dev_pf0_bar0+0x001110, 256)
# block count
await tb.rc.mem_write_dword(dev_pf0_bar0+0x001118, 32)
await tb.rc.mem_write_dword(dev_pf0_bar0+0x00111c, 0)
# start
await tb.rc.mem_write_dword(dev_pf0_bar0+0x001100, 1)
await Timer(2000, 'ns')
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
# cocotb-test
tests_dir = os.path.dirname(__file__)
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
pcie_rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', '..', '..', 'rtl'))
@pytest.mark.parametrize("pcie_data_width", [64, 128, 256, 512])
def test_example_core_pcie(request, pcie_data_width):
dut = "example_core_pcie"
module = os.path.splitext(os.path.basename(__file__))[0]
toplevel = dut
verilog_sources = [
os.path.join(rtl_dir, f"{dut}.v"),
os.path.join(rtl_dir, "example_core.v"),
os.path.join(rtl_dir, "axi_ram.v"),
os.path.join(pcie_rtl_dir, "pcie_axil_master.v"),
os.path.join(pcie_rtl_dir, "pcie_axi_master.v"),
os.path.join(pcie_rtl_dir, "pcie_axi_master_rd.v"),
os.path.join(pcie_rtl_dir, "pcie_axi_master_wr.v"),
os.path.join(pcie_rtl_dir, "pcie_tlp_demux_bar.v"),
os.path.join(pcie_rtl_dir, "pcie_tlp_demux.v"),
os.path.join(pcie_rtl_dir, "pcie_tlp_mux.v"),
os.path.join(pcie_rtl_dir, "dma_if_pcie.v"),
os.path.join(pcie_rtl_dir, "dma_if_pcie_rd.v"),
os.path.join(pcie_rtl_dir, "dma_if_pcie_wr.v"),
os.path.join(pcie_rtl_dir, "dma_psdpram.v"),
os.path.join(pcie_rtl_dir, "arbiter.v"),
os.path.join(pcie_rtl_dir, "priority_encoder.v"),
os.path.join(pcie_rtl_dir, "pulse_merge.v"),
]
parameters = {}
# segmented interface parameters
tlp_seg_count = 1
tlp_seg_data_width = pcie_data_width // tlp_seg_count
tlp_seg_strb_width = tlp_seg_data_width // 32
parameters['TLP_SEG_COUNT'] = tlp_seg_count
parameters['TLP_SEG_DATA_WIDTH'] = tlp_seg_data_width
parameters['TLP_SEG_STRB_WIDTH'] = tlp_seg_strb_width
parameters['TLP_SEG_HDR_WIDTH'] = 128
parameters['TX_SEQ_NUM_COUNT'] = 1
parameters['TX_SEQ_NUM_WIDTH'] = 6
parameters['TX_SEQ_NUM_ENABLE'] = 1
parameters['PCIE_TAG_COUNT'] = 256
parameters['READ_OP_TABLE_SIZE'] = parameters['PCIE_TAG_COUNT']
parameters['READ_TX_LIMIT'] = 2**parameters['TX_SEQ_NUM_WIDTH']
parameters['READ_TX_FC_ENABLE'] = 1
parameters['WRITE_OP_TABLE_SIZE'] = 2**parameters['TX_SEQ_NUM_WIDTH']
parameters['WRITE_TX_LIMIT'] = 2**parameters['TX_SEQ_NUM_WIDTH']
parameters['WRITE_TX_FC_ENABLE'] = 1
parameters['TLP_FORCE_64_BIT_ADDR'] = 0
parameters['CHECK_BUS_NUMBER'] = 1
parameters['BAR0_APERTURE'] = 24
parameters['BAR2_APERTURE'] = 24
extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}
sim_build = os.path.join(tests_dir, "sim_build",
request.node.name.replace('[', '-').replace(']', ''))
cocotb_test.simulator.run(
python_search=[tests_dir],
verilog_sources=verilog_sources,
toplevel=toplevel,
module=module,
parameters=parameters,
sim_build=sim_build,
extra_env=extra_env,
)
|
[
"alex@alexforencich.com"
] |
alex@alexforencich.com
|
5245ea852db9e15d331057dddf622d4b030e2841
|
4e59f67ba08eb31274cca3501324a497bc5e1345
|
/empty/Scripts/pip3-script.py
|
c691ad8b2970ccecfc227749d8446fef767ce052
|
[] |
no_license
|
AtiehBaratinia/hw1_dataminig
|
9d1a2b27d803d29ec8407096de1999f19d799192
|
048621afff19663dcf01fbd0bdcba1b774e7a9cc
|
refs/heads/master
| 2023-01-25T04:10:46.015109
| 2020-12-03T17:49:56
| 2020-12-03T17:49:56
| 318,274,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
#!"J:\Atieh\University\data mining\homeworks\HW1\code\empty\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
|
[
"atiehbaratinia@gmail.com"
] |
atiehbaratinia@gmail.com
|
6d55d19cd56b983ff3a10eabf3cf87013c1bd3bf
|
64b032a0d06fa0d548f8ac62dde16049930880ac
|
/{{cookiecutter.app_name}}/app/commands/__init__.py
|
e27f6877835868580d78d9964025b0cab52e0a89
|
[] |
no_license
|
mukund-kri/flask-sqlalchemy-f5-template
|
f8d4109035b7180ea7341ddf6f52e95bb5a08321
|
239506fc319ab59d7c53bcf7faa03302654e1dbd
|
refs/heads/master
| 2021-01-18T22:47:01.792917
| 2016-04-01T06:31:29
| 2016-04-01T06:31:29
| 26,954,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 78
|
py
|
from .sampledata import SampleDataCommand
from .testing import TestingCommand
|
[
"mukund@eng.knowlarity.com"
] |
mukund@eng.knowlarity.com
|
7f8559f0ab7a9ee5df2784bd5f888ee08cc435d6
|
f6aaa5cef433d7eca574dbcf59de4f93bb3ae2c1
|
/UI_Automation/WebApp/DesktopApp/QAValidationReport/venv/Scripts/pip3-script.py
|
debbe7530b3e3ddd28ccb76168ccc49a4a5b35ad
|
[] |
no_license
|
chandrabt12/dhc
|
f58ca3000ac777cc8fc585ebda2a698b82cc9a2f
|
0fe701427fa556f2a6cee21de04d05e6d35e4625
|
refs/heads/master
| 2022-12-13T12:13:21.287591
| 2020-08-28T14:56:02
| 2020-08-28T14:56:02
| 290,175,491
| 0
| 0
| null | 2020-08-26T05:51:16
| 2020-08-25T09:43:22
|
Python
|
UTF-8
|
Python
| false
| false
| 402
|
py
|
#!D:\AutoScripts\QAValidation\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
|
[
"CThimmaiah@ad.harman.com"
] |
CThimmaiah@ad.harman.com
|
7e6a73ffd96f73a6aefa965b067ec13b1500931d
|
9d23f1a0fdaf34f4b3b2ee144acf5d0b01b203d8
|
/runningroutes/views/admin/sysinfo.py
|
b12a16acf2190d3b9e1ce92d90237c87279e39cf
|
[] |
no_license
|
louking/runningroutes
|
999de5b9fa0cdd2da125dd6b027669f1d1380c7a
|
806b1b4a8535021aff87cd38b6a30d0152e63069
|
refs/heads/master
| 2023-02-19T09:50:50.226580
| 2022-11-26T19:29:52
| 2022-11-26T19:29:52
| 225,959,338
| 3
| 1
| null | 2023-02-16T05:57:59
| 2019-12-04T21:21:35
|
Python
|
UTF-8
|
Python
| false
| false
| 6,257
|
py
|
###########################################################################################
# sysinfo - debug views for web application
#
# Date Author Reason
# ---- ------ ------
# 07/08/18 Lou King Create
#
# Copyright 2018 Lou King
#
###########################################################################################
# standard
# pypi
from flask import current_app, make_response, request, render_template, session
from flask.views import MethodView
from flask_security import roles_accepted
from loutilities.flask_helpers.blueprints import add_url_rules
# home grown
from . import bp
from ...models import db
from ...version import __version__
from ...version import __docversion__
adminguide = f'https://runningroutes.readthedocs.io/en/{__docversion__}/admin-guide.html'
class testException(Exception): pass
thisversion = __version__
#######################################################################
class ViewSysinfo(MethodView):
#######################################################################
# decorators = [lambda f: roles_accepted('super-admin', 'event-admin')(f)]
url_rules = {
'sysinfo': ['/sysinfo',('GET',)],
}
#----------------------------------------------------------------------
def get(self):
#----------------------------------------------------------------------
try:
# commit database updates and close transaction
db.session.commit()
return render_template(
'sysinfo.jinja2',
pagename='About',
adminguide=adminguide,
version=thisversion
)
except:
# roll back database updates and close transaction
db.session.rollback()
raise
#----------------------------------------------------------------------
add_url_rules(bp, ViewSysinfo)
# sysinfo_view = roles_accepted('super-admin', 'event-admin')(ViewSysinfo.as_view('sysinfo'))
# current_app.add_url_rule('/sysinfo',view_func=sysinfo_view,methods=['GET'])
#----------------------------------------------------------------------
#######################################################################
class ViewDebug(MethodView):
#######################################################################
decorators = [lambda f: roles_accepted('super-admin')(f)]
url_rules = {
'debug': ['/_debuginfo',('GET',)],
}
#----------------------------------------------------------------------
def get(self):
#----------------------------------------------------------------------
try:
appconfigpath = getattr(current_app,'configpath','<not set>')
appconfigtime = getattr(current_app,'configtime','<not set>')
# collect groups of system variables
sysvars = []
# collect current_app.config variables
configkeys = list(current_app.config.keys())
configkeys.sort()
appconfig = []
for key in configkeys:
value = current_app.config[key]
if True: # maybe check for something else later
if key in ['SQLALCHEMY_DATABASE_URI', 'SECRET_KEY',
'GOOGLE_OAUTH_CLIENT_ID', 'GOOGLE_OAUTH_CLIENT_SECRET',
'GMAPS_API_KEY', 'GMAPS_ELEV_API_KEY',
'SECURITY_PASSWORD_SALT',
'APP_OWNER_PW']:
value = '[obscured]'
appconfig.append({'label':key, 'value':value})
sysvars.append(['current_app.config',appconfig])
# collect flask.session variables
sessionkeys = list(session.keys())
sessionkeys.sort()
sessionconfig = []
for key in sessionkeys:
value = session[key]
sessionconfig.append({'label':key, 'value':value})
sysvars.append(['flask.session',sessionconfig])
# commit database updates and close transaction
db.session.commit()
return render_template(
'sysinfo.jinja2',
pagename='Debug',
version=thisversion,
adminguide=adminguide,
configpath=appconfigpath,
configtime=appconfigtime,
sysvars=sysvars,
# owner=owner_permission.can(),
inhibityear=True,
inhibitclub=True
)
except:
# roll back database updates and close transaction
db.session.rollback()
raise
#----------------------------------------------------------------------
add_url_rules(bp, ViewDebug)
# debuginfo_view = roles_accepted('super-admin')(ViewDebug.as_view('debug'))
# # debuginfo_view = ViewDebug.as_view('debug')
# current_app.add_url_rule('/_debuginfo',view_func=debuginfo_view,methods=['GET'])
#----------------------------------------------------------------------
#######################################################################
class TestException(MethodView):
#######################################################################
    decorators = [lambda f: roles_accepted('super-admin')(f)]
url_rules = {
'testexception': ['/xcauseexception',('GET',)],
}
#----------------------------------------------------------------------
def get(self):
#----------------------------------------------------------------------
try:
raise testException
except:
# roll back database updates and close transaction
db.session.rollback()
raise
#----------------------------------------------------------------------
# exception_view = roles_accepted('super-admin')(TestException.as_view('testexception'))
# current_app.add_url_rule('/xcauseexception',view_func=exception_view,methods=['GET'])
#----------------------------------------------------------------------
|
[
"lking@pobox.com"
] |
lking@pobox.com
|
c44e993f76a3560b99be44a9c4860faf80a3f250
|
dba32e48056454df6a3ce5755d586569092eba6e
|
/pybluetooth/pyusb_bt_sockets.py
|
595c3a819bcb0204ae463f600db32a879c242da9
|
[
"MIT"
] |
permissive
|
notmikeb/pystack
|
dff79a7a916ceea6c756714245b5ffb66cf38742
|
773a5cecaf6fa4806cfaa6c4f4f6c422bc941bc5
|
refs/heads/master
| 2020-12-24T07:00:03.651276
| 2016-11-10T15:10:20
| 2016-11-10T15:10:20
| 73,384,555
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,219
|
py
|
import binascii
import errno
import logging
import sys
import usb.core
import usb.util
from scapy.layers.bluetooth import *
from scapy.supersocket import SuperSocket
# See BT 4.2 Spec, Vol 4, Part B, "USB Transport Layer".
# Used for "Single Function Primary Controller" devices:
USB_DEVICE_CLASS_WIRELESS_CONTROLLER = 0xFF
USB_DEVICE_SUB_CLASS_RF_CONTROLLER = 0xBB
USB_DEVICE_PROTOCOL_BLUETOOTH = 0xBB
# Used for composite devices:
USB_DEVICE_CLASS_MISCELLANEOUS = 0xEF
USB_DEVICE_SUB_CLASS_COMMON_CLASS = 0x01 # change from 0x02
USB_DEVICE_PROTOCOL_IAD = 0x01
USB_ENDPOINT_HCI_CMD = 0x00
USB_ENDPOINT_HCI_EVT = 0x81
USB_HCI_CMD_REQUEST_PARAMS = {
"bmRequestType": 0x20, "bRequest": 0x00, "wValue": 0x00, "wIndex": 0x00
}
LOG = logging.getLogger("pybluetooth")
class PyUSBBluetoothUserSocketException(Exception):
pass
class PyUSBBluetoothL2CAPSocket(SuperSocket):
desc = "Read/write Bluetooth L2CAP with pyUSB"
def __init__(self, pyusb_dev):
raise Exception("NYI")
class PyUSBBluetoothHCISocket(SuperSocket):
desc = "Read/write Bluetooth HCI with pyUSB"
def __init__(self, pyusb_dev):
self.pyusb_dev = pyusb_dev
# Drain any data that was already pending:
#while self.recv(timeout_secs=0.001):
# pass
def __del__(self):
# Always try to do a HCI Reset to stop any on-going
# Bluetooth activity:
try:
self.hci_reset()
except:
pass
# Release the device, so it can be claimed again immediately when
# this object gets free'd.
try:
usb.util.dispose_resources(self.pyusb_dev)
except:
LOG.warn("Couldn't dispose %s" % self.pyusb_dev)
pass
def hci_reset(self):
self.send(HCI_Hdr() / HCI_Command_Hdr() / HCI_Cmd_Reset())
def recv(self, x=512, timeout_secs=10.0):
# FIXME: Don't know how many bytes to expect here,
# using 512 bytes -- will this fly if there's another event right
# after it? Or is each event guaranteed to be put in a USB packet of
# its own?
try:
data_array = self.pyusb_dev.read(
USB_ENDPOINT_HCI_EVT, 512, int(timeout_secs * 1000.0))
except usb.core.USBError as e:
if e.errno == errno.ETIMEDOUT or e.errno == None: # daylong add None check
return None
else:
print(repr(e), type(e), e.errno)
raise e
data = ''.join([chr(c) for c in data_array]) # Ugh.. array return val
data = "\4" + data # Prepend H4 'Event' packet indicator
scapy_packet = HCI_Hdr(data)
LOG.debug("recv %s" % scapy_packet.lastlayer().summary())
LOG.debug("recv bytes: " + binascii.hexlify(data))
return scapy_packet
def send(self, scapy_packet):
data = str(scapy_packet)
LOG.debug("send %s" % scapy_packet.lastlayer().summary())
LOG.debug("send bytes: " + binascii.hexlify(data))
        data = data[1:]  # Cut off the H4 'Command' packet indicator (0x01)
sent_len = self.pyusb_dev.ctrl_transfer(
data_or_wLength=data, **USB_HCI_CMD_REQUEST_PARAMS)
l = len(data)
if sent_len != l:
raise PyUSBBluetoothUserSocketException(
"Send failure. Sent %u instead of %u bytes" % (sent_len, l))
def find_all_bt_adapters():
def bt_adapter_matcher(d):
print d
print d.bDeviceClass, " ", d.bDeviceSubClass, " ", d.bDeviceProtocol
# Check if the device is a "Single Function Primary Controller":
if (d.bDeviceClass == USB_DEVICE_CLASS_WIRELESS_CONTROLLER and
d.bDeviceSubClass == USB_DEVICE_SUB_CLASS_RF_CONTROLLER and
d.bDeviceProtocol == USB_DEVICE_PROTOCOL_BLUETOOTH):
return True
# Check if it's a composite device:
if not (d.bDeviceClass == USB_DEVICE_CLASS_MISCELLANEOUS and
d.bDeviceSubClass == USB_DEVICE_SUB_CLASS_COMMON_CLASS and
d.bDeviceProtocol == USB_DEVICE_PROTOCOL_IAD):
return False
for cfg in d:
bt_intf_descr = {
"bInterfaceClass": USB_DEVICE_CLASS_WIRELESS_CONTROLLER,
"bInterfaceSubClass": USB_DEVICE_SUB_CLASS_RF_CONTROLLER,
"bInterfaceProtocol": USB_DEVICE_PROTOCOL_BLUETOOTH,
}
intf = usb.util.find_descriptor(cfg, **bt_intf_descr)
if intf is not None:
return True
return False
devs = set()
matchers = [CUSTOM_USB_DEVICE_MATCHER, bt_adapter_matcher]
for matcher in matchers:
if not matcher:
continue
devs |= set(usb.core.find(find_all=True, custom_match=matcher))
# Unfortunately, usb.core.Device doesn't implement __eq__(),
# see https://github.com/walac/pyusb/issues/147.
# So filter out dupes here:
devs_deduped = set(devs)
for d in devs:
for dd in devs:
if d == dd:
continue
if d not in devs_deduped:
continue
if d.bus == dd.bus and d.address == dd.address:
devs_deduped.remove(dd)
return devs_deduped
class PyUSBBluetoothNoAdapterFoundException(Exception):
pass
def find_first_bt_adapter_pyusb_device_or_raise():
pyusb_devs = find_all_bt_adapters()
print "len is ", len(pyusb_devs)
if len(pyusb_devs) == 0:
raise PyUSBBluetoothNoAdapterFoundException(
"No Bluetooth adapters found!")
def _is_usable_device(pyusb_dev):
try:
pyusb_dev.set_configuration()
print "configuration done"
a = PyUSBBluetoothHCISocket(pyusb_dev)
a.hci_reset()
return True
except:
print "except happend !"
return False
print "before filter", pyusb_devs
pyusb_devs = filter(_is_usable_device, pyusb_devs)
print "after filter ", pyusb_devs
if len(pyusb_devs) == 0:
raise PyUSBBluetoothNoAdapterFoundException(
"No Bluetooth *usable* adapters found!")
if len(pyusb_devs) > 1:
LOG.warn("More than 1 Bluetooth adapters found, "
"using the first one...")
pyusb_dev = pyusb_devs[0]
return pyusb_dev
def find_first_bt_adapter_pyusb_device():
try:
return find_first_bt_adapter_pyusb_device_or_raise()
except PyUSBBluetoothNoAdapterFoundException:
return None
def has_bt_adapter():
pyusb_dev = find_first_bt_adapter_pyusb_device()
if pyusb_dev is None:
return False
return True
def pebble_usb_class_matcher(d):
""" USB device class matcher for Pebble's Test Automation dongles """
USB_DEVICE_CLASS_VENDOR_SPECIFIC = 224
USB_DEVICE_SUB_CLASS_PEBBLE_BT = 0x1
USB_DEVICE_PROTOCOL_PEBBLE_BT = 0x1
return (d.bDeviceClass == USB_DEVICE_CLASS_VENDOR_SPECIFIC and
d.bDeviceSubClass == USB_DEVICE_SUB_CLASS_PEBBLE_BT and
d.bDeviceProtocol == USB_DEVICE_PROTOCOL_PEBBLE_BT)
CUSTOM_USB_DEVICE_MATCHER = pebble_usb_class_matcher
def set_custom_matcher(matcher_func):
    global CUSTOM_USB_DEVICE_MATCHER  # without this, the assignment below would only bind a local name
    CUSTOM_USB_DEVICE_MATCHER = matcher_func
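# Illustrative usage (Python 2, matching the print statements above):
# dev = find_first_bt_adapter_pyusb_device_or_raise()
# hci = PyUSBBluetoothHCISocket(dev)
# hci.hci_reset()
# pkt = hci.recv(timeout_secs=1.0)  # a scapy HCI_Hdr packet, or None on timeout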
|
[
"notmikeb@gmail.com"
] |
notmikeb@gmail.com
|
fcc4687f54a940213e1cb227cfd867eec5926782
|
4d6a7daa53e75c8eb0a077a077fa196a8638d06c
|
/TestCases/test_1_invent.py
|
e30a196845e5edd664179c950e699d125804c86b
|
[] |
no_license
|
xingchen558/test-www
|
77c87677fcd3a29c2abd6e109443ac850d705396
|
9796345155fd020f921b46eca36537dd38c58d4b
|
refs/heads/master
| 2020-05-03T13:45:58.998159
| 2019-03-31T14:07:28
| 2019-03-31T14:07:28
| 178,661,031
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,886
|
py
|
# -*- coding: utf-8 -*-
# user = www
# Independence of the automated-test account.
# 1. Test case listing == preconditions, steps, assertions
# 2. Identify which pages are involved
# Test case 1: successful investment scenario
"""
Preconditions: 1. Logged-in state; login page -> home page
2. Available balance should be greater than the amount to invest; 1000
How to keep the balance usable during automated runs, without having to check
the user's balance manually all the time:
1) Top up once: recharge the investment amount (1000) each run --- recharge directly via the API.
2) Temporarily top up a large sum: 100 million.
3) Check whether the current balance exceeds the investment amount; if it is
less, top up a large sum; if it is greater, do nothing.
Query the database? The API? Scrape the page? --- use the API.
3. An investable bid exists. --- Whether a bid is investable can be told from
the page --- via element locators.
1) Create a new bid and set it to the bidding state. --- API.
2) Achieving 1) is complex; use the existing environment instead.
Steps:
1. Home page - pick a bid to invest in; default to the first bid.
2.0 Bid page - read the user's current balance from the amount input area
2. Bid page - enter the amount and perform the investment.
3. Bid page - in the investment-success popup, click "view and activate"
Assertions:
1. Balance before investing - current balance = amount invested
Personal page - get the user's available balance
"""
# Failed investment scenarios:
# 1. Not a multiple of 100
# 2. Not a multiple of 10
# 3. Decimal point - non-numeric - negative - empty
# 4. Investment amount (50k) > amount currently open on the bid (40k); bid + user balance must satisfy these conditions at the same time
# 5. Investment amount (50k) > available account balance (20k)
from selenium.webdriver.common.by import By
import unittest
import time
from PageObjects.login_page import LoginPage
from PageObjects.index_page import IndexPage
from PageObjects.bid_page import BidPage
from PageObjects.user_page import UserPage
from selenium import webdriver
from TestDatas import common_data as CD
from TestDatas import invent_data as TD
from Common.basepage import BasePage
from PageLocators.bidPage_locator import BidPageLocator
class TestInvest(unittest.TestCase):
@classmethod
def setUpClass(cls):
        # Top up the account  # make sure an investable bid exists.
cls.driver = webdriver.Chrome()
cls.driver.maximize_window()
cls.driver.get(CD.login_url)
LoginPage(cls.driver).login(CD.user, CD.pwd)
@classmethod
def tearDownClass(cls):
cls.driver.quit()
def setUp(self):
pass
def tearDown(self):
self.driver.refresh()
def test_invest_3_success(self):
        # Steps
        # 1. Home page - pick a bid to invest in; default to the first bid.
        # IndexPage(self.driver).click_firstBid()
        # 2.0 Bid page - read the user's current balance near the amount input
bp = BidPage(self.driver)
userMoney_beforeInvest = bp.get_userLeftMoney()
money_before = int(userMoney_beforeInvest.split('.', 2)[0])
        # 2. Bid page - enter the amount and perform the investment.
bp.invest(TD.money)
        # 3. Bid page - in the investment-success popup, click "view and activate"
bp.click_activeButton_on_investSuccess_popup()
        # Assertion
userMoney_afterInvest = UserPage(self.driver).get_userLeftMoney()
money_after = int((userMoney_afterInvest.split("元", 2)[0]).split('.', 2)[0]) + TD.money
self.assertEqual(money_before, money_after)
def test_invest_2_failed_no(self):
bp = BidPage(self.driver)
bp.invest(0)
time.sleep(1)
expected = '请正确填写投标金额'
BasePage(self.driver).wait_eleVisible(BidPageLocator.invest_failed_popup)
actual = self.driver.find_element(*BidPageLocator.invest_failed_popup).text
self.assertEqual(expected, actual)
def test_invest_1_failed_invalid_data(self):
# IndexPage(self.driver).click_firstBid()
bp = BidPage(self.driver)
bp.invest(-100)
time.sleep(1)
expected = '请正确填写投标金额'
BasePage(self.driver).wait_eleVisible(BidPageLocator.invest_failed_popup)
actual = self.driver.find_element_by_xpath('//div[@class="text-center"]').text
self.assertEqual(expected, actual)
def test_invest_0_failed_no100(self):
IndexPage(self.driver).click_firstBid()
bp = BidPage(self.driver)
bp.invest(150)
time.sleep(1)
expected = '投标金额必须为100的倍数'
# actual = BasePage(self.driver).get_element_attribute(BidPageLocator.invest_failed_popup1, "text-center", "投资_非100倍数")
BasePage(self.driver).wait_eleVisible(BidPageLocator.invest_failed_popup)
actual = self.driver.find_element_by_xpath('//div[@class="text-center"]').text
self.assertEqual(expected, actual)
#
# if __name__ == '__main__':
# unittest.main()
|
[
"kfcwwm@163.com"
] |
kfcwwm@163.com
|
116ea860a7c706542ff4eaf7714454f6c9ba48a4
|
f78137c438b5cf2e13e49fba9cbca5403eac0c27
|
/testCase/user/testS2s1.py
|
705a099ee8ddcc6f49f66c2d7bff86d28b132df8
|
[] |
no_license
|
Mayjoy-jiao/smoketest
|
cfd52db32700979b01e710f32372ed00f9b4d232
|
f1b1c56d21b83d8f704ee220349bbe792377e0f9
|
refs/heads/master
| 2020-06-19T22:55:48.524394
| 2020-04-09T02:03:24
| 2020-04-09T02:03:24
| 196,905,167
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 44,300
|
py
|
# -*- coding: utf-8 -*-
#-----------------------------
# DSP: MockA  DSP application: test 3  ad slot id: cjanjheq
# Author: Zhang Jiaojiao
import unittest
import paramunittest
import readConfig as readConfig
import os
import time
import datetime
from common import Log as Log
from common import common
from common import configHttp as ConfigHttp
from common import configPG as ConfigPG
import requests
adx_xls = common.get_xls("adx.xlsx", "adx")
localReadConfig = readConfig.ReadConfig()
configHttp = ConfigHttp.ConfigHttp()
configPG = ConfigPG.PGDB()
proDir = readConfig.proDir
info = {}
@paramunittest.parametrized(adx_xls[0])
class S2s1(unittest.TestCase):
def setParameters(self, case_name, method, header, url, cursor, result, code, msg, sql):
"""
set params
:param case_name:
:param method:
:param header:
:param url:
:param cursor
:param result:
:param code:
:param msg:
:param sql:
:return:
"""
self.case_name = str(case_name)
self.method = str(method)
self.header = str(header)
self.url = str(url)
self.cursor = str(cursor)
self.result = str(result)
self.code = int(code)
self.msg = int(msg)
self.sql = str(sql)
self.info = None
global reqId
def description(self):
"""
test report description
:return:
"""
        return self.case_name
def setUp(self):
"""
:return:
"""
self.log = Log.MyLog.get_log()
self.logger = self.log.get_logger()
print(self.case_name+"测试开始前准备")
def checkPGdb(self):
time.sleep(2)
configPG.connectPG("adxroot", "adxroot321", "65432","113.31.86.153", "adx_report")
self.sql = "select publisher_request from public.stats where ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158' and dsp_app_id='0' order by timestamp desc"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
# self.assertEqual(self.result[0], 1)
self.sql = "select adx_issued,to_dsp_request from public.stats where dsp_app_id='148'and ad_type='2' and ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158' and imp_type= '1'order by timestamp DESC"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
print(self.result)
        if self.result[0] == 1:
            print("PGdb adx_issued tested successfully!")
        else:
            print("PGdb adx_issued test failed")
        self.assertEqual(self.result[0], 1)
        self.assertEqual(self.result[1], 1)
def checkResult(self):
"""
check test result
:return:
"""
self.info = self.return_json.json()
# show return message
common.show_return_msg(self.return_json)
        if self.result in ('0', '1'):
            self.assertEqual(self.info["statusCode"], self.code)
            self.assertEqual(self.info['ads'][0]['configId'], 'adxDsp_148_2')
def checkShow(self):
"""
test data collection of beaconhub
:return:
"""
self.info = self.return_json.json()
        # wait until just past a minute boundary (presumably so the stats row
        # lands in a fresh timestamp bucket)
        i = datetime.datetime.now()
        while i.second > 5:
            time.sleep(5)
            i = datetime.datetime.now()
            print("current second is ", i.second)
        time.sleep(1)
beaurl = self.info['ads'][0]['adxBeaconUrl']
self.url = self.info['ads'][0]['callUrl3']['show'][0]['parameters']
self.url = beaurl + self.url
time.sleep(1)
self.code = requests.request('GET', self.url)
code = str(self.code)[int(str(self.code).find('[')) +1 :int(str(self.code).find(']'))]
self.assertEqual(code, '204')
print("show url is ",self.url)
time.sleep(2)
configPG.connectPG("adxroot", "adxroot321", "65432","113.31.86.153", "adx_report")
self.sql = "select impressions from public.stats where dsp_app_id='148'and ad_type='2'and ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158' and imp_type= '1' order by timestamp DESC"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
print(self.result)
        if self.result[0] == 1:
            print("PGdb impressions tested successfully!")
        else:
            print("PGdb impressions test failed")
        self.assertEqual(self.result[0], 1)
def checkClick(self):
"""
test data collection of beaconhub
:return:
"""
self.info = self.return_json.json()
time.sleep(1)
beaurl = self.info['ads'][0]['adxBeaconUrl']
time.sleep(1)
self.url = self.info['ads'][0]['callUrl3']['click'][1]['parameters']
self.url = beaurl + self.url
self.code = requests.request('GET', self.url)
code = str(self.code)[int(str(self.code).find('[')) + 1:int(str(self.code).find(']'))]
self.assertEqual(code, '204')
time.sleep(1)
configPG.connectPG("adxroot", "adxroot321", "65432","113.31.86.153", "adx_report")
self.sql = "select click from public.stats where dsp_app_id='148' and ad_type='2'and ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158'and imp_type= '1'order by timestamp DESC"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
print(self.result)
        if self.result[0] == 1:
            print("PGdb click tested successfully!")
        else:
            print("PGdb click test failed")
        self.assertEqual(self.result[0], 1)
def checkDown(self):
"""
test download_start data collection of beaconhub
:return:
"""
self.info = self.return_json.json()
time.sleep(1)
beaurl = self.info['ads'][0]['adxBeaconUrl']
time.sleep(1)
self.url = self.info['ads'][0]['callUrl3']['down'][0]['parameters']
self.url = beaurl + self.url
self.code = requests.request('GET', self.url)
code = str(self.code)[int(str(self.code).find('[')) + 1:int(str(self.code).find(']'))]
self.assertEqual(code, '204')
time.sleep(1)
configPG.connectPG("adxroot", "adxroot321", "65432","113.31.86.153", "adx_report")
self.sql = "select download_start from public.stats where dsp_app_id='148' and ad_type='2'and ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158'and imp_type= '1'order by timestamp DESC"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
print(self.result)
        if self.result[0] == 1:
            print("PGdb download_start tested successfully!")
        else:
            print("PGdb download_start test failed")
        self.assertEqual(self.result[0], 1)
def checkDowns(self):
"""
test download_completed data collection of beaconhub
:return:
"""
self.info = self.return_json.json()
time.sleep(1)
beaurl = self.info['ads'][0]['adxBeaconUrl']
time.sleep(1)
self.url = self.info['ads'][0]['callUrl3']['downs'][0]['parameters']
self.url = beaurl + self.url
self.code = requests.request('GET', self.url)
code = str(self.code)[int(str(self.code).find('[')) + 1:int(str(self.code).find(']'))]
self.assertEqual(code, '204')
time.sleep(1)
configPG.connectPG("adxroot", "adxroot321", "65432","113.31.86.153", "adx_report")
self.sql = "select download_completed from public.stats where dsp_app_id='148' and ad_type='2'and ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158'and imp_type= '1'order by timestamp DESC"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
print(self.result)
        if self.result[0] == 1:
            print("PGdb download_completed tested successfully!")
        else:
            print("PGdb download_completed test failed")
        self.assertEqual(self.result[0], 1)
def checkInstl(self):
"""
test install_start data collection of beaconhub
:return:
"""
self.info = self.return_json.json()
time.sleep(1)
beaurl = self.info['ads'][0]['adxBeaconUrl']
time.sleep(1)
self.url = self.info['ads'][0]['callUrl3']['instl'][0]['parameters']
self.url = beaurl + self.url
self.code = requests.request('GET', self.url)
code = str(self.code)[int(str(self.code).find('[')) + 1:int(str(self.code).find(']'))]
self.assertEqual(code, '204')
time.sleep(1)
configPG.connectPG("adxroot", "adxroot321", "65432","113.31.86.153", "adx_report")
self.sql = "select install_start from public.stats where dsp_app_id='148' and ad_type='2'and ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158'and imp_type= '1'order by timestamp DESC"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
print(self.result)
        if self.result[0] == 1:
            print("PGdb install_start tested successfully!")
        else:
            print("PGdb install_start test failed")
        self.assertEqual(self.result[0], 1)
def checkOpenexists(self):
"""
test app_open data collection of beaconhub
:return:
"""
self.info = self.return_json.json()
time.sleep(1)
beaurl = self.info['ads'][0]['adxBeaconUrl']
time.sleep(1)
self.url = self.info['ads'][0]['callUrl3']['openexists'][0]['parameters']
self.url = beaurl + self.url
self.code = requests.request('GET', self.url)
code = str(self.code)[int(str(self.code).find('[')) + 1:int(str(self.code).find(']'))]
self.assertEqual(code, '204')
time.sleep(1)
configPG.connectPG("adxroot", "adxroot321", "65432","113.31.86.153", "adx_report")
self.sql = "select app_open from public.stats where dsp_app_id='148' and ad_type='2'and ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158'and imp_type= '1'order by timestamp DESC"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
print(self.result)
        if self.result[0] == 1:
            print("PGdb app_open tested successfully!")
        else:
            print("PGdb app_open test failed")
        self.assertEqual(self.result[0], 1)
def checkInsls(self):
"""
test install_completed data collection of beaconhub
:return:
"""
        # wait until just past a minute boundary (presumably so the stats row
        # lands in a fresh timestamp bucket)
        i = datetime.datetime.now()
        while i.second > 5:
            time.sleep(5)
            i = datetime.datetime.now()
            print("current second is ", i.second)
        time.sleep(1)
self.info = self.return_json.json()
time.sleep(1)
beaurl = self.info['ads'][0]['adxBeaconUrl']
time.sleep(1)
self.url = self.info['ads'][0]['callUrl3']['insls'][0]['parameters']
self.url = beaurl + self.url
self.code = requests.request('GET', self.url)
code = str(self.code)[int(str(self.code).find('[')) + 1:int(str(self.code).find(']'))]
self.assertEqual(code, '204')
time.sleep(1)
configPG.connectPG("adxroot", "adxroot321", "65432","113.31.86.153", "adx_report")
self.sql = "select install_completed from public.stats where dsp_app_id='148' and ad_type='2'and ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158'and imp_type= '1'order by timestamp DESC"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
print(self.result)
        if self.result[0] == 1:
            print("PGdb install_completed tested successfully!")
        else:
            print("PGdb install_completed test failed")
        self.assertEqual(self.result[0], 1)
def checkOpen(self):
"""
test app_activated data collection of beaconhub
:return:
"""
self.info = self.return_json.json()
time.sleep(1)
beaurl = self.info['ads'][0]['adxBeaconUrl']
time.sleep(1)
self.url = self.info['ads'][0]['callUrl3']['open'][0]['parameters']
self.url = beaurl + self.url
self.code = requests.request('GET', self.url)
code = str(self.code)[int(str(self.code).find('[')) + 1:int(str(self.code).find(']'))]
self.assertEqual(code, '204')
time.sleep(1)
configPG.connectPG("adxroot", "adxroot321", "65432","113.31.86.153", "adx_report")
self.sql = "select app_activated from public.stats where dsp_app_id='148' and ad_type='2'and ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158'and imp_type= '1'order by timestamp DESC"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
print(self.result)
        if self.result[0] == 1:
            print("PGdb app_activated tested successfully!")
        else:
            print("PGdb app_activated test failed")
        self.assertEqual(self.result[0], 1)
def checkActive2(self):
"""
test active2 data collection of beaconhub
:return:
"""
self.info = self.return_json.json()
time.sleep(1)
beaurl = self.info['ads'][0]['adxBeaconUrl']
time.sleep(1)
self.url = self.info['ads'][0]['callUrl3']['active2'][0]['parameters']
self.url = beaurl + self.url
self.code = requests.request('GET', self.url)
code = str(self.code)[int(str(self.code).find('[')) + 1:int(str(self.code).find(']'))]
self.assertEqual(code, '204')
time.sleep(1)
configPG.connectPG("adxroot", "adxroot321", "65432","113.31.86.153", "adx_report")
self.sql = "select active2 from public.stats where dsp_app_id='148' and ad_type='2'and ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158'and imp_type= '1'order by timestamp DESC"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
print(self.result)
        if self.result[0] == 1:
            print("PGdb active2 tested successfully!")
        else:
            print("PGdb active2 test failed")
        self.assertEqual(self.result[0], 1)
def checkTodsprequest(self):
"""
test c2s_dsp_request data collection of beaconhub
:return:
"""
self.info = self.return_json.json()
time.sleep(1)
beaurl = self.info['ads'][0]['adxBeaconUrl']
time.sleep(1)
self.url = self.info['ads'][0]['callUrl3']['todsprequest'][0]['parameters']
self.url = beaurl + self.url
self.code = requests.request('GET', self.url)
code = str(self.code)[int(str(self.code).find('[')) + 1:int(str(self.code).find(']'))]
self.assertEqual(code, '204')
time.sleep(1)
configPG.connectPG("adxroot", "adxroot321", "65432","113.31.86.153", "adx_report")
self.sql = "select c2s_dsp_request from public.stats where dsp_app_id='148' and ad_type='2'and ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158'and imp_type= '1'order by timestamp DESC"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
print(self.result)
        if self.result[0] == 1:
            print("PGdb c2s_dsp_request tested successfully!")
        else:
            print("PGdb c2s_dsp_request test failed")
        self.assertEqual(self.result[0], 1)
def checkDspissued(self):
"""
test c2s_dsp_issued data collection of beaconhub
:return:
"""
self.info = self.return_json.json()
time.sleep(1)
beaurl = self.info['ads'][0]['adxBeaconUrl']
time.sleep(1)
self.url = self.info['ads'][0]['callUrl3']['dspissued'][0]['parameters']
self.url = beaurl + self.url
self.code = requests.request('GET', self.url)
code = str(self.code)[int(str(self.code).find('[')) + 1:int(str(self.code).find(']'))]
self.assertEqual(code, '204')
time.sleep(1)
configPG.connectPG("adxroot", "adxroot321", "65432","113.31.86.153", "adx_report")
self.sql = "select c2s_dsp_issued from public.stats where dsp_app_id='148' and ad_type='2'and ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158' and imp_type= '1'order by timestamp DESC"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
print(self.result)
        if self.result[0] == 1:
            print("PGdb c2s_dsp_issued tested successfully!")
        else:
            print("PGdb c2s_dsp_issued test failed")
        self.assertEqual(self.result[0], 1)
def checkPicdownloadstart(self):
"""
test pic_download_start data collection of beaconhub
:return:
"""
self.info = self.return_json.json()
time.sleep(1)
beaurl = self.info['ads'][0]['adxBeaconUrl']
time.sleep(1)
self.url = self.info['ads'][0]['callUrl3']['picdownloadstart'][0]['parameters']
self.url = beaurl + self.url
self.code = requests.request('GET', self.url)
code = str(self.code)[int(str(self.code).find('[')) + 1:int(str(self.code).find(']'))]
self.assertEqual(code, '204')
time.sleep(1)
configPG.connectPG("adxroot", "adxroot321", "65432","113.31.86.153", "adx_report")
self.sql = "select pic_download_start from public.stats where dsp_app_id='148' and ad_type='2'and ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158'and imp_type= '1'order by timestamp DESC"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
print(self.result)
        if self.result[0] == 1:
            print("PGdb pic_download_start tested successfully!")
        else:
            print("PGdb pic_download_start test failed")
        self.assertEqual(self.result[0], 1)
def checkPicdownloadcompleted(self):
"""
test pic_download_completed data collection of beaconhub
:return:
"""
self.info = self.return_json.json()
time.sleep(1)
beaurl = self.info['ads'][0]['adxBeaconUrl']
time.sleep(1)
self.url = self.info['ads'][0]['callUrl3']['picdownloadcompleted'][0]['parameters']
self.url = beaurl + self.url
self.code = requests.request('GET', self.url)
code = str(self.code)[int(str(self.code).find('[')) + 1:int(str(self.code).find(']'))]
self.assertEqual(code, '204')
time.sleep(1)
configPG.connectPG("adxroot", "adxroot321", "65432","113.31.86.153", "adx_report")
self.sql = "select pic_download_completed from public.stats where dsp_app_id='148' and ad_type='2'and ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158'and imp_type= '1'order by timestamp DESC"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
print(self.result)
        if self.result[0] == 1:
            print("PGdb pic_download_completed tested successfully!")
        else:
            print("PGdb pic_download_completed test failed")
        self.assertEqual(self.result[0], 1)
def checkPicdownloadfailed(self):
"""
test pic_download_failed data collection of beaconhub
:return:
"""
self.info = self.return_json.json()
time.sleep(1)
beaurl = self.info['ads'][0]['adxBeaconUrl']
time.sleep(1)
self.url = self.info['ads'][0]['callUrl3']['picdownloadfailed'][0]['parameters']
self.url = beaurl + self.url
self.code = requests.request('GET', self.url)
code = str(self.code)[int(str(self.code).find('[')) + 1:int(str(self.code).find(']'))]
self.assertEqual(code, '204')
time.sleep(1)
configPG.connectPG("adxroot", "adxroot321", "65432","113.31.86.153", "adx_report")
self.sql = "select pic_download_failed from public.stats where dsp_app_id='148' and ad_type='2'and ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158'and imp_type= '1'order by timestamp DESC"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
print(self.result)
        if self.result[0] == 1:
            print("PGdb pic_download_failed tested successfully!")
        else:
            print("PGdb pic_download_failed test failed")
        self.assertEqual(self.result[0], 1)
def checkGiveup(self):
"""
test give_up data collection of beaconhub
:return:
"""
self.info = self.return_json.json()
time.sleep(1)
beaurl = self.info['ads'][0]['adxBeaconUrl']
time.sleep(1)
self.url = self.info['ads'][0]['callUrl3']['giveup'][0]['parameters']
self.url = beaurl + self.url
self.code = requests.request('GET', self.url)
code = str(self.code)[int(str(self.code).find('[')) + 1:int(str(self.code).find(']'))]
self.assertEqual(code, '204')
time.sleep(1)
configPG.connectPG("adxroot", "adxroot321", "65432","113.31.86.153", "adx_report")
self.sql = "select give_up from public.stats where dsp_app_id='148' and ad_type='2'and ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158'and imp_type= '1'order by timestamp DESC"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
print(self.result)
        if self.result[0] == 1:
            print("PGdb give_up tested successfully!")
        else:
            print("PGdb give_up test failed")
        self.assertEqual(self.result[0], 1)
def checkUrljump(self):
"""
test url_jump data collection of beaconhub
:return:
"""
self.info = self.return_json.json()
time.sleep(1)
beaurl = self.info['ads'][0]['adxBeaconUrl']
time.sleep(1)
self.url = self.info['ads'][0]['callUrl3']['urljump'][0]['parameters']
self.url = beaurl + self.url
self.code = requests.request('GET', self.url)
code = str(self.code)[int(str(self.code).find('[')) + 1:int(str(self.code).find(']'))]
self.assertEqual(code, '204')
time.sleep(1)
configPG.connectPG("adxroot", "adxroot321", "65432","113.31.86.153", "adx_report")
self.sql = "select url_jump from public.stats where dsp_app_id='148' and ad_type='2'and ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158'and imp_type= '1'order by timestamp DESC"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
print(self.result)
        if self.result[0] == 1:
            print("PGdb url_jump tested successfully!")
        else:
            print("PGdb url_jump test failed")
        self.assertEqual(self.result[0], 1)
def checkManualclosebutton(self):
"""
test menual_close_button data collection of beaconhub
:return:
"""
self.info = self.return_json.json()
time.sleep(1)
beaurl = self.info['ads'][0]['adxBeaconUrl']
time.sleep(1)
self.url = self.info['ads'][0]['callUrl3']['manualclosebutton'][0]['parameters']
self.url = beaurl + self.url
self.code = requests.request('GET', self.url)
code = str(self.code)[int(str(self.code).find('[')) + 1:int(str(self.code).find(']'))]
self.assertEqual(code, '204')
time.sleep(1)
configPG.connectPG("adxroot", "adxroot321", "65432","113.31.86.153", "adx_report")
self.sql = "select menual_close_button from public.stats where dsp_app_id='148' and ad_type='2'and ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158'and imp_type= '1'order by timestamp DESC"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
print(self.result)
        if self.result[0] == 1:
            print("PGdb menual_close_button tested successfully!")
        else:
            print("PGdb menual_close_button test failed")
        self.assertEqual(self.result[0], 1)
def checkAutoclosebutton(self):
"""
test auto_close_button data collection of beaconhub
:return:
"""
self.info = self.return_json.json()
time.sleep(1)
beaurl = self.info['ads'][0]['adxBeaconUrl']
time.sleep(1)
self.url = self.info['ads'][0]['callUrl3']['autoclosebutton'][0]['parameters']
self.url = beaurl + self.url
self.code = requests.request('GET', self.url)
code = str(self.code)[int(str(self.code).find('[')) + 1:int(str(self.code).find(']'))]
self.assertEqual(code, '204')
time.sleep(1)
configPG.connectPG("adxroot", "adxroot321", "65432","113.31.86.153", "adx_report")
self.sql = "select auto_close_button from public.stats where dsp_app_id='148' and ad_type='2'and ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158'and imp_type= '1'order by timestamp DESC"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
print(self.result)
        if self.result[0] == 1:
            print("PGdb auto_close_button tested successfully!")
        else:
            print("PGdb auto_close_button test failed")
        self.assertEqual(self.result[0], 1)
def checkInstallremindfailed(self):
"""
test install_remind_failed data collection of beaconhub
:return:
"""
self.info = self.return_json.json()
time.sleep(1)
beaurl = self.info['ads'][0]['adxBeaconUrl']
time.sleep(1)
self.url = self.info['ads'][0]['callUrl3']['installremindfailed'][0]['parameters']
self.url = beaurl + self.url
self.code = requests.request('GET', self.url)
code = str(self.code)[int(str(self.code).find('[')) + 1:int(str(self.code).find(']'))]
self.assertEqual(code, '204')
time.sleep(1)
configPG.connectPG("adxroot", "adxroot321", "65432","113.31.86.153", "adx_report")
self.sql = "select install_remind_failed from public.stats where dsp_app_id='148' and ad_type='2'and ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158'and imp_type= '1'order by timestamp DESC"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
print(self.result)
        if self.result[0] == 1:
            print("PGdb install_remind_failed tested successfully!")
        else:
            print("PGdb install_remind_failed test failed")
        self.assertEqual(self.result[0], 1)
def checkNewsimpression(self):
"""
test news_impression data collection of beaconhub
:return:
"""
self.info = self.return_json.json()
time.sleep(1)
beaurl = self.info['ads'][0]['adxBeaconUrl']
time.sleep(1)
self.url = self.info['ads'][0]['callUrl3']['newsimpression'][0]['parameters']
self.url = beaurl + self.url
self.code = requests.request('GET', self.url)
code = str(self.code)[int(str(self.code).find('[')) + 1:int(str(self.code).find(']'))]
self.assertEqual(code, '204')
time.sleep(1)
configPG.connectPG("adxroot", "adxroot321", "65432","113.31.86.153", "adx_report")
self.sql = "select news_impression from public.stats where dsp_app_id='148' and ad_type='2'and ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158'and imp_type= '1'order by timestamp DESC"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
print(self.result)
        if self.result[0] == 1:
            print("PGdb news_impression tested successfully!")
        else:
            print("PGdb news_impression test failed")
        self.assertEqual(self.result[0], 1)
def checkNewsclick(self):
"""
test news_click data collection of beaconhub
:return:
"""
        # wait until just past a minute boundary (presumably so the stats row
        # lands in a fresh timestamp bucket)
        i = datetime.datetime.now()
        while i.second > 5:
            time.sleep(5)
            i = datetime.datetime.now()
            print("current second is ", i.second)
        time.sleep(1)
self.info = self.return_json.json()
time.sleep(1)
beaurl = self.info['ads'][0]['adxBeaconUrl']
time.sleep(1)
self.url = self.info['ads'][0]['callUrl3']['newsclick'][0]['parameters']
self.url = beaurl + self.url
self.code = requests.request('GET', self.url)
code = str(self.code)[int(str(self.code).find('[')) + 1:int(str(self.code).find(']'))]
self.assertEqual(code, '204')
time.sleep(1)
configPG.connectPG("adxroot", "adxroot321", "65432","113.31.86.153", "adx_report")
self.sql = "select news_click from public.stats where dsp_app_id='148' and ad_type='2'and ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158'and imp_type= '1'order by timestamp DESC"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
print(self.result)
        if self.result[0] == 1:
            print("PGdb news_click tested successfully!")
        else:
            print("PGdb news_click test failed")
        self.assertEqual(self.result[0], 1)
def checkDownloadfailed(self):
"""
test download_failed data collection of beaconhub
:return:
"""
self.info = self.return_json.json()
time.sleep(1)
beaurl = self.info['ads'][0]['adxBeaconUrl']
time.sleep(1)
self.url = self.info['ads'][0]['callUrl3']['downloadfailed'][0]['parameters']
self.url = beaurl + self.url
self.code = requests.request('GET', self.url)
code = str(self.code)[int(str(self.code).find('[')) + 1:int(str(self.code).find(']'))]
self.assertEqual(code, '204')
time.sleep(1)
configPG.connectPG("adxroot", "adxroot321", "65432","113.31.86.153", "adx_report")
self.sql = "select download_failed from public.stats where dsp_app_id='148' and ad_type='2'and ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158'and imp_type= '1'order by timestamp DESC"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
print(self.result)
        if self.result[0] == 1:
            print("PGdb download_failed tested successfully!")
        else:
            print("PGdb download_failed test failed")
        self.assertEqual(self.result[0], 1)
def checkInstallfailed(self):
"""
test install_failed data collection of beaconhub
:return:
"""
self.info = self.return_json.json()
time.sleep(1)
beaurl = self.info['ads'][0]['adxBeaconUrl']
time.sleep(1)
self.url = self.info['ads'][0]['callUrl3']['installfailed'][0]['parameters']
self.url = beaurl + self.url
self.code = requests.request('GET', self.url)
code = str(self.code)[int(str(self.code).find('[')) + 1:int(str(self.code).find(']'))]
self.assertEqual(code, '204')
time.sleep(1)
configPG.connectPG("adxroot", "adxroot321", "65432","113.31.86.153", "adx_report")
self.sql = "select install_failed from public.stats where dsp_app_id='148' and ad_type='2'and ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158'and imp_type= '1'order by timestamp DESC"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
print(self.result)
        if self.result[0] == 1:
            print("PGdb install_failed tested successfully!")
        else:
            print("PGdb install_failed test failed")
        self.assertEqual(self.result[0], 1)
def checkActive2failed(self):
"""
        test active2_failed data collection of beaconhub
:return:
"""
self.info = self.return_json.json()
time.sleep(1)
beaurl = self.info['ads'][0]['adxBeaconUrl']
time.sleep(1)
self.url = self.info['ads'][0]['callUrl3']['active2failed'][0]['parameters']
self.url = beaurl + self.url
self.code = requests.request('GET', self.url)
code = str(self.code)[int(str(self.code).find('[')) + 1:int(str(self.code).find(']'))]
self.assertEqual(code, '204')
time.sleep(1)
configPG.connectPG("adxroot", "adxroot321", "65432","113.31.86.153", "adx_report")
self.sql = "select active2_failed from public.stats where dsp_app_id='148' and ad_type='2'and ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158'and imp_type= '1'order by timestamp DESC"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
print(self.result)
        if self.result[0] == 1:
            print("PGdb active2_failed tested successfully!")
        else:
            print("PGdb active2_failed test failed")
        self.assertEqual(self.result[0], 1)
def checkActive3(self):
"""
        test active3 data collection of beaconhub
:return:
"""
self.info = self.return_json.json()
time.sleep(1)
beaurl = self.info['ads'][0]['adxBeaconUrl']
time.sleep(1)
self.url = self.info['ads'][0]['callUrl3']['active3'][0]['parameters']
self.url = beaurl + self.url
self.code = requests.request('GET', self.url)
code = str(self.code)[int(str(self.code).find('[')) + 1:int(str(self.code).find(']'))]
self.assertEqual(code, '204')
time.sleep(1)
configPG.connectPG("adxroot", "adxroot321", "65432","113.31.86.153", "adx_report")
self.sql = "select active3 from public.stats where dsp_app_id='148' and ad_type='2'and ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158'and imp_type= '1'order by timestamp DESC"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
print(self.result)
        if self.result[0] == 1:
            print("PGdb active3 tested successfully!")
        else:
            print("PGdb active3 test failed")
        self.assertEqual(self.result[0], 1)
def checkActive3failed(self):
"""
        test active3_failed data collection of beaconhub
:return:
"""
self.info = self.return_json.json()
time.sleep(1)
beaurl = self.info['ads'][0]['adxBeaconUrl']
time.sleep(1)
self.url = self.info['ads'][0]['callUrl3']['active3failed'][0]['parameters']
self.url = beaurl + self.url
self.code = requests.request('GET', self.url)
code = str(self.code)[int(str(self.code).find('[')) + 1:int(str(self.code).find(']'))]
self.assertEqual(code, '204')
time.sleep(1)
configPG.connectPG("adxroot", "adxroot321", "65432","113.31.86.153", "adx_report")
self.sql = "select active3_failed from public.stats where dsp_app_id='148' and ad_type='2'and ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158'and imp_type= '1'order by timestamp DESC"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
print(self.result)
        if self.result[0] == 1:
            print("PGdb active3_failed tested successfully!")
        else:
            print("PGdb active3_failed test failed")
        self.assertEqual(self.result[0], 1)
def checkAutostart(self):
"""
        test autostart data collection of beaconhub
:return:
"""
self.info = self.return_json.json()
time.sleep(1)
beaurl = self.info['ads'][0]['adxBeaconUrl']
time.sleep(1)
self.url = self.info['ads'][0]['callUrl3']['autostart'][0]['parameters']
self.url = beaurl + self.url
self.code = requests.request('GET', self.url)
code = str(self.code)[int(str(self.code).find('[')) + 1:int(str(self.code).find(']'))]
self.assertEqual(code, '204')
time.sleep(1)
configPG.connectPG("adxroot", "adxroot321", "65432","113.31.86.153", "adx_report")
self.sql = "select active3_failed from public.stats where dsp_app_id='148' and ad_type='2'and ad_group_id='28'and ad_channel_id='677' and ad_customer_id='15158'and imp_type= '1'order by timestamp DESC"
self.cursor = configPG.executeSQL(self.sql)
self.result = configPG.get_one(self.cursor)
print(self.result)
        if self.result[0] == 1:
            print("PGdb autostart tested successfully!")
        else:
            print("PGdb autostart test failed")
        self.assertEqual(self.result[0], 1)
def testS2s1(self):
"""
test body
:return:
"""
        # set url
        url = adx_xls[0][3]
        print("Step 1: set url " + url)
        # set headers
        header = localReadConfig.get_headers("header")
        header = {"header": str(header)}
        print(header)
        configHttp.set_headers(header)
        print("Step 2: set headers etc.")
        # set params
        json_path = os.path.join(readConfig.proDir, "testFile", "json", "s2s1.json")
        json = configHttp.load_json(json_path)
        print("Step 3: set request parameters")
        self.return_json = requests.request("POST", url, json=json)
        print(self.return_json.json())
        method = str(self.return_json.request)[int(str(self.return_json.request).find('[')) + 1:int(str(self.return_json.request).find(']'))]
        print("Step 4: send request\n\t\trequest method: " + method)
        # check result
        print("Step 5: check the HTTP response")
        self.checkResult()
        print("Step 6: verify PGdb data")
        self.checkPGdb()
        print("Step 7: verify show beacon data")
        self.checkShow()
        print("Step 8: verify click beacon data")
        self.checkClick()
        print("Step 9: verify down beacon data")
        self.checkDown()
        print("Step 10: verify downs beacon data")
        self.checkDowns()
        print("Step 11: verify instl beacon data")
        self.checkInstl()
        print("Step 12: verify insls beacon data")
        self.checkInsls()
        print("Step 13: verify openexists beacon data")
        self.checkOpenexists()
        print("Step 14: verify open beacon data")
        self.checkOpen()
        print("Step 15: verify active2 beacon data")
        self.checkActive2()
        print("Step 16: verify todsprequest beacon data")
        # self.checkTodsprequest()
        print("Step 17: verify dspissued beacon data")
        # self.checkDspissued()
        print("Step 18: verify picdownloadstart beacon data")
        self.checkPicdownloadstart()
        print("Step 19: verify picdownloadcompleted beacon data")
        self.checkPicdownloadcompleted()
        print("Step 20: verify picdownloadfailed beacon data")
        self.checkPicdownloadfailed()
        print("Step 21: verify giveup beacon data")
        self.checkGiveup()
        print("Step 22: verify urljump beacon data")
        self.checkUrljump()
        print("Step 23: verify manualclosebutton beacon data")
        self.checkManualclosebutton()
        print("Step 24: verify autoclosebutton beacon data")
        self.checkAutoclosebutton()
        print("Step 25: verify installremindfailed beacon data")
        self.checkInstallremindfailed()
        print("Step 26: verify newsimpression beacon data")
        self.checkNewsimpression()
        print("Step 27: verify newsclick beacon data")
        self.checkNewsclick()
        print("Step 28: verify downloadfailed beacon data")
        self.checkDownloadfailed()
        print("Step 29: verify installfailed beacon data")
        self.checkInstallfailed()
        print("Step 30: verify active2_failed beacon data")
        self.checkActive2failed()
        print("Step 31: verify active3 beacon data")
        self.checkActive3()
        print("Step 32: verify active3_failed beacon data")
        self.checkActive3failed()
        print("Step 33: verify autostart beacon data")
        self.checkAutostart()
def tearDown(self):
"""
:return:
"""
        info = self.info
        if info['statusCode'] == 0:
            pass  # placeholder: nothing extra to log on success
        else:
            pass  # placeholder: nothing extra to log on failure
        print("Test finished, log output complete\n\n")
def checkcode(self):
"""
check test result
:return:
"""
if self.code == '204':
self.assertEqual(self.code, '204')
print("Http request code is 204")
|
[
"zhangjj@chinamobiad.com"
] |
zhangjj@chinamobiad.com
|
b10f2dcd889ee9fae01d7242fd236bcc5ad7d1f4
|
6e0a2238c53af6c6cf0344ead33b6f9224ab8969
|
/OPG_Plac/migrations/0007_auto_20201208_2130.py
|
19f9cc1b3cd5b17d778f9ac779a29cc489cea927
|
[] |
no_license
|
MarkoPavich/OPG_Plac
|
af3af114eca0b76b7f0c415532ee95c3841de74b
|
8c30d42c8aa9a586466b10c72e17b12b7d9cc299
|
refs/heads/master
| 2023-03-18T11:59:08.322974
| 2021-02-01T14:56:05
| 2021-02-01T14:56:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 745
|
py
|
# Generated by Django 3.1 on 2020-12-08 20:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('OPG_Plac', '0006_auto_20201208_2122'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='name',
),
migrations.AddField(
model_name='user',
name='first_name',
field=models.CharField(default='Marko', max_length=100),
preserve_default=False,
),
migrations.AddField(
model_name='user',
name='last_name',
field=models.CharField(default='Pavić', max_length=150),
preserve_default=False,
),
]
|
[
"marko.pavic667@gmail.com"
] |
marko.pavic667@gmail.com
|
6f44b17fe12154584f7bad7fb239e68fc6f066b1
|
1e348197820dc41943ee83e32124a1f0cb862a94
|
/project/model/service/smartphone_service.py
|
934c1c788fb306c41e932b4dc5843e705974438e
|
[] |
no_license
|
Adrilene/babymonitor-iot
|
7a22d3ccdd847718809281643d378d3391682a0f
|
6ade70bfbd9ec7fc2dcbcf51fcf4e418e138995a
|
refs/heads/master
| 2022-11-16T07:09:43.684221
| 2020-07-15T08:10:24
| 2020-07-15T08:10:24
| 286,795,773
| 1
| 0
| null | 2020-08-11T16:35:54
| 2020-08-11T16:35:53
| null |
UTF-8
|
Python
| false
| false
| 345
|
py
|
from project.model.smartphone import Smartphone
from project import db
class SmartphoneService():
def insert_data(self, data):
data_smartphone = Smartphone(**data)
db.session.add(data_smartphone)
db.session.commit()
def last_record(self):
return Smartphone.query.order_by(Smartphone.id.desc()).first()
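# A minimal usage sketch; the field names below are hypothetical, since the
# Smartphone model's columns are defined elsewhere in the project:
# service = SmartphoneService()
# service.insert_data({"noise_level": 42, "timestamp": "2020-07-15T08:10:24"})
# latest = service.last_record()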
|
[
"denismsb11@gmail.com"
] |
denismsb11@gmail.com
|
15797346de4b57fe1174fce85b73be0ad08672b7
|
9522eb4be46f91fbacf5962ae10bc5af7a80420c
|
/punctuation_removal.py
|
d771049d4433f0fbf47bb8521f23b86f162297e5
|
[] |
no_license
|
DidiHar13/sistem-temu-kembali-informasi
|
9c87ee2e127a8a11ce2a35ef3bd63f88649d2d07
|
fc57d65fd142450531cb88974d6da81613c6ddbf
|
refs/heads/main
| 2023-04-10T10:17:58.474064
| 2021-04-26T14:41:28
| 2021-04-26T14:41:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 304
|
py
|
# Punctuation Removal
# Created by : Pasnur
# pasnur@akba.ac.id
# Import the required library
import string
# Sample document
dokumen = "Siapakah, nama Presiden Republik Indonesia? Jawablah dengan tepat!"
# Strip the punctuation marks
print(dokumen.translate(str.maketrans("", "", string.punctuation)))
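# The translation table maps every character in string.punctuation to None, so the
# comma, question mark and exclamation mark are all dropped. Output:
# Siapakah nama Presiden Republik Indonesia Jawablah dengan tepat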
|
[
"pasnur@akba.ac.id"
] |
pasnur@akba.ac.id
|
6310467db7a1568bd066734085729591f5e42973
|
9e7f75539ea850915e0a9921ccd22d51cc5f4506
|
/parsePKS.py
|
5ad7a1069b66f4d55973c01f8461807a2980ab73
|
[] |
no_license
|
tony2heads/sourcelists
|
d6ef20efa74a11c7270a1ed872e2c4c663353cc5
|
a1f290264d597580b6dbff0035dde6acb7fa4ade
|
refs/heads/master
| 2021-01-10T01:56:38.201329
| 2017-05-18T12:09:01
| 2017-05-18T12:09:01
| 45,910,103
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
#!/usr/bin/env python
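# Fixed-width layout assumed from the slices below (pkscat90.txt itself is not
# shown): cols 0-9 source name, 10-22 RA (h:m:s), 23-35 DEC (d:m:s), 168-175 flux.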
file = open("pkscat90.txt")
for line in file:
name= line[:10]
# rah=line[10:12]
# ram=line[13:15]
# ras=line[16:20]
RA=line[10:12]+":"+line[13:15]+":"+line[16:22]
# dd=line[23:26]
# dm=line[27:29]
# ds=line[30:35]
DEC=line[23:26]+":"+line[27:29]+":"+line[30:35]
print name,RA,DEC,"J2000",line[168:175]
|
[
"tony@ska.ac.za"
] |
tony@ska.ac.za
|
9bb7ea8db66faa3a8ce9cd3a906bc70ebab42f29
|
e5805c1fa9dc6020f533c35458f98a51a3b339d5
|
/High_Level_Coding/area/test7_8.py
|
070d0f3db94484b8d21d1ee3db60b0119c0c90dc
|
[] |
no_license
|
Air-Zhuang/Test27
|
fe9b72bd5cd186aa0326d6837f0bea79ad181af1
|
a44559e84f9602a8b2643b7cd925e1d24c11b9b3
|
refs/heads/master
| 2020-03-28T03:18:30.395004
| 2018-09-06T07:40:56
| 2018-09-06T07:40:56
| 147,636,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 947
|
py
|
# _*_ coding: utf-8 _*_
__author__ = 'Air Zhuang'
__date__ = '2018/4/24 23:23'
from High_Level_Coding.area.lib1 import Circle
from High_Level_Coding.area.lib2 import Triangle
from High_Level_Coding.area.lib3 import Rectangle
from operator import methodcaller
def getArea(shape):
    # Approach 1: getattr() -- try each known method name in turn
    for name in ('area', 'getArea', 'get_area'):
        f = getattr(shape, name, None)
        if f:
            return f()
shape1 = Circle(2)
shape2 = Triangle(3, 4, 5)
shape3 = Rectangle(6, 4)
shapes = [shape1, shape2, shape3]
print map(getArea, shapes)
# Approach 2: methodcaller
'''
methodcaller is essentially equivalent to the following function:
def methodcaller(name, *args, **kwargs):
    def caller(obj):
        return getattr(obj, name)(*args, **kwargs)
    return caller
As you can see, it is still getattr underneath; methodcaller merely wraps it, so
only the call style differs. In my view, exercise 7-8 cannot be solved with
methodcaller, because the three shape classes do not share a single method name.
'''
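# For comparison, a minimal methodcaller sketch on a throwaway class (an assumption,
# not part of the exercise); it works only because _Square exposes one known method
# name -- exactly what the mixed-name shapes above lack.
class _Square(object):
    def __init__(self, side):
        self.side = side
    def area(self):
        return self.side * self.side
print methodcaller('area')(_Square(3))  # prints 9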
|
[
"737248514@qq.com"
] |
737248514@qq.com
|
dcec4709455032ae1a71cd756aace44e6ff9def6
|
eb83aa573f0861d997a22d84bb95f026633acf2d
|
/question/Indycium/question3.py
|
bcce551b5b85f950841aa9695626fb395c65274e
|
[] |
no_license
|
dev-amit-kumar/interview-data
|
d9c3dc462516ffdba65e1c718d884d2ed20ecacb
|
d07712b7013be490caa700eaf1206182f0e6ca80
|
refs/heads/main
| 2023-04-23T05:01:55.878305
| 2021-05-03T13:46:18
| 2021-05-03T13:46:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 339
|
py
|
def print_pattern(n):
for i in range(0, n):
for j in range(i, n-1): # printing prefix spaces
print(' ', end="")
for j in range(0, 2*i+1):
if(j % 2 == 0):
print('*', end="")
else:
print(' ', end="")
print()
n = int(input())
print_pattern(n)
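# Sample run for n = 3: each row i gets n-1-i leading spaces, then stars and
# spaces alternate across 2*i+1 columns:
#   *
#  * *
# * * *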
|
[
"amitkumar.developer1@gmail.com"
] |
amitkumar.developer1@gmail.com
|
17405ef5dcd7aa74bda2c6241320bb3a24a57662
|
ac66d2f483bbbe405408278f6b39d9f02e617dee
|
/Privet.py
|
7e5e77e3bb021ae4054701ab243ba49b51481721
|
[] |
no_license
|
MFTI-winter-20-21/Aleshev
|
eb4b0385fa02f628bb7053d48dac74fb14f0d86b
|
8e01477abf804c7b721be1767d400854be24993d
|
refs/heads/main
| 2023-02-11T02:05:11.411508
| 2021-01-04T14:16:43
| 2021-01-04T14:16:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 43
|
py
|
print("Hello")
print("Hello")
print("poka")
|
[
"76618163+aleshtimur@users.noreply.github.com"
] |
76618163+aleshtimur@users.noreply.github.com
|
0f56e600ded29f3610342db3c85f204330773588
|
aa0b8a9598da587ce9a31b13041c26f5e4a133b1
|
/easy/mersenne prime/mersenne_prime.py
|
428292a31cec1b5f859fe435e33d51b1d272d292
|
[] |
no_license
|
vivekpabani/CodeEval
|
8b20b4b63c6a1368c0b5fb2c35a4766b0180799c
|
16afd2c30b54bb9a1b50924a695ac84e9a2b2d41
|
refs/heads/master
| 2021-01-10T01:29:50.772812
| 2016-04-09T05:49:06
| 2016-04-09T05:49:06
| 53,512,064
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 984
|
py
|
#!/usr/bin/env python
"""
Problem Definition :
Print the Mersenne numbers 2**p - 1 with a prime exponent p that are smaller
than each input value, comma-separated.
"""
__author__ = 'vivek'
import sys
import math
def is_prime(number):
    if number == 2 or number == 3:
        return 1
    elif number % 2 == 0 or number % 3 == 0 or number == 1:
        return 0
    else:
        start = 5
        while start <= int(math.sqrt(number)):
            if number % start == 0:
                return 0
            if number % (start + 2) == 0:
                return 0
            start += 6
        return 1
def mersenne_prime(num):
    # Generate 2**p - 1 for prime exponents p, matching the original hard-coded
    # list [3, 7, 31, 127, 2047] but without its arbitrary upper cutoff.
    prime_list = list()
    p = 2
    while 2 ** p - 1 < num:
        if is_prime(p):
            prime_list.append(2 ** p - 1)
        p += 1
    return prime_list
def main():
    test_cases = open(sys.argv[1], 'r')
    for test in test_cases:
        number = int(test.strip())
        print ', '.join(str(i) for i in mersenne_prime(number))
if __name__ == '__main__':
main()
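# Sample run: an input line of 3000 prints "3, 7, 31, 127, 2047".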
|
[
"viky.pabani@gmail.com"
] |
viky.pabani@gmail.com
|